cmd/tailscaled: unify the two Windows paths + separate IPN server path

tailscaled on Windows had two entirely separate start-up paths for running
as a service vs in the foreground, and that split has been causing problems
for ages. This unifies the two paths, making them the same as the path used
on every other platform.

Also, it uses the new async LocalBackend support in ipnserver.Server
so the Server can start serving HTTP immediately, even if the tun device
takes a while to come up.
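
In essence, the new startIPNServer (condensed from the diff below; signal
handling and timing logs elided):

    srv := ipnserver.New(logf, logid)
    go func() {
        lb, err := getLocalBackend(ctx, logf, logid) // may block on tun/wintun
        if err != nil {
            lbErr.Store(err)
            cancel() // unblocks srv.Run below
            return
        }
        srv.SetLocalBackend(lb) // wakes HTTP handlers parked in awaitBackend
    }()
    err = srv.Run(ctx, ln) // serves the LocalAPI socket immediately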

Updates #6535

Change-Id: Icc8c4f96d4887b54a024d7ac15ad11096b5a58cf
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>

@@ -48,6 +48,7 @@ import (
"tailscale.com/paths"
"tailscale.com/safesocket"
"tailscale.com/smallzstd"
"tailscale.com/syncs"
"tailscale.com/tsweb"
"tailscale.com/types/flagtype"
"tailscale.com/types/logger"
@@ -110,6 +111,9 @@ func defaultPort() uint16 {
return uint16(p)
}
}
if envknob.GOOS() == "windows" {
return 41641
}
return 0
}
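// The fixed 41641 above preserves what the old Windows service path
// hardcoded as ListenPort in its wgengine.Config (see the removed
// startIPNServer in tailscaled_windows.go further down); all other
// platforms keep returning 0, i.e. "pick a random port at bind time".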
@@ -320,11 +324,13 @@ func ipnServerOpts() (o serverOptions) {
return o
}
-func run() error {
-var err error
+var logPol *logpolicy.Policy
+var debugMux *http.ServeMux
+
+func run() error {
pol := logpolicy.New(logtail.CollectionNode)
pol.SetVerbosityLevel(args.verbose)
+logPol = pol
defer func() {
// Finish uploading logs after closing everything else.
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
@@ -368,23 +374,87 @@ func run() error {
log.Printf("error in synology migration: %v", err)
}
-var debugMux *http.ServeMux
if args.debug != "" {
debugMux = newDebugMux()
}
logid := pol.PublicID.String()
return startIPNServer(context.Background(), logf, logid)
}
func startIPNServer(ctx context.Context, logf logger.Logf, logid string) error {
ln, _, err := safesocket.Listen(args.socketpath, safesocket.WindowsLocalPort)
if err != nil {
return fmt.Errorf("safesocket.Listen: %v", err)
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Exit gracefully by cancelling the ipnserver context in most common cases:
// interrupted from the TTY or killed by a service manager.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
// SIGPIPE sometimes gets generated when CLIs disconnect from
// tailscaled. The default action is to terminate the process, but we
// want to keep running.
signal.Ignore(syscall.SIGPIPE)
go func() {
select {
case s := <-interrupt:
logf("tailscaled got signal %v; shutting down", s)
cancel()
case <-ctx.Done():
// continue
}
}()
srv := ipnserver.New(logf, logid)
if debugMux != nil {
debugMux.HandleFunc("/debug/ipn", srv.ServeHTMLStatus)
}
var lbErr syncs.AtomicValue[error]
go func() {
t0 := time.Now()
lb, err := getLocalBackend(ctx, logf, logid)
if err == nil {
logf("got LocalBackend in %v", time.Since(t0).Round(time.Millisecond))
srv.SetLocalBackend(lb)
return
}
lbErr.Store(err) // before the following cancel
cancel() // make srv.Run below complete
}()
err = srv.Run(ctx, ln)
if err != nil && lbErr.Load() != nil {
return fmt.Errorf("getLocalBackend error: %v", lbErr.Load())
}
// Cancelation is not an error: it is the only way to stop ipnserver.
if err != nil && !errors.Is(err, context.Canceled) {
return fmt.Errorf("ipnserver.Run: %w", err)
}
return nil
}
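// Note the ordering above: lbErr.Store(err) runs before cancel(), and
// cancel() is what makes srv.Run return, so the lbErr.Load() check after
// srv.Run is guaranteed to observe the stored error. A minimal sketch of
// the same handoff (doSlowSetup is a hypothetical placeholder, not part
// of this change):
//
//	var result syncs.AtomicValue[error]
//	ctx, cancel := context.WithCancel(context.Background())
//	go func() {
//		result.Store(doSlowSetup()) // publish the result first...
//		cancel()                    // ...then wake the waiter
//	}()
//	<-ctx.Done()         // woken by cancel (or a real cancellation)
//	err := result.Load() // sees the Store: it happens before the close of Done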
func getLocalBackend(ctx context.Context, logf logger.Logf, logid string) (_ *ipnlocal.LocalBackend, retErr error) {
linkMon, err := monitor.New(logf)
if err != nil {
return fmt.Errorf("monitor.New: %w", err)
return nil, fmt.Errorf("monitor.New: %w", err)
}
-pol.Logtail.SetLinkMonitor(linkMon)
+if logPol != nil {
+logPol.Logtail.SetLinkMonitor(linkMon)
+}
socksListener, httpProxyListener := mustStartProxyListeners(args.socksAddr, args.httpProxyAddr)
dialer := &tsdial.Dialer{Logf: logf} // mutated below (before used)
e, useNetstack, err := createEngine(logf, linkMon, dialer)
if err != nil {
return fmt.Errorf("createEngine: %w", err)
return nil, fmt.Errorf("createEngine: %w", err)
}
if _, ok := e.(wgengine.ResolvingEngine).GetResolver(); !ok {
panic("internal error: exit node resolver not wired up")
@@ -400,7 +470,7 @@ func run() error {
ns, err := newNetstack(logf, dialer, e)
if err != nil {
return fmt.Errorf("newNetstack: %w", err)
return nil, fmt.Errorf("newNetstack: %w", err)
}
ns.ProcessLocalIPs = useNetstack
ns.ProcessSubnets = useNetstack || shouldWrapNetstack()
@@ -434,36 +504,16 @@ func run() error {
e = wgengine.NewWatchdog(e)
-ctx, cancel := context.WithCancel(context.Background())
-// Exit gracefully by cancelling the ipnserver context in most common cases:
-// interrupted from the TTY or killed by a service manager.
-interrupt := make(chan os.Signal, 1)
-signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
-// SIGPIPE sometimes gets generated when CLIs disconnect from
-// tailscaled. The default action is to terminate the process, we
-// want to keep running.
-signal.Ignore(syscall.SIGPIPE)
-go func() {
-select {
-case s := <-interrupt:
-logf("tailscaled got signal %v; shutting down", s)
-cancel()
-case <-ctx.Done():
-// continue
-}
-}()
opts := ipnServerOpts()
store, err := store.New(logf, statePathOrDefault())
if err != nil {
return fmt.Errorf("store.New: %w", err)
return nil, fmt.Errorf("store.New: %w", err)
}
-logid := pol.PublicID.String()
lb, err := ipnlocal.NewLocalBackend(logf, logid, store, "", dialer, e, opts.LoginFlags)
if err != nil {
return fmt.Errorf("ipnlocal.NewLocalBackend: %w", err)
return nil, fmt.Errorf("ipnlocal.NewLocalBackend: %w", err)
}
lb.SetVarRoot(opts.VarRoot)
if root := lb.TailscaleVarRoot(); root != "" {
@@ -473,31 +523,11 @@ func run() error {
return smallzstd.NewDecoder(nil)
})
configureTaildrop(logf, lb)
-srv := ipnserver.New(logf, logid)
-srv.SetLocalBackend(lb)
ns.SetLocalBackend(lb)
if err := ns.Start(); err != nil {
log.Fatalf("failed to start netstack: %v", err)
}
-if debugMux != nil {
-debugMux.HandleFunc("/debug/ipn", srv.ServeHTMLStatus)
-}
-ln, _, err := safesocket.Listen(args.socketpath, safesocket.WindowsLocalPort)
-if err != nil {
-return fmt.Errorf("safesocket.Listen: %v", err)
-}
-defer dialer.Close()
-err = srv.Run(ctx, ln)
-// Cancelation is not an error: it is the only way to stop ipnserver.
-if err != nil && err != context.Canceled {
-return fmt.Errorf("ipnserver.Run: %w", err)
-}
-return nil
+return lb, nil
}
func createEngine(logf logger.Logf, linkMon *monitor.Mon, dialer *tsdial.Dialer) (e wgengine.Engine, useNetstack bool, err error) {
@@ -533,6 +563,8 @@ func shouldWrapNetstack() bool {
return false
}
+var tstunNew = tstun.New
func tryEngine(logf logger.Logf, linkMon *monitor.Mon, dialer *tsdial.Dialer, name string) (e wgengine.Engine, useNetstack bool, err error) {
conf := wgengine.Config{
ListenPort: args.port,
@@ -564,7 +596,7 @@ func tryEngine(logf logger.Logf, linkMon *monitor.Mon, dialer *tsdial.Dialer, na
}
}
} else {
-dev, devName, err := tstun.New(logf, name)
+dev, devName, err := tstunNew(logf, name)
if err != nil {
tstun.Diagnose(logf, name, err)
return nil, false, fmt.Errorf("tstun.New(%q): %w", name, err)
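// The tstunNew indirection above exists so a platform-specific file can
// swap in its own constructor: the init() in tailscaled_windows.go below
// sets tstunNew = tstunNewWithWindowsRetries, while every other platform
// keeps plain tstun.New. A generic sketch of the hook pattern
// (illustrative names only):
//
//	// device.go
//	var newDevice = defaultNewDevice
//
//	// device_windows.go
//	func init() { newDevice = newDeviceWithRetries }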

@@ -26,12 +26,10 @@ import (
"errors"
"fmt"
"log"
"net"
"net/netip"
"os"
"os/exec"
"os/signal"
"path/filepath"
"sync"
"syscall"
"time"
@@ -40,29 +38,17 @@ import (
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc"
"golang.org/x/sys/windows/svc/eventlog"
"golang.zx2c4.com/wireguard/tun"
"golang.zx2c4.com/wireguard/windows/tunnel/winipcfg"
"tailscale.com/control/controlclient"
"tailscale.com/envknob"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnlocal"
"tailscale.com/ipn/ipnserver"
"tailscale.com/ipn/store"
"tailscale.com/logpolicy"
"tailscale.com/logtail/backoff"
"tailscale.com/net/dns"
"tailscale.com/net/dnsfallback"
"tailscale.com/net/tsdial"
"tailscale.com/net/tstun"
"tailscale.com/safesocket"
"tailscale.com/smallzstd"
"tailscale.com/types/logger"
"tailscale.com/util/winutil"
"tailscale.com/version"
"tailscale.com/wf"
"tailscale.com/wgengine"
"tailscale.com/wgengine/monitor"
"tailscale.com/wgengine/netstack"
"tailscale.com/wgengine/router"
)
func init() {
@@ -78,6 +64,37 @@ func init() {
const serviceName = "Tailscale"
func init() {
tstunNew = tstunNewWithWindowsRetries
}
// tstunNewWithWindowsRetries is a wrapper around tstun.New that retries on Windows for certain
// errors.
//
// TODO(bradfitz): move this into tstun and/or just fix the problems so it doesn't
// require a few tries to work.
func tstunNewWithWindowsRetries(logf logger.Logf, tunName string) (_ tun.Device, devName string, _ error) {
bo := backoff.NewBackoff("tstunNew", logf, 10*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
for {
dev, devName, err := tstun.New(logf, tunName)
if err == nil {
return dev, devName, err
}
if errors.Is(err, windows.ERROR_DEVICE_NOT_AVAILABLE) || windowsUptime() < 10*time.Minute {
// Wintun is not installing correctly. Dump the state of NetSetupSvc
// (which is a user-mode service that must be active for network devices
// to install) and its dependencies to the log.
winutil.LogSvcState(logf, "NetSetupSvc")
}
bo.BackOff(ctx, err)
if ctx.Err() != nil {
return nil, "", ctx.Err()
}
}
}
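// backoff.Backoff (tailscale.com/logtail/backoff) sleeps for jittered,
// exponentially growing delays capped at the max given to NewBackoff, and
// BackOff returns early if ctx is done; a nil error resets the delay.
// Roughly how the loop above reduces to a generic retry helper (doWork is
// an illustrative placeholder):
//
//	bo := backoff.NewBackoff("retry", logf, 10*time.Second)
//	for ctx.Err() == nil {
//		if err := doWork(); err == nil {
//			break
//		} else {
//			bo.BackOff(ctx, err) // logs, sleeps, returns promptly on cancel
//		}
//	}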
func isWindowsService() bool {
v, err := svc.IsWindowsService()
if err != nil {
@@ -131,6 +148,7 @@ func (service *ipnService) Execute(args []string, r <-chan svc.ChangeRequest, ch
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
@@ -236,7 +254,7 @@ func beWindowsSubprocess() bool {
}
}()
-err := startIPNServer(context.Background(), logid)
+err := startIPNServer(context.Background(), log.Printf, logid)
if err != nil {
log.Fatalf("ipnserver: %v", err)
}
@@ -283,140 +301,6 @@ func beFirewallKillswitch() bool {
}
}
func startIPNServer(ctx context.Context, logid string) error {
var logf logger.Logf = log.Printf
linkMon, err := monitor.New(logf)
if err != nil {
return fmt.Errorf("monitor: %w", err)
}
dialer := &tsdial.Dialer{Logf: logf}
getEngineRaw := func() (wgengine.Engine, *netstack.Impl, error) {
dev, devName, err := tstun.New(logf, "Tailscale")
if err != nil {
if errors.Is(err, windows.ERROR_DEVICE_NOT_AVAILABLE) {
// Wintun is not installing correctly. Dump the state of NetSetupSvc
// (which is a user-mode service that must be active for network devices
// to install) and its dependencies to the log.
winutil.LogSvcState(logf, "NetSetupSvc")
}
return nil, nil, fmt.Errorf("TUN: %w", err)
}
r, err := router.New(logf, dev, nil)
if err != nil {
dev.Close()
return nil, nil, fmt.Errorf("router: %w", err)
}
if shouldWrapNetstack() {
r = netstack.NewSubnetRouterWrapper(r)
}
d, err := dns.NewOSConfigurator(logf, devName)
if err != nil {
r.Close()
dev.Close()
return nil, nil, fmt.Errorf("DNS: %w", err)
}
eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{
Tun: dev,
Router: r,
DNS: d,
ListenPort: 41641,
LinkMonitor: linkMon,
Dialer: dialer,
})
if err != nil {
r.Close()
dev.Close()
return nil, nil, fmt.Errorf("engine: %w", err)
}
ns, err := newNetstack(logf, dialer, eng)
if err != nil {
return nil, nil, fmt.Errorf("newNetstack: %w", err)
}
ns.ProcessLocalIPs = false
ns.ProcessSubnets = shouldWrapNetstack()
if err := ns.Start(); err != nil {
return nil, nil, fmt.Errorf("failed to start netstack: %w", err)
}
return wgengine.NewWatchdog(eng), ns, nil
}
type engineOrError struct {
Engine wgengine.Engine
Netstack *netstack.Impl
Err error
}
engErrc := make(chan engineOrError)
t0 := time.Now()
go func() {
const ms = time.Millisecond
for try := 1; ; try++ {
logf("tailscaled: getting engine... (try %v)", try)
t1 := time.Now()
eng, ns, err := getEngineRaw()
d, dt := time.Since(t1).Round(ms), time.Since(t0).Round(ms)
if err != nil {
logf("tailscaled: engine fetch error (try %v) in %v (total %v, sysUptime %v): %v",
try, d, dt, windowsUptime().Round(time.Second), err)
} else {
if try > 1 {
logf("tailscaled: got engine on try %v in %v (total %v)", try, d, dt)
} else {
logf("tailscaled: got engine in %v", d)
}
}
timer := time.NewTimer(5 * time.Second)
engErrc <- engineOrError{eng, ns, err}
if err == nil {
timer.Stop()
return
}
<-timer.C
}
}()
// getEngine is called by ipnserver to get the engine. It's
// not called concurrently and is not called again once it
// successfully returns an engine.
getEngine := func() (wgengine.Engine, *netstack.Impl, error) {
if msg := envknob.String("TS_DEBUG_WIN_FAIL"); msg != "" {
return nil, nil, fmt.Errorf("pretending to be a service failure: %v", msg)
}
for {
res := <-engErrc
if res.Engine != nil {
return res.Engine, res.Netstack, nil
}
if time.Since(t0) < time.Minute || windowsUptime() < 10*time.Minute {
// Ignore errors during early boot. Windows 10 auto logs in the GUI
// way sooner than the networking stack components start up.
// So the network will fail for a bit (and require a few tries) while
// the GUI is still fine.
continue
}
// Return nicer errors to users, annotated with logids, which helps
// when they file bugs.
return nil, nil, fmt.Errorf("%w\n\nlogid: %v", res.Err, logid)
}
}
store, err := store.New(logf, statePathOrDefault())
if err != nil {
return fmt.Errorf("store: %w", err)
}
ln, _, err := safesocket.Listen(args.socketpath, safesocket.WindowsLocalPort)
if err != nil {
return fmt.Errorf("safesocket.Listen: %v", err)
}
err = ipnServerRunWithRetries(ctx, logf, ln, store, linkMon, dialer, logid, getEngine, ipnServerOpts())
if err != nil {
logf("ipnserver.Run: %v", err)
}
return err
}
func handleSessionChange(chgRequest svc.ChangeRequest) {
if chgRequest.Cmd != svc.SessionChange || chgRequest.EventType != windows.WTS_SESSION_UNLOCK {
return
@@ -563,127 +447,3 @@ func babysitProc(ctx context.Context, args []string, logf logger.Logf) {
}
}
}
// getEngineUntilItWorksWrapper returns a getEngine wrapper that does
// not call getEngine concurrently and stops calling getEngine once
// it's returned a working engine.
func getEngineUntilItWorksWrapper(getEngine func() (wgengine.Engine, *netstack.Impl, error)) func() (wgengine.Engine, *netstack.Impl, error) {
var mu sync.Mutex
var engGood wgengine.Engine
var nsGood *netstack.Impl
return func() (wgengine.Engine, *netstack.Impl, error) {
mu.Lock()
defer mu.Unlock()
if engGood != nil {
return engGood, nsGood, nil
}
e, ns, err := getEngine()
if err != nil {
return nil, nil, err
}
engGood = e
nsGood = ns
return e, ns, nil
}
}
// listenerWithReadyConn is a net.Listener wrapper that has
// one net.Conn ready to be accepted already.
type listenerWithReadyConn struct {
net.Listener
mu sync.Mutex
c net.Conn // if non-nil, ready to be Accepted
}
func (ln *listenerWithReadyConn) Accept() (net.Conn, error) {
ln.mu.Lock()
c := ln.c
ln.c = nil
ln.mu.Unlock()
if c != nil {
return c, nil
}
return ln.Listener.Accept()
}
// ipnServerRunWithRetries runs a Tailscale backend service.
//
// The getEngine func is called repeatedly, once per connection, until it
// returns an engine successfully.
//
// This works around issues on Windows with the wgengine.Engine/wintun creation
// failing or hanging. See https://github.com/tailscale/tailscale/issues/6522.
func ipnServerRunWithRetries(ctx context.Context, logf logger.Logf, ln net.Listener, store ipn.StateStore, linkMon *monitor.Mon, dialer *tsdial.Dialer, logid string, getEngine func() (wgengine.Engine, *netstack.Impl, error), opts serverOptions) error {
getEngine = getEngineUntilItWorksWrapper(getEngine)
runDone := make(chan struct{})
defer close(runDone)
// When the context is closed or when we return, whichever is first, close our listener
// and all open connections.
go func() {
select {
case <-ctx.Done():
case <-runDone:
}
ln.Close()
}()
logf("Listening on %v", ln.Addr())
bo := backoff.NewBackoff("ipnserver", logf, 30*time.Second)
var unservedConn net.Conn // if non-nil, accepted, but hasn't served yet
eng, ns, err := getEngine()
if err != nil {
logf("ipnserver: initial getEngine call: %v", err)
for i := 1; ctx.Err() == nil; i++ {
c, err := ln.Accept()
if err != nil {
logf("%d: Accept: %v", i, err)
bo.BackOff(ctx, err)
continue
}
logf("ipnserver: try%d: trying getEngine again...", i)
eng, ns, err = getEngine()
if err == nil {
logf("%d: GetEngine worked; exiting failure loop", i)
unservedConn = c
break
}
logf("ipnserver%d: getEngine failed again: %v", i, err)
// TODO(bradfitz): queue this error up for the next IPN bus watcher call
// to get for the Windows GUI? We used to send it over the pre-HTTP
// protocol to the Windows GUI. Just close it.
c.Close()
}
if err := ctx.Err(); err != nil {
return err
}
}
if unservedConn != nil {
ln = &listenerWithReadyConn{
Listener: ln,
c: unservedConn,
}
}
server := ipnserver.New(logf, logid)
lb, err := ipnlocal.NewLocalBackend(logf, logid, store, "", dialer, eng, opts.LoginFlags)
if err != nil {
return fmt.Errorf("ipnlocal.NewLocalBackend: %w", err)
}
lb.SetVarRoot(opts.VarRoot)
if root := lb.TailscaleVarRoot(); root != "" {
dnsfallback.SetCachePath(filepath.Join(root, "derpmap.cached.json"))
}
lb.SetDecompressor(func() (controlclient.Decompressor, error) {
return smallzstd.NewDecoder(nil)
})
server.SetLocalBackend(lb)
if ns != nil {
ns.SetLocalBackend(lb)
}
return server.Run(ctx, ln)
}
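// The retry machinery removed above (per-connection getEngine calls,
// getEngineUntilItWorksWrapper, listenerWithReadyConn) is superseded by
// two pieces added in this commit: tstunNewWithWindowsRetries retries the
// device creation itself, and ipnserver.Server's awaitBackend lets HTTP
// requests wait for the LocalBackend instead of re-dialing.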

@@ -6,12 +6,14 @@ package ipnserver
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
"os/user"
"strconv"
"strings"
"sync"
"sync/atomic"
@@ -25,6 +27,7 @@ import (
"tailscale.com/ipn/localapi"
"tailscale.com/types/logger"
"tailscale.com/util/mak"
"tailscale.com/util/set"
"tailscale.com/util/systemd"
)
@@ -46,9 +49,10 @@ type Server struct {
// mu guards the fields that follow.
// lock order: mu, then LocalBackend.mu
-mu sync.Mutex
-lastUserID ipn.WindowsUserID // tracks last userid; on change, Reset state for paranoia
-activeReqs map[*http.Request]*ipnauth.ConnIdentity
+mu sync.Mutex
+lastUserID ipn.WindowsUserID // tracks last userid; on change, Reset state for paranoia
+activeReqs map[*http.Request]*ipnauth.ConnIdentity
+backendWaiter set.HandleSet[context.CancelFunc] // values are wake-up funcs of lb waiters
}
func (s *Server) mustBackend() *ipnlocal.LocalBackend {
@@ -59,7 +63,64 @@ func (s *Server) mustBackend() *ipnlocal.LocalBackend {
return lb
}
func (s *Server) awaitBackend(ctx context.Context) (_ *ipnlocal.LocalBackend, ok bool) {
lb := s.lb.Load()
if lb != nil {
return lb, true
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
s.mu.Lock()
h := s.backendWaiter.Add(cancel)
s.mu.Unlock()
defer func() {
s.mu.Lock()
delete(s.backendWaiter, h)
s.mu.Unlock()
}()
// Try again, now that we've registered, in case there was a
// race.
lb = s.lb.Load()
if lb != nil {
return lb, true
}
<-ctx.Done()
lb = s.lb.Load()
return lb, lb != nil
}
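// The waiter registry is a set.HandleSet[context.CancelFunc]: a map keyed
// by opaque handles, so each waiter can remove exactly its own entry.
// SetLocalBackend invokes every stored CancelFunc, which unblocks the
// <-ctx.Done() above even though neither the request context nor the
// server was actually canceled. The second s.lb.Load() after Add closes
// the race where SetLocalBackend fires between the first Load and the
// registration. Minimal HandleSet usage (sketch):
//
//	var waiters set.HandleSet[context.CancelFunc]
//	h := waiters.Add(cancel) // returns a unique handle
//	delete(waiters, h)       // HandleSet is a map; delete by handle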
// serveServerStatus serves the /server-status endpoint which reports whether
// the LocalBackend is up yet.
// This is primarily for the Windows GUI, because wintun can take a while to
// come up. See https://github.com/tailscale/tailscale/issues/6522.
func (s *Server) serveServerStatus(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
w.Header().Set("Content-Type", "application/json")
var res struct {
Error string `json:"error,omitempty"`
}
lb := s.lb.Load()
if lb == nil {
w.WriteHeader(http.StatusServiceUnavailable)
if wait, _ := strconv.ParseBool(r.FormValue("wait")); wait {
w.(http.Flusher).Flush()
lb, _ = s.awaitBackend(ctx)
}
}
if lb == nil {
res.Error = "backend not ready"
}
json.NewEncoder(w).Encode(res)
}
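// How a client (e.g. the Windows GUI) might use this endpoint to block
// until the backend is ready (sketch; the HTTP transport over the local
// socket is elided and the URL host is illustrative). Because the 503
// status is written before waiting, a waiting client distinguishes the
// outcomes by the JSON body, not the status code:
//
//	resp, err := client.Get("http://local-tailscaled.sock/server-status?wait=true")
//	// 200, body {}                            => backend was already up
//	// 503, body {}                            => backend came up while waiting
//	// 503, body {"error":"backend not ready"} => gave up (request canceled)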
func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
if r.Method == "CONNECT" {
if envknob.GOOS() == "windows" {
// For the GUI client when using an exit node. See docs on handleProxyConnectConn.
@@ -70,11 +131,17 @@ func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) {
return
}
-// TODO(bradfitz): add a status HTTP handler that returns whether there's a
-// LocalBackend yet, optionally blocking until there is one. See
-// https://github.com/tailscale/tailscale/issues/6522
-lb := s.lb.Load()
-if lb == nil {
+// Check for this method before the awaitBackend call, as it reports whether
+// the backend is available.
+if r.Method == "GET" && r.URL.Path == "/server-status" {
+s.serveServerStatus(w, r)
+return
+}
+lb, ok := s.awaitBackend(ctx)
+if !ok {
+// Almost certainly because the context was canceled so the response
+// here doesn't really matter. The client is gone.
http.Error(w, "no backend", http.StatusServiceUnavailable)
return
}
@@ -304,6 +371,13 @@ func (s *Server) SetLocalBackend(lb *ipnlocal.LocalBackend) {
panic("already set")
}
s.startBackendIfNeeded()
s.mu.Lock()
for _, wake := range s.backendWaiter {
wake() // they'll remove themselves when woken
}
s.mu.Unlock()
// TODO(bradfitz): send status update to GUI long poller waiter. See
// https://github.com/tailscale/tailscale/issues/6522
}

@@ -36,6 +36,7 @@ import (
_ "tailscale.com/safesocket"
_ "tailscale.com/smallzstd"
_ "tailscale.com/ssh/tailssh"
_ "tailscale.com/syncs"
_ "tailscale.com/tailcfg"
_ "tailscale.com/tsweb"
_ "tailscale.com/types/flagtype"

@@ -36,6 +36,7 @@ import (
_ "tailscale.com/safesocket"
_ "tailscale.com/smallzstd"
_ "tailscale.com/ssh/tailssh"
_ "tailscale.com/syncs"
_ "tailscale.com/tailcfg"
_ "tailscale.com/tsweb"
_ "tailscale.com/types/flagtype"

@@ -36,6 +36,7 @@ import (
_ "tailscale.com/safesocket"
_ "tailscale.com/smallzstd"
_ "tailscale.com/ssh/tailssh"
_ "tailscale.com/syncs"
_ "tailscale.com/tailcfg"
_ "tailscale.com/tsweb"
_ "tailscale.com/types/flagtype"

@@ -35,6 +35,7 @@ import (
_ "tailscale.com/paths"
_ "tailscale.com/safesocket"
_ "tailscale.com/smallzstd"
_ "tailscale.com/syncs"
_ "tailscale.com/tailcfg"
_ "tailscale.com/tsweb"
_ "tailscale.com/types/flagtype"

@@ -16,6 +16,7 @@ import (
_ "golang.org/x/sys/windows/svc"
_ "golang.org/x/sys/windows/svc/eventlog"
_ "golang.org/x/sys/windows/svc/mgr"
_ "golang.zx2c4.com/wireguard/tun"
_ "golang.zx2c4.com/wireguard/windows/tunnel/winipcfg"
_ "tailscale.com/cmd/tailscaled/childproc"
_ "tailscale.com/control/controlclient"
@@ -41,6 +42,7 @@ import (
_ "tailscale.com/paths"
_ "tailscale.com/safesocket"
_ "tailscale.com/smallzstd"
_ "tailscale.com/syncs"
_ "tailscale.com/tailcfg"
_ "tailscale.com/tsweb"
_ "tailscale.com/types/flagtype"
