syncs, all: move to using Go's new atomic types instead of ours

Fixes #5185
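
For context, a minimal hypothetical sketch of the rename applied throughout
this commit (illustrative names only, not code from this repo):
syncs.AtomicBool/AtomicUint32 with Get/Set become the standard library's
atomic.Bool/atomic.Uint32 (Go 1.19+) with Load/Store.

    package main

    import (
        "fmt"
        "sync/atomic" // Go 1.19+: atomic.Bool, atomic.Uint32, atomic.Pointer[T]
    )

    func main() {
        var ready atomic.Bool  // was: var ready syncs.AtomicBool
        var port atomic.Uint32 // was: var port syncs.AtomicUint32

        ready.Store(true) // was: ready.Set(true)
        port.Store(41641) // was: port.Set(41641)

        fmt.Println(ready.Load(), port.Load()) // was: ready.Get(), port.Get()
    }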

Change-Id: I850dd532559af78c3895e2924f8237ccc328449d
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
Branch: pull/5056/head
Author: Brad Fitzpatrick (committed by Brad Fitzpatrick), 2 years ago
Parent: 9bb5a038e5
Commit: 4950fe60bd

@ -29,7 +29,6 @@ import (
"tailscale.com/ipn"
"tailscale.com/paths"
"tailscale.com/safesocket"
"tailscale.com/syncs"
"tailscale.com/version/distro"
)
@ -230,8 +229,6 @@ var rootArgs struct {
socket string
}
var gotSignal syncs.AtomicBool
func connect(ctx context.Context) (net.Conn, *ipn.BackendClient, context.Context, context.CancelFunc) {
s := safesocket.DefaultConnectionStrategy(rootArgs.socket)
c, err := safesocket.Connect(s)
@ -257,7 +254,6 @@ func connect(ctx context.Context) (net.Conn, *ipn.BackendClient, context.Context
signal.Reset(syscall.SIGINT, syscall.SIGTERM)
return
}
gotSignal.Set(true)
c.Close()
cancel()
}()

@ -71,7 +71,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
💣 tailscale.com/net/tshttpproxy from tailscale.com/derp/derphttp+
tailscale.com/paths from tailscale.com/cmd/tailscale/cli+
tailscale.com/safesocket from tailscale.com/cmd/tailscale/cli+
tailscale.com/syncs from tailscale.com/net/interfaces+
tailscale.com/syncs from tailscale.com/net/netcheck
tailscale.com/tailcfg from tailscale.com/cmd/tailscale/cli+
tailscale.com/tka from tailscale.com/types/key
W tailscale.com/tsconst from tailscale.com/net/interfaces

@ -241,7 +241,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/safesocket from tailscale.com/client/tailscale+
tailscale.com/smallzstd from tailscale.com/ipn/ipnserver+
LD 💣 tailscale.com/ssh/tailssh from tailscale.com/cmd/tailscaled
tailscale.com/syncs from tailscale.com/control/controlknobs+
tailscale.com/syncs from tailscale.com/net/netcheck+
tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+
LD tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh
tailscale.com/tka from tailscale.com/types/key+

@ -7,12 +7,13 @@
package controlknobs
import (
"sync/atomic"
"tailscale.com/envknob"
"tailscale.com/syncs"
)
// disableUPnP indicates whether to attempt UPnP mapping.
var disableUPnP syncs.AtomicBool
var disableUPnP atomic.Bool
func init() {
SetDisableUPnP(envknob.Bool("TS_DISABLE_UPNP"))
@ -21,11 +22,11 @@ func init() {
// DisableUPnP reports the last reported value from control
// whether UPnP portmapping should be disabled.
func DisableUPnP() bool {
return disableUPnP.Get()
return disableUPnP.Load()
}
// SetDisableUPnP sets whether control says that UPnP should be
// disabled.
func SetDisableUPnP(v bool) {
disableUPnP.Set(v)
disableUPnP.Store(v)
}

@ -41,7 +41,6 @@ import (
"tailscale.com/disco"
"tailscale.com/envknob"
"tailscale.com/metrics"
"tailscale.com/syncs"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/pad32"
@ -232,7 +231,7 @@ type dupClientSet struct {
}
func (s *dupClientSet) ActiveClient() *sclient {
if s.last != nil && !s.last.isDisabled.Get() {
if s.last != nil && !s.last.isDisabled.Load() {
return s.last
}
return nil
@ -499,8 +498,8 @@ func (s *Server) registerClient(c *sclient) {
s.dupClientConns.Add(2) // both old and new count
s.dupClientConnTotal.Add(1)
old := set.ActiveClient()
old.isDup.Set(true)
c.isDup.Set(true)
old.isDup.Store(true)
c.isDup.Store(true)
s.clients[c.key] = &dupClientSet{
last: c,
set: map[*sclient]bool{
@ -512,7 +511,7 @@ func (s *Server) registerClient(c *sclient) {
case *dupClientSet:
s.dupClientConns.Add(1) // the gauge
s.dupClientConnTotal.Add(1) // the counter
c.isDup.Set(true)
c.isDup.Store(true)
set.set[c] = true
set.last = c
set.sendHistory = append(set.sendHistory, c)
@ -571,8 +570,8 @@ func (s *Server) unregisterClient(c *sclient) {
if remain == nil {
panic("unexpected nil remain from single element dup set")
}
remain.isDisabled.Set(false)
remain.isDup.Set(false)
remain.isDisabled.Store(false)
remain.isDup.Store(false)
s.clients[c.key] = singleClient{remain}
}
}
@ -1073,11 +1072,11 @@ func (s *Server) sendServerKey(lw *lazyBufioWriter) error {
}
func (s *Server) noteClientActivity(c *sclient) {
if !c.isDup.Get() {
if !c.isDup.Load() {
// Fast path for clients that aren't in a dup set.
return
}
if c.isDisabled.Get() {
if c.isDisabled.Load() {
// If they're already disabled, no point checking more.
return
}
@ -1112,7 +1111,7 @@ func (s *Server) noteClientActivity(c *sclient) {
for _, prior := range ds.sendHistory {
if prior == c {
ds.ForeachClient(func(c *sclient) {
c.isDisabled.Set(true)
c.isDisabled.Store(true)
})
break
}
@ -1253,8 +1252,8 @@ type sclient struct {
peerGone chan key.NodePublic // write request that a previous sender has disconnected (not used by mesh peers)
meshUpdate chan struct{} // write request to write peerStateChange
canMesh bool // clientInfo had correct mesh token for inter-region routing
isDup syncs.AtomicBool // whether more than 1 sclient for key is connected
isDisabled syncs.AtomicBool // whether sends to this peer are disabled due to active/active dups
isDup atomic.Bool // whether more than 1 sclient for key is connected
isDisabled atomic.Bool // whether sends to this peer are disabled due to active/active dups
// replaceLimiter controls how quickly two connections with
// the same client key can kick each other off the server by

@ -958,10 +958,10 @@ func TestServerDupClients(t *testing.T) {
t.Error("wrong single client")
return
}
if want.isDup.Get() {
if want.isDup.Load() {
t.Errorf("unexpected isDup on singleClient")
}
if want.isDisabled.Get() {
if want.isDisabled.Load() {
t.Errorf("unexpected isDisabled on singleClient")
}
case nil:
@ -1004,13 +1004,13 @@ func TestServerDupClients(t *testing.T) {
}
checkDup := func(t *testing.T, c *sclient, want bool) {
t.Helper()
if got := c.isDup.Get(); got != want {
if got := c.isDup.Load(); got != want {
t.Errorf("client %q isDup = %v; want %v", clientName[c], got, want)
}
}
checkDisabled := func(t *testing.T, c *sclient, want bool) {
t.Helper()
if got := c.isDisabled.Get(); got != want {
if got := c.isDisabled.Load(); got != want {
t.Errorf("client %q isDisabled = %v; want %v", clientName[c], got, want)
}
}

@ -40,7 +40,6 @@ import (
"tailscale.com/net/tsdial"
"tailscale.com/paths"
"tailscale.com/portlist"
"tailscale.com/syncs"
"tailscale.com/tailcfg"
"tailscale.com/tka"
"tailscale.com/types/dnstype"
@ -127,7 +126,7 @@ type LocalBackend struct {
serverURL string // tailcontrol URL
newDecompressor func() (controlclient.Decompressor, error)
varRoot string // or empty if SetVarRoot never called
sshAtomicBool syncs.AtomicBool
sshAtomicBool atomic.Bool
shutdownCalled bool // if Shutdown has been called
filterAtomic atomic.Pointer[filter.Filter]
@ -1740,7 +1739,7 @@ func (b *LocalBackend) loadStateLocked(key ipn.StateKey, prefs *ipn.Prefs) (err
// setAtomicValuesFromPrefs populates sshAtomicBool and containsViaIPFuncAtomic
// from the prefs p, which may be nil.
func (b *LocalBackend) setAtomicValuesFromPrefs(p *ipn.Prefs) {
b.sshAtomicBool.Set(p != nil && p.RunSSH && canSSH)
b.sshAtomicBool.Store(p != nil && p.RunSSH && canSSH)
if p == nil {
b.containsViaIPFuncAtomic.Store(tsaddr.NewContainsIPFunc(nil))
@ -3053,7 +3052,7 @@ func (b *LocalBackend) ResetForClientDisconnect() {
b.setAtomicValuesFromPrefs(nil)
}
func (b *LocalBackend) ShouldRunSSH() bool { return b.sshAtomicBool.Get() && canSSH }
func (b *LocalBackend) ShouldRunSSH() bool { return b.sshAtomicBool.Load() && canSSH }
// ShouldHandleViaIP reports whether ip is an IPv6 address in the
// Tailscale ULA's v6 "via" range embedding an IPv4 address to be forwarded to

@ -26,6 +26,7 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"unicode"
"unicode/utf8"
@ -41,7 +42,6 @@ import (
"tailscale.com/net/interfaces"
"tailscale.com/net/netaddr"
"tailscale.com/net/netutil"
"tailscale.com/syncs"
"tailscale.com/tailcfg"
"tailscale.com/util/clientmetric"
"tailscale.com/wgengine"
@ -58,7 +58,7 @@ type peerAPIServer struct {
b *LocalBackend
rootDir string // empty means file receiving unavailable
selfNode *tailcfg.Node
knownEmpty syncs.AtomicBool
knownEmpty atomic.Bool
resolver *resolver.Resolver
// directFileMode is whether we're writing files directly to a
@ -144,7 +144,7 @@ func (s *peerAPIServer) hasFilesWaiting() bool {
if s == nil || s.rootDir == "" || s.directFileMode {
return false
}
if s.knownEmpty.Get() {
if s.knownEmpty.Load() {
// Optimization: this is usually empty, so avoid opening
// the directory and checking. We can't cache the actual
// has-files-or-not values as the macOS/iOS client might
@ -185,7 +185,7 @@ func (s *peerAPIServer) hasFilesWaiting() bool {
}
}
if err == io.EOF {
s.knownEmpty.Set(true)
s.knownEmpty.Store(true)
}
if err != nil {
break
@ -808,7 +808,7 @@ func (h *peerAPIHandler) handlePeerPut(w http.ResponseWriter, r *http.Request) {
// TODO: some real response
success = true
io.WriteString(w, "{}\n")
h.ps.knownEmpty.Set(false)
h.ps.knownEmpty.Store(false)
h.ps.b.sendFileNotify()
}

@ -504,7 +504,7 @@ func TestDeletedMarkers(t *testing.T) {
nothingWaiting := func() {
t.Helper()
ps.knownEmpty.Set(false)
ps.knownEmpty.Store(false)
if ps.hasFilesWaiting() {
t.Fatal("unexpected files waiting")
}

@ -7,6 +7,7 @@ package ipnlocal
import (
"context"
"sync"
"sync/atomic"
"testing"
"time"
@ -15,7 +16,6 @@ import (
"tailscale.com/control/controlclient"
"tailscale.com/ipn"
"tailscale.com/ipn/store/mem"
"tailscale.com/syncs"
"tailscale.com/tailcfg"
"tailscale.com/types/empty"
"tailscale.com/types/key"
@ -91,7 +91,7 @@ type mockControl struct {
opts controlclient.Options
logfActual logger.Logf
statusFunc func(controlclient.Status)
preventLog syncs.AtomicBool
preventLog atomic.Bool
mu sync.Mutex
calls []string
@ -108,7 +108,7 @@ func newMockControl(tb testing.TB) *mockControl {
}
func (cc *mockControl) logf(format string, args ...any) {
if cc.preventLog.Get() || cc.logfActual == nil {
if cc.preventLog.Load() || cc.logfActual == nil {
return
}
cc.logfActual(format, args...)
@ -292,7 +292,7 @@ func TestStateMachine(t *testing.T) {
cc := newMockControl(t)
cc.statusFunc = b.setClientStatus
t.Cleanup(func() { cc.preventLog.Set(true) }) // hacky way to pacify issue 3020
t.Cleanup(func() { cc.preventLog.Store(true) }) // hacky way to pacify issue 3020
b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) {
cc.mu.Lock()
@ -311,7 +311,7 @@ func TestStateMachine(t *testing.T) {
notifies.expect(0)
b.SetNotifyCallback(func(n ipn.Notify) {
if cc.preventLog.Get() {
if cc.preventLog.Load() {
return
}
if n.State != nil ||
@ -920,7 +920,7 @@ func TestStateMachine(t *testing.T) {
type testStateStorage struct {
mem mem.Store
written syncs.AtomicBool
written atomic.Bool
}
func (s *testStateStorage) ReadState(id ipn.StateKey) ([]byte, error) {
@ -928,18 +928,18 @@ func (s *testStateStorage) ReadState(id ipn.StateKey) ([]byte, error) {
}
func (s *testStateStorage) WriteState(id ipn.StateKey, bs []byte) error {
s.written.Set(true)
s.written.Store(true)
return s.mem.WriteState(id, bs)
}
// awaitWrite clears the "I've seen writes" bit, in prep for a future
// call to sawWrite to see if a write arrived.
func (s *testStateStorage) awaitWrite() { s.written.Set(false) }
func (s *testStateStorage) awaitWrite() { s.written.Store(false) }
// sawWrite reports whether there's been a WriteState call since the most
// recent awaitWrite call.
func (s *testStateStorage) sawWrite() bool {
v := s.written.Get()
v := s.written.Load()
s.awaitWrite()
return v
}

@ -23,7 +23,6 @@ import (
"tailscale.com/logtail/backoff"
"tailscale.com/net/interfaces"
"tailscale.com/syncs"
tslogger "tailscale.com/types/logger"
"tailscale.com/wgengine/monitor"
)
@ -448,15 +447,15 @@ func (l *Logger) Flush() error {
}
// logtailDisabled is whether logtail uploads to logcatcher are disabled.
var logtailDisabled syncs.AtomicBool
var logtailDisabled atomic.Bool
// Disable disables logtail uploads for the lifetime of the process.
func Disable() {
logtailDisabled.Set(true)
logtailDisabled.Store(true)
}
func (l *Logger) sendLocked(jsonBlob []byte) (int, error) {
if logtailDisabled.Get() {
if logtailDisabled.Load() {
return len(jsonBlob), nil
}
n, err := l.buffer.Write(jsonBlob)

@ -17,13 +17,13 @@ import (
"os/exec"
"runtime"
"strings"
"sync/atomic"
"github.com/jsimonetti/rtnetlink"
"github.com/mdlayher/netlink"
"go4.org/mem"
"golang.org/x/sys/unix"
"tailscale.com/net/netaddr"
"tailscale.com/syncs"
"tailscale.com/util/lineread"
)
@ -31,7 +31,7 @@ func init() {
likelyHomeRouterIP = likelyHomeRouterIPLinux
}
var procNetRouteErr syncs.AtomicBool
var procNetRouteErr atomic.Bool
// errStopReading is a sentinel error value used internally by
// lineread.File callers to stop reading. It doesn't escape to
@ -47,7 +47,7 @@ ens18 00000000 0100000A 0003 0 0 0 00000000
ens18 0000000A 00000000 0001 0 0 0 0000FFFF 0 0 0
*/
func likelyHomeRouterIPLinux() (ret netip.Addr, ok bool) {
if procNetRouteErr.Get() {
if procNetRouteErr.Load() {
// If we failed to read /proc/net/route previously, don't keep trying.
// But if we're on Android, go into the Android path.
if runtime.GOOS == "android" {
@ -93,7 +93,7 @@ func likelyHomeRouterIPLinux() (ret netip.Addr, ok bool) {
err = nil
}
if err != nil {
procNetRouteErr.Set(true)
procNetRouteErr.Store(true)
if runtime.GOOS == "android" {
return likelyHomeRouterIPAndroid()
}

@ -18,25 +18,25 @@ import (
"context"
"net"
"net/netip"
"sync/atomic"
"tailscale.com/net/netknob"
"tailscale.com/syncs"
"tailscale.com/types/logger"
)
var disabled syncs.AtomicBool
var disabled atomic.Bool
// SetEnabled enables or disables netns for the process.
// It defaults to being enabled.
func SetEnabled(on bool) {
disabled.Set(!on)
disabled.Store(!on)
}
// Listener returns a new net.Listener with its Control hook func
// initialized as necessary to run in logical network namespace that
// doesn't route back into Tailscale.
func Listener(logf logger.Logf) *net.ListenConfig {
if disabled.Get() {
if disabled.Load() {
return new(net.ListenConfig)
}
return &net.ListenConfig{Control: control(logf)}
@ -57,7 +57,7 @@ func NewDialer(logf logger.Logf) Dialer {
// handles using a SOCKS if configured in the environment with
// ALL_PROXY.
func FromDialer(logf logger.Logf, d *net.Dialer) Dialer {
if disabled.Get() {
if disabled.Load() {
return d
}
d.Control = control(logf)

@ -12,10 +12,10 @@ import (
"net/http/httptest"
"net/netip"
"sync"
"sync/atomic"
"testing"
"tailscale.com/net/netaddr"
"tailscale.com/syncs"
"tailscale.com/types/logger"
)
@ -26,7 +26,7 @@ type TestIGD struct {
pxpConn net.PacketConn // for NAT-PMP and/or PCP
ts *httptest.Server
logf logger.Logf
closed syncs.AtomicBool
closed atomic.Bool
// do* will log which packets are sent, but will not reply to unexpected packets.
@ -71,7 +71,7 @@ func NewTestIGD(logf logger.Logf, t TestIGDOptions) (*TestIGD, error) {
d.logf = func(msg string, args ...any) {
// Don't log after the device has closed;
// stray trailing logging angers testing.T.Logf.
if d.closed.Get() {
if d.closed.Load() {
return
}
logf(msg, args...)
@ -107,7 +107,7 @@ func testIPAndGateway() (gw, ip netip.Addr, ok bool) {
}
func (d *TestIGD) Close() error {
d.closed.Set(true)
d.closed.Store(true)
d.ts.Close()
d.upnpConn.Close()
d.pxpConn.Close()
@ -135,7 +135,7 @@ func (d *TestIGD) serveUPnPDiscovery() {
for {
n, src, err := d.upnpConn.ReadFrom(buf)
if err != nil {
if !d.closed.Get() {
if !d.closed.Load() {
d.logf("serveUPnP failed: %v", err)
}
return
@ -162,7 +162,7 @@ func (d *TestIGD) servePxP() {
for {
n, a, err := d.pxpConn.ReadFrom(buf)
if err != nil {
if !d.closed.Get() {
if !d.closed.Load() {
d.logf("servePxP failed: %v", err)
}
return

@ -14,12 +14,12 @@ import (
"runtime"
"strconv"
"strings"
"sync/atomic"
"syscall"
"time"
"go4.org/mem"
"golang.org/x/sys/unix"
"tailscale.com/syncs"
)
// Reading the sockfiles on Linux is very fast, so we can do it often.
@ -27,7 +27,7 @@ const pollInterval = 1 * time.Second
var sockfiles = []string{"/proc/net/tcp", "/proc/net/tcp6", "/proc/net/udp", "/proc/net/udp6"}
var sawProcNetPermissionErr syncs.AtomicBool
var sawProcNetPermissionErr atomic.Bool
const (
v6Localhost = "00000000000000000000000001000000:"
@ -37,7 +37,7 @@ const (
)
func listPorts() (List, error) {
if sawProcNetPermissionErr.Get() {
if sawProcNetPermissionErr.Load() {
return nil, nil
}
var ret []Port
@ -48,13 +48,13 @@ func listPorts() (List, error) {
// https://developer.android.com/about/versions/10/privacy/changes#proc-net-filesystem
// Ignore it rather than have the system log about our violation.
if runtime.GOOS == "android" && syscall.Access(fname, unix.R_OK) != nil {
sawProcNetPermissionErr.Set(true)
sawProcNetPermissionErr.Store(true)
return nil, nil
}
f, err := os.Open(fname)
if os.IsPermission(err) {
sawProcNetPermissionErr.Set(true)
sawProcNetPermissionErr.Store(true)
return nil, nil
}
if err != nil {

@ -12,11 +12,11 @@ import (
"fmt"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"tailscale.com/syncs"
"tailscale.com/tstest"
"tailscale.com/tsweb"
)
@ -131,10 +131,10 @@ func TestExpvar(t *testing.T) {
clk := newFakeTime()
p := newForTest(clk.Now, clk.NewTicker)
var succeed syncs.AtomicBool
var succeed atomic.Bool
p.Run("probe", probeInterval, map[string]string{"label": "value"}, func(context.Context) error {
clk.Advance(aFewMillis)
if succeed.Get() {
if succeed.Load() {
return nil
}
return errors.New("failing, as instructed by test")
@ -172,7 +172,7 @@ func TestExpvar(t *testing.T) {
Result: false,
})
succeed.Set(true)
succeed.Store(true)
clk.Advance(probeInterval + halfProbeInterval)
st := epoch.Add(probeInterval + halfProbeInterval + aFewMillis)
@ -189,10 +189,10 @@ func TestPrometheus(t *testing.T) {
clk := newFakeTime()
p := newForTest(clk.Now, clk.NewTicker)
var succeed syncs.AtomicBool
var succeed atomic.Bool
p.Run("testprobe", probeInterval, map[string]string{"label": "value"}, func(context.Context) error {
clk.Advance(aFewMillis)
if succeed.Get() {
if succeed.Load() {
return nil
}
return errors.New("failing, as instructed by test")
@ -219,7 +219,7 @@ probe_result{name="testprobe",label="value"} 0
t.Fatal(err)
}
succeed.Set(true)
succeed.Store(true)
clk.Advance(probeInterval + halfProbeInterval)
err = tstest.WaitFor(convergenceTimeout, func() error {

@ -30,6 +30,7 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
gossh "github.com/tailscale/golang-x-crypto/ssh"
@ -37,7 +38,6 @@ import (
"tailscale.com/ipn/ipnlocal"
"tailscale.com/logtail/backoff"
"tailscale.com/net/tsaddr"
"tailscale.com/syncs"
"tailscale.com/tailcfg"
"tailscale.com/tempfork/gliderlabs/ssh"
"tailscale.com/types/logger"
@ -645,7 +645,7 @@ func (c *conn) resolveTerminalActionLocked(s ssh.Session, cr *contextReader) (ac
action = c.action0
var awaitReadOnce sync.Once // to start Reads on cr
var sawInterrupt syncs.AtomicBool
var sawInterrupt atomic.Bool
var wg sync.WaitGroup
defer wg.Wait() // wait for awaitIntrOnce's goroutine to exit
@ -687,7 +687,7 @@ func (c *conn) resolveTerminalActionLocked(s ssh.Session, cr *contextReader) (ac
return
}
if n > 0 && buf[0] == 0x03 { // Ctrl-C
sawInterrupt.Set(true)
sawInterrupt.Store(true)
s.Stderr().Write([]byte("Canceled.\r\n"))
s.Exit(1)
return
@ -699,7 +699,7 @@ func (c *conn) resolveTerminalActionLocked(s ssh.Session, cr *contextReader) (ac
var err error
action, err = c.fetchSSHAction(ctx, url)
if err != nil {
if sawInterrupt.Get() {
if sawInterrupt.Load() {
metricTerminalInterrupt.Add(1)
return nil, fmt.Errorf("aborted by user")
} else {

@ -68,42 +68,6 @@ func (wg *WaitGroupChan) Decr() {
// Wait blocks until the WaitGroupChan counter is zero.
func (wg *WaitGroupChan) Wait() { <-wg.done }
// AtomicBool is an atomic boolean.
type AtomicBool int32
func (b *AtomicBool) Set(v bool) {
var n int32
if v {
n = 1
}
atomic.StoreInt32((*int32)(b), n)
}
// Swap sets b to v and reports whether it changed.
func (b *AtomicBool) Swap(v bool) (changed bool) {
var n int32
if v {
n = 1
}
old := atomic.SwapInt32((*int32)(b), n)
return old != n
}
func (b *AtomicBool) Get() bool {
return atomic.LoadInt32((*int32)(b)) != 0
}
// AtomicUint32 is an atomic uint32.
type AtomicUint32 uint32
func (b *AtomicUint32) Set(v uint32) {
atomic.StoreUint32((*uint32)(b), v)
}
func (b *AtomicUint32) Get() uint32 {
return atomic.LoadUint32((*uint32)(b))
}
// Semaphore is a counting semaphore.
//
// Use NewSemaphore to create one.
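
One behavioral note on the removed helper above (a hedged aside, not part of
the diff): syncs.AtomicBool.Swap reported whether the value changed, while the
standard library's atomic.Bool.Swap returns the previous value. For Swap(false),
the only Swap call site in this diff (ruleRestorePending in router_linux.go
below), the two conventions agree. A self-contained sketch:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    func main() {
        var b atomic.Bool    // zero value is false
        prev := b.Swap(true) // stdlib semantics: returns the previous value (false here)
        // The removed syncs.AtomicBool.Swap(true) would instead have reported
        // whether the value changed (true here), i.e. the negation of prev.
        fmt.Println(prev, b.Load()) // prints: false true
    }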

@ -286,20 +286,20 @@ type Conn struct {
// is named negatively because in early start-up, we don't yet
// necessarily have a netcheck.Report and don't want to skip
// logging.
noV4, noV6 syncs.AtomicBool
noV4, noV6 atomic.Bool
// noV4Send is whether IPv4 UDP is known to be unable to transmit
// at all. This could happen if the socket is in an invalid state
// (as can happen on darwin after a network link status change).
noV4Send syncs.AtomicBool
noV4Send atomic.Bool
// networkUp is whether the network is up (some interface is up
// with IPv4 or IPv6). It's used to suppress log spam and prevent
// new connection that'll fail.
networkUp syncs.AtomicBool
networkUp atomic.Bool
// havePrivateKey is whether privateKey is non-zero.
havePrivateKey syncs.AtomicBool
havePrivateKey atomic.Bool
publicKeyAtomic atomic.Value // of key.NodePublic (or NodeKey zero value if !havePrivateKey)
// derpMapAtomic is the same as derpMap, but without requiring
@ -310,7 +310,7 @@ type Conn struct {
lastNetCheckReport atomic.Pointer[netcheck.Report]
// port is the preferred port from opts.Port; 0 means auto.
port syncs.AtomicUint32
port atomic.Uint32
// ============================================================
// mu guards all following fields; see userspaceEngine lock
@ -531,7 +531,7 @@ func newConn() *Conn {
}
c.bind = &connBind{Conn: c, closed: true}
c.muCond = sync.NewCond(&c.mu)
c.networkUp.Set(true) // assume up until told otherwise
c.networkUp.Store(true) // assume up until told otherwise
return c
}
@ -542,7 +542,7 @@ func newConn() *Conn {
// It doesn't start doing anything until Start is called.
func NewConn(opts Options) (*Conn, error) {
c := newConn()
c.port.Set(uint32(opts.Port))
c.port.Store(uint32(opts.Port))
c.logf = opts.logf()
c.epFunc = opts.endpointsFunc()
c.derpActiveFunc = opts.derpActiveFunc()
@ -634,7 +634,7 @@ func (c *Conn) updateEndpoints(why string) {
c.muCond.Broadcast()
}()
c.logf("[v1] magicsock: starting endpoint update (%s)", why)
if c.noV4Send.Get() && runtime.GOOS != "js" {
if c.noV4Send.Load() && runtime.GOOS != "js" {
c.mu.Lock()
closed := c.closed
c.mu.Unlock()
@ -736,9 +736,9 @@ func (c *Conn) updateNetInfo(ctx context.Context) (*netcheck.Report, error) {
}
c.lastNetCheckReport.Store(report)
c.noV4.Set(!report.IPv4)
c.noV6.Set(!report.IPv6)
c.noV4Send.Set(!report.IPv4CanSend)
c.noV4.Store(!report.IPv4)
c.noV6.Store(!report.IPv6)
c.noV4Send.Store(!report.IPv4CanSend)
ni := &tailcfg.NetInfo{
DERPLatency: map[string]float64{},
@ -1074,7 +1074,7 @@ func (c *Conn) determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, erro
// port mapping on their router to the same explicit
// port that tailscaled is running with. Worst case
// it's an invalid candidate mapping.
if port := c.port.Get(); nr.MappingVariesByDestIP.EqualBool(true) && port != 0 {
if port := c.port.Load(); nr.MappingVariesByDestIP.EqualBool(true) && port != 0 {
if ip, _, err := net.SplitHostPort(nr.GlobalV4); err == nil {
addAddr(ipp(net.JoinHostPort(ip, strconv.Itoa(int(port)))), tailcfg.EndpointSTUN4LocalPort)
}
@ -1167,7 +1167,7 @@ func (c *Conn) LocalPort() uint16 {
var errNetworkDown = errors.New("magicsock: network down")
func (c *Conn) networkDown() bool { return !c.networkUp.Get() }
func (c *Conn) networkDown() bool { return !c.networkUp.Load() }
func (c *Conn) Send(b []byte, ep conn.Endpoint) error {
metricSendData.Add(1)
@ -1207,7 +1207,7 @@ func (c *Conn) sendUDPStd(addr netip.AddrPort, b []byte) (sent bool, err error)
switch {
case addr.Addr().Is4():
_, err = c.pconn4.WriteToUDPAddrPort(b, addr)
if err != nil && (c.noV4.Get() || neterror.TreatAsLostUDP(err)) {
if err != nil && (c.noV4.Load() || neterror.TreatAsLostUDP(err)) {
return false, nil
}
case addr.Addr().Is6():
@ -1216,7 +1216,7 @@ func (c *Conn) sendUDPStd(addr netip.AddrPort, b []byte) (sent bool, err error)
return false, nil
}
_, err = c.pconn6.WriteToUDPAddrPort(b, addr)
if err != nil && (c.noV6.Get() || neterror.TreatAsLostUDP(err)) {
if err != nil && (c.noV6.Load() || neterror.TreatAsLostUDP(err)) {
return false, nil
}
default:
@ -1674,7 +1674,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *ippEndpointCache)
if c.handleDiscoMessage(b, ipp, key.NodePublic{}) {
return nil, false
}
if !c.havePrivateKey.Get() {
if !c.havePrivateKey.Load() {
// If we have no private key, we're logged out or
// stopped. Don't try to pass these wireguard packets
// up to wireguard-go; it'll just complain (issue 1167).
@ -2140,12 +2140,12 @@ func (c *Conn) discoInfoLocked(k key.DiscoPublic) *discoInfo {
func (c *Conn) SetNetworkUp(up bool) {
c.mu.Lock()
defer c.mu.Unlock()
if c.networkUp.Get() == up {
if c.networkUp.Load() == up {
return
}
c.logf("magicsock: SetNetworkUp(%v)", up)
c.networkUp.Set(up)
c.networkUp.Store(up)
if up {
c.startDerpHomeConnectLocked()
@ -2157,10 +2157,10 @@ func (c *Conn) SetNetworkUp(up bool) {
// SetPreferredPort sets the connection's preferred local port.
func (c *Conn) SetPreferredPort(port uint16) {
if uint16(c.port.Get()) == port {
if uint16(c.port.Load()) == port {
return
}
c.port.Set(uint32(port))
c.port.Store(uint32(port))
if err := c.rebind(dropCurrentPort); err != nil {
c.logf("%w", err)
@ -2185,7 +2185,7 @@ func (c *Conn) SetPrivateKey(privateKey key.NodePrivate) error {
return nil
}
c.privateKey = newKey
c.havePrivateKey.Set(!newKey.IsZero())
c.havePrivateKey.Store(!newKey.IsZero())
if newKey.IsZero() {
c.publicKeyAtomic.Store(key.NodePublic{})
@ -2835,7 +2835,7 @@ func (c *Conn) bindSocket(rucPtr **RebindingUDPConn, network string, curPortFate
// Second best is the port that is currently in use.
// If those fail, fall back to 0.
var ports []uint16
if port := uint16(c.port.Get()); port != 0 {
if port := uint16(c.port.Load()); port != 0 {
ports = append(ports, port)
}
if ruc.pconn != nil && curPortFate == keepCurrentPort {

@ -14,6 +14,7 @@ import (
"os/exec"
"strconv"
"strings"
"sync/atomic"
"syscall"
"time"
@ -25,7 +26,6 @@ import (
"golang.zx2c4.com/wireguard/tun"
"tailscale.com/envknob"
"tailscale.com/net/tsaddr"
"tailscale.com/syncs"
"tailscale.com/types/logger"
"tailscale.com/types/preftype"
"tailscale.com/util/multierr"
@ -84,7 +84,7 @@ type netfilterRunner interface {
}
type linuxRouter struct {
closed syncs.AtomicBool
closed atomic.Bool
logf func(fmt string, args ...any)
tunname string
linkMon *monitor.Mon
@ -97,7 +97,7 @@ type linuxRouter struct {
// ruleRestorePending is whether a timer has been started to
// restore deleted ip rules.
ruleRestorePending syncs.AtomicBool
ruleRestorePending atomic.Bool
ipRuleFixLimiter *rate.Limiter
// Various feature checks for the network stack.
@ -233,7 +233,7 @@ func (r *linuxRouter) onIPRuleDeleted(table uint8, priority uint32) {
return
}
time.AfterFunc(rr.Delay()+250*time.Millisecond, func() {
if r.ruleRestorePending.Swap(false) && !r.closed.Get() {
if r.ruleRestorePending.Swap(false) && !r.closed.Load() {
r.logf("somebody (likely systemd-networkd) deleted ip rules; restoring Tailscale's")
r.justAddIPRules()
}
@ -258,7 +258,7 @@ func (r *linuxRouter) Up() error {
}
func (r *linuxRouter) Close() error {
r.closed.Set(true)
r.closed.Store(true)
if r.unregLinkMon != nil {
r.unregLinkMon()
}
