From d16946854f9af21a0cd32b2e2f3031eb5537b5a3 Mon Sep 17 00:00:00 2001
From: Maisem Ali
Date: Wed, 9 Aug 2023 19:56:43 -0700
Subject: [PATCH] control/controlclient: add Auto.updateRoutine

Instead of having updates replace the map polls, create a third
goroutine which is solely responsible for making sure that control is
aware of the latest client state.

This also makes it so that the streaming map polls are only broken
when there are auth changes or the client is paused.

Updates tailscale/corp#5761

Signed-off-by: Maisem Ali
---
 control/controlclient/auto.go        | 330 +++++++++++++--------------
 control/controlclient/direct.go      |  60 +++--
 control/controlclient/direct_test.go |   5 +-
 tailcfg/tailcfg.go                   |  32 ++-
 4 files changed, 216 insertions(+), 211 deletions(-)

diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go
index e28608f4e..1e258080f 100644
--- a/control/controlclient/auto.go
+++ b/control/controlclient/auto.go
@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"net/http"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"tailscale.com/health"
@@ -46,6 +47,91 @@ func (g *LoginGoal) sendLogoutError(err error) {
 
 var _ Client = (*Auto)(nil)
 
+// waitUnpause waits until the client is unpaused, then returns. It only
+// returns an error if the client is closed.
+func (c *Auto) waitUnpause(routineLogName string) error {
+	c.mu.Lock()
+	if !c.paused {
+		c.mu.Unlock()
+		return nil
+	}
+	unpaused := c.unpausedChanLocked()
+	c.mu.Unlock()
+
+	c.logf("%s: awaiting unpause", routineLogName)
+	select {
+	case <-unpaused:
+		c.logf("%s: unpaused", routineLogName)
+		return nil
+	case <-c.quit:
+		return errors.New("quit")
+	}
+}
+
+// updateRoutine is responsible for informing the server of worthy changes to
+// our local state. It runs in its own goroutine.
+func (c *Auto) updateRoutine() {
+	defer close(c.updateDone)
+	bo := backoff.NewBackoff("updateRoutine", c.logf, 30*time.Second)
+	for {
+		if err := c.waitUnpause("updateRoutine"); err != nil {
+			c.logf("updateRoutine: exiting")
+			return
+		}
+		c.mu.Lock()
+		gen := c.lastUpdateGen
+		ctx := c.mapCtx
+		needUpdate := gen > 0 && gen != c.lastUpdateGenInformed && c.loggedIn
+		c.mu.Unlock()
+
+		if needUpdate {
+			select {
+			case <-c.quit:
+				c.logf("updateRoutine: exiting")
+				return
+			default:
+			}
+		} else {
+			// Nothing to do, wait for a signal.
+			select {
+			case <-c.quit:
+				c.logf("updateRoutine: exiting")
+				return
+			case <-c.updateCh:
+				continue
+			}
+		}
+
+		t0 := c.clock.Now()
+		err := c.direct.SendUpdate(ctx)
+		d := time.Since(t0).Round(time.Millisecond)
+		if err != nil {
+			if ctx.Err() == nil {
+				c.direct.logf("lite map update error after %v: %v", d, err)
+			}
+			bo.BackOff(ctx, err)
+			continue
+		}
+		bo.BackOff(ctx, nil)
+		c.direct.logf("[v1] successful lite map update in %v", d)
+
+		c.mu.Lock()
+		c.lastUpdateGenInformed = gen
+		c.mu.Unlock()
+	}
+}
+
+// atomicGen is an atomic int64 generator. It is used to generate monotonically
+// increasing numbers for updateGen.
+var atomicGen atomic.Int64

+func nextUpdateGen() updateGen {
+	return updateGen(atomicGen.Add(1))
+}
+
+// updateGen is a monotonically increasing number that represents a particular
+// update to the local state.
+type updateGen int64
+
 // Auto connects to a tailcontrol server for a node.
 // It's a concrete implementation of the Client interface.
 type Auto struct {
@@ -54,6 +140,7 @@ type Auto struct {
 	logf       logger.Logf
 	expiry     *time.Time
 	closed     bool
+	updateCh   chan struct{} // readable when we should inform the server of a change
 	newMapCh   chan struct{} // readable when we must restart a map request
 
 	statusFunc func(Status) // called to update Client status; always non-nil
@@ -61,25 +148,29 @@
 
 	mu sync.Mutex // mutex guards the following fields
 
-	paused               bool // whether we should stop making HTTP requests
-	unpauseWaiters       []chan struct{}
-	loggedIn             bool               // true if currently logged in
-	loginGoal            *LoginGoal         // non-nil if some login activity is desired
-	synced               bool               // true if our netmap is up-to-date
-	inPollNetMap         bool               // true if currently running a PollNetMap
-	inLiteMapUpdate      bool               // true if a lite (non-streaming) map request is outstanding
-	liteMapUpdateCancel  context.CancelFunc // cancels a lite map update, may be nil
-	liteMapUpdateCancels int                // how many times we've canceled a lite map update
-	inSendStatus         int                // number of sendStatus calls currently in progress
-	state                State
+	// lastUpdateGen is the gen of the last update we had that was worth
+	// sending to the server.
+	lastUpdateGen updateGen
+	// lastUpdateGenInformed is the value of lastUpdateGen that we've
+	// successfully informed the server of.
+	lastUpdateGenInformed updateGen
+
+	paused         bool // whether we should stop making HTTP requests
+	unpauseWaiters []chan struct{}
+	loggedIn       bool       // true if currently logged in
+	loginGoal      *LoginGoal // non-nil if some login activity is desired
+	synced         bool       // true if our netmap is up-to-date
+	inSendStatus   int        // number of sendStatus calls currently in progress
+	state          State
 
 	authCtx    context.Context // context used for auth requests
-	mapCtx     context.Context // context used for netmap requests
-	authCancel func()          // cancel the auth context
-	mapCancel  func()          // cancel the netmap context
+	mapCtx     context.Context // context used for netmap and update requests
+	authCancel func()          // cancel authCtx
+	mapCancel  func()          // cancel mapCtx
 	quit       chan struct{}   // when closed, goroutines should all exit
-	authDone   chan struct{}   // when closed, auth goroutine is done
-	mapDone    chan struct{}   // when closed, map goroutine is done
+	authDone   chan struct{}   // when closed, authRoutine is done
+	mapDone    chan struct{}   // when closed, mapRoutine is done
+	updateDone chan struct{}   // when closed, updateRoutine is done
 }
 
 // New creates and starts a new Auto.
@@ -116,10 +207,12 @@ func NewNoStart(opts Options) (_ *Auto, err error) {
 		direct:     direct,
 		clock:      opts.Clock,
 		logf:       opts.Logf,
+		updateCh:   make(chan struct{}, 1),
 		newMapCh:   make(chan struct{}, 1),
 		quit:       make(chan struct{}),
 		authDone:   make(chan struct{}),
 		mapDone:    make(chan struct{}),
+		updateDone: make(chan struct{}),
 		statusFunc: opts.Status,
 	}
 	c.authCtx, c.authCancel = context.WithCancel(context.Background())
@@ -162,85 +255,34 @@ func (c *Auto) SetPaused(paused bool) {
 func (c *Auto) Start() {
 	go c.authRoutine()
 	go c.mapRoutine()
+	go c.updateRoutine()
 }
 
-// sendNewMapRequest either sends a new OmitPeers, non-streaming map request
-// (to just send Hostinfo/Netinfo/Endpoints info, while keeping an existing
-// streaming response open), or start a new streaming one if necessary.
+// updateControl sends a new OmitPeers, non-streaming map request (to just send
+// Hostinfo/Netinfo/Endpoints info, while keeping an existing streaming response
+// open).
 //
 // It should be called whenever there's something new to tell the server.
-func (c *Auto) sendNewMapRequest() {
+func (c *Auto) updateControl() {
+	gen := nextUpdateGen()
 	c.mu.Lock()
-
-	// If we're not already streaming a netmap, then tear down everything
-	// and start a new stream (which starts by sending a new map request)
-	if !c.inPollNetMap || !c.loggedIn {
+	if gen < c.lastUpdateGen {
+		// This update is out of date.
 		c.mu.Unlock()
-		c.cancelMapSafely()
 		return
 	}
+	c.lastUpdateGen = gen
+	c.mu.Unlock()
 
-	// If we are already in process of doing a LiteMapUpdate, cancel it and
-	// try a new one. If this is the 10th time we have done this
-	// cancelation, tear down everything and start again.
-	const maxLiteMapUpdateAttempts = 10
-	if c.inLiteMapUpdate {
-		// Always cancel the in-flight lite map update, regardless of
-		// whether we cancel the streaming map request or not.
-		c.liteMapUpdateCancel()
-		c.inLiteMapUpdate = false
-
-		if c.liteMapUpdateCancels >= maxLiteMapUpdateAttempts {
-			// Not making progress
-			c.mu.Unlock()
-			c.cancelMapSafely()
-			return
-		}
-
-		// Increment our cancel counter and continue below to start a
-		// new lite update.
-		c.liteMapUpdateCancels++
+	select {
+	case c.updateCh <- struct{}{}:
+	default:
 	}
-
-	// Otherwise, send a lite update that doesn't keep a
-	// long-running stream response.
-	defer c.mu.Unlock()
-	c.inLiteMapUpdate = true
-	ctx, cancel := context.WithTimeout(c.mapCtx, 10*time.Second)
-	c.liteMapUpdateCancel = cancel
-	go func() {
-		defer cancel()
-		t0 := c.clock.Now()
-		err := c.direct.SendLiteMapUpdate(ctx)
-		d := time.Since(t0).Round(time.Millisecond)
-
-		c.mu.Lock()
-		c.inLiteMapUpdate = false
-		c.liteMapUpdateCancel = nil
-		if err == nil {
-			c.liteMapUpdateCancels = 0
-		}
-		c.mu.Unlock()
-
-		if err == nil {
-			c.logf("[v1] successful lite map update in %v", d)
-			return
-		}
-		if ctx.Err() == nil {
-			c.logf("lite map update after %v: %v", d, err)
-		}
-		if !errors.Is(ctx.Err(), context.Canceled) {
-			// Fall back to restarting the long-polling map
-			// request (the old heavy way) if the lite update
-			// failed for reasons other than the context being
-			// canceled.
-			c.cancelMapSafely()
-		}
-	}()
 }
 
 func (c *Auto) cancelAuth() {
 	c.mu.Lock()
+	defer c.mu.Unlock()
 	if c.authCancel != nil {
 		c.authCancel()
 	}
@@ -248,9 +290,9 @@ func (c *Auto) cancelAuth() {
 		c.authCtx, c.authCancel = context.WithCancel(context.Background())
 		c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, c.logf)
 	}
-	c.mu.Unlock()
 }
 
+// cancelMapLocked is like cancelMap, but assumes the caller holds c.mu.
func (c *Auto) cancelMapLocked() {
 	if c.mapCancel != nil {
 		c.mapCancel()
@@ -258,56 +300,33 @@ func (c *Auto) cancelMapLocked() {
 	if !c.closed {
 		c.mapCtx, c.mapCancel = context.WithCancel(context.Background())
 		c.mapCtx = sockstats.WithSockStats(c.mapCtx, sockstats.LabelControlClientAuto, c.logf)
-
 	}
 }
 
-func (c *Auto) cancelMapUnsafely() {
+// cancelMap cancels the existing map poll and any in-flight update requests.
+func (c *Auto) cancelMap() {
 	c.mu.Lock()
+	defer c.mu.Unlock()
 	c.cancelMapLocked()
-	c.mu.Unlock()
 }
 
-func (c *Auto) cancelMapSafely() {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	// Always reset our lite map cancels counter if we're canceling
-	// everything, since we're about to restart with a new map update; this
-	// allows future calls to sendNewMapRequest to retry sending lite
-	// updates.
-	c.liteMapUpdateCancels = 0
+// restartMap cancels the existing map poll and in-flight update requests, and
+// then starts a new streaming map poll.
+func (c *Auto) restartMap() {
+	c.cancelMap()
 
-	c.logf("[v1] cancelMapSafely: synced=%v", c.synced)
+	c.logf("[v1] restartMap: synced=%v", c.synced)
 
-	if c.inPollNetMap {
-		// received at least one netmap since the last
-		// interruption. That means the server has already
-		// fully processed our last request, which might
-		// include UpdateEndpoints(). Interrupt it and try
-		// again.
-		c.cancelMapLocked()
-	} else {
-		// !synced means we either haven't done a netmap
-		// request yet, or it hasn't answered yet. So the
-		// server is in an undefined state. If we send
-		// another netmap request too soon, it might race
-		// with the last one, and if we're very unlucky,
-		// the new request will be applied before the old one,
-		// and the wrong endpoints will get registered. We
-		// have to tell the client to abort politely, only
-		// after it receives a response to its existing netmap
-		// request.
-		select {
-		case c.newMapCh <- struct{}{}:
-			c.logf("[v1] cancelMapSafely: wrote to channel")
-		default:
-			// if channel write failed, then there was already
-			// an outstanding newMapCh request. One is enough,
-			// since it'll always use the latest endpoints.
-			c.logf("[v1] cancelMapSafely: channel was full")
-		}
+	select {
+	case c.newMapCh <- struct{}{}:
+		c.logf("[v1] restartMap: wrote to channel")
+	default:
+		// if channel write failed, then there was already
+		// an outstanding newMapCh request. One is enough,
+		// since it'll always use the latest endpoints.
+		c.logf("[v1] restartMap: channel was full")
 	}
+
+	c.updateControl()
 }
 
 func (c *Auto) authRoutine() {
@@ -428,7 +447,7 @@ func (c *Auto) authRoutine() {
 		c.mu.Unlock()
 
 		c.sendStatus("authRoutine-success", nil, "", nil)
-		c.cancelMapSafely()
+		c.restartMap()
 		bo.BackOff(ctx, nil)
 	}
 }
@@ -458,25 +477,19 @@ func (c *Auto) unpausedChanLocked() <-chan struct{} {
 	return unpaused
 }
 
+// mapRoutine is responsible for keeping a read-only streaming connection to the
+// control server, and keeping the netmap up to date.
 func (c *Auto) mapRoutine() {
 	defer close(c.mapDone)
 	bo := backoff.NewBackoff("mapRoutine", c.logf, 30*time.Second)
 	for {
-		c.mu.Lock()
-		if c.paused {
-			unpaused := c.unpausedChanLocked()
-			c.mu.Unlock()
-			c.logf("mapRoutine: awaiting unpause")
-			select {
-			case <-unpaused:
-				c.logf("mapRoutine: unpaused")
-			case <-c.quit:
-				c.logf("mapRoutine: quit")
-				return
-			}
-			continue
+		if err := c.waitUnpause("mapRoutine"); err != nil {
+			c.logf("mapRoutine: exiting")
+			return
 		}
+
+		c.mu.Lock()
 		c.logf("[v1] mapRoutine: %s", c.state)
 		loggedIn := c.loggedIn
 		ctx := c.mapCtx
@@ -513,43 +526,21 @@ func (c *Auto) mapRoutine() {
 				c.logf("[v1] mapRoutine: new map needed while idle.")
 			}
 		} else {
-			// Be sure this is false when we're not inside
-			// PollNetMap, so that cancelMapSafely() can notify
-			// us correctly.
-			c.mu.Lock()
-			c.inPollNetMap = false
-			c.mu.Unlock()
 			health.SetInPollNetMap(false)
 
 			err := c.direct.PollNetMap(ctx, func(nm *netmap.NetworkMap) {
 				health.SetInPollNetMap(true)
-				c.mu.Lock()
-
-				select {
-				case <-c.newMapCh:
-					c.logf("[v1] mapRoutine: new map request during PollNetMap. canceling.")
-					c.cancelMapLocked()
-
-					// Don't emit this netmap; we're
-					// about to request a fresh one.
-					c.mu.Unlock()
-					return
-				default:
-				}
+				c.mu.Lock()
 
 				c.synced = true
-				c.inPollNetMap = true
 				if c.loggedIn {
 					c.state = StateSynchronized
 				}
-				exp := nm.Expiry
-				c.expiry = &exp
+				c.expiry = ptr.To(nm.Expiry)
 				stillAuthed := c.loggedIn
-				state := c.state
-
+				c.logf("[v1] mapRoutine: netmap received: %s", c.state)
 				c.mu.Unlock()
 
-				c.logf("[v1] mapRoutine: netmap received: %s", state)
 				if stillAuthed {
 					c.sendStatus("mapRoutine-got-netmap", nil, "", nm)
 				}
@@ -560,7 +551,6 @@
 			health.SetInPollNetMap(false)
 			c.mu.Lock()
 			c.synced = false
-			c.inPollNetMap = false
 			if c.state == StateSynchronized {
 				c.state = StateAuthenticated
 			}
@@ -602,7 +592,7 @@ func (c *Auto) SetHostinfo(hi *tailcfg.Hostinfo) {
 	}
 
 	// Send new Hostinfo to server
-	c.sendNewMapRequest()
+	c.updateControl()
 }
 
 func (c *Auto) SetNetInfo(ni *tailcfg.NetInfo) {
@@ -614,12 +604,17 @@ func (c *Auto) SetNetInfo(ni *tailcfg.NetInfo) {
 	}
 
 	// Send new NetInfo to server
-	c.sendNewMapRequest()
+	c.updateControl()
 }
 
 // SetTKAHead updates the TKA head hash that map-request infrastructure sends.
 func (c *Auto) SetTKAHead(headHash string) {
-	c.direct.SetTKAHead(headHash)
+	if !c.direct.SetTKAHead(headHash) {
+		return
+	}
+
+	// Send new TKAHead to server
+	c.updateControl()
 }
 
 func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkMap) {
@@ -728,7 +723,7 @@ func (c *Auto) SetExpirySooner(ctx context.Context, expiry time.Time) error {
 func (c *Auto) UpdateEndpoints(endpoints []tailcfg.Endpoint) {
 	changed := c.direct.SetEndpoints(endpoints)
 	if changed {
-		c.sendNewMapRequest()
+		c.updateControl()
 	}
 }
 
@@ -750,8 +745,9 @@ func (c *Auto) Shutdown() {
 		close(c.quit)
 		c.cancelAuth()
 		<-c.authDone
-		c.cancelMapUnsafely()
+		c.cancelMap()
 		<-c.mapDone
+		<-c.updateDone
 		if direct != nil {
 			direct.Close()
 		}

diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go
index 99d46ffb8..d777deb8e 100644
--- a/control/controlclient/direct.go
+++ b/control/controlclient/direct.go
@@ -51,6 +51,7 @@ import (
 	"tailscale.com/types/netmap"
 	"tailscale.com/types/opt"
 	"tailscale.com/types/persist"
+	"tailscale.com/types/ptr"
 	"tailscale.com/types/tkatype"
 	"tailscale.com/util/clientmetric"
 	"tailscale.com/util/multierr"
@@ -259,10 +260,8 @@ func NewDirect(opts Options) (*Direct, error) {
 	if opts.Hostinfo == nil {
 		c.SetHostinfo(hostinfo.New())
 	} else {
-		ni := opts.Hostinfo.NetInfo
-		opts.Hostinfo.NetInfo = nil
 		c.SetHostinfo(opts.Hostinfo)
-		if ni != nil {
+		if ni := opts.Hostinfo.NetInfo; ni != nil {
 			c.SetNetInfo(ni)
 		}
 	}
@@ -294,6 +293,8 @@ func (c *Direct) SetHostinfo(hi *tailcfg.Hostinfo) bool {
 	if hi == nil {
 		panic("nil Hostinfo")
 	}
+	hi = ptr.To(*hi)
+	hi.NetInfo = nil
 	c.mu.Lock()
 	defer c.mu.Unlock()
@@ -771,13 +772,13 @@ func (c *Direct) SetEndpoints(endpoints []tailcfg.Endpoint) (changed bool) {
 // It always returns a non-nil error describing the reason for the failure
 // or why the request ended.
 func (c *Direct) PollNetMap(ctx context.Context, cb func(*netmap.NetworkMap)) error {
-	return c.sendMapRequest(ctx, -1, false, cb)
+	return c.sendMapRequest(ctx, true, cb)
 }
 
-// FetchNetMap fetches the netmap once.
-func (c *Direct) FetchNetMap(ctx context.Context) (*netmap.NetworkMap, error) {
+// FetchNetMapForTest fetches the netmap once.
+func (c *Direct) FetchNetMapForTest(ctx context.Context) (*netmap.NetworkMap, error) {
 	var ret *netmap.NetworkMap
-	err := c.sendMapRequest(ctx, 1, false, func(nm *netmap.NetworkMap) {
+	err := c.sendMapRequest(ctx, false, func(nm *netmap.NetworkMap) {
 		ret = nm
 	})
 	if err == nil && ret == nil {
@@ -786,11 +787,11 @@ func (c *Direct) FetchNetMap(ctx context.Context) (*netmap.NetworkMap, error) {
 	return ret, err
 }
 
-// SendLiteMapUpdate makes a /map request to update the server of our latest state,
-// but does not fetch anything. It returns an error if the server did not return a
+// SendUpdate makes a /map request to inform the server of our latest state, but
+// does not fetch anything. It returns an error if the server did not return a
 // successful 200 OK response.
-func (c *Direct) SendLiteMapUpdate(ctx context.Context) error {
-	return c.sendMapRequest(ctx, 1, false, nil)
+func (c *Direct) SendUpdate(ctx context.Context) error {
+	return c.sendMapRequest(ctx, false, nil)
 }
 
 // If we go more than pollTimeout without hearing from the server,
@@ -798,17 +799,21 @@ func (c *Direct) SendLiteMapUpdate(ctx context.Context) error {
 // every minute.
 const pollTimeout = 120 * time.Second
 
-// sendMapRequest makes a /map request to download the network map, calling cb with
-// each new netmap. If maxPolls is -1, it will poll forever and only returns if
-// the context expires or the server returns an error/closes the connection and as
-// such always returns a non-nil error.
+// sendMapRequest makes a /map request to download the network map, calling cb
+// with each new netmap. If isStreaming, it polls forever and returns only when
+// the context expires or the server returns an error or closes the connection,
+// and as such always returns a non-nil error.
 //
 // If cb is nil, OmitPeers will be set to true.
-func (c *Direct) sendMapRequest(ctx context.Context, maxPolls int, readOnly bool, cb func(*netmap.NetworkMap)) error {
+func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, cb func(*netmap.NetworkMap)) error {
+	if isStreaming && cb == nil {
+		panic("cb must be non-nil if isStreaming is true")
+	}
+
 	metricMapRequests.Add(1)
 	metricMapRequestsActive.Add(1)
 	defer metricMapRequestsActive.Add(-1)
-	if maxPolls == -1 {
+	if isStreaming {
 		metricMapRequestsPoll.Add(1)
 	} else {
 		metricMapRequestsLite.Add(1)
@@ -844,8 +849,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, maxPolls int, readOnly bool
 		return errors.New("hostinfo: BackendLogID missing")
 	}
 
-	allowStream := maxPolls != 1
-	c.logf("[v1] PollNetMap: stream=%v ep=%v", allowStream, epStrs)
+	c.logf("[v1] PollNetMap: stream=%v ep=%v", isStreaming, epStrs)
 
 	vlogf := logger.Discard
 	if DevKnob.DumpNetMaps() {
@@ -861,23 +865,11 @@ func (c *Direct) sendMapRequest(ctx context.Context, maxPolls int, readOnly bool
 		DiscoKey:      c.discoPubKey,
 		Endpoints:     epStrs,
 		EndpointTypes: epTypes,
-		Stream:        allowStream,
+		Stream:        isStreaming,
 		Hostinfo:      hi,
 		DebugFlags:    c.debugFlags,
 		OmitPeers:     cb == nil,
 		TKAHead:       c.tkaHead,
-
-		// Previously we'd set ReadOnly to true if we didn't have any endpoints
-		// yet as we expected to learn them in a half second and restart the full
-		// streaming map poll, however as we are trying to reduce the number of
-		// times we restart the full streaming map poll we now just set ReadOnly
-		// false when we're doing a full streaming map poll.
-		//
-		// TODO(maisem/bradfitz): really ReadOnly should be set to true if for
-		// all streams and we should only do writes via lite map updates.
-		// However that requires an audit and a bunch of testing to make sure we
-		// don't break anything.
-		ReadOnly: readOnly && !allowStream,
 	}
 	var extraDebugFlags []string
 	if hi != nil && c.netMon != nil && !c.skipIPForwardingCheck &&
@@ -994,7 +986,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, maxPolls int, readOnly bool
 	// the same format before just closing the connection.
 	// We can use this same read loop either way.
 	var msg []byte
-	for i := 0; i < maxPolls || maxPolls < 0; i++ {
+	for i := 0; i == 0 || isStreaming; i++ {
 		vlogf("netmap: starting size read after %v (poll %v)", time.Since(t0).Round(time.Millisecond), i)
 		var siz [4]byte
 		if _, err := io.ReadFull(res.Body, siz[:]); err != nil {
@@ -1018,7 +1010,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, maxPolls int, readOnly bool
 
 		metricMapResponseMessages.Add(1)
 
-		if allowStream {
+		if isStreaming {
 			health.GotStreamedMapResponse()
 		}

diff --git a/control/controlclient/direct_test.go b/control/controlclient/direct_test.go
index eee079a49..169fcc471 100644
--- a/control/controlclient/direct_test.go
+++ b/control/controlclient/direct_test.go
@@ -42,7 +42,10 @@ func TestNewDirect(t *testing.T) {
 		t.Errorf("c.serverURL got %v want %v", c.serverURL, opts.ServerURL)
 	}
 
-	if !hi.Equal(c.hostinfo) {
+	// hi is stored without its NetInfo field.
+	hiWithoutNi := *hi
+	hiWithoutNi.NetInfo = nil
+	if !hiWithoutNi.Equal(c.hostinfo) {
 		t.Errorf("c.hostinfo got %v want %v", c.hostinfo, hi)
 	}

diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go
index 2f53be39b..96d18e3de 100644
--- a/tailcfg/tailcfg.go
+++ b/tailcfg/tailcfg.go
@@ -105,7 +105,8 @@ type CapabilityVersion int
 //   - 65: 2023-07-12: Client understands DERPMap.HomeParams + incremental DERPMap updates with params
 //   - 66: 2023-07-23: UserProfile.Groups added (available via WhoIs)
 //   - 67: 2023-07-25: Client understands PeerCapMap
-const CurrentCapabilityVersion CapabilityVersion = 67
+//   - 68: 2023-08-09: Client has dedicated updateRoutine; MapRequest.Stream true means ignore Hostinfo+Endpoints
+const CurrentCapabilityVersion CapabilityVersion = 68
 
 type StableID string
 
@@ -1082,8 +1083,21 @@ type MapRequest struct {
 	NodeKey     key.NodePublic
 	DiscoKey    key.DiscoPublic
 	IncludeIPv6 bool `json:",omitempty"` // include IPv6 endpoints in returned Node Endpoints (for Version 4 clients)
-	Stream      bool // if true, multiple MapResponse objects are returned
-	Hostinfo    *Hostinfo
+
+	// Stream is whether the client wants to receive multiple MapResponses over
+	// the same HTTP connection.
+	//
+	// If false, the server will send a single MapResponse and then close the
+	// connection.
+	//
+	// If true and Version >= 68, the server should treat this as a read-only
+	// request and ignore any Hostinfo or other fields that might be set.
+	Stream bool
+
+	// Hostinfo is the client's current Hostinfo. Although it is always included
+	// in the request, the server may choose to ignore it when Stream is true
+	// and Version >= 68.
+	Hostinfo *Hostinfo
 
 	// MapSessionHandle, if non-empty, is a request to reattach to a previous
 	// map session after a previous map session was interrupted for whatever
@@ -1105,6 +1119,7 @@ type MapRequest struct {
 	MapSessionSeq int64 `json:",omitempty"`
 
 	// Endpoints are the client's magicsock UDP ip:port endpoints (IPv4 or IPv6).
+	// These can be ignored if Stream is true and Version >= 68.
 	Endpoints []string
 	// EndpointTypes are the types of the corresponding endpoints in Endpoints.
 	EndpointTypes []EndpointType `json:",omitempty"`
@@ -1114,13 +1129,12 @@ type MapRequest struct {
 	// TKAHead describes the hash of the latest AUM applied to the local
 	// tailnet key authority, if one is operating.
 	// It is encoded as tka.AUMHash.MarshalText.
 	TKAHead string `json:",omitempty"`
 
-	// ReadOnly is whether the client just wants to fetch the
-	// MapResponse, without updating their Endpoints. The
-	// Endpoints field will be ignored and LastSeen will not be
-	// updated and peers will not be notified of changes.
+	// ReadOnly was set when the client just wanted to fetch the MapResponse,
+	// without updating their Endpoints. The intended use was for clients to
+	// discover the DERP map at start-up before their first real endpoint
+	// update.
 	//
-	// The intended use is for clients to discover the DERP map at
-	// start-up before their first real endpoint update.
+	// Deprecated: always false as of Version 68.
 	ReadOnly bool `json:",omitempty"`
 
 	// OmitPeers is whether the client is okay with the Peers list being omitted
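
---

Note for readers of the patch (not part of the commit): updateControl and
updateRoutine together form a generation-counter coalescing pattern — callers
bump a monotonic generation and poke a 1-buffered channel, and a single worker
goroutine sends at most one request per outstanding generation, no matter how
many callers raced. The standalone Go sketch below shows just that pattern.
All names here (gen, notifier, Notify, run) are illustrative and assumed; the
sketch also omits the pause/quit/backoff handling that Auto layers on top.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type gen int64

type notifier struct {
	mu       sync.Mutex
	latest   gen           // latest generation requested by callers
	informed gen           // latest generation the worker has sent
	ch       chan struct{} // 1-buffered wakeup signal
	genSrc   atomic.Int64  // monotonic generation source
}

func newNotifier() *notifier {
	return &notifier{ch: make(chan struct{}, 1)}
}

// Notify records that new state exists and wakes the worker. Concurrent
// calls collapse into a single pending wakeup because ch is 1-buffered.
func (n *notifier) Notify() {
	g := gen(n.genSrc.Add(1))
	n.mu.Lock()
	if g < n.latest { // a newer notification already won the race
		n.mu.Unlock()
		return
	}
	n.latest = g
	n.mu.Unlock()
	select {
	case n.ch <- struct{}{}:
	default: // a wakeup is already pending; one is enough
	}
}

// run is the worker loop: it performs at most one send per outstanding
// generation, however many Notify calls arrived in the meantime. It never
// marks newer state as informed, because it records only the generation it
// actually read before sending.
func (n *notifier) run(send func(gen)) {
	for range n.ch { // runs until the program exits; a sketch, so no quit channel
		n.mu.Lock()
		g := n.latest
		need := g > n.informed
		n.mu.Unlock()
		if !need {
			continue
		}
		send(g) // stands in for Direct.SendUpdate in the patch
		n.mu.Lock()
		n.informed = g
		n.mu.Unlock()
	}
}

func main() {
	n := newNotifier()
	go n.run(func(g gen) { fmt.Println("sent update for gen", g) })
	for i := 0; i < 5; i++ {
		n.Notify() // a burst of notifications coalesces into one or two sends
	}
	time.Sleep(100 * time.Millisecond)
}

The 1-buffered channel is what lets bursts of notifications collapse into a
single wakeup, while the generation comparison is what makes losing a wakeup
harmless: if a notification lands between the worker's read and its send, the
pending token re-runs the loop and the newer generation is sent as well.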