@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"net/http"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"tailscale.com/health"
@@ -46,6 +47,91 @@ func (g *LoginGoal) sendLogoutError(err error) {
 
 var _ Client = (*Auto)(nil)
 
+// waitUnpause waits until the client is unpaused then returns. It only
+// returns an error if the client is closed.
+func (c *Auto) waitUnpause(routineLogName string) error {
+	c.mu.Lock()
+	if !c.paused {
+		c.mu.Unlock()
+		return nil
+	}
+	unpaused := c.unpausedChanLocked()
+	c.mu.Unlock()
+
+	c.logf("%s: awaiting unpause", routineLogName)
+	select {
+	case <-unpaused:
+		c.logf("%s: unpaused", routineLogName)
+		return nil
+	case <-c.quit:
+		return errors.New("quit")
+	}
+}
+
+// updateRoutine is responsible for informing the server of worthy changes to
+// our local state. It runs in its own goroutine.
+func (c *Auto) updateRoutine() {
+	defer close(c.updateDone)
+	bo := backoff.NewBackoff("updateRoutine", c.logf, 30*time.Second)
+	for {
+		if err := c.waitUnpause("updateRoutine"); err != nil {
+			c.logf("updateRoutine: exiting")
+			return
+		}
+		c.mu.Lock()
+		gen := c.lastUpdateGen
+		ctx := c.mapCtx
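+		// An update is worth sending only if an update has been
+		// requested (gen > 0), the server hasn't been informed of it
+		// yet, and we're logged in.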
+		needUpdate := gen > 0 && gen != c.lastUpdateGenInformed && c.loggedIn
+		c.mu.Unlock()
+
+		if needUpdate {
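+			// There's an update to send; just make sure we're not
+			// shutting down first.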
+			select {
+			case <-c.quit:
+				c.logf("updateRoutine: exiting")
+				return
+			default:
+			}
+		} else {
+			// Nothing to do, wait for a signal.
+			select {
+			case <-c.quit:
+				c.logf("updateRoutine: exiting")
+				return
+			case <-c.updateCh:
+				continue
+			}
+		}
+
+		t0 := c.clock.Now()
+		err := c.direct.SendUpdate(ctx)
+		d := time.Since(t0).Round(time.Millisecond)
+		if err != nil {
+			if ctx.Err() == nil {
+				c.direct.logf("lite map update error after %v: %v", d, err)
+			}
+			bo.BackOff(ctx, err)
+			continue
+		}
+		bo.BackOff(ctx, nil)
+		c.direct.logf("[v1] successful lite map update in %v", d)
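+
+		// Remember the gen we just delivered. If updateControl has
+		// bumped lastUpdateGen since we read it, the comparison at the
+		// top of the loop will trigger another send.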
+		c.mu.Lock()
+		c.lastUpdateGenInformed = gen
+		c.mu.Unlock()
+	}
+}
+
+// atomicGen is an atomic int64 generator. It is used to generate monotonically
+// increasing numbers for updateGen.
+var atomicGen atomic.Int64
+
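+// nextUpdateGen returns the next update gen; values increase
+// monotonically for the life of the process.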
+func nextUpdateGen() updateGen {
+	return updateGen(atomicGen.Add(1))
+}
+
+// updateGen is a monotonically increasing number that represents a particular
+// update to the local state.
+type updateGen int64
+
 // Auto connects to a tailcontrol server for a node.
 // It's a concrete implementation of the Client interface.
 type Auto struct {
@@ -54,6 +140,7 @@ type Auto struct {
 	logf       logger.Logf
 	expiry     *time.Time
 	closed     bool
+	updateCh   chan struct{} // readable when we should inform the server of a change
 	newMapCh   chan struct{} // readable when we must restart a map request
 	statusFunc func(Status)  // called to update Client status; always non-nil
@@ -61,25 +148,29 @@ type Auto struct {
 	mu sync.Mutex // mutex guards the following fields
 
+	// lastUpdateGen is the gen of the last update we had worth sending
+	// to the server.
+	lastUpdateGen updateGen
+
+	// lastUpdateGenInformed is the value of lastUpdateGen that we've
+	// successfully informed the server of.
+	lastUpdateGenInformed updateGen
+
 	paused         bool // whether we should stop making HTTP requests
 	unpauseWaiters []chan struct{}
 	loggedIn       bool       // true if currently logged in
 	loginGoal      *LoginGoal // non-nil if some login activity is desired
 	synced         bool       // true if our netmap is up-to-date
-	inPollNetMap   bool       // true if currently running a PollNetMap
-	inLiteMapUpdate      bool               // true if a lite (non-streaming) map request is outstanding
-	liteMapUpdateCancel  context.CancelFunc // cancels a lite map update, may be nil
-	liteMapUpdateCancels int                // how many times we've canceled a lite map update
 	inSendStatus   int // number of sendStatus calls currently in progress
 	state          State
 
 	authCtx    context.Context // context used for auth requests
-	mapCtx     context.Context // context used for netmap requests
-	authCancel func()          // cancel the auth context
-	mapCancel  func()          // cancel the netmap context
+	mapCtx     context.Context // context used for netmap and update requests
+	authCancel func()          // cancel authCtx
+	mapCancel  func()          // cancel mapCtx
 	quit       chan struct{}   // when closed, goroutines should all exit
-	authDone   chan struct{}   // when closed, auth goroutine is done
-	mapDone    chan struct{}   // when closed, map goroutine is done
+	authDone   chan struct{}   // when closed, authRoutine is done
+	mapDone    chan struct{}   // when closed, mapRoutine is done
+	updateDone chan struct{}   // when closed, updateRoutine is done
 }
 
 // New creates and starts a new Auto.
@@ -116,10 +207,12 @@ func NewNoStart(opts Options) (_ *Auto, err error) {
 		direct:     direct,
 		clock:      opts.Clock,
 		logf:       opts.Logf,
+		updateCh:   make(chan struct{}, 1),
 		newMapCh:   make(chan struct{}, 1),
 		quit:       make(chan struct{}),
 		authDone:   make(chan struct{}),
 		mapDone:    make(chan struct{}),
+		updateDone: make(chan struct{}),
 		statusFunc: opts.Status,
 	}
 	c.authCtx, c.authCancel = context.WithCancel(context.Background())
@@ -162,85 +255,34 @@ func (c *Auto) SetPaused(paused bool) {
 func (c *Auto) Start() {
 	go c.authRoutine()
 	go c.mapRoutine()
+	go c.updateRoutine()
 }
 
-// sendNewMapRequest either sends a new OmitPeers, non-streaming map request
-// (to just send Hostinfo/Netinfo/Endpoints info, while keeping an existing
-// streaming response open), or starts a new streaming one if necessary.
+// updateControl sends a new OmitPeers, non-streaming map request (to just send
+// Hostinfo/Netinfo/Endpoints info, while keeping an existing streaming response
+// open).
 //
 // It should be called whenever there's something new to tell the server.
-func (c *Auto) sendNewMapRequest() {
-	c.mu.Lock()
-
-	// If we're not already streaming a netmap, then tear down everything
-	// and start a new stream (which starts by sending a new map request)
-	if !c.inPollNetMap || !c.loggedIn {
-		c.mu.Unlock()
-		c.cancelMapSafely()
-		return
-	}
-
-	// If we are already in process of doing a LiteMapUpdate, cancel it and
-	// try a new one. If this is the 10th time we have done this
-	// cancelation, tear down everything and start again.
-	const maxLiteMapUpdateAttempts = 10
-	if c.inLiteMapUpdate {
-		// Always cancel the in-flight lite map update, regardless of
-		// whether we cancel the streaming map request or not.
-		c.liteMapUpdateCancel()
-		c.inLiteMapUpdate = false
-
-		if c.liteMapUpdateCancels >= maxLiteMapUpdateAttempts {
-			// Not making progress
-			c.mu.Unlock()
-			c.cancelMapSafely()
-			return
-		}
-
-		// Increment our cancel counter and continue below to start a
-		// new lite update.
-		c.liteMapUpdateCancels++
-	}
-
-	// Otherwise, send a lite update that doesn't keep a
-	// long-running stream response.
-	defer c.mu.Unlock()
-	c.inLiteMapUpdate = true
-	ctx, cancel := context.WithTimeout(c.mapCtx, 10*time.Second)
-	c.liteMapUpdateCancel = cancel
-	go func() {
-		defer cancel()
-		t0 := c.clock.Now()
-		err := c.direct.SendLiteMapUpdate(ctx)
-		d := time.Since(t0).Round(time.Millisecond)
-
-		c.mu.Lock()
-		c.inLiteMapUpdate = false
-		c.liteMapUpdateCancel = nil
-		if err == nil {
-			c.liteMapUpdateCancels = 0
-		}
-		c.mu.Unlock()
-
-		if err == nil {
-			c.logf("[v1] successful lite map update in %v", d)
-			return
-		}
-		if ctx.Err() == nil {
-			c.logf("lite map update after %v: %v", d, err)
-		}
-		if !errors.Is(ctx.Err(), context.Canceled) {
-			// Fall back to restarting the long-polling map
-			// request (the old heavy way) if the lite update
-			// failed for reasons other than the context being
-			// canceled.
-			c.cancelMapSafely()
-		}
-	}()
-}
+func (c *Auto) updateControl() {
+	gen := nextUpdateGen()
+	c.mu.Lock()
+	if gen < c.lastUpdateGen {
+		// This update is out of date.
+		c.mu.Unlock()
+		return
+	}
+	c.lastUpdateGen = gen
+	c.mu.Unlock()
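+
+	// Wake up updateRoutine. updateCh is buffered (capacity 1), so if a
+	// wakeup is already pending this send is dropped; updateRoutine
+	// reads the latest lastUpdateGen when it runs, so no update is lost.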
+	select {
+	case c.updateCh <- struct{}{}:
+	default:
+	}
 }
 
 func (c *Auto) cancelAuth() {
 	c.mu.Lock()
+	defer c.mu.Unlock()
 	if c.authCancel != nil {
 		c.authCancel()
 	}
@@ -248,9 +290,9 @@ func (c *Auto) cancelAuth() {
 	if !c.closed {
 		c.authCtx, c.authCancel = context.WithCancel(context.Background())
 		c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, c.logf)
 	}
-	c.mu.Unlock()
 }
 
+// cancelMapLocked is like cancelMap, but assumes the caller holds c.mu.
 func (c *Auto) cancelMapLocked() {
 	if c.mapCancel != nil {
 		c.mapCancel()
 	}
@@ -258,56 +300,33 @@ func (c *Auto) cancelMapLocked() {
 	if !c.closed {
 		c.mapCtx, c.mapCancel = context.WithCancel(context.Background())
 		c.mapCtx = sockstats.WithSockStats(c.mapCtx, sockstats.LabelControlClientAuto, c.logf)
 	}
 }
 
-func (c *Auto) cancelMapUnsafely() {
+// cancelMap cancels the existing mapPoll and liteUpdates.
+func (c *Auto) cancelMap() {
 	c.mu.Lock()
+	defer c.mu.Unlock()
 	c.cancelMapLocked()
-	c.mu.Unlock()
 }
 
-func (c *Auto) cancelMapSafely() {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	// Always reset our lite map cancels counter if we're canceling
-	// everything, since we're about to restart with a new map update; this
-	// allows future calls to sendNewMapRequest to retry sending lite
-	// updates.
-	c.liteMapUpdateCancels = 0
-
-	c.logf("[v1] cancelMapSafely: synced=%v", c.synced)
-	if c.inPollNetMap {
-		// received at least one netmap since the last
-		// interruption. That means the server has already
-		// fully processed our last request, which might
-		// include UpdateEndpoints(). Interrupt it and try
-		// again.
-		c.cancelMapLocked()
-	} else {
-		// !synced means we either haven't done a netmap
-		// request yet, or it hasn't answered yet. So the
-		// server is in an undefined state. If we send
-		// another netmap request too soon, it might race
-		// with the last one, and if we're very unlucky,
-		// the new request will be applied before the old one,
-		// and the wrong endpoints will get registered. We
-		// have to tell the client to abort politely, only
-		// after it receives a response to its existing netmap
-		// request.
-		select {
-		case c.newMapCh <- struct{}{}:
-			c.logf("[v1] cancelMapSafely: wrote to channel")
-		default:
-			// if channel write failed, then there was already
-			// an outstanding newMapCh request. One is enough,
-			// since it'll always use the latest endpoints.
-			c.logf("[v1] cancelMapSafely: channel was full")
-		}
-	}
-}
+// restartMap cancels the existing mapPoll and liteUpdates, and then starts a
+// new one.
+func (c *Auto) restartMap() {
+	c.cancelMap()
+	c.logf("[v1] restartMap: synced=%v", c.synced)
+
+	select {
+	case c.newMapCh <- struct{}{}:
+		c.logf("[v1] restartMap: wrote to channel")
+	default:
+		// if channel write failed, then there was already
+		// an outstanding newMapCh request. One is enough,
+		// since it'll always use the latest endpoints.
+		c.logf("[v1] restartMap: channel was full")
+	}
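+
+	// Also kick updateRoutine, so any local state that changed while
+	// the map poll was down is sent to the server.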
+	c.updateControl()
 }
 
 func (c *Auto) authRoutine() {
@@ -428,7 +447,7 @@ func (c *Auto) authRoutine() {
 			c.mu.Unlock()
 			c.sendStatus("authRoutine-success", nil, "", nil)
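+			// Login state changed; restart the map poll so it
+			// picks up the new auth state.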
-			c.cancelMapSafely()
+			c.restartMap()
 			bo.BackOff(ctx, nil)
 		}
 	}
@@ -458,25 +477,19 @@ func (c *Auto) unpausedChanLocked() <-chan struct{} {
 	return unpaused
 }
 
+// mapRoutine is responsible for keeping a read-only streaming connection to the
+// control server, and keeping the netmap up to date.
 func (c *Auto) mapRoutine() {
 	defer close(c.mapDone)
 	bo := backoff.NewBackoff("mapRoutine", c.logf, 30*time.Second)
 
 	for {
-		c.mu.Lock()
-		if c.paused {
-			unpaused := c.unpausedChanLocked()
-			c.mu.Unlock()
-			c.logf("mapRoutine: awaiting unpause")
-			select {
-			case <-unpaused:
-				c.logf("mapRoutine: unpaused")
-			case <-c.quit:
-				c.logf("mapRoutine: quit")
-				return
-			}
-			continue
-		}
+		if err := c.waitUnpause("mapRoutine"); err != nil {
+			c.logf("mapRoutine: exiting")
+			return
+		}
+
+		c.mu.Lock()
 		c.logf("[v1] mapRoutine: %s", c.state)
 		loggedIn := c.loggedIn
 		ctx := c.mapCtx
@@ -513,43 +526,21 @@ func (c *Auto) mapRoutine() {
 				c.logf("[v1] mapRoutine: new map needed while idle.")
 			}
 		} else {
-			// Be sure this is false when we're not inside
-			// PollNetMap, so that cancelMapSafely() can notify
-			// us correctly.
-			c.mu.Lock()
-			c.inPollNetMap = false
-			c.mu.Unlock()
 			health.SetInPollNetMap(false)
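+
+			// Start the streaming map poll; the callback below runs
+			// once for each netmap the server sends.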
 			err := c.direct.PollNetMap(ctx, func(nm *netmap.NetworkMap) {
 				health.SetInPollNetMap(true)
-				c.mu.Lock()
-				select {
-				case <-c.newMapCh:
-					c.logf("[v1] mapRoutine: new map request during PollNetMap. canceling.")
-					c.cancelMapLocked()
-
-					// Don't emit this netmap; we're
-					// about to request a fresh one.
-					c.mu.Unlock()
-					return
-				default:
-				}
 
+				c.mu.Lock()
 				c.synced = true
-				c.inPollNetMap = true
 				if c.loggedIn {
 					c.state = StateSynchronized
 				}
-				exp := nm.Expiry
-				c.expiry = &exp
+				c.expiry = ptr.To(nm.Expiry)
 				stillAuthed := c.loggedIn
-				state := c.state
+				c.logf("[v1] mapRoutine: netmap received: %s", c.state)
 				c.mu.Unlock()
 
-				c.logf("[v1] mapRoutine: netmap received: %s", state)
 				if stillAuthed {
 					c.sendStatus("mapRoutine-got-netmap", nil, "", nm)
 				}
@@ -560,7 +551,6 @@ func (c *Auto) mapRoutine() {
 			health.SetInPollNetMap(false)
 			c.mu.Lock()
 			c.synced = false
-			c.inPollNetMap = false
 			if c.state == StateSynchronized {
 				c.state = StateAuthenticated
 			}
@@ -602,7 +592,7 @@ func (c *Auto) SetHostinfo(hi *tailcfg.Hostinfo) {
 	}
 
 	// Send new Hostinfo to server
-	c.sendNewMapRequest()
+	c.updateControl()
 }
 
 func (c *Auto) SetNetInfo(ni *tailcfg.NetInfo) {
@@ -614,12 +604,17 @@ func (c *Auto) SetNetInfo(ni *tailcfg.NetInfo) {
 	}
 
 	// Send new NetInfo to server
-	c.sendNewMapRequest()
+	c.updateControl()
 }
 
 // SetTKAHead updates the TKA head hash that map-request infrastructure sends.
 func (c *Auto) SetTKAHead(headHash string) {
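+	// Direct.SetTKAHead reports whether the value changed; if it
+	// didn't, there's nothing new to tell the server.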
-	c.direct.SetTKAHead(headHash)
+	if !c.direct.SetTKAHead(headHash) {
+		return
+	}
+
+	// Send new TKAHead to server
+	c.updateControl()
 }
 
 func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkMap) {
@@ -728,7 +723,7 @@ func (c *Auto) SetExpirySooner(ctx context.Context, expiry time.Time) error {
 func (c *Auto) UpdateEndpoints(endpoints []tailcfg.Endpoint) {
 	changed := c.direct.SetEndpoints(endpoints)
 	if changed {
-		c.sendNewMapRequest()
+		c.updateControl()
 	}
 }
 
@@ -750,8 +745,9 @@ func (c *Auto) Shutdown() {
 		close(c.quit)
 		c.cancelAuth()
 		<-c.authDone
-		c.cancelMapUnsafely()
+		c.cancelMap()
 		<-c.mapDone
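+		// quit was closed above, which also makes updateRoutine exit;
+		// wait for it so Shutdown returns only after all routines stop.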
+		<-c.updateDone
 		if direct != nil {
 			direct.Close()
 		}