@@ -6,7 +6,6 @@ package controlclient
 import (
 	"context"
 	"fmt"
-	"log"
 	"net/netip"
 	"sort"
 
@@ -16,6 +15,7 @@ import (
 	"tailscale.com/types/key"
 	"tailscale.com/types/logger"
 	"tailscale.com/types/netmap"
+	"tailscale.com/types/ptr"
 	"tailscale.com/types/views"
 	"tailscale.com/util/cmpx"
 	"tailscale.com/wgengine/filter"
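The newly imported tailscale.com/types/ptr package is what the rewritten peer-update code further down uses to take pointers to freshly copied values (for example ptr.To(n.View())). As a rough illustration of the shape of such a helper (a sketch only, not the package's actual source):

	package main

	import "fmt"

	// To returns a pointer to a newly allocated copy of v.
	// Illustrative stand-in; the real helper lives in tailscale.com/types/ptr.
	func To[T any](v T) *T {
		return &v
	}

	func main() {
		online := To(true) // *bool pointing at its own copy of the value
		fmt.Println(*online)
	}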
@@ -33,6 +33,7 @@ type mapSession struct {
 	// Immutable fields.
 	nu             NetmapUpdater // called on changes (in addition to the optional hooks below)
 	privateNodeKey key.NodePrivate
+	publicNodeKey  key.NodePublic
 	logf           logger.Logf
 	vlogf          logger.Logf
 	machinePubKey  key.MachinePublic
@@ -63,6 +64,8 @@ type mapSession struct {
 
 	// Fields storing state over the course of multiple MapResponses.
 	lastNode               tailcfg.NodeView
+	peers                  map[tailcfg.NodeID]*tailcfg.NodeView // pointer to view (oddly). same pointers as sortedPeers.
+	sortedPeers            []*tailcfg.NodeView                  // same pointers as peers, but sorted by Node.ID
 	lastDNSConfig          *tailcfg.DNSConfig
 	lastDERPMap            *tailcfg.DERPMap
 	lastUserProfile        map[tailcfg.UserID]tailcfg.UserProfile
@@ -70,7 +73,6 @@ type mapSession struct {
 	lastParsedPacketFilter []filter.Match
 	lastSSHPolicy          *tailcfg.SSHPolicy
 	collectServices        bool
-	previousPeers          []*tailcfg.Node // for delta-purposes
 	lastDomain             string
 	lastDomainAuditLogID   string
 	lastHealth             []string
@@ -78,10 +80,6 @@ type mapSession struct {
 	stickyDebug            tailcfg.Debug // accumulated opt.Bool values
 	lastTKAInfo            *tailcfg.TKAInfo
 	lastNetmapSummary      string // from NetworkMap.VeryConcise
-
-	// netMapBuilding is non-nil during a netmapForResponse call,
-	// containing the value to be returned, once fully populated.
-	netMapBuilding         *netmap.NetworkMap
 }
 
 // newMapSession returns a mostly unconfigured new mapSession.
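The two new fields work as a pair: ms.peers indexes each peer view by node ID behind a pointer, and ms.sortedPeers holds the same pointers in ID order, so an in-place update through the map is visible in the sorted slice without copying. A toy sketch of that shared-pointer layout (the peer type here is invented for the example, not tailcfg):

	package main

	import (
		"fmt"
		"sort"
	)

	// peer is a stand-in for the view type the session stores.
	type peer struct {
		ID   int
		Name string
	}

	func main() {
		byID := make(map[int]*peer) // analogous to ms.peers
		var sorted []*peer          // analogous to ms.sortedPeers; same pointers

		add := func(p peer) {
			ptr := &p
			byID[p.ID] = ptr
			sorted = append(sorted, ptr)
			sort.Slice(sorted, func(i, j int) bool { return sorted[i].ID < sorted[j].ID })
		}

		add(peer{ID: 2, Name: "b"})
		add(peer{ID: 1, Name: "a"})

		// Mutating through the map pointer updates the sorted slice too,
		// because both hold the same *peer.
		*byID[2] = peer{ID: 2, Name: "b-renamed"}
		fmt.Println(sorted[1].Name) // b-renamed
	}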
@@ -93,6 +91,7 @@ func newMapSession(privateNodeKey key.NodePrivate, nu NetmapUpdater) *mapSession
 	ms := &mapSession{
 		nu:              nu,
 		privateNodeKey:  privateNodeKey,
+		publicNodeKey:   privateNodeKey.Public(),
 		lastDNSConfig:   new(tailcfg.DNSConfig),
 		lastUserProfile: map[tailcfg.UserID]tailcfg.UserProfile{},
 		watchdogReset:   make(chan struct{}),
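newMapSession now fills in publicNodeKey once, derived from the private key at construction time, rather than recomputing it whenever a netmap is built. The same derive-once-and-cache pattern, sketched with standard-library ed25519 keys instead of Tailscale's key types:

	package main

	import (
		"crypto/ed25519"
		"fmt"
	)

	type session struct {
		priv ed25519.PrivateKey
		pub  ed25519.PublicKey // cached at construction, like publicNodeKey
	}

	func newSession(priv ed25519.PrivateKey) *session {
		return &session{
			priv: priv,
			pub:  priv.Public().(ed25519.PublicKey), // derive once, reuse everywhere
		}
	}

	func main() {
		pub, priv, err := ed25519.GenerateKey(nil)
		if err != nil {
			panic(err)
		}
		s := newSession(priv)
		fmt.Println(s.pub.Equal(pub)) // true
	}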
@@ -184,7 +183,9 @@ func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *t
 	// Call Node.InitDisplayNames on any changed nodes.
 	initDisplayNames(cmpx.Or(resp.Node.View(), ms.lastNode), resp)
 
-	nm := ms.netmapForResponse(resp)
+	ms.updateStateFromResponse(resp)
+
+	nm := ms.netmap()
 
 	ms.lastNetmapSummary = nm.VeryConcise()
 	ms.onConciseNetMapSummary(ms.lastNetmapSummary)
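The handler now works in two phases: fold the MapResponse into the session's accumulated state, then build a fresh NetworkMap from that state. A stripped-down sketch of the same split, with invented stand-ins for the response and netmap types, showing how fields omitted from a later response fall back to previously seen values:

	package main

	import "fmt"

	// response and netmapSnapshot are placeholders for the real protocol types.
	type response struct {
		Domain string
		Peers  []string
	}

	type state struct {
		lastDomain string
		peers      []string
	}

	// updateStateFromResponse folds r into the accumulated session state.
	func (s *state) updateStateFromResponse(r *response) {
		if r.Domain != "" {
			s.lastDomain = r.Domain
		}
		if r.Peers != nil {
			s.peers = r.Peers
		}
	}

	type netmapSnapshot struct {
		Domain string
		Peers  []string
	}

	// netmap builds a self-contained snapshot from whatever state has accumulated.
	func (s *state) netmap() netmapSnapshot {
		return netmapSnapshot{Domain: s.lastDomain, Peers: append([]string(nil), s.peers...)}
	}

	func main() {
		var s state
		s.updateStateFromResponse(&response{Domain: "example.ts.net", Peers: []string{"a"}})
		s.updateStateFromResponse(&response{Peers: []string{"a", "b"}}) // omitted Domain keeps prior value
		nm := s.netmap()
		fmt.Println(nm.Domain, nm.Peers) // example.ts.net [a b]
	}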
@@ -198,30 +199,28 @@ func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *t
 	return nil
 }
 
-func (ms *mapSession) addUserProfile(userID tailcfg.UserID) {
-	if userID == 0 {
-		return
-	}
-	nm := ms.netMapBuilding
-	if _, dup := nm.UserProfiles[userID]; dup {
-		// Already populated it from a previous peer.
-		return
-	}
-	if up, ok := ms.lastUserProfile[userID]; ok {
-		nm.UserProfiles[userID] = up
-	}
+// updateStats are some stats from updateStateFromResponse, primarily for
+// testing. It's meant to be cheap enough to always compute, though. It doesn't
+// allocate.
+type updateStats struct {
+	allNew  bool
+	added   int
+	removed int
+	changed int
 }
 
-// netmapForResponse returns a fully populated NetworkMap from a full
-// or incremental MapResponse within the session, filling in omitted
-// information from prior MapResponse values.
-func (ms *mapSession) netmapForResponse(resp *tailcfg.MapResponse) *netmap.NetworkMap {
-	undeltaPeers(resp, ms.previousPeers)
-	ms.previousPeers = cloneNodes(resp.Peers) // defensive/lazy clone, since this escapes to who knows where
+// updateStateFromResponse updates ms from res. It takes ownership of res.
+func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) {
+	ms.updatePeersStateFromResponse(resp)
+
+	if resp.Node != nil {
+		ms.lastNode = resp.Node.View()
+	}
 
 	for _, up := range resp.UserProfiles {
 		ms.lastUserProfile[up.ID] = up
 	}
+	// TODO(bradfitz): clean up old user profiles? maybe not worth it.
 
 	if dm := resp.DERPMap; dm != nil {
 		ms.vlogf("netmap: new map contains DERP map")
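updateStats gives each update pass a cheap summary of what it did, which later code uses, for instance, to rebuild the sorted peer list only when membership actually changed, and which tests can assert against. A minimal sketch of that counters-returned-from-an-update pattern over a plain map:

	package main

	import "fmt"

	type updateStats struct {
		added   int
		removed int
		changed int
	}

	// applyRemovals deletes ids from peers and counts what it actually did.
	func applyRemovals(peers map[int]string, ids []int) (stats updateStats) {
		for _, id := range ids {
			if _, ok := peers[id]; ok {
				delete(peers, id)
				stats.removed++
			}
		}
		return stats
	}

	func main() {
		peers := map[int]string{1: "a", 2: "b", 3: "c"}
		stats := applyRemovals(peers, []int{2, 99})
		fmt.Printf("%+v\n", stats) // {added:0 removed:1 changed:0}
		if stats.removed > 0 || stats.added > 0 {
			fmt.Println("membership changed; a sorted index would be rebuilt here")
		}
	}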
@@ -277,206 +276,216 @@ func (ms *mapSession) netmapForResponse(resp *tailcfg.MapResponse) *netmap.Netwo
 	if resp.TKAInfo != nil {
 		ms.lastTKAInfo = resp.TKAInfo
 	}
+}
 
-	// TODO(bradfitz): now that this is a view, remove some of the defensive
-	// cloning elsewhere in mapSession.
-	peerViews := make([]tailcfg.NodeView, len(resp.Peers))
-	for i, n := range resp.Peers {
-		peerViews[i] = n.View()
-	}
-
-	nm := &netmap.NetworkMap{
-		NodeKey:           ms.privateNodeKey.Public(),
-		PrivateKey:        ms.privateNodeKey,
-		MachineKey:        ms.machinePubKey,
-		Peers:             peerViews,
-		UserProfiles:      make(map[tailcfg.UserID]tailcfg.UserProfile),
-		Domain:            ms.lastDomain,
-		DomainAuditLogID:  ms.lastDomainAuditLogID,
-		DNS:               *ms.lastDNSConfig,
-		PacketFilter:      ms.lastParsedPacketFilter,
-		PacketFilterRules: ms.lastPacketFilterRules,
-		SSHPolicy:         ms.lastSSHPolicy,
-		CollectServices:   ms.collectServices,
-		DERPMap:           ms.lastDERPMap,
-		ControlHealth:     ms.lastHealth,
-		TKAEnabled:        ms.lastTKAInfo != nil && !ms.lastTKAInfo.Disabled,
-	}
-	ms.netMapBuilding = nm
-
-	if ms.lastTKAInfo != nil && ms.lastTKAInfo.Head != "" {
-		if err := nm.TKAHead.UnmarshalText([]byte(ms.lastTKAInfo.Head)); err != nil {
-			ms.logf("error unmarshalling TKAHead: %v", err)
-			nm.TKAEnabled = false
-		}
-	}
-
-	if resp.Node != nil {
-		ms.lastNode = resp.Node.View()
-	}
-	if node := ms.lastNode; node.Valid() {
-		nm.SelfNode = node
-		nm.Expiry = node.KeyExpiry()
-		nm.Name = node.Name()
-		nm.Addresses = filterSelfAddresses(node.Addresses().AsSlice())
-		if node.Hostinfo().Valid() {
-			nm.Hostinfo = *node.Hostinfo().AsStruct()
-		}
-		if node.MachineAuthorized() {
-			nm.MachineStatus = tailcfg.MachineAuthorized
-		} else {
-			nm.MachineStatus = tailcfg.MachineUnauthorized
-		}
-	}
-
-	ms.addUserProfile(nm.User())
-	for _, peer := range resp.Peers {
-		ms.addUserProfile(peer.Sharer)
-		ms.addUserProfile(peer.User)
-	}
-
-	if DevKnob.ForceProxyDNS() {
-		nm.DNS.Proxied = true
-	}
-	ms.netMapBuilding = nil
-	return nm
-}
-
-// undeltaPeers updates mapRes.Peers to be complete based on the
-// provided previous peer list and the PeersRemoved and PeersChanged
-// fields in mapRes, as well as the PeerSeenChange and OnlineChange
-// maps.
-//
-// It then also nils out the delta fields.
-func undeltaPeers(mapRes *tailcfg.MapResponse, prev []*tailcfg.Node) {
-	if len(mapRes.Peers) > 0 {
-		// Not delta encoded.
-		if !nodesSorted(mapRes.Peers) {
-			log.Printf("netmap: undeltaPeers: MapResponse.Peers not sorted; sorting")
-			sortNodes(mapRes.Peers)
-		}
-		return
-	}
-
-	var removed map[tailcfg.NodeID]bool
-	if pr := mapRes.PeersRemoved; len(pr) > 0 {
-		removed = make(map[tailcfg.NodeID]bool, len(pr))
-		for _, id := range pr {
-			removed[id] = true
-		}
-	}
-	changed := mapRes.PeersChanged
-
-	if !nodesSorted(changed) {
-		log.Printf("netmap: undeltaPeers: MapResponse.PeersChanged not sorted; sorting")
-		sortNodes(changed)
-	}
-	if !nodesSorted(prev) {
-		// Internal error (unrelated to the network) if we get here.
-		log.Printf("netmap: undeltaPeers: [unexpected] prev not sorted; sorting")
-		sortNodes(prev)
-	}
-
-	newFull := prev
-	if len(removed) > 0 || len(changed) > 0 {
-		newFull = make([]*tailcfg.Node, 0, len(prev)-len(removed))
-		for len(prev) > 0 && len(changed) > 0 {
-			pID := prev[0].ID
-			cID := changed[0].ID
-			if removed[pID] {
-				prev = prev[1:]
-				continue
-			}
-			switch {
-			case pID < cID:
-				newFull = append(newFull, prev[0])
-				prev = prev[1:]
-			case pID == cID:
-				newFull = append(newFull, changed[0])
-				prev, changed = prev[1:], changed[1:]
-			case cID < pID:
-				newFull = append(newFull, changed[0])
-				changed = changed[1:]
-			}
-		}
-		newFull = append(newFull, changed...)
-		for _, n := range prev {
-			if !removed[n.ID] {
-				newFull = append(newFull, n)
-			}
-		}
-		sortNodes(newFull)
-	}
-
-	if len(mapRes.PeerSeenChange) != 0 || len(mapRes.OnlineChange) != 0 || len(mapRes.PeersChangedPatch) != 0 {
-		peerByID := make(map[tailcfg.NodeID]*tailcfg.Node, len(newFull))
-		for _, n := range newFull {
-			peerByID[n.ID] = n
-		}
-		now := clock.Now()
-		for nodeID, seen := range mapRes.PeerSeenChange {
-			if n, ok := peerByID[nodeID]; ok {
-				if seen {
-					n.LastSeen = &now
-				} else {
-					n.LastSeen = nil
-				}
-			}
-		}
-		for nodeID, online := range mapRes.OnlineChange {
-			if n, ok := peerByID[nodeID]; ok {
-				online := online
-				n.Online = &online
-			}
-		}
-		for _, ec := range mapRes.PeersChangedPatch {
-			if n, ok := peerByID[ec.NodeID]; ok {
-				if ec.DERPRegion != 0 {
-					n.DERP = fmt.Sprintf("%s:%v", tailcfg.DerpMagicIP, ec.DERPRegion)
-				}
-				if ec.Cap != 0 {
-					n.Cap = ec.Cap
-				}
-				if ec.Endpoints != nil {
-					n.Endpoints = ec.Endpoints
-				}
-				if ec.Key != nil {
-					n.Key = *ec.Key
-				}
-				if ec.DiscoKey != nil {
-					n.DiscoKey = *ec.DiscoKey
-				}
-				if v := ec.Online; v != nil {
-					n.Online = ptrCopy(v)
-				}
-				if v := ec.LastSeen; v != nil {
-					n.LastSeen = ptrCopy(v)
-				}
-				if v := ec.KeyExpiry; v != nil {
-					n.KeyExpiry = *v
-				}
-				if v := ec.Capabilities; v != nil {
-					n.Capabilities = *v
-				}
-				if v := ec.KeySignature; v != nil {
-					n.KeySignature = v
-				}
-			}
-		}
-	}
-
-	mapRes.Peers = newFull
-	mapRes.PeersChanged = nil
-	mapRes.PeersRemoved = nil
-}
-
-// ptrCopy returns a pointer to a newly allocated shallow copy of *v.
-func ptrCopy[T any](v *T) *T {
-	if v == nil {
-		return nil
-	}
-	ret := new(T)
-	*ret = *v
-	return ret
-}
+// updatePeersStateFromResponseres updates ms.peers and ms.sortedPeers from res. It takes ownership of res.
+func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (stats updateStats) {
+	defer func() {
+		if stats.removed > 0 || stats.added > 0 {
+			ms.rebuildSorted()
+		}
+	}()
+
+	if ms.peers == nil {
+		ms.peers = make(map[tailcfg.NodeID]*tailcfg.NodeView)
+	}
+
+	if len(resp.Peers) > 0 {
+		// Not delta encoded.
+		stats.allNew = true
+		keep := make(map[tailcfg.NodeID]bool, len(resp.Peers))
+		for _, n := range resp.Peers {
+			keep[n.ID] = true
+			if vp, ok := ms.peers[n.ID]; ok {
+				stats.changed++
+				*vp = n.View()
+			} else {
+				stats.added++
+				ms.peers[n.ID] = ptr.To(n.View())
+			}
+		}
+		for id := range ms.peers {
+			if !keep[id] {
+				stats.removed++
+				delete(ms.peers, id)
+			}
+		}
+		// Peers precludes all other delta operations so just return.
+		return
+	}
+
+	for _, id := range resp.PeersRemoved {
+		if _, ok := ms.peers[id]; ok {
+			delete(ms.peers, id)
+			stats.removed++
+		}
+	}
+
+	for _, n := range resp.PeersChanged {
+		if vp, ok := ms.peers[n.ID]; ok {
+			stats.changed++
+			*vp = n.View()
+		} else {
+			stats.added++
+			ms.peers[n.ID] = ptr.To(n.View())
+		}
+	}
+
+	for nodeID, seen := range resp.PeerSeenChange {
+		if vp, ok := ms.peers[nodeID]; ok {
+			mut := vp.AsStruct()
+			if seen {
+				mut.LastSeen = ptr.To(clock.Now())
+			} else {
+				mut.LastSeen = nil
+			}
+			*vp = mut.View()
+			stats.changed++
+		}
+	}
+
+	for nodeID, online := range resp.OnlineChange {
+		if vp, ok := ms.peers[nodeID]; ok {
+			mut := vp.AsStruct()
+			mut.Online = ptr.To(online)
+			*vp = mut.View()
+			stats.changed++
+		}
+	}
+
+	for _, pc := range resp.PeersChangedPatch {
+		vp, ok := ms.peers[pc.NodeID]
+		if !ok {
+			continue
+		}
+		stats.changed++
+		mut := vp.AsStruct()
+		if pc.DERPRegion != 0 {
+			mut.DERP = fmt.Sprintf("%s:%v", tailcfg.DerpMagicIP, pc.DERPRegion)
+		}
+		if pc.Cap != 0 {
+			mut.Cap = pc.Cap
+		}
+		if pc.Endpoints != nil {
+			mut.Endpoints = pc.Endpoints
+		}
+		if pc.Key != nil {
+			mut.Key = *pc.Key
+		}
+		if pc.DiscoKey != nil {
+			mut.DiscoKey = *pc.DiscoKey
+		}
+		if v := pc.Online; v != nil {
+			mut.Online = ptr.To(*v)
+		}
+		if v := pc.LastSeen; v != nil {
+			mut.LastSeen = ptr.To(*v)
+		}
+		if v := pc.KeyExpiry; v != nil {
+			mut.KeyExpiry = *v
+		}
+		if v := pc.Capabilities; v != nil {
+			mut.Capabilities = *v
+		}
+		if v := pc.KeySignature; v != nil {
+			mut.KeySignature = v
+		}
+		*vp = mut.View()
+	}
+
+	return
+}
+
+// rebuildSorted rebuilds ms.sortedPeers from ms.peers. It should be called
+// after any additions or removals from peers.
+func (ms *mapSession) rebuildSorted() {
+	if ms.sortedPeers == nil {
+		ms.sortedPeers = make([]*tailcfg.NodeView, 0, len(ms.peers))
+	} else {
+		if len(ms.sortedPeers) > len(ms.peers) {
+			clear(ms.sortedPeers[len(ms.peers):])
+		}
+		ms.sortedPeers = ms.sortedPeers[:0]
+	}
+	for _, p := range ms.peers {
+		ms.sortedPeers = append(ms.sortedPeers, p)
+	}
+	sort.Slice(ms.sortedPeers, func(i, j int) bool {
+		return ms.sortedPeers[i].ID() < ms.sortedPeers[j].ID()
+	})
+}
+
+func (ms *mapSession) addUserProfile(nm *netmap.NetworkMap, userID tailcfg.UserID) {
+	if userID == 0 {
+		return
+	}
+	if _, dup := nm.UserProfiles[userID]; dup {
+		// Already populated it from a previous peer.
+		return
+	}
+	if up, ok := ms.lastUserProfile[userID]; ok {
+		nm.UserProfiles[userID] = up
+	}
+}
+
+// netmap returns a fully populated NetworkMap from the last state seen from
+// a call to updateStateFromResponse, filling in omitted
+// information from prior MapResponse values.
+func (ms *mapSession) netmap() *netmap.NetworkMap {
+	peerViews := make([]tailcfg.NodeView, len(ms.sortedPeers))
+	for i, vp := range ms.sortedPeers {
+		peerViews[i] = *vp
+	}
+
+	nm := &netmap.NetworkMap{
+		NodeKey:           ms.publicNodeKey,
+		PrivateKey:        ms.privateNodeKey,
+		MachineKey:        ms.machinePubKey,
+		Peers:             peerViews,
+		UserProfiles:      make(map[tailcfg.UserID]tailcfg.UserProfile),
+		Domain:            ms.lastDomain,
+		DomainAuditLogID:  ms.lastDomainAuditLogID,
+		DNS:               *ms.lastDNSConfig,
+		PacketFilter:      ms.lastParsedPacketFilter,
+		PacketFilterRules: ms.lastPacketFilterRules,
+		SSHPolicy:         ms.lastSSHPolicy,
+		CollectServices:   ms.collectServices,
+		DERPMap:           ms.lastDERPMap,
+		ControlHealth:     ms.lastHealth,
+		TKAEnabled:        ms.lastTKAInfo != nil && !ms.lastTKAInfo.Disabled,
+	}
+
+	if ms.lastTKAInfo != nil && ms.lastTKAInfo.Head != "" {
+		if err := nm.TKAHead.UnmarshalText([]byte(ms.lastTKAInfo.Head)); err != nil {
+			ms.logf("error unmarshalling TKAHead: %v", err)
+			nm.TKAEnabled = false
+		}
+	}
+
+	if node := ms.lastNode; node.Valid() {
+		nm.SelfNode = node
+		nm.Expiry = node.KeyExpiry()
+		nm.Name = node.Name()
+		nm.Addresses = filterSelfAddresses(node.Addresses().AsSlice())
+		if node.Hostinfo().Valid() {
+			nm.Hostinfo = *node.Hostinfo().AsStruct()
+		}
+		if node.MachineAuthorized() {
+			nm.MachineStatus = tailcfg.MachineAuthorized
+		} else {
+			nm.MachineStatus = tailcfg.MachineUnauthorized
+		}
+	}
+
+	ms.addUserProfile(nm, nm.User())
+	for _, peer := range peerViews {
+		ms.addUserProfile(nm, peer.Sharer())
+		ms.addUserProfile(nm, peer.User())
+	}
+
+	if DevKnob.ForceProxyDNS() {
+		nm.DNS.Proxied = true
+	}
+	return nm
+}
 
 func nodesSorted(v []*tailcfg.Node) bool {
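Where undeltaPeers reconciled deltas by merging two sorted slices of nodes, the replacement above keys peers by node ID and applies removals, full replacements, and field-level patches directly against the map. The same idea in miniature, with invented node and patch types standing in for the real peer and PeersChangedPatch types:

	package main

	import "fmt"

	type node struct {
		ID       int
		Endpoint string
		Online   bool
	}

	type patch struct {
		ID       int
		Endpoint string // empty means "no change"
		Online   *bool  // nil means "no change"
	}

	func applyDeltas(peers map[int]*node, removed []int, changed []node, patches []patch) {
		for _, id := range removed {
			delete(peers, id)
		}
		for _, n := range changed {
			n := n
			peers[n.ID] = &n // full replacement
		}
		for _, p := range patches {
			n, ok := peers[p.ID]
			if !ok {
				continue
			}
			if p.Endpoint != "" {
				n.Endpoint = p.Endpoint
			}
			if p.Online != nil {
				n.Online = *p.Online
			}
		}
	}

	func main() {
		peers := map[int]*node{
			1: {ID: 1, Endpoint: "10.0.0.1:41641"},
			2: {ID: 2, Endpoint: "10.0.0.2:41641"},
		}
		online := true
		applyDeltas(peers,
			[]int{2},
			[]node{{ID: 3, Endpoint: "10.0.0.3:41641"}},
			[]patch{{ID: 1, Online: &online}})
		for id, n := range peers {
			fmt.Println(id, n.Endpoint, n.Online)
		}
	}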