ipn/ipnstate: record assigned Tailscale IPs.

wgengine/magicsock: use ipnstate to find assigned Tailscale IPs.

Signed-off-by: David Anderson <danderson@tailscale.com>
reviewable/pr609/r1
David Anderson 4 years ago committed by Dave Anderson
parent c3958898f1
commit 0249236cc0

@ -18,6 +18,7 @@ import (
"sync" "sync"
"time" "time"
"inet.af/netaddr"
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
"tailscale.com/types/key" "tailscale.com/types/key"
) )
@ -25,6 +26,7 @@ import (
// Status represents the entire state of the IPN network. // Status represents the entire state of the IPN network.
type Status struct { type Status struct {
BackendState string BackendState string
TailscaleIPs []netaddr.IP // Tailscale IP(s) assigned to this node
Peer map[key.Public]*PeerStatus Peer map[key.Public]*PeerStatus
User map[tailcfg.UserID]tailcfg.UserProfile User map[tailcfg.UserID]tailcfg.UserProfile
} }
@ -109,6 +111,18 @@ func (sb *StatusBuilder) AddUser(id tailcfg.UserID, up tailcfg.UserProfile) {
sb.st.User[id] = up sb.st.User[id] = up
} }
// AddTailscaleIP adds a Tailscale IP address to the status.
//
// It must be called before Locked; calls after locking are logged
// and ignored so a late writer cannot corrupt a finalized Status.
func (sb *StatusBuilder) AddTailscaleIP(ip netaddr.IP) {
	sb.mu.Lock()
	defer sb.mu.Unlock()
	if sb.locked {
		// Was "AddIP after Locked": keep the message in sync with the
		// exported method name so the log line is actionable.
		log.Printf("[unexpected] ipnstate: AddTailscaleIP after Locked")
		return
	}
	sb.st.TailscaleIPs = append(sb.st.TailscaleIPs, ip)
}
// AddPeer adds a peer node to the status. // AddPeer adds a peer node to the status.
// //
// Its PeerStatus is mixed with any previous status already added. // Its PeerStatus is mixed with any previous status already added.
@ -218,6 +232,12 @@ table tbody tr:nth-child(even) td { background-color: #f5f5f5; }
//f("<p><b>logid:</b> %s</p>\n", logid) //f("<p><b>logid:</b> %s</p>\n", logid)
//f("<p><b>opts:</b> <code>%s</code></p>\n", html.EscapeString(fmt.Sprintf("%+v", opts))) //f("<p><b>opts:</b> <code>%s</code></p>\n", html.EscapeString(fmt.Sprintf("%+v", opts)))
ips := make([]string, 0, len(st.TailscaleIPs))
for _, ip := range st.TailscaleIPs {
ips = append(ips, ip.String())
}
f("<p>Tailscale IP: %s", strings.Join(ips, ", "))
f("<table>\n<thead>\n") f("<table>\n<thead>\n")
f("<tr><th>Peer</th><th>Node</th><th>Owner</th><th>Rx</th><th>Tx</th><th>Activity</th><th>Endpoints</th></tr>\n") f("<tr><th>Peer</th><th>Node</th><th>Owner</th><th>Rx</th><th>Tx</th><th>Activity</th><th>Endpoints</th></tr>\n")
f("</thead>\n<tbody>\n") f("</thead>\n<tbody>\n")

@ -81,9 +81,8 @@ var (
debugReSTUNStopOnIdle, _ = strconv.ParseBool(os.Getenv("TS_DEBUG_RESTUN_STOP_ON_IDLE")) debugReSTUNStopOnIdle, _ = strconv.ParseBool(os.Getenv("TS_DEBUG_RESTUN_STOP_ON_IDLE"))
) )
// inTest binds magicsock to 127.0.0.1 instead of its usual 0.0.0.0, // inTest reports whether the running program is a test that set the
// to avoid macOS prompting for firewall permissions during // IN_TS_TEST environment variable.
// interactive tests.
// //
// Unlike the other debug tweakables above, this one needs to be // Unlike the other debug tweakables above, this one needs to be
// checked every time at runtime, because tests set this after program // checked every time at runtime, because tests set this after program
@ -2860,6 +2859,17 @@ func (c *Conn) UpdateStatus(sb *ipnstate.StatusBuilder) {
c.mu.Lock() c.mu.Lock()
defer c.mu.Unlock() defer c.mu.Unlock()
if c.netMap != nil {
for _, addr := range c.netMap.Addresses {
if (addr.IP.Is4() && addr.Mask != 32) || (addr.IP.Is6() && addr.Mask != 128) {
continue
}
if ip, ok := netaddr.FromStdIP(addr.IP.IP()); ok {
sb.AddTailscaleIP(ip)
}
}
}
for dk, n := range c.nodeOfDisco { for dk, n := range c.nodeOfDisco {
ps := &ipnstate.PeerStatus{InMagicSock: true} ps := &ipnstate.PeerStatus{InMagicSock: true}
ps.Addrs = append(ps.Addrs, n.Endpoints...) ps.Addrs = append(ps.Addrs, n.Endpoints...)

@ -118,7 +118,6 @@ type magicStack struct {
tun *tuntest.ChannelTUN // tuntap device to send/receive packets tun *tuntest.ChannelTUN // tuntap device to send/receive packets
tsTun *tstun.TUN // wrapped tun that implements filtering and wgengine hooks tsTun *tstun.TUN // wrapped tun that implements filtering and wgengine hooks
dev *device.Device // the wireguard-go Device that connects the previous things dev *device.Device // the wireguard-go Device that connects the previous things
tsIP chan netaddr.IP // buffered, guaranteed to yield at least 1 value
} }
// newMagicStack builds and initializes an idle magicsock and // newMagicStack builds and initializes an idle magicsock and
@ -181,7 +180,6 @@ func newMagicStack(t *testing.T, logf logger.Logf, l nettype.PacketListener, der
tun: tun, tun: tun,
tsTun: tsTun, tsTun: tsTun,
dev: dev, dev: dev,
tsIP: make(chan netaddr.IP, 1),
} }
} }
@ -205,18 +203,20 @@ func (s *magicStack) Status() *ipnstate.Status {
return sb.Status() return sb.Status()
} }
// AwaitIP waits for magicStack to receive a Tailscale IP address on // IP returns the Tailscale IP address assigned to this magicStack.
// tsIP, and returns the IP. It's intended for use with magicStacks //
// that have been meshed with meshStacks, to wait for configs to have // Something external needs to provide a NetworkMap and WireGuard
// propagated enough that everyone has a Tailscale IP that should // configs to the magicStack in order for it to acquire an IP
// work. // address. See meshStacks for one possible source of netmaps and IPs.
func (s *magicStack) AwaitIP() netaddr.IP { func (s *magicStack) IP(t *testing.T) netaddr.IP {
select { for deadline := time.Now().Add(5 * time.Second); time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) {
case ip := <-s.tsIP: st := s.Status()
return ip if len(st.TailscaleIPs) > 0 {
case <-time.After(2 * time.Second): return st.TailscaleIPs[0]
panic("timed out waiting for magicStack to get an IP") }
} }
t.Fatal("timed out waiting for magicstack to get an IP assigned")
panic("unreachable") // compiler doesn't know t.Fatal panics
} }
// meshStacks monitors epCh on all given ms, and plumbs network maps // meshStacks monitors epCh on all given ms, and plumbs network maps
@ -271,11 +271,6 @@ func meshStacks(logf logger.Logf, ms []*magicStack) (cleanup func()) {
for i, m := range ms { for i, m := range ms {
netmap := buildNetmapLocked(i) netmap := buildNetmapLocked(i)
nip, _ := netaddr.FromStdIP(netmap.Addresses[0].IP.IP())
select {
case m.tsIP <- nip:
default:
}
m.conn.SetNetworkMap(netmap) m.conn.SetNetworkMap(netmap)
peerSet := make(map[key.Public]struct{}, len(netmap.Peers)) peerSet := make(map[key.Public]struct{}, len(netmap.Peers))
for _, peer := range netmap.Peers { for _, peer := range netmap.Peers {
@ -707,7 +702,7 @@ type devices struct {
// newPinger starts continuously sending test packets from srcM to // newPinger starts continuously sending test packets from srcM to
// dstM, until cleanup is invoked to stop it. Each ping has 1 second // dstM, until cleanup is invoked to stop it. Each ping has 1 second
// to transit the network. It is a test failure to lose a ping. // to transit the network. It is a test failure to lose a ping.
func newPinger(t *testing.T, logf logger.Logf, srcM, dstM *magicStack, srcIP, dstIP netaddr.IP) (cleanup func()) { func newPinger(t *testing.T, logf logger.Logf, src, dst *magicStack) (cleanup func()) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
done := make(chan struct{}) done := make(chan struct{})
one := func() bool { one := func() bool {
@ -717,10 +712,10 @@ func newPinger(t *testing.T, logf logger.Logf, srcM, dstM *magicStack, srcIP, ds
// failure). Figure out what kind of thing would be // failure). Figure out what kind of thing would be
// acceptable to test instead of "every ping must // acceptable to test instead of "every ping must
// transit". // transit".
pkt := tuntest.Ping(dstIP.IPAddr().IP, srcIP.IPAddr().IP) pkt := tuntest.Ping(dst.IP(t).IPAddr().IP, src.IP(t).IPAddr().IP)
srcM.tun.Outbound <- pkt src.tun.Outbound <- pkt
select { select {
case <-dstM.tun.Inbound: case <-dst.tun.Inbound:
return true return true
case <-time.After(10 * time.Second): case <-time.After(10 * time.Second):
// Very generous timeout here because depending on // Very generous timeout here because depending on
@ -737,7 +732,7 @@ func newPinger(t *testing.T, logf logger.Logf, srcM, dstM *magicStack, srcIP, ds
// natlab may still be delivering a packet to us from a // natlab may still be delivering a packet to us from a
// goroutine. // goroutine.
select { select {
case <-dstM.tun.Inbound: case <-dst.tun.Inbound:
case <-time.After(time.Second): case <-time.After(time.Second):
} }
return false return false
@ -758,7 +753,7 @@ func newPinger(t *testing.T, logf logger.Logf, srcM, dstM *magicStack, srcIP, ds
} }
go func() { go func() {
logf("sending ping stream from %s (%s) to %s (%s)", srcM, srcIP, dstM, dstIP) logf("sending ping stream from %s (%s) to %s (%s)", src, src.IP(t), dst, dst.IP(t))
defer close(done) defer close(done)
for one() { for one() {
} }
@ -792,11 +787,11 @@ func testActiveDiscovery(t *testing.T, d *devices) {
cleanup = meshStacks(logf, []*magicStack{m1, m2}) cleanup = meshStacks(logf, []*magicStack{m1, m2})
defer cleanup() defer cleanup()
m1IP := m1.AwaitIP() m1IP := m1.IP(t)
m2IP := m2.AwaitIP() m2IP := m2.IP(t)
logf("IPs: %s %s", m1IP, m2IP) logf("IPs: %s %s", m1IP, m2IP)
cleanup = newPinger(t, logf, m1, m2, m1IP, m2IP) cleanup = newPinger(t, logf, m1, m2)
defer cleanup() defer cleanup()
// Everything is now up and running, active discovery should find // Everything is now up and running, active discovery should find

Loading…
Cancel
Save