tstest/integration{,/testcontrol}: add node update support, two node test

Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
apenwarr/fixes
Brad Fitzpatrick 4 years ago
parent d32667011d
commit 5a7c6f1678

@ -10,6 +10,7 @@ import (
crand "crypto/rand" crand "crypto/rand"
"crypto/tls" "crypto/tls"
"encoding/json" "encoding/json"
"errors"
"flag" "flag"
"fmt" "fmt"
"io" "io"
@ -89,10 +90,7 @@ func TestOneNodeUp_NoAuth(t *testing.T) {
t.Error(err) t.Error(err)
} }
t.Logf("Running up --login-server=%s ...", env.ControlServer.URL) n1.MustUp()
if err := n1.Tailscale("up", "--login-server="+env.ControlServer.URL).Run(); err != nil {
t.Fatalf("up: %v", err)
}
if d, _ := time.ParseDuration(os.Getenv("TS_POST_UP_SLEEP")); d > 0 { if d, _ := time.ParseDuration(os.Getenv("TS_POST_UP_SLEEP")); d > 0 {
t.Logf("Sleeping for %v to give 'up' time to misbehave (https://github.com/tailscale/tailscale/issues/1840) ...", d) t.Logf("Sleeping for %v to give 'up' time to misbehave (https://github.com/tailscale/tailscale/issues/1840) ...", d)
@ -116,7 +114,6 @@ func TestOneNodeUp_Auth(t *testing.T) {
env.Control.RequireAuth = true env.Control.RequireAuth = true
n1 := newTestNode(t, env) n1 := newTestNode(t, env)
d1 := n1.StartDaemon(t) d1 := n1.StartDaemon(t)
defer d1.Kill() defer d1.Kill()
@ -155,6 +152,50 @@ func TestOneNodeUp_Auth(t *testing.T) {
} }
// TestTwoNodes brings up two nodes against the same test control
// server and verifies that the first node ends up with exactly one
// peer — the other node, not itself — before shutting both down.
func TestTwoNodes(t *testing.T) {
	t.Parallel()
	bins := buildTestBinaries(t)
	env := newTestEnv(t, bins)
	defer env.Close()

	// Create two nodes:
	n1 := newTestNode(t, env)
	d1 := n1.StartDaemon(t)
	defer d1.Kill()

	n2 := newTestNode(t, env)
	d2 := n2.StartDaemon(t)
	defer d2.Kill()

	n1.AwaitListening(t)
	n2.AwaitListening(t)
	n1.MustUp()
	n2.MustUp()
	n1.AwaitRunning(t)
	n2.AwaitRunning(t)

	// Poll n1's status until it reports exactly one peer that isn't
	// itself; the control server pushes updates asynchronously.
	if err := tstest.WaitFor(2*time.Second, func() error {
		st := n1.MustStatus(t)
		switch numPeers := len(st.Peer); {
		case numPeers == 0:
			return errors.New("no peers")
		case numPeers > 1:
			return fmt.Errorf("got %d peers; want 1", numPeers)
		}
		if peer := st.Peer[st.Peers()[0]]; peer.ID == st.Self.ID {
			return errors.New("peer is self")
		}
		return nil
	}); err != nil {
		t.Error(err)
	}

	d1.MustCleanShutdown(t)
	d2.MustCleanShutdown(t)
}
// testBinaries are the paths to a tailscaled and tailscale binary. // testBinaries are the paths to a tailscaled and tailscale binary.
// These can be shared by multiple nodes. // These can be shared by multiple nodes.
type testBinaries struct { type testBinaries struct {
@ -298,6 +339,14 @@ func (n *testNode) StartDaemon(t testing.TB) *Daemon {
} }
} }
// MustUp runs "tailscale up" pointed at the test environment's
// control server and fails the test (via the env's testing.TB) if
// the command returns an error.
func (n *testNode) MustUp() {
	var (
		t   = n.env.t
		url = n.env.ControlServer.URL
	)
	t.Logf("Running up --login-server=%s ...", url)
	if err := n.Tailscale("up", "--login-server="+url).Run(); err != nil {
		t.Fatalf("up: %v", err)
	}
}
// AwaitListening waits for the tailscaled to be serving local clients // AwaitListening waits for the tailscaled to be serving local clients
// over its localhost IPC mechanism. (Unix socket, etc) // over its localhost IPC mechanism. (Unix socket, etc)
func (n *testNode) AwaitListening(t testing.TB) { func (n *testNode) AwaitListening(t testing.TB) {

@ -18,6 +18,7 @@ import (
"math/rand" "math/rand"
"net/http" "net/http"
"net/url" "net/url"
"sort"
"strings" "strings"
"sync" "sync"
"time" "time"
@ -167,6 +168,18 @@ func (s *Server) Node(nodeKey tailcfg.NodeKey) *tailcfg.Node {
return s.nodes[nodeKey].Clone() return s.nodes[nodeKey].Clone()
} }
// AllNodes returns a clone of every node registered with the server,
// sorted by StableID so callers see a deterministic order.
func (s *Server) AllNodes() (nodes []*tailcfg.Node) {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Clone each node so callers can't mutate server state;
	// map iteration order is random, hence the sort below.
	for _, node := range s.nodes {
		nodes = append(nodes, node.Clone())
	}
	sort.Slice(nodes, func(a, b int) bool {
		return nodes[a].StableID < nodes[b].StableID
	})
	return nodes
}
func (s *Server) getUser(nodeKey tailcfg.NodeKey) (*tailcfg.User, *tailcfg.Login) { func (s *Server) getUser(nodeKey tailcfg.NodeKey) (*tailcfg.User, *tailcfg.Login) {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
@ -366,6 +379,21 @@ func sendUpdate(dst chan<- updateType, updateType updateType) {
} }
} }
// UpdateNode stores a clone of n (replacing any prior node with the
// same key) and returns the IDs of all other known nodes, so the
// caller can push map updates to those peers. It panics if n has a
// zero node key — a programmer error in the test harness.
func (s *Server) UpdateNode(n *tailcfg.Node) (peersToUpdate []tailcfg.NodeID) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if n.Key.IsZero() {
		panic("zero nodekey")
	}
	s.nodes[n.Key] = n.Clone()
	// Everybody but the updated node itself needs to hear about it.
	for _, other := range s.nodes {
		if other.ID != n.ID {
			peersToUpdate = append(peersToUpdate, other.ID)
		}
	}
	return peersToUpdate
}
func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey tailcfg.MachineKey) { func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey tailcfg.MachineKey) {
ctx := r.Context() ctx := r.Context()
@ -391,10 +419,8 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey tailcfg.M
if !req.ReadOnly { if !req.ReadOnly {
endpoints := filterInvalidIPv6Endpoints(req.Endpoints) endpoints := filterInvalidIPv6Endpoints(req.Endpoints)
node.Endpoints = endpoints node.Endpoints = endpoints
// TODO: more node.DiscoKey = req.DiscoKey
// TODO: register node, peersToUpdate = s.UpdateNode(node)
//s.UpdateEndpoint(mkey, req.NodeKey,
// XXX
} }
nodeID := node.ID nodeID := node.ID
@ -501,6 +527,12 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse,
CollectServices: "true", CollectServices: "true",
PacketFilter: tailcfg.FilterAllowAll, PacketFilter: tailcfg.FilterAllowAll,
} }
for _, p := range s.AllNodes() {
if p.StableID != node.StableID {
res.Peers = append(res.Peers, p)
}
}
res.Node.Addresses = []netaddr.IPPrefix{ res.Node.Addresses = []netaddr.IPPrefix{
netaddr.MustParseIPPrefix(fmt.Sprintf("100.64.%d.%d/32", uint8(node.ID>>8), uint8(node.ID))), netaddr.MustParseIPPrefix(fmt.Sprintf("100.64.%d.%d/32", uint8(node.ID>>8), uint8(node.ID))),
} }

Loading…
Cancel
Save