appc,ipn/ipnlocal: Add split DNS entries for conn25 peers

If conn25 config is sent in the netmap, add split DNS entries that use the
appropriately tagged peers' PeerAPI to resolve DNS requests for the
configured domains.

This will enable future work where we use the peers as connectors for
the configured domains.

Updates tailscale/corp#34252

Signed-off-by: Fran Bull <fran@tailscale.com>
pull/18525/head
Fran Bull 2 weeks ago committed by franbull
parent 1183f7a191
commit 9d13a6df9c

@ -4,10 +4,15 @@
package appc
import (
"cmp"
"net/netip"
"slices"
"sync"
"tailscale.com/tailcfg"
"tailscale.com/types/appctype"
"tailscale.com/util/mak"
"tailscale.com/util/set"
)
// Conn25 holds the developing state for the as yet nascent next generation app connector.
@ -108,3 +113,61 @@ type ConnectorTransitIPResponse struct {
// correspond to the order of [ConnectorTransitIPRequest.TransitIPs].
TransitIPs []TransitIPResponse `json:"transitIPs,omitempty"`
}
// AppConnectorsExperimentalAttrName is the node capability under which the
// self node's CapMap carries experimental app connector configuration, as a
// list of JSON-encoded [appctype.AppConnectorAttr] values.
const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experimental"
// PickSplitDNSPeers inspects the netmap peers' capabilities and reports which
// peers want to act as connectors for which domains. The result maps each
// domain to candidate peers sorted by node ID; it is nil when the
// experimental capability is absent, when the self node's connector config
// cannot be parsed, or when no peer matches.
func PickSplitDNSPeers(hasCap func(c tailcfg.NodeCapability) bool, self tailcfg.NodeView, peers map[tailcfg.NodeID]tailcfg.NodeView) map[string][]tailcfg.NodeView {
	if !hasCap(AppConnectorsExperimentalAttrName) {
		return nil
	}
	apps, err := tailcfg.UnmarshalNodeCapViewJSON[appctype.AppConnectorAttr](self.CapMap(), AppConnectorsExperimentalAttrName)
	if err != nil {
		return nil
	}
	// Invert the config: for each connector tag, gather every domain it serves.
	domainsByTag := make(map[string][]string)
	for _, app := range apps {
		for _, tag := range app.Connectors {
			domainsByTag[tag] = append(domainsByTag[tag], app.Domains...)
		}
	}
	// Collect candidate node IDs per domain. NodeIDs are comparable and we
	// already hold an ID->view map, so a Set of IDs deduplicates peers that
	// match a domain through more than one tag; views are recovered below.
	var candidates map[string]set.Set[tailcfg.NodeID]
	for _, peer := range peers {
		if !peer.Valid() || !peer.Hostinfo().Valid() {
			continue
		}
		if isConnector, _ := peer.Hostinfo().AppConnector().Get(); !isConnector {
			continue
		}
		for _, tag := range peer.Tags().All() {
			for _, domain := range domainsByTag[tag] {
				if candidates[domain] == nil {
					mak.Set(&candidates, domain, set.Set[tailcfg.NodeID]{})
				}
				candidates[domain].Add(peer.ID())
			}
		}
	}
	// Turn each ID set back into a []tailcfg.NodeView via the peers map.
	// The slice ordering is semantic: dnsConfigForNetmap uses the first node
	// it can get a peer API URL for as its split DNS target, so we can think
	// of it as a preference order — except that we don't (currently
	// 2026-01-14) have any preference, so sort by ID for determinism.
	var m map[string][]tailcfg.NodeView
	for domain, ids := range candidates {
		nodes := make([]tailcfg.NodeView, 0, ids.Len())
		for id := range ids {
			nodes = append(nodes, peers[id])
		}
		slices.SortFunc(nodes, func(a, b tailcfg.NodeView) int {
			return cmp.Compare(a.ID(), b.ID())
		})
		mak.Set(&m, domain, nodes)
	}
	return m
}

@ -4,10 +4,14 @@
package appc
import (
"encoding/json"
"net/netip"
"reflect"
"testing"
"tailscale.com/tailcfg"
"tailscale.com/types/appctype"
"tailscale.com/types/opt"
)
// TestHandleConnectorTransitIPRequestZeroLength tests that if sent a
@ -186,3 +190,122 @@ func TestTransitIPTargetUnknownTIP(t *testing.T) {
t.Fatalf("Unknown transit addr, want: %v, got %v", want, got)
}
}
// TestPickSplitDNSPeers exercises PickSplitDNSPeers with a table of connector
// configs (carried in the self node's CapMap) and candidate peers, checking
// the returned domain -> []NodeView map, including the sorted-by-ID ordering
// of each value slice.
func TestPickSplitDNSPeers(t *testing.T) {
	// getBytesForAttr marshals one AppConnectorAttr (app name, served
	// domains, connector tags) into the raw JSON form the CapMap carries.
	getBytesForAttr := func(name string, domains []string, tags []string) []byte {
		attr := appctype.AppConnectorAttr{
			Name:       name,
			Domains:    domains,
			Connectors: tags,
		}
		bs, err := json.Marshal(attr)
		if err != nil {
			t.Fatalf("test setup: %v", err)
		}
		return bs
	}
	appOneBytes := getBytesForAttr("app1", []string{"example.com"}, []string{"tag:one"})
	appTwoBytes := getBytesForAttr("app2", []string{"a.example.com"}, []string{"tag:two"})
	appThreeBytes := getBytesForAttr("app3", []string{"woo.b.example.com", "hoo.b.example.com"}, []string{"tag:three1", "tag:three2"})
	appFourBytes := getBytesForAttr("app4", []string{"woo.b.example.com", "c.example.com"}, []string{"tag:four1", "tag:four2"})
	// makeNodeView builds a peer that advertises itself as an app connector
	// (Hostinfo.AppConnector=true) with the given tags; peers built without
	// this helper (below) are deliberately NOT connectors.
	makeNodeView := func(id tailcfg.NodeID, name string, tags []string) tailcfg.NodeView {
		return (&tailcfg.Node{
			ID:       id,
			Name:     name,
			Tags:     tags,
			Hostinfo: (&tailcfg.Hostinfo{AppConnector: opt.NewBool(true)}).View(),
		}).View()
	}
	nvp1 := makeNodeView(1, "p1", []string{"tag:one"})
	nvp2 := makeNodeView(2, "p2", []string{"tag:four1", "tag:four2"})
	nvp3 := makeNodeView(3, "p3", []string{"tag:two", "tag:three1"})
	nvp4 := makeNodeView(4, "p4", []string{"tag:two", "tag:three2", "tag:four2"})
	for _, tt := range []struct {
		name   string
		want   map[string][]tailcfg.NodeView // nil means "expect no result"
		peers  []tailcfg.NodeView
		config []tailcfg.RawMessage // raw AppConnectorAttr JSON for the self CapMap
	}{
		{
			name: "empty",
		},
		{
			name:   "bad-config", // bad config should return a nil map rather than error.
			config: []tailcfg.RawMessage{tailcfg.RawMessage(`hey`)},
		},
		{
			name:   "no-peers",
			config: []tailcfg.RawMessage{tailcfg.RawMessage(appOneBytes)},
		},
		{
			// Tags match, but Hostinfo.AppConnector is unset, so these
			// peers must be ignored.
			name:   "peers-that-are-not-connectors",
			config: []tailcfg.RawMessage{tailcfg.RawMessage(appOneBytes)},
			peers: []tailcfg.NodeView{
				(&tailcfg.Node{
					ID:   5,
					Name: "p5",
					Tags: []string{"tag:one"},
				}).View(),
				(&tailcfg.Node{
					ID:   6,
					Name: "p6",
					Tags: []string{"tag:one"},
				}).View(),
			},
		},
		{
			// Connector peers whose tags don't appear in any app config.
			name:   "peers-that-dont-match-tags",
			config: []tailcfg.RawMessage{tailcfg.RawMessage(appOneBytes)},
			peers: []tailcfg.NodeView{
				makeNodeView(5, "p5", []string{"tag:seven"}),
				makeNodeView(6, "p6", nil),
			},
		},
		{
			// The main case: four apps, four tagged connector peers plus one
			// untagged; domains shared across apps/tags must deduplicate and
			// each value slice must come back sorted by node ID.
			name: "matching-tagged-connector-peers",
			config: []tailcfg.RawMessage{
				tailcfg.RawMessage(appOneBytes),
				tailcfg.RawMessage(appTwoBytes),
				tailcfg.RawMessage(appThreeBytes),
				tailcfg.RawMessage(appFourBytes),
			},
			peers: []tailcfg.NodeView{
				nvp1,
				nvp2,
				nvp3,
				nvp4,
				makeNodeView(5, "p5", nil),
			},
			want: map[string][]tailcfg.NodeView{
				// p5 has no matching tags and so doesn't appear
				"example.com":       {nvp1},
				"a.example.com":     {nvp3, nvp4},
				"woo.b.example.com": {nvp2, nvp3, nvp4},
				"hoo.b.example.com": {nvp3, nvp4},
				"c.example.com":     {nvp2, nvp4},
			},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			// Install the per-case config under the experimental capability.
			selfNode := &tailcfg.Node{}
			if tt.config != nil {
				selfNode.CapMap = tailcfg.NodeCapMap{
					tailcfg.NodeCapability(AppConnectorsExperimentalAttrName): tt.config,
				}
			}
			selfView := selfNode.View()
			// PickSplitDNSPeers takes peers keyed by NodeID.
			peers := map[tailcfg.NodeID]tailcfg.NodeView{}
			for _, p := range tt.peers {
				peers[p.ID()] = p
			}
			// hasCap always reports true here; the capability-gating path is
			// exercised via the netmap-level tests elsewhere.
			got := PickSplitDNSPeers(func(_ tailcfg.NodeCapability) bool {
				return true
			}, selfView, peers)
			if !reflect.DeepEqual(got, tt.want) {
				t.Fatalf("got %v, want %v", got, tt.want)
			}
		})
	}
}

@ -10,14 +10,17 @@ import (
"reflect"
"testing"
"tailscale.com/appc"
"tailscale.com/ipn"
"tailscale.com/net/dns"
"tailscale.com/tailcfg"
"tailscale.com/tstest"
"tailscale.com/types/dnstype"
"tailscale.com/types/netmap"
"tailscale.com/types/opt"
"tailscale.com/util/cloudenv"
"tailscale.com/util/dnsname"
"tailscale.com/util/set"
)
func ipps(ippStrs ...string) (ipps []netip.Prefix) {
@ -349,6 +352,94 @@ func TestDNSConfigForNetmap(t *testing.T) {
prefs: &ipn.Prefs{},
want: &dns.Config{},
},
{
name: "conn25-split-dns",
nm: &netmap.NetworkMap{
SelfNode: (&tailcfg.Node{
Name: "a",
Addresses: ipps("100.101.101.101"),
CapMap: tailcfg.NodeCapMap{
tailcfg.NodeCapability(appc.AppConnectorsExperimentalAttrName): []tailcfg.RawMessage{
tailcfg.RawMessage(`{"name":"app1","connectors":["tag:woo"],"domains":["example.com"]}`),
},
},
}).View(),
AllCaps: set.Of(tailcfg.NodeCapability(appc.AppConnectorsExperimentalAttrName)),
},
peers: nodeViews([]*tailcfg.Node{
{
ID: 1,
Name: "p1",
Addresses: ipps("100.102.0.1"),
Tags: []string{"tag:woo"},
Hostinfo: (&tailcfg.Hostinfo{
Services: []tailcfg.Service{
{
Proto: tailcfg.PeerAPI4,
Port: 1234,
},
},
AppConnector: opt.NewBool(true),
}).View(),
},
}),
prefs: &ipn.Prefs{
CorpDNS: true,
},
want: &dns.Config{
Hosts: map[dnsname.FQDN][]netip.Addr{
"a.": ips("100.101.101.101"),
"p1.": ips("100.102.0.1"),
},
Routes: map[dnsname.FQDN][]*dnstype.Resolver{
dnsname.FQDN("example.com."): {
{Addr: "http://100.102.0.1:1234/dns-query"},
},
},
},
},
{
name: "conn25-split-dns-no-matching-peers",
nm: &netmap.NetworkMap{
SelfNode: (&tailcfg.Node{
Name: "a",
Addresses: ipps("100.101.101.101"),
CapMap: tailcfg.NodeCapMap{
tailcfg.NodeCapability(appc.AppConnectorsExperimentalAttrName): []tailcfg.RawMessage{
tailcfg.RawMessage(`{"name":"app1","connectors":["tag:woo"],"domains":["example.com"]}`),
},
},
}).View(),
AllCaps: set.Of(tailcfg.NodeCapability(appc.AppConnectorsExperimentalAttrName)),
},
peers: nodeViews([]*tailcfg.Node{
{
ID: 1,
Name: "p1",
Addresses: ipps("100.102.0.1"),
Tags: []string{"tag:nomatch"},
Hostinfo: (&tailcfg.Hostinfo{
Services: []tailcfg.Service{
{
Proto: tailcfg.PeerAPI4,
Port: 1234,
},
},
AppConnector: opt.NewBool(true),
}).View(),
},
}),
prefs: &ipn.Prefs{
CorpDNS: true,
},
want: &dns.Config{
Routes: map[dnsname.FQDN][]*dnstype.Resolver{},
Hosts: map[dnsname.FQDN][]netip.Addr{
"a.": ips("100.101.101.101"),
"p1.": ips("100.102.0.1"),
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

@ -6,12 +6,14 @@ package ipnlocal
import (
"cmp"
"context"
"fmt"
"net/netip"
"slices"
"sync"
"sync/atomic"
"go4.org/netipx"
"tailscale.com/appc"
"tailscale.com/feature/buildfeatures"
"tailscale.com/ipn"
"tailscale.com/net/dns"
@ -842,6 +844,25 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg.
// Add split DNS routes, with no regard to exit node configuration.
addSplitDNSRoutes(nm.DNS.Routes)
// Add split DNS routes for conn25
conn25DNSTargets := appc.PickSplitDNSPeers(nm.HasCap, nm.SelfNode, peers)
if conn25DNSTargets != nil {
var m map[string][]*dnstype.Resolver
for domain, candidateSplitDNSPeers := range conn25DNSTargets {
for _, peer := range candidateSplitDNSPeers {
base := peerAPIBase(nm, peer)
if base == "" {
continue
}
mak.Set(&m, domain, []*dnstype.Resolver{{Addr: fmt.Sprintf("%s/dns-query", base)}})
break // Just make one resolver for the first peer we can get a peerAPIBase for.
}
}
if m != nil {
addSplitDNSRoutes(m)
}
}
// Set FallbackResolvers as the default resolvers in the
// scenarios that can't handle a purely split-DNS config. See
// https://github.com/tailscale/tailscale/issues/1743 for

Loading…
Cancel
Save