tka: optimize common case of processing updates built from head

Signed-off-by: Tom DNetto <tom@tailscale.com>
pull/5426/head
Tom DNetto, committed by Tom
parent 039def3b50
commit 472529af38

@@ -88,7 +88,6 @@ func (b *LocalBackend) NetworkLockInit(keys []tka.Key) error {
         return errors.New("no netmap: are you logged into tailscale?")
     }
     // Generates a genesis AUM representing trust in the provided keys.
     // We use an in-memory tailchonk because we don't want to commit to
     // the filesystem until we've finished the initialization sequence,
@@ -118,14 +117,14 @@ func (b *LocalBackend) NetworkLockInit(keys []tka.Key) error {
     // If we don't get these signatures ahead of time, everyone will lose
     // connectivity because control won't have any signatures to send which
     // satisfy network-lock checks.
-    var sigs []tkatype.MarshaledSignature
-    for _, nkp := range initResp.NeedSignatures {
-        nks, err := signNodeKey(nkp, b.nlPrivKey)
+    sigs := make(map[tailcfg.NodeID]tkatype.MarshaledSignature, len(initResp.NeedSignatures))
+    for _, nodeInfo := range initResp.NeedSignatures {
+        nks, err := signNodeKey(nodeInfo, b.nlPrivKey)
         if err != nil {
             return fmt.Errorf("generating signature: %v", err)
         }
-        sigs = append(sigs, nks.Serialize())
+        sigs[nodeInfo.NodeID] = nks.Serialize()
     }
     // Finalize enablement by transmitting signature for all nodes to Control.
@@ -133,16 +132,17 @@ func (b *LocalBackend) NetworkLockInit(keys []tka.Key) error {
     return err
 }
-func signNodeKey(nk key.NodePublic, signer key.NLPrivate) (*tka.NodeKeySignature, error) {
-    p, err := nk.MarshalBinary()
+func signNodeKey(nodeInfo tailcfg.TKASignInfo, signer key.NLPrivate) (*tka.NodeKeySignature, error) {
+    p, err := nodeInfo.NodePublic.MarshalBinary()
     if err != nil {
         return nil, err
     }
     sig := tka.NodeKeySignature{
-        SigKind: tka.SigDirect,
-        KeyID:   signer.KeyID(),
-        Pubkey:  p,
+        SigKind:        tka.SigDirect,
+        KeyID:          signer.KeyID(),
+        Pubkey:         p,
+        RotationPubkey: nodeInfo.RotationPubkey,
     }
     sig.Signature, err = signer.SignNKS(sig.SigHash())
     if err != nil {
@@ -154,7 +154,7 @@ func signNodeKey(nk key.NodePublic, signer key.NLPrivate) (*tka.NodeKeySignature, error) {
 func (b *LocalBackend) tkaInitBegin(nm *netmap.NetworkMap, aum tka.AUM) (*tailcfg.TKAInitBeginResponse, error) {
     var req bytes.Buffer
     if err := json.NewEncoder(&req).Encode(tailcfg.TKAInitBeginRequest{
         NodeID:     nm.SelfNode.ID,
         GenesisAUM: aum.Serialize(),
     }); err != nil {
         return nil, fmt.Errorf("encoding request: %v", err)
@@ -192,10 +192,10 @@ func (b *LocalBackend) tkaInitBegin(nm *netmap.NetworkMap, aum tka.AUM) (*tailcf
     }
 }
-func (b *LocalBackend) tkaInitFinish(nm *netmap.NetworkMap, nks []tkatype.MarshaledSignature) (*tailcfg.TKAInitFinishResponse, error) {
+func (b *LocalBackend) tkaInitFinish(nm *netmap.NetworkMap, nks map[tailcfg.NodeID]tkatype.MarshaledSignature) (*tailcfg.TKAInitFinishResponse, error) {
     var req bytes.Buffer
     if err := json.NewEncoder(&req).Encode(tailcfg.TKAInitFinishRequest{
         NodeID:     nm.SelfNode.ID,
         Signatures: nks,
     }); err != nil {
         return nil, fmt.Errorf("encoding request: %v", err)
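Taken together, the client-side change is that control now hands back a tailcfg.TKASignInfo per pre-existing node (instead of a bare key.NodePublic), and the client returns its signatures keyed by NodeID. A rough end-to-end sketch of that loop follows; the signAll helper and the nlsketch package name are made up for illustration, everything else uses only the types and methods shown in the diff above.

package nlsketch // hypothetical package, for illustration only

import (
    "fmt"

    "tailscale.com/tailcfg"
    "tailscale.com/tka"
    "tailscale.com/types/key"
    "tailscale.com/types/tkatype"
)

// signAll mirrors the loop in NetworkLockInit: one direct signature per
// existing node, carrying that node's rotation pubkey, keyed by NodeID.
func signAll(need []tailcfg.TKASignInfo, nlPriv key.NLPrivate) (map[tailcfg.NodeID]tkatype.MarshaledSignature, error) {
    sigs := make(map[tailcfg.NodeID]tkatype.MarshaledSignature, len(need))
    for _, nodeInfo := range need {
        pub, err := nodeInfo.NodePublic.MarshalBinary()
        if err != nil {
            return nil, err
        }
        sig := tka.NodeKeySignature{
            SigKind:        tka.SigDirect,
            KeyID:          nlPriv.KeyID(),
            Pubkey:         pub,
            RotationPubkey: nodeInfo.RotationPubkey,
        }
        if sig.Signature, err = nlPriv.SignNKS(sig.SigHash()); err != nil {
            return nil, fmt.Errorf("signing node %v: %v", nodeInfo.NodeID, err)
        }
        sigs[nodeInfo.NodeID] = sig.Serialize()
    }
    return sigs, nil
}

Keying the map by NodeID, rather than returning a parallel slice, lets control match each signature to its node without relying on ordering.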

@@ -1829,25 +1829,31 @@ type PeerChange struct {
 // TKAInitBeginRequest submits a genesis AUM to seed the creation of the
 // tailnet's key authority.
 type TKAInitBeginRequest struct {
-    NodeID     NodeID
+    NodeID     NodeID // NodeID of the initiating client
     GenesisAUM tkatype.MarshaledAUM
 }
-// TKAInitBeginResponse describes a set of NodeKeys which must be signed to
+// TKASignInfo describes information about an existing node that needs
+// to be signed into a node-key signature.
+type TKASignInfo struct {
+    NodeID         NodeID // NodeID of the node-key being signed
+    NodePublic     key.NodePublic
+    RotationPubkey []byte
+}
+// TKAInitBeginResponse describes node information which must be signed to
 // complete initialization of the tailnet's key authority.
 type TKAInitBeginResponse struct {
     NodeID NodeID
-    NeedSignatures []key.NodePublic
+    NeedSignatures []TKASignInfo
 }
 // TKAInitFinishRequest finalizes initialization of the tailnet key authority
 // by submitting node-key signatures for all existing nodes.
 type TKAInitFinishRequest struct {
-    NodeID     NodeID
-    Signatures []tkatype.MarshaledSignature
+    NodeID     NodeID // NodeID of the initiating client
+    Signatures map[NodeID]tkatype.MarshaledSignature
 }
 // TKAInitFinishResponse describes the successful enablement of the tailnet's
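To make the wire shape of the new finish request concrete, here is a hedged, self-contained example that simply encodes a TKAInitFinishRequest with the NodeID-keyed Signatures map. The IDs and byte values are placeholders, not real signatures; encoding/json renders the integer NodeID map keys as strings and the []byte-backed signatures as base64.

package main

import (
    "encoding/json"
    "fmt"

    "tailscale.com/tailcfg"
    "tailscale.com/types/tkatype"
)

func main() {
    // Placeholder IDs and bytes, for illustration only.
    req := tailcfg.TKAInitFinishRequest{
        NodeID: 101,
        Signatures: map[tailcfg.NodeID]tkatype.MarshaledSignature{
            101: {0xde, 0xad, 0xbe, 0xef}, // not a real node-key signature
            102: {0xca, 0xfe},
        },
    }
    out, err := json.MarshalIndent(req, "", "  ")
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
}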

@@ -553,13 +553,31 @@ func Bootstrap(storage Chonk, bootstrap AUM) (*Authority, error) {
 // should be ordered oldest to newest. An error is returned if any
 // of the updates could not be processed.
 func (a *Authority) Inform(updates []AUM) error {
+    if len(updates) == 0 {
+        return errors.New("inform called with empty slice")
+    }
     stateAt := make(map[AUMHash]State, len(updates)+1)
     toCommit := make([]AUM, 0, len(updates))
+    prevHash := a.Head()
+    // The state at HEAD is the current state of the authority. It's likely
+    // to be needed, so we prefill it rather than computing it.
+    stateAt[prevHash] = a.state
+    // Optimization: If the set of updates is a chain building from
+    // the current head, e.g.:
+    //   <a.Head()> ==> updates[0] ==> updates[1] ...
+    // then there's no need to recompute the resulting state from the
+    // stored ancestor, because the last state computed during iteration
+    // is the new state. This should be the common case.
+    // isHeadChain keeps track of this.
+    isHeadChain := true
     for i, update := range updates {
         hash := update.Hash()
         // Check if we already have this AUM thus don't need to process it.
         if _, err := a.storage.AUM(hash); err == nil {
+            // Already have this AUM.
+            isHeadChain = false // Disable the head-chain optimization.
             continue
         }
@@ -583,6 +601,11 @@ func (a *Authority) Inform(updates []AUM) error {
         if stateAt[hash], err = state.applyVerifiedAUM(update); err != nil {
             return fmt.Errorf("update %d cannot be applied: %v", i, err)
         }
+        if isHeadChain && parent != prevHash {
+            isHeadChain = false
+        }
+        prevHash = hash
         toCommit = append(toCommit, update)
     }
@@ -590,18 +613,22 @@ func (a *Authority) Inform(updates []AUM) error {
         return fmt.Errorf("commit: %v", err)
     }
-    // TODO(tom): Theres no need to recompute the state from scratch
-    //            in every case. We should detect when updates were
-    //            a linear, non-forking series applied to head, and
-    //            just use the last State we computed.
-    oldestAncestor := a.oldestAncestor.Hash()
-    c, err := computeActiveChain(a.storage, &oldestAncestor, 2000)
-    if err != nil {
-        return fmt.Errorf("recomputing active chain: %v", err)
-    }
-    a.head = c.Head
-    a.oldestAncestor = c.Oldest
-    a.state = c.state
+    if isHeadChain {
+        // Head-chain fastpath: We can use the state we computed
+        // in the last iteration.
+        a.head = updates[len(updates)-1]
+        a.state = stateAt[prevHash]
+    } else {
+        oldestAncestor := a.oldestAncestor.Hash()
+        c, err := computeActiveChain(a.storage, &oldestAncestor, 2000)
+        if err != nil {
+            return fmt.Errorf("recomputing active chain: %v", err)
+        }
+        a.head = c.Head
+        a.oldestAncestor = c.Oldest
+        a.state = c.state
+    }
     return nil
 }
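The core of the optimization is plain chain-tracking: while applying the batch, remember the previous hash and notice the first update whose parent is not it; only if every update chained directly from HEAD can the last computed state be adopted as-is. A stripped-down sketch of that predicate, with toy Hash and Update types standing in for tka's AUMHash and AUM (none of this is the committed code):

package chainsketch // illustrative only; not part of the commit

// Hash and Update stand in for tka's AUMHash and AUM.
type Hash [32]byte

type Update struct {
    Hash   Hash
    Parent Hash
}

// extendsHead reports whether updates form one unbroken chain growing
// directly from head: head -> updates[0] -> updates[1] -> ...
// When true, the state computed while iterating the batch is already the
// new authority state, so there is no need to recompute it from the
// oldest stored ancestor (the slow path).
func extendsHead(head Hash, updates []Update) bool {
    prev := head
    for _, u := range updates {
        if u.Parent != prev {
            return false // a fork or out-of-order update: take the slow path
        }
        prev = u.Hash
    }
    return len(updates) > 0
}

Inform additionally clears the flag when it skips an AUM it already has in storage, since a skipped update means the loop's final state no longer corresponds to applying the whole batch on top of HEAD.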

@@ -325,7 +325,7 @@ func TestCreateBootstrapAuthority(t *testing.T) {
     }
 }
-func TestAuthorityInform(t *testing.T) {
+func TestAuthorityInformNonLinear(t *testing.T) {
     pub, priv := testingKey25519(t, 1)
     key := Key{Kind: Key25519, Public: pub, Votes: 2}
@ -351,6 +351,8 @@ func TestAuthorityInform(t *testing.T) {
t.Fatalf("Bootstrap() failed: %v", err)
}
// L2 does not chain from L1, disabling the isHeadChain optimization
// and forcing Inform() to take the slow path.
informAUMs := []AUM{c.AUMs["L1"], c.AUMs["L2"], c.AUMs["L3"], c.AUMs["L4"], c.AUMs["L5"]}
if err := a.Inform(informAUMs); err != nil {
@@ -371,3 +373,46 @@
         t.Fatal("authority did not converge to correct AUM")
     }
 }
+func TestAuthorityInformLinear(t *testing.T) {
+    pub, priv := testingKey25519(t, 1)
+    key := Key{Kind: Key25519, Public: pub, Votes: 2}
+    c := newTestchain(t, `
+        G1 -> L1 -> L2 -> L3
+        G1.template = genesis
+    `,
+        optTemplate("genesis", AUM{MessageKind: AUMCheckpoint, State: &State{
+            Keys: []Key{key},
+            DisablementSecrets: [][]byte{disablementKDF([]byte{1, 2, 3})},
+        }}),
+        optKey("key", key, priv),
+        optSignAllUsing("key"))
+    storage := &Mem{}
+    a, err := Bootstrap(storage, c.AUMs["G1"])
+    if err != nil {
+        t.Fatalf("Bootstrap() failed: %v", err)
+    }
+    informAUMs := []AUM{c.AUMs["L1"], c.AUMs["L2"], c.AUMs["L3"]}
+    if err := a.Inform(informAUMs); err != nil {
+        t.Fatalf("Inform() failed: %v", err)
+    }
+    for i, update := range informAUMs {
+        stored, err := storage.AUM(update.Hash())
+        if err != nil {
+            t.Errorf("reading stored update %d: %v", i, err)
+            continue
+        }
+        if diff := cmp.Diff(update, stored); diff != "" {
+            t.Errorf("update %d differs (-want, +got):\n%s", i, diff)
+        }
+    }
+    if a.Head() != c.AUMHashes["L3"] {
+        t.Fatal("authority did not converge to correct AUM")
+    }
+}
