util/deephash: improve cycle detection (#2470)

The previous algorithm used a map of all visited pointers.
The strength of this approach is that it quickly prunes any node
that has ever been visited before. The detriment is that pruning
depends heavily on the order in which pointers are visited.
This is especially relevant when hashing a map, whose entries are
visited in a non-deterministic order, which in turn makes the
map hash non-deterministic
(defeating the point of a hash).
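To see why, consider a hypothetical sketch (the node type, walk helper,
and trace output below are invented for illustration and are not the
deephash code): with one visited set shared across the whole hash
operation, the bytes contributed by each map entry depend on which
entry happens to reach a shared pointer first.

	package main

	import "fmt"

	type node struct {
		id   int
		next *node
	}

	// walk emits ids along the chain, pruning any pointer that was ever
	// visited before anywhere in this hashing operation (the old scheme).
	func walk(n *node, visited map[*node]bool, trace *[]int) {
		if n == nil || visited[n] {
			return
		}
		visited[n] = true
		*trace = append(*trace, n.id)
		walk(n.next, visited, trace)
	}

	func main() {
		shared := &node{id: 3}
		a := &node{id: 1, next: shared}
		b := &node{id: 2, next: shared}

		// Two iteration orders over the same "map entries" {a, b}.
		for _, order := range [][]*node{{a, b}, {b, a}} {
			visited := map[*node]bool{}
			for _, n := range order {
				var trace []int
				walk(n, visited, &trace)
				fmt.Println(n.id, "contributes", trace)
			}
		}
		// Order a,b: entry 1 contributes [1 3], entry 2 contributes [2].
		// Order b,a: entry 2 contributes [2 3], entry 1 contributes [1].
	}

Even combining the per-entry hashes in an order-insensitive way cannot
repair this, because the bytes hashed for each entry already differ.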

This new algorithm uses a stack of all visited pointers,
similar to how github.com/google/go-cmp performs cycle detection.
When we visit a pointer, we push it onto the stack, and when
we leave a pointer, we pop it from the stack.
Before visiting a pointer, we first check whether the pointer exists
anywhere in the stack. If yes, then we prune the node.
The detriment of this approach is that we may hash a node more often
than before since we do not prune as aggressively.
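A condensed sketch of the idea (the ring type and hash helper are
invented for illustration; the visitStack shape mirrors the one added
by this commit):

	package main

	import "fmt"

	type ring struct {
		id   int
		next *ring
	}

	type visitStack map[*ring]int

	func (v visitStack) seen(p *ring) (int, bool) { idx, ok := v[p]; return idx, ok }
	func (v *visitStack) push(p *ring) {
		if *v == nil {
			*v = make(map[*ring]int)
		}
		(*v)[p] = len(*v)
	}
	func (v visitStack) pop(p *ring) { delete(v, p) }

	// hash walks the ring, emitting ids; a back-edge is emitted as a
	// reference to the stack depth where that pointer was first visited.
	func hash(r *ring, stack visitStack, out *[]string) {
		if idx, ok := stack.seen(r); ok {
			*out = append(*out, fmt.Sprintf("cycle->%d", idx)) // prune only true cycles
			return
		}
		stack.push(r)
		defer stack.pop(r)
		*out = append(*out, fmt.Sprint(r.id))
		hash(r.next, stack, out)
	}

	func main() {
		a := &ring{id: 1}
		b := &ring{id: 2, next: a}
		a.next = b // a -> b -> a

		var out []string
		hash(a, visitStack{}, &out)
		fmt.Println(out) // [1 2 cycle->0]
	}

Emitting a back-reference to the stack depth, rather than emitting
nothing, keeps distinct cycles distinguishable.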

With this approach, the set of visited pointers at any node is only
the path of pointers leading to that node, not every pointer that
happens to have been visited elsewhere in the traversal. This gives
us deterministic hashing regardless of visit order.
We can now delete hashMapFallback and its associated complexity,
which existed only because the previous approach was non-deterministic
in the presence of cycles.

This fixes a failure of the old algorithm where obviously different
values were treated as equal because the pruning was too aggressive.
See https://github.com/tailscale/tailscale/issues/2443#issuecomment-883653534
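The shape of that failure, written out (this is essentially the
regression test added below; it assumes the tailscale.com/util/deephash
import path and that Sum values are comparable):

	package main

	import (
		"fmt"

		"tailscale.com/util/deephash"
	)

	func main() {
		i1, i2 := 1, 2
		v1 := [3]*int{&i1, &i2, &i1} // third element repeats &i1
		v2 := [3]*int{&i1, &i2, &i2} // third element repeats &i2

		// With the old visited map, both repeated pointers had already been
		// seen by the time the third element was reached, so both arrays
		// produced identical hashes.
		fmt.Println(deephash.Hash(v1) == deephash.Hash(v2)) // want: false
	}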

The new algorithm is slightly slower since it prunes less aggressively:
	name              old time/op    new time/op    delta
	Hash-8              66.1µs ± 1%    68.8µs ± 1%   +4.09%        (p=0.000 n=19+19)
	HashMapAcyclic-8    63.0µs ± 1%    62.5µs ± 1%   -0.76%        (p=0.000 n=18+19)
	TailcfgNode-8       9.79µs ± 2%    9.88µs ± 1%   +0.95%        (p=0.000 n=19+17)
	HashArray-8          643ns ± 1%     653ns ± 1%   +1.64%        (p=0.000 n=19+19)
However, a slower but correct algorithm seems preferable
to a faster but incorrect one.

Signed-off-by: Joe Tsai <joetsai@digital-static.net>

@@ -143,7 +143,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
    tailscale.com/types/structs from tailscale.com/control/controlclient+
    tailscale.com/types/wgkey from tailscale.com/control/controlclient+
 L  tailscale.com/util/cmpver from tailscale.com/net/dns
-   tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+
+💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+
    tailscale.com/util/dnsname from tailscale.com/ipn/ipnstate+
 LW tailscale.com/util/endian from tailscale.com/net/netns+
    tailscale.com/util/groupmember from tailscale.com/ipn/ipnserver

@@ -24,6 +24,7 @@ import (
 	"strconv"
 	"sync"
 	"time"
+	"unsafe"
 )

 const scratchSize = 128
@@ -31,18 +32,15 @@ const scratchSize = 128

 // hasher is reusable state for hashing a value.
 // Get one via hasherPool.
 type hasher struct {
 	h          hash.Hash
 	bw         *bufio.Writer
 	scratch    [scratchSize]byte
-	visited    map[uintptr]bool
+	visitStack visitStack
 }

 // newHasher initializes a new hasher, for use by hasherPool.
 func newHasher() *hasher {
-	h := &hasher{
-		h:       sha256.New(),
-		visited: map[uintptr]bool{},
-	}
+	h := &hasher{h: sha256.New()}
 	h.bw = bufio.NewWriterSize(h.h, h.h.BlockSize())
 	return h
 }
@@ -98,9 +96,6 @@ var hasherPool = &sync.Pool{
 func Hash(v interface{}) Sum {
 	h := hasherPool.Get().(*hasher)
 	defer hasherPool.Put(h)
-	for k := range h.visited {
-		delete(h.visited, k)
-	}
 	return h.Hash(v)
 }
@@ -133,15 +128,12 @@ func (h *hasher) int(i int) {

 var uint8Type = reflect.TypeOf(byte(0))

-// print hashes v into w.
-// It reports whether it was able to do so without hitting a cycle.
-func (h *hasher) print(v reflect.Value) (acyclic bool) {
+func (h *hasher) print(v reflect.Value) {
 	if !v.IsValid() {
-		return true
+		return
 	}
 	w := h.bw
-	visited := h.visited

 	if v.CanInterface() {
 		// Use AppendTo methods, if available and cheap.
@@ -151,32 +143,41 @@ func (h *hasher) print(v reflect.Value) (acyclic bool) {
 			record := a.AppendTo(size)
 			binary.LittleEndian.PutUint64(record, uint64(len(record)-len(size)))
 			w.Write(record)
-			return true
+			return
 		}
 	}

+	// TODO(dsnet): Avoid cycle detection for types that cannot have cycles.
+
 	// Generic handling.
 	switch v.Kind() {
 	default:
 		panic(fmt.Sprintf("unhandled kind %v for type %v", v.Kind(), v.Type()))
 	case reflect.Ptr:
-		ptr := v.Pointer()
-		if visited[ptr] {
-			return false
+		if v.IsNil() {
+			w.WriteByte(0) // indicates nil
+			return
 		}
-		visited[ptr] = true
-		return h.print(v.Elem())
+
+		// Check for cycle.
+		ptr := pointerOf(v)
+		if idx, ok := h.visitStack.seen(ptr); ok {
+			w.WriteByte(2) // indicates cycle
+			h.uint(uint64(idx))
+			return
+		}
+		h.visitStack.push(ptr)
+		defer h.visitStack.pop(ptr)
+
+		w.WriteByte(1) // indicates visiting a pointer
+		h.print(v.Elem())
 	case reflect.Struct:
-		acyclic = true
 		w.WriteString("struct")
 		h.int(v.NumField())
 		for i, n := 0, v.NumField(); i < n; i++ {
 			h.int(i)
-			if !h.print(v.Field(i)) {
-				acyclic = false
-			}
+			h.print(v.Field(i))
 		}
-		return acyclic
 	case reflect.Slice, reflect.Array:
 		vLen := v.Len()
 		if v.Kind() == reflect.Slice {
@@ -190,56 +191,41 @@ func (h *hasher) print(v reflect.Value) (acyclic bool) {
 				// to allocate, so it's not a win.
 				n := reflect.Copy(reflect.ValueOf(&h.scratch).Elem(), v)
 				w.Write(h.scratch[:n])
-				return true
+				return
 			}
 			fmt.Fprintf(w, "%s", v.Interface())
-			return true
+			return
 		}
-		acyclic = true
 		for i := 0; i < vLen; i++ {
+			// TODO(dsnet): Perform cycle detection for slices,
+			// which is functionally a list of pointers.
+			// See https://github.com/google/go-cmp/blob/402949e8139bb890c71a707b6faf6dd05c92f4e5/cmp/compare.go#L438-L450
 			h.int(i)
-			if !h.print(v.Index(i)) {
-				acyclic = false
-			}
+			h.print(v.Index(i))
 		}
-		return acyclic
 	case reflect.Interface:
 		if v.IsNil() {
 			w.WriteByte(0) // indicates nil
-			return true
+			return
 		}
 		v = v.Elem()

 		w.WriteByte(1) // indicates visiting interface value
 		h.hashType(v.Type())
-		return h.print(v)
+		h.print(v)
 	case reflect.Map:
-		// TODO(bradfitz): ideally we'd avoid these map
-		// operations to detect cycles if we knew from the map
-		// element type that there no way to form a cycle,
-		// which is the common case. Notably, we don't care
-		// about hashing the same map+contents twice in
-		// different parts of the tree. In fact, we should
-		// ideally. (And this prevents it) We should only stop
-		// hashing when there's a cycle. What we should
-		// probably do is make sure we enumerate the data
-		// structure tree is a fixed order and then give each
-		// pointer an increasing number, and when we hit a
-		// dup, rather than emitting nothing, we should emit a
-		// "value #12" reference. Which implies that all things
-		// emit to the bufio.Writer should be type-tagged so
-		// we can distinguish loop references without risk of
-		// collisions.
-		ptr := v.Pointer()
-		if visited[ptr] {
-			return false
+		// Check for cycle.
+		ptr := pointerOf(v)
+		if idx, ok := h.visitStack.seen(ptr); ok {
+			w.WriteByte(2) // indicates cycle
+			h.uint(uint64(idx))
+			return
 		}
-		visited[ptr] = true
-		if h.hashMapAcyclic(v) {
-			return true
-		}
-		return h.hashMapFallback(v)
+		h.visitStack.push(ptr)
+		defer h.visitStack.pop(ptr)
+
+		w.WriteByte(1) // indicates visiting a map
+		h.hashMap(v)
 	case reflect.String:
 		h.int(v.Len())
 		w.WriteString(v.String())
@@ -254,7 +240,6 @@ func (h *hasher) print(v reflect.Value) (acyclic bool) {
 	case reflect.Complex64, reflect.Complex128:
 		fmt.Fprintf(w, "%v", v.Complex())
 	}
-	return true
 }

 type mapHasher struct {
@@ -309,10 +294,11 @@ func (c valueCache) get(t reflect.Type) reflect.Value {
 	return v
 }

-// hashMapAcyclic is the faster sort-free version of map hashing. If
-// it detects a cycle it returns false and guarantees that nothing was
-// written to w.
-func (h *hasher) hashMapAcyclic(v reflect.Value) (acyclic bool) {
+// hashMap hashes a map in a sort-free manner.
+// It relies on a map being a functionally an unordered set of KV entries.
+// So long as we hash each KV entry together, we can XOR all
+// of the individual hashes to produce a unique hash for the entire map.
+func (h *hasher) hashMap(v reflect.Value) {
 	mh := mapHasherPool.Get().(*mapHasher)
 	defer mapHasherPool.Put(mh)
 	mh.Reset()
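
For context on why hashMap can stay sort-free: it hashes each key/value
entry independently and XORs the fixed-size entry hashes together, so
the randomized map iteration order cancels out. A standalone sketch of
just that combining step, using an invented hashEntries helper rather
than the real hasher and mapHasher pools:

	package main

	import (
		"crypto/sha256"
		"fmt"
	)

	// hashEntries hashes each key/value entry on its own and XORs the
	// fixed-size entry hashes, so the result does not depend on the
	// order in which the entries are visited.
	func hashEntries(m map[string]int) [sha256.Size]byte {
		var sum [sha256.Size]byte
		for k, v := range m {
			entry := sha256.Sum256([]byte(fmt.Sprintf("%q:%d", k, v)))
			for i := range sum {
				sum[i] ^= entry[i]
			}
		}
		return sum
	}

	func main() {
		m := map[string]int{"a": 1, "b": 2, "c": 3}
		fmt.Printf("%x\n", hashEntries(m)) // stable across runs and iteration orders
	}
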
@@ -329,35 +315,42 @@ func (h *hasher) hashMapAcyclic(v reflect.Value) (acyclic bool) {
 		key := iterKey(iter, k)
 		val := iterVal(iter, e)
 		mh.startEntry()
-		if !h.print(key) {
-			return false
-		}
-		if !h.print(val) {
-			return false
-		}
+		h.print(key)
+		h.print(val)
 		mh.endEntry()
 	}
 	oldw.Write(mh.xbuf[:])
-	return true
 }

-func (h *hasher) hashMapFallback(v reflect.Value) (acyclic bool) {
-	acyclic = true
-	sm := newSortedMap(v)
-	w := h.bw
-	fmt.Fprintf(w, "map[%d]{\n", len(sm.Key))
-	for i, k := range sm.Key {
-		if !h.print(k) {
-			acyclic = false
-		}
-		w.WriteString(": ")
-		if !h.print(sm.Value[i]) {
-			acyclic = false
-		}
-		w.WriteString("\n")
-	}
-	w.WriteString("}\n")
-	return acyclic
+// visitStack is a stack of pointers visited.
+// Pointers are pushed onto the stack when visited, and popped when leaving.
+// The integer value is the depth at which the pointer was visited.
+// The length of this stack should be zero after every hashing operation.
+type visitStack map[pointer]int
+
+func (v visitStack) seen(p pointer) (int, bool) {
+	idx, ok := v[p]
+	return idx, ok
+}
+
+func (v *visitStack) push(p pointer) {
+	if *v == nil {
+		*v = make(map[pointer]int)
+	}
+	(*v)[p] = len(*v)
+}
+
+func (v visitStack) pop(p pointer) {
+	delete(v, p)
+}
+
+// pointer is a thin wrapper over unsafe.Pointer.
+// We only rely on comparability of pointers; we cannot rely on uintptr since
+// that would break if Go ever switched to a moving GC.
+type pointer struct{ p unsafe.Pointer }
+
+func pointerOf(v reflect.Value) pointer {
+	return pointer{unsafe.Pointer(v.Pointer())}
 }

 // hashType hashes a reflect.Type.

@@ -47,6 +47,19 @@ func TestHash(t *testing.T) {
 		{in: tuple{iface{&MyHeader{}}, iface{&tar.Header{}}}, wantEq: false},
 		{in: tuple{iface{[]map[string]MyBool{}}, iface{[]map[string]MyBool{}}}, wantEq: true},
 		{in: tuple{iface{[]map[string]bool{}}, iface{[]map[string]MyBool{}}}, wantEq: false},
+		{in: tuple{false, true}, wantEq: false},
+		{in: tuple{true, true}, wantEq: true},
+		{in: tuple{false, false}, wantEq: true},
+		{
+			in: func() tuple {
+				i1 := 1
+				i2 := 2
+				v1 := [3]*int{&i1, &i2, &i1}
+				v2 := [3]*int{&i1, &i2, &i2}
+				return tuple{v1, v2}
+			}(),
+			wantEq: false,
+		},
 	}

 	for _, tt := range tests {
@@ -192,13 +205,8 @@ func TestHashMapAcyclic(t *testing.T) {
 		v := reflect.ValueOf(m)
 		buf.Reset()
 		bw.Reset(&buf)
-		h := &hasher{
-			bw:      bw,
-			visited: map[uintptr]bool{},
-		}
-		if !h.hashMapAcyclic(v) {
-			t.Fatal("returned false")
-		}
+		h := &hasher{bw: bw}
+		h.hashMap(v)
 		if got[string(buf.Bytes())] {
 			continue
 		}
@@ -213,13 +221,10 @@ func TestPrintArray(t *testing.T) {
 	type T struct {
 		X [32]byte
 	}
-	x := &T{X: [32]byte{1: 1, 31: 31}}
+	x := T{X: [32]byte{1: 1, 31: 31}}
 	var got bytes.Buffer
 	bw := bufio.NewWriter(&got)
-	h := &hasher{
-		bw:      bw,
-		visited: map[uintptr]bool{},
-	}
+	h := &hasher{bw: bw}
 	h.print(reflect.ValueOf(x))
 	bw.Flush()
 	const want = "struct" +
@@ -243,17 +248,12 @@ func BenchmarkHashMapAcyclic(b *testing.B) {
 	bw := bufio.NewWriter(&buf)
 	v := reflect.ValueOf(m)

-	h := &hasher{
-		bw:      bw,
-		visited: map[uintptr]bool{},
-	}
+	h := &hasher{bw: bw}

 	for i := 0; i < b.N; i++ {
 		buf.Reset()
 		bw.Reset(&buf)
-		if !h.hashMapAcyclic(v) {
-			b.Fatal("returned false")
-		}
+		h.hashMap(v)
 	}
 }

@ -1,224 +0,0 @@
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// and
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This is a slightly modified fork of Go's src/internal/fmtsort/sort.go
package deephash
import (
"reflect"
"sort"
)
// Note: Throughout this package we avoid calling reflect.Value.Interface as
// it is not always legal to do so and it's easier to avoid the issue than to face it.
// sortedMap represents a map's keys and values. The keys and values are
// aligned in index order: Value[i] is the value in the map corresponding to Key[i].
type sortedMap struct {
Key []reflect.Value
Value []reflect.Value
}
func (o *sortedMap) Len() int { return len(o.Key) }
func (o *sortedMap) Less(i, j int) bool { return compare(o.Key[i], o.Key[j]) < 0 }
func (o *sortedMap) Swap(i, j int) {
o.Key[i], o.Key[j] = o.Key[j], o.Key[i]
o.Value[i], o.Value[j] = o.Value[j], o.Value[i]
}
// Sort accepts a map and returns a sortedMap that has the same keys and
// values but in a stable sorted order according to the keys, modulo issues
// raised by unorderable key values such as NaNs.
//
// The ordering rules are more general than with Go's < operator:
//
// - when applicable, nil compares low
// - ints, floats, and strings order by <
// - NaN compares less than non-NaN floats
// - bool compares false before true
// - complex compares real, then imag
// - pointers compare by machine address
// - channel values compare by machine address
// - structs compare each field in turn
// - arrays compare each element in turn.
// Otherwise identical arrays compare by length.
// - interface values compare first by reflect.Type describing the concrete type
// and then by concrete value as described in the previous rules.
//
func newSortedMap(mapValue reflect.Value) *sortedMap {
if mapValue.Type().Kind() != reflect.Map {
return nil
}
// Note: this code is arranged to not panic even in the presence
// of a concurrent map update. The runtime is responsible for
// yelling loudly if that happens. See issue 33275.
n := mapValue.Len()
key := make([]reflect.Value, 0, n)
value := make([]reflect.Value, 0, n)
iter := mapValue.MapRange()
for iter.Next() {
key = append(key, iter.Key())
value = append(value, iter.Value())
}
sorted := &sortedMap{
Key: key,
Value: value,
}
sort.Stable(sorted)
return sorted
}
// compare compares two values of the same type. It returns -1, 0, 1
// according to whether a > b (1), a == b (0), or a < b (-1).
// If the types differ, it returns -1.
// See the comment on Sort for the comparison rules.
func compare(aVal, bVal reflect.Value) int {
aType, bType := aVal.Type(), bVal.Type()
if aType != bType {
return -1 // No good answer possible, but don't return 0: they're not equal.
}
switch aVal.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
a, b := aVal.Int(), bVal.Int()
switch {
case a < b:
return -1
case a > b:
return 1
default:
return 0
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
a, b := aVal.Uint(), bVal.Uint()
switch {
case a < b:
return -1
case a > b:
return 1
default:
return 0
}
case reflect.String:
a, b := aVal.String(), bVal.String()
switch {
case a < b:
return -1
case a > b:
return 1
default:
return 0
}
case reflect.Float32, reflect.Float64:
return floatCompare(aVal.Float(), bVal.Float())
case reflect.Complex64, reflect.Complex128:
a, b := aVal.Complex(), bVal.Complex()
if c := floatCompare(real(a), real(b)); c != 0 {
return c
}
return floatCompare(imag(a), imag(b))
case reflect.Bool:
a, b := aVal.Bool(), bVal.Bool()
switch {
case a == b:
return 0
case a:
return 1
default:
return -1
}
case reflect.Ptr:
a, b := aVal.Pointer(), bVal.Pointer()
switch {
case a < b:
return -1
case a > b:
return 1
default:
return 0
}
case reflect.Chan:
if c, ok := nilCompare(aVal, bVal); ok {
return c
}
ap, bp := aVal.Pointer(), bVal.Pointer()
switch {
case ap < bp:
return -1
case ap > bp:
return 1
default:
return 0
}
case reflect.Struct:
for i := 0; i < aVal.NumField(); i++ {
if c := compare(aVal.Field(i), bVal.Field(i)); c != 0 {
return c
}
}
return 0
case reflect.Array:
for i := 0; i < aVal.Len(); i++ {
if c := compare(aVal.Index(i), bVal.Index(i)); c != 0 {
return c
}
}
return 0
case reflect.Interface:
if c, ok := nilCompare(aVal, bVal); ok {
return c
}
c := compare(reflect.ValueOf(aVal.Elem().Type()), reflect.ValueOf(bVal.Elem().Type()))
if c != 0 {
return c
}
return compare(aVal.Elem(), bVal.Elem())
default:
// Certain types cannot appear as keys (maps, funcs, slices), but be explicit.
panic("bad type in compare: " + aType.String())
}
}
// nilCompare checks whether either value is nil. If not, the boolean is false.
// If either value is nil, the boolean is true and the integer is the comparison
// value. The comparison is defined to be 0 if both are nil, otherwise the one
// nil value compares low. Both arguments must represent a chan, func,
// interface, map, pointer, or slice.
func nilCompare(aVal, bVal reflect.Value) (int, bool) {
if aVal.IsNil() {
if bVal.IsNil() {
return 0, true
}
return -1, true
}
if bVal.IsNil() {
return 1, true
}
return 0, false
}
// floatCompare compares two floating-point values. NaNs compare low.
func floatCompare(a, b float64) int {
switch {
case isNaN(a):
return -1 // No good answer if b is a NaN so don't bother checking.
case isNaN(b):
return 1
case a < b:
return -1
case a > b:
return 1
}
return 0
}
func isNaN(a float64) bool {
return a != a
}