util/lru: replace container/list with a custom ring implementation

The pre-generics container/list is quite unpleasant to use, and the
pointer manipulation an LRU needs is simple enough to implement directly
now that we have generic types.

With this change, the LRU's internals use a ring (a circular
doubly-linked list) rather than a plain doubly-linked list, because the
ring makes the list manipulation edge cases more regular: the only
remaining special case is the transition between 0 and 1 elements,
rather than also having to handle the first and last members of the
list specially.
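
To illustrate, a minimal standalone sketch of ring insertion (the node
type and pushFront helper here are invented for this example; the real
code below uses entry and newAtFront):

    package main

    import "fmt"

    // node is a toy ring element, mirroring the entry type in the diff.
    type node struct {
        prev, next *node
        val        string
    }

    // pushFront inserts n at the front of the ring whose front is *head.
    // The only special case is the empty ring; there is no separate
    // handling for the first or last position, because a ring has no ends.
    func pushFront(head **node, n *node) {
        if *head == nil { // 0 -> 1 elements: n becomes a ring of one
            n.prev, n.next = n, n
        } else { // splice n in between the current front and the back
            n.next = *head
            n.prev = (*head).prev
            (*head).prev.next = n
            (*head).prev = n
        }
        *head = n
    }

    func main() {
        var head *node
        for _, s := range []string{"a", "b", "c"} {
            pushFront(&head, &node{val: s})
        }
        // Walk from most to least recently inserted: prints "c b a".
        for n, i := head, 0; i < 3; n, i = n.next, i+1 {
            fmt.Print(n.val, " ")
        }
        fmt.Println()
    }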

While the primary purpose was improved readability of the code, it
turns out that removing the indirection through an interface box
(illustrated below, after the benchmark numbers) also speeds up the LRU:

       │ before.txt  │              after.txt              │
       │   sec/op    │   sec/op     vs base                │
LRU-32   67.05n ± 2%   59.73n ± 2%  -10.90% (p=0.000 n=20)

       │ before.txt │             after.txt              │
       │    B/op    │    B/op     vs base                │
LRU-32   21.00 ± 0%   10.00 ± 0%  -52.38% (p=0.000 n=20)

       │ before.txt │           after.txt            │
       │ allocs/op  │ allocs/op   vs base            │
LRU-32   0.000 ± 0%   0.000 ± 0%  ~ (p=1.000 n=20) ¹
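
For context on that interface box: container/list stores each element's
value in list.Element.Value, whose type is any, so the old code boxed
every *entry into an interface on insert and type-asserted it back out
on every access. A standalone sketch of that pattern (not code from
this repo):

    package main

    import (
        "container/list"
        "fmt"
    )

    type entry[K comparable, V any] struct {
        key   K
        value V
    }

    func main() {
        ll := list.New()
        // The *entry is boxed into an any on insert...
        ele := ll.PushFront(&entry[string, int]{"k", 1})
        // ...and must be type-asserted back out on every access.
        fmt.Println(ele.Value.(*entry[string, int]).value) // prints 1
    }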

Updates #cleanup

Signed-off-by: David Anderson <danderson@tailscale.com>

@@ -4,10 +4,6 @@
 // Package lru contains a typed Least-Recently-Used cache.
 package lru
 
-import (
-    "container/list"
-)
-
 // Cache is container type keyed by K, storing V, optionally evicting the least
 // recently used items if a maximum size is exceeded.
 //
@@ -22,12 +18,25 @@ type Cache[K comparable, V any] struct {
     // an item is evicted. Zero means no limit.
     MaxEntries int
 
-    ll *list.List
-    m  map[K]*list.Element // of *entry[K,V]
+    // head is a ring of LRU values. head points to the most recently
+    // used element, head.prev is the least recently used.
+    //
+    // An LRU is technically a simple list rather than a ring, but
+    // implementing it as a ring makes the list manipulation
+    // operations more regular, because the first/last positions in
+    // the list stop being special.
+    //
+    // head is nil when the LRU is empty.
+    head *entry[K, V]
+    // lookup is a map of all the LRU entries contained in
+    // head. lookup and head always contain exactly the same elements;
+    // lookup is just there to allow O(1) lookups of keys.
+    lookup map[K]*entry[K, V]
 }
 
-// entry is the element type for the container/list.Element.
+// entry is an entry of Cache.
 type entry[K comparable, V any] struct {
+    prev, next *entry[K, V]
     key   K
     value V
 }
@@ -38,19 +47,18 @@ type entry[K comparable, V any] struct {
 // If MaxEntries is non-zero and the length of the cache is greater
 // after any addition, the least recently used value is evicted.
 func (c *Cache[K, V]) Set(key K, value V) {
-    if c.m == nil {
-        c.m = make(map[K]*list.Element)
-        c.ll = list.New()
+    if c.lookup == nil {
+        c.lookup = make(map[K]*entry[K, V])
     }
-    if ee, ok := c.m[key]; ok {
-        c.ll.MoveToFront(ee)
-        ee.Value.(*entry[K, V]).value = value
+    if ent, ok := c.lookup[key]; ok {
+        c.moveToFront(ent)
+        ent.value = value
         return
     }
-    ele := c.ll.PushFront(&entry[K, V]{key, value})
-    c.m[key] = ele
+    ent := c.newAtFront(key, value)
+    c.lookup[key] = ent
     if c.MaxEntries != 0 && c.Len() > c.MaxEntries {
-        c.DeleteOldest()
+        c.deleteOldest()
     }
 }
 
@@ -71,14 +79,14 @@ func (c *Cache[K, V]) Contains(key K) bool {
     return ok
 }
 
-// GetOk looks up a key's value from the cache, also reporting
-// whether it was present.
+// GetOk looks up a key's value from the cache, also reporting whether
+// it was present.
 //
 // If found, key is moved to the front of the LRU.
 func (c *Cache[K, V]) GetOk(key K) (value V, ok bool) {
-    if ele, hit := c.m[key]; hit {
-        c.ll.MoveToFront(ele)
-        return ele.Value.(*entry[K, V]).value, true
+    if ent, hit := c.lookup[key]; hit {
+        c.moveToFront(ent)
+        return ent.value, true
     }
     var zero V
     return zero, false
@@ -91,8 +99,8 @@ func (c *Cache[K, V]) GetOk(key K) (value V, ok bool) {
 // LRU. This should mostly be used for non-intrusive debug inspection
 // of the cache.
 func (c *Cache[K, V]) PeekOk(key K) (value V, ok bool) {
-    if ele, hit := c.m[key]; hit {
-        return ele.Value.(*entry[K, V]).value, true
+    if ent, hit := c.lookup[key]; hit {
+        return ent.value, true
     }
     var zero V
     return zero, false
@@ -100,25 +108,66 @@ func (c *Cache[K, V]) PeekOk(key K) (value V, ok bool) {
 
 // Delete removes the provided key from the cache if it was present.
 func (c *Cache[K, V]) Delete(key K) {
-    if e, ok := c.m[key]; ok {
-        c.deleteElement(e)
+    if ent, ok := c.lookup[key]; ok {
+        c.deleteElement(ent)
     }
 }
 
-// DeleteOldest removes the item from the cache that was least recently
-// accessed. It is a no-op if the cache is empty.
+// DeleteOldest removes the item from the cache that was least
+// recently accessed. It is a no-op if the cache is empty.
 func (c *Cache[K, V]) DeleteOldest() {
-    if c.ll != nil {
-        if e := c.ll.Back(); e != nil {
-            c.deleteElement(e)
-        }
+    if c.head != nil {
+        c.deleteOldest()
     }
 }
 
+// Len returns the number of items in the cache.
+func (c *Cache[K, V]) Len() int { return len(c.lookup) }
+
+// newAtFront creates a new LRU entry using key and value, and inserts
+// it at the front of c.head.
+func (c *Cache[K, V]) newAtFront(key K, value V) *entry[K, V] {
+    ret := &entry[K, V]{key: key, value: value}
+    if c.head == nil {
+        ret.prev = ret
+        ret.next = ret
+    } else {
+        ret.next = c.head
+        ret.prev = c.head.prev
+        c.head.prev.next = ret
+        c.head.prev = ret
+    }
+    c.head = ret
+    return ret
+}
+
-func (c *Cache[K, V]) deleteElement(e *list.Element) {
-    c.ll.Remove(e)
-    delete(c.m, e.Value.(*entry[K, V]).key)
+// moveToFront moves ent, which must be an existing element of the
+// cache, to the front of c.head.
+func (c *Cache[K, V]) moveToFront(ent *entry[K, V]) {
+    if c.head == ent {
+        return
+    }
+    ent.prev.next = ent.next
+    ent.next.prev = ent.prev
+    ent.prev = c.head.prev
+    ent.next = c.head
+    c.head.prev.next = ent
+    c.head.prev = ent
+    c.head = ent
 }
 
-// Len returns the number of items in the cache.
-func (c *Cache[K, V]) Len() int { return len(c.m) }
+// deleteOldest removes the oldest entry in the cache. It panics if
+// there are no entries in the cache.
+func (c *Cache[K, V]) deleteOldest() { c.deleteElement(c.head.prev) }
+
+// deleteElement removes ent from the cache. ent must be an existing
+// current element of the cache.
+func (c *Cache[K, V]) deleteElement(ent *entry[K, V]) {
+    if ent.next == ent {
+        c.head = nil
+    } else {
+        ent.next.prev = ent.prev
+        ent.prev.next = ent.next
+    }
+    delete(c.lookup, ent.key)
+}

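For reference, a minimal usage sketch of the cache after this change
(assuming the tailscale.com/util/lru import path):

    package main

    import (
        "fmt"

        "tailscale.com/util/lru"
    )

    func main() {
        // The zero value is ready to use; Set lazily allocates the lookup map.
        var c lru.Cache[string, int]
        c.MaxEntries = 2 // evict the least recently used entry beyond two items

        c.Set("a", 1)
        c.Set("b", 2)
        c.GetOk("a")  // moves "a" to the front of the ring
        c.Set("c", 3) // over MaxEntries: "b", the oldest entry, is evicted

        fmt.Println(c.Contains("b")) // false
        v, ok := c.GetOk("a")
        fmt.Println(v, ok, c.Len()) // 1 true 2
    }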