Issue #160: Apply 2.x workarounds to lru_cache backport

all() was introduced in Python 2.5
with: blocks arrived in Python 2.6 (Python 2.5 only via a __future__ import)
nonlocal and default utf-8 source encoding were added in Python 3.x
pull/178/head
Alex Willmer 8 years ago
parent fc0fb732a3
commit 0b46b90c62
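
The nonlocal removal relies on a standard Python 2.x trick: a closure cannot
rebind names in the enclosing scope, but it can freely mutate objects bound
there, so the four state cells (hits, misses, full, root) move into a single
shared list indexed by constants. A minimal sketch of the pattern, using a
hypothetical counter rather than code from this commit:

    def make_counter():
        state = [0]                # one mutable cell shared with the closure

        def increment():
            state[0] += 1          # mutates the list in place; no rebinding,
            return state[0]        # so no nonlocal is needed

        return increment

    counter = make_counter()
    assert counter() == 1
    assert counter() == 2

The with-block removal is the usual mechanical rewrite: each "with lock:"
becomes lock.acquire() followed by a try/finally whose finally clause calls
lock.release(), which is equivalent for a plain RLock.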

@@ -1,3 +1,6 @@
+"""Selected backports from Python stdlib collections module
+"""
+
 __all__ = [
     'namedtuple',
 ]
@@ -6,6 +9,15 @@ from operator import itemgetter as _itemgetter
 from keyword import iskeyword as _iskeyword
 import sys as _sys
 
+try:
+    all([])
+except NameError:
+    def all(iterable):
+        for element in iterable:
+            if not element:
+                return False
+        return True
+
 def namedtuple(typename, field_names, verbose=False):
     """Returns a new subclass of tuple with named fields.

@@ -1,7 +1,14 @@
+# encoding: utf-8
+"""Selected backports from Python stdlib functools module
+"""
+# Written by Nick Coghlan <ncoghlan at gmail.com>,
+# Raymond Hettinger <python at rcn.com>,
+# and Łukasz Langa <lukasz at langa.pl>.
+# Copyright (C) 2006-2013 Python Software Foundation.
 __all__ = [
     'update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
-    'lru_cache', 'reduce',
+    'lru_cache',
 ]
 
 try:
@@ -114,7 +121,7 @@ class _HashedSeq(list):
 
 def _make_key(args, kwds, typed,
              kwd_mark = (object(),),
-             fasttypes = {int, str, frozenset, type(None)},
+             fasttypes = set([int, str, frozenset, type(None)]),
              sorted=sorted, tuple=tuple, type=type, len=len):
     """Make a cache key from optionally typed positional and keyword arguments
 
     The key is constructed in a way that is flat as possible rather than
@@ -175,47 +182,46 @@ def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
     PREV, NEXT, KEY, RESULT = 0, 1, 2, 3   # names for the link fields
 
     cache = {}
-    hits = misses = 0
-    full = False
     cache_get = cache.get    # bound method to lookup a key or return None
     lock = RLock()           # because linkedlist updates aren't threadsafe
     root = []                # root of the circular doubly linked list
     root[:] = [root, root, None, None]     # initialize by pointing to self
+    hits_misses_full_root = [0, 0, False, root]
+    HITS, MISSES, FULL, ROOT = 0, 1, 2, 3
 
     if maxsize == 0:
 
         def wrapper(*args, **kwds):
             # No caching -- just a statistics update after a successful call
-            nonlocal misses
             result = user_function(*args, **kwds)
-            misses += 1
+            hits_misses_full_root[MISSES] += 1
             return result
 
     elif maxsize is None:
 
         def wrapper(*args, **kwds):
             # Simple caching without ordering or size limit
-            nonlocal hits, misses
             key = make_key(args, kwds, typed)
             result = cache_get(key, sentinel)
             if result is not sentinel:
-                hits += 1
+                hits_misses_full_root[HITS] += 1
                 return result
             result = user_function(*args, **kwds)
             cache[key] = result
-            misses += 1
+            hits_misses_full_root[MISSES] += 1
             return result
 
     else:
 
         def wrapper(*args, **kwds):
             # Size limited caching that tracks accesses by recency
-            nonlocal root, hits, misses, full
             key = make_key(args, kwds, typed)
-            with lock:
+            lock.acquire()
+            try:
                 link = cache_get(key)
                 if link is not None:
                     # Move the link to the front of the circular queue
+                    root = hits_misses_full_root[ROOT]
                     link_prev, link_next, _key, result = link
                     link_prev[NEXT] = link_next
                     link_next[PREV] = link_prev
@@ -223,19 +229,22 @@ def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
                     last[NEXT] = root[PREV] = link
                     link[PREV] = last
                     link[NEXT] = root
-                    hits += 1
+                    hits_misses_full_root[HITS] += 1
                     return result
+            finally:
+                lock.release()
             result = user_function(*args, **kwds)
-            with lock:
+            lock.acquire()
+            try:
                 if key in cache:
                     # Getting here means that this same key was added to the
                     # cache while the lock was released.  Since the link
                     # update is already done, we need only return the
                     # computed result and update the count of misses.
                     pass
-                elif full:
+                elif hits_misses_full_root[FULL]:
                     # Use the old root to store the new key and result.
-                    oldroot = root
+                    oldroot = root = hits_misses_full_root[ROOT]
                     oldroot[KEY] = key
                     oldroot[RESULT] = result
                     # Empty the oldest link and make it the new root.
@@ -244,7 +253,7 @@ def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
                     # update. That will prevent potentially arbitrary object
                     # clean-up code (i.e. __del__) from running while we're
                     # still adjusting the links.
-                    root = oldroot[NEXT]
+                    root = hits_misses_full_root[ROOT] = oldroot[NEXT]
                     oldkey = root[KEY]
                     oldresult = root[RESULT]
                     root[KEY] = root[RESULT] = None
@ -256,28 +265,38 @@ def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
cache[key] = oldroot cache[key] = oldroot
else: else:
# Put result in a new link at the front of the queue. # Put result in a new link at the front of the queue.
root = hits_misses_full_root[ROOT]
last = root[PREV] last = root[PREV]
link = [last, root, key, result] link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link last[NEXT] = root[PREV] = cache[key] = link
# Use the __len__() method instead of the len() function # Use the __len__() method instead of the len() function
# which could potentially be wrapped in an lru_cache itself. # which could potentially be wrapped in an lru_cache itself.
full = (cache.__len__() >= maxsize) hits_misses_full_root[FULL] = (cache.__len__() >= maxsize)
misses += 1 hits_misses_full_root[MISSES]
finally:
lock.release()
return result return result
def cache_info(): def cache_info():
"""Report cache statistics""" """Report cache statistics"""
with lock: lock.acquire()
try:
return _CacheInfo(hits, misses, maxsize, cache.__len__()) return _CacheInfo(hits, misses, maxsize, cache.__len__())
finally:
lock.release()
def cache_clear(): def cache_clear():
"""Clear the cache and cache statistics""" """Clear the cache and cache statistics"""
nonlocal hits, misses, full lock.acquire()
with lock: try:
cache.clear() cache.clear()
root = hits_misses_full_root[ROOT]
root[:] = [root, root, None, None] root[:] = [root, root, None, None]
hits = misses = 0 hits_misses_full[HITS] = 0
full = False hits_misses_full[MISSES] = 0
hits_misses_full[FULL] = False
finally:
lock.release()
wrapper.cache_info = cache_info wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear wrapper.cache_clear = cache_clear
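
With the workarounds applied, the backport should run unchanged from Python
2.4 through 3.x. A usage sketch; the import path is a guess for illustration,
not taken from this commit:

    from backports.functools import lru_cache   # hypothetical module path

    @lru_cache(maxsize=32)
    def fib(n):
        if n < 2:
            return n
        return fib(n - 1) + fib(n - 2)

    fib(30)
    print(fib.cache_info())
    # CacheInfo(hits=28, misses=31, maxsize=32, currsize=31)
    fib.cache_clear()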
