# leetcode lru-cache

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77


import collections
import functools

try:
    from itertools import ifilterfalse  # Python 2 name
except ImportError:
    # Python 3 renamed ifilterfalse -> filterfalse.
    from itertools import filterfalse as ifilterfalse


def lru_cache(maxsize=100, on_purge=None):
    """Least-recently-used cache decorator.

    Arguments to the cached function must be hashable.

    Args:
        maxsize: Maximum number of distinct results kept in the cache.
        on_purge: Optional callback invoked with a cached *value* as it is
            evicted from the cache (and with every remaining value when
            ``clear()`` is called).

    The decorated function gains two attributes:
        ``f.clear()`` -- empty the cache (invoking ``on_purge`` on each value).
        ``f._cache``  -- the underlying key -> result mapping (for inspection).
    """
    # Let the access-history queue grow well beyond maxsize before compacting;
    # compaction rebuilds it with a single entry per live key.
    maxqueue = maxsize * 10

    def decorating_function(user_function):
        cache = {}                                 # key -> cached result
        queue = collections.deque()                # access history, oldest first
        refcount = collections.defaultdict(int)    # key -> occurrences in queue
        sentinel = object()                        # marks compaction boundary
        kwd_mark = object()                        # separates args from kwargs in key

        # lookup optimizations (ugly but fast)
        queue_append, queue_popleft = queue.append, queue.popleft
        queue_appendleft, queue_pop = queue.appendleft, queue.pop

        @functools.wraps(user_function)
        def wrapper(*args, **kwargs):
            # cache key records both positional and keyword args
            key = args
            if kwargs:
                key += (kwd_mark,) + tuple(sorted(kwargs.items()))

            # record recent use of this key
            queue_append(key)
            refcount[key] += 1

            # get cache entry or compute if not found (EAFP: a hit is the
            # common case, so try the lookup first)
            try:
                result = cache[key]
            except KeyError:
                result = user_function(*args, **kwargs)
                cache[key] = result

                # purge least recently used cache entry; the cache can only
                # grow on a miss, so the check lives on this path.  Skip over
                # stale queue entries (keys re-used more recently) until a
                # key's last occurrence is popped.
                if len(cache) > maxsize:
                    key = queue_popleft()
                    refcount[key] -= 1
                    while refcount[key]:
                        key = queue_popleft()
                        refcount[key] -= 1
                    if on_purge:
                        on_purge(cache[key])
                    del cache[key], refcount[key]

            # periodically compact the queue by eliminating duplicate keys
            # while preserving order of most recent access
            if len(queue) > maxqueue:
                refcount.clear()
                queue_appendleft(sentinel)
                # Pop from the right (most recent first) back to the sentinel,
                # keeping only the first occurrence of each key.
                # (py3 fix: open-coded membership test instead of the
                # py2-only itertools.ifilterfalse.)
                for key in iter(queue_pop, sentinel):
                    if key not in refcount:
                        queue_appendleft(key)
                        refcount[key] = 1
            return result

        def clear():
            """Empty the cache, invoking on_purge on every cached value."""
            if on_purge:
                # py3 fix: dict.itervalues() no longer exists; values() works
                # on both Python 2 and 3.
                for value in cache.values():
                    on_purge(value)
            cache.clear()
            queue.clear()
            refcount.clear()

        wrapper._cache = cache
        wrapper.clear = clear
        return wrapper

    return decorating_function