Speed-up through caching
Posted: Monday, April 11, 2022, 16:38
I have working code, and as the commented-out parts show, I have already tried a few things. But the speed is not as good as it should be. How do I achieve a speed-up through caching? (A sketch of what I have in mind follows after the code.)
Code: Select all
# A program to batch-compute the number of prime numbers below each of the given numbers in the input set
# TODO: Extend get_primes() to utilize caching
# TODO: Modify code to use more appropriate data structures where possible
from functools import lru_cache, wraps
from timeit import repeat  # not used in the code below yet
from datetime import datetime, timedelta  # only used in the commented-out timed_lru_cache below
# from functools import timed_lru_cache, lru_cache, wraps
# cache = TTLCache(maxsize=100, ttl=86400)
# @lru_cache(maxsize=2000)
def memoize(function):
    # hand-rolled memoization: results are keyed on the positional arguments
    memo = {}
    @wraps(function)
    def wrapper(*args):
        try:
            return memo[args]
        except KeyError:
            rv = function(*args)
            memo[args] = rv
            return rv
    return wrapper
# @memoize
def is_prime(n):
    # builds a fresh sieve of Eratosthenes up to n on every call and then
    # checks whether n survived it
    sieved = []
    factor = 2
    while factor * factor <= n + 1:
        if factor not in sieved:
            for i in range(factor * factor, n + 1, factor):
                sieved.append(i)
        factor += 1
    return n not in sieved
# @lru_cache(maxsize=8)
"""
def timed_lru_cache(seconds: int, maxsize: int = 128):
def wrapper_cache(func):
func = lru_cache(maxsize=maxsize)(func)
func.lifetime = timedelta(seconds=seconds)
func.expiration = datetime.utcnow() + func.lifetime
@wraps(func)
def wrapped_func(*args, **kwargs):
if datetime.utcnow() >= func.expiration:
func.cache_clear()
func.expiration = datetime.utcnow() + func.lifetime
return func(*args, **kwargs)
return wrapped_func
return wrapper_cache
"""
# @timed_lru_cache(10)
# @cached(cache)
# @timed_lru_cache(10)
# @timed_lru_cache(maxsize=16)
# @lru_cache(maxsize=2000)
@memoize
def get_primes(n):
    return [p for p in range(1, n) if is_prime(p)]
if __name__ == 'builtins' or __name__ == '__main__':
    input_data = [6843, 1028, 3324]
    for n in input_data:
        result = get_primes(n)
        print(f'Found {len(result)} primes below {n}')
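To make the question more concrete, here is a minimal sketch of the direction I am currently considering (not verified as the intended solution): since get_primes() is called with three different values of n, memoizing only get_primes() never gets a cache hit, but is_prime() is called again and again with the same arguments across the three batches, so I would put functools.lru_cache directly on is_prime(). The function bodies are the ones from the code above; the timing with timeit.repeat (imported above, but unused so far) is only a rough check.

Code: Select all

# Sketch: cache is_prime() itself; the sieve logic is unchanged from above.
from functools import lru_cache
from timeit import repeat


@lru_cache(maxsize=None)  # unbounded cache: every candidate is sieved only once
def is_prime(n):
    sieved = []
    factor = 2
    while factor * factor <= n + 1:
        if factor not in sieved:
            for i in range(factor * factor, n + 1, factor):
                sieved.append(i)
        factor += 1
    return n not in sieved


def get_primes(n):
    # left uncached here on purpose; the repeated work happens in is_prime()
    return [p for p in range(1, n) if is_prime(p)]


if __name__ == '__main__':
    input_data = [6843, 1028, 3324]
    # one timed run of the whole batch; a second run would mostly hit the
    # cache, so repeating and taking the minimum would be misleading here
    elapsed = repeat(lambda: [get_primes(n) for n in input_data],
                     number=1, repeat=1)[0]
    for n in input_data:
        print(f'Found {len(get_primes(n))} primes below {n}')
    print(f'Batch time: {elapsed:.2f} s')

With the three inputs above, the candidates below 1028 would otherwise be sieved three times and those below 3324 twice, which is exactly the redundancy the cache is meant to remove.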