Refactor preemptive Flask cache into a proper class
Also tracks the timestamp of the last access to each preemptively cached resource and cleans up entries that have not been accessed within a configurable period (7 days by default)
This commit is contained in:
parent
fab5fc4899
commit
6473937b75
5 changed files with 194 additions and 56 deletions
|
|
@ -622,6 +622,9 @@ class Server():
|
|||
response.headers.add("X-Clacks-Overhead", "GNU Terry Pratchett")
|
||||
return response
|
||||
|
||||
preemptive_cache = octoprint.server.util.flask.PreemptiveCache(os.path.join(settings().getBaseFolder("data"), "preemptive_flask_cache.yaml"))
|
||||
preemptive_cache.attach_to_app(app)
|
||||
|
||||
def _setup_i18n(self, app):
|
||||
global babel
|
||||
global LOCALES
|
||||
|
|
@ -668,10 +671,18 @@ class Server():
|
|||
self._register_template_plugins()
|
||||
|
||||
def _execute_preemptive_flask_caching(self):
|
||||
from octoprint.server.util.flask import get_preemptive_cache_data
|
||||
from werkzeug.test import EnvironBuilder
|
||||
import time
|
||||
|
||||
cache_data = get_preemptive_cache_data()
|
||||
if not hasattr(app, "preemptive_cache"):
|
||||
return
|
||||
|
||||
# we clean up entries from our preemptive cache settings that haven't been
|
||||
# accessed longer than server.preemptiveCache.until days
|
||||
preemptive_cache_timeout = settings().getInt(["server", "preemptiveCache", "until"])
|
||||
cutoff_timestamp = time.time() + preemptive_cache_timeout * 24 * 60 * 60
|
||||
|
||||
cache_data = app.preemptive_cache.clean_all_data(lambda root, entries: filter(lambda entry: "_timestamp" in entry and entry["_timestamp"] <= cutoff_timestamp, entries))
|
||||
if not cache_data:
|
||||
return
|
||||
|
||||
|
|
@ -679,10 +690,15 @@ class Server():
|
|||
for route in sorted(cache_data.keys(), key=lambda x: (x.count("/"), x)):
|
||||
entries = cache_data[route]
|
||||
for kwargs in entries:
|
||||
additional_request_data = kwargs.get("_additional_request_data", dict())
|
||||
kwargs = dict((k, v) for k, v in kwargs.items() if not k.startswith("_"))
|
||||
kwargs.update(additional_request_data)
|
||||
try:
|
||||
|
||||
self._logger.info("Preemptively caching {} for {!r}".format(route, kwargs))
|
||||
builder = EnvironBuilder(**kwargs)
|
||||
app(builder.get_environ(), lambda *a, **kw: None)
|
||||
with app.preemptive_cache.disable_timestamp_update():
|
||||
app(builder.get_environ(), lambda *a, **kw: None)
|
||||
except:
|
||||
self._logger.exception("Error while trying to preemptively cache {} for {!r}".format(route, kwargs))
|
||||
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@ import flask.ext.assets
|
|||
import webassets.updater
|
||||
import webassets.utils
|
||||
import functools
|
||||
import contextlib
|
||||
import time
|
||||
import uuid
|
||||
import threading
|
||||
|
|
@ -376,69 +377,147 @@ def cache_check_response_headers(response):
|
|||
|
||||
return False
|
||||
|
||||
_preemptive_flask_cache = "preemptive_flask_cache.yaml"
|
||||
|
||||
def preemptively_cached(data, unless=None):
|
||||
def decorator(f):
|
||||
@functools.wraps(f)
|
||||
def decorated_function(*args, **kwargs):
|
||||
if not (callable(unless) and unless()):
|
||||
entry_data = data
|
||||
if callable(entry_data):
|
||||
entry_data = entry_data()
|
||||
class PreemptiveCache(object):
|
||||
|
||||
if entry_data is not None:
|
||||
from flask import request
|
||||
from octoprint.util import atomic_write
|
||||
import yaml
|
||||
def __init__(self, cachefile):
|
||||
self.cachefile = cachefile
|
||||
|
||||
data_folder = settings().getBaseFolder("data")
|
||||
cache_data_file = os.path.join(data_folder, _preemptive_flask_cache)
|
||||
cache_data = get_preemptive_cache_data()
|
||||
self._lock = threading.RLock()
|
||||
self._logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
|
||||
self._update_timestamp = True
|
||||
|
||||
if not request.path in cache_data:
|
||||
cache_data[request.path] = []
|
||||
def recorded(self, data, unless=None):
|
||||
def decorator(f):
|
||||
@functools.wraps(f)
|
||||
def decorated_function(*args, **kwargs):
|
||||
if not (callable(unless) and unless()):
|
||||
entry_data = data
|
||||
if callable(entry_data):
|
||||
entry_data = entry_data()
|
||||
|
||||
cache_data_for_path = cache_data.get(request.path, [])
|
||||
if all(map(lambda entry: entry_data != entry, cache_data_for_path)):
|
||||
logging.getLogger(__name__).info("Adding {} for {!r} to views to preemptively cache".format(request.path, entry_data))
|
||||
cache_data[request.path] = cache_data_for_path + [entry_data]
|
||||
try:
|
||||
with atomic_write(cache_data_file, "wb", prefix="octoprint-{}-".format(_preemptive_flask_cache[:-len(".yaml")]), suffix=".yaml") as handle:
|
||||
yaml.safe_dump(cache_data, handle,default_flow_style=False, indent=" ", allow_unicode=True)
|
||||
except:
|
||||
logging.getLogger(__name__).exception("Error while writing {}".format(_preemptive_flask_cache))
|
||||
if entry_data is not None:
|
||||
from flask import request
|
||||
self.add_data(request.path, entry_data)
|
||||
return f(*args, **kwargs)
|
||||
return decorated_function
|
||||
return decorator
|
||||
|
||||
return f(*args, **kwargs)
|
||||
@contextlib.contextmanager
|
||||
def disable_timestamp_update(self):
|
||||
with self._lock:
|
||||
self._update_timestamp = False
|
||||
yield
|
||||
self._update_timestamp = True
|
||||
|
||||
return decorated_function
|
||||
def clean_all_data(self, cleanup_function):
|
||||
assert callable(cleanup_function)
|
||||
|
||||
return decorator
|
||||
with self._lock:
|
||||
all_data = self.get_all_data()
|
||||
for root, entries in all_data.items():
|
||||
old_count = len(entries)
|
||||
entries = cleanup_function(root, entries)
|
||||
if not entries:
|
||||
del all_data[root]
|
||||
self._logger.debug("Removed root {} from preemptive cache".format(root))
|
||||
elif len(entries) < old_count:
|
||||
all_data[root] = entries
|
||||
self._logger.debug("Removed {} from preemptive cache for root {}".format(old_count - len(entries), root))
|
||||
self.set_all_data(all_data)
|
||||
|
||||
return all_data
|
||||
|
||||
def get_preemptive_cache_data(root=None):
|
||||
import yaml
|
||||
def get_all_data(self):
|
||||
import yaml
|
||||
|
||||
data_folder = settings().getBaseFolder("data")
|
||||
cache_data_file = os.path.join(data_folder, _preemptive_flask_cache)
|
||||
if not os.path.isfile(cache_data_file):
|
||||
return dict()
|
||||
cache_data = None
|
||||
with self._lock:
|
||||
try:
|
||||
with open(self.cachefile, "r") as f:
|
||||
cache_data = yaml.safe_load(f)
|
||||
except:
|
||||
self._logger.exception("Error while reading {}".format(self.cachefile))
|
||||
|
||||
cache_data = None
|
||||
try:
|
||||
with open(cache_data_file, "r") as f:
|
||||
cache_data = yaml.safe_load(f)
|
||||
except:
|
||||
logging.getLogger(__name__).exception("Error while reading {}".format(_preemptive_flask_cache))
|
||||
if cache_data is None:
|
||||
cache_data = dict()
|
||||
|
||||
if cache_data is None:
|
||||
cache_data = dict()
|
||||
|
||||
if root:
|
||||
return cache_data.get(root, dict())
|
||||
else:
|
||||
return cache_data
|
||||
|
||||
def get_data(self, root):
|
||||
cache_data = self.get_all_data()
|
||||
return cache_data.get(root, dict())
|
||||
|
||||
def set_all_data(self, data):
|
||||
from octoprint.util import atomic_write
|
||||
import yaml
|
||||
|
||||
with self._lock:
|
||||
try:
|
||||
with atomic_write(self.cachefile, "wb") as handle:
|
||||
yaml.safe_dump(data, handle,default_flow_style=False, indent=" ", allow_unicode=True)
|
||||
except:
|
||||
self._logger.exception("Error while writing {}".format(self.cachefile))
|
||||
|
||||
def set_data(self, root, data):
|
||||
with self._lock:
|
||||
all_data = self.get_all_data()
|
||||
all_data[root] = data
|
||||
self.set_all_data(all_data)
|
||||
|
||||
def add_data(self, root, data):
|
||||
from octoprint.util import dict_filter
|
||||
|
||||
def strip_ignored(d):
|
||||
return dict_filter(d, lambda k, v: not k.startswith("_"))
|
||||
|
||||
def compare(a, b):
|
||||
return set(strip_ignored(a).items()) == set(strip_ignored(b).items())
|
||||
|
||||
def split_matched_and_unmatched(entry, entries):
|
||||
matched = []
|
||||
unmatched = []
|
||||
|
||||
for e in entries:
|
||||
if compare(e, entry):
|
||||
matched.append(e)
|
||||
else:
|
||||
unmatched.append(e)
|
||||
|
||||
return matched, unmatched
|
||||
|
||||
with self._lock:
|
||||
cache_data = self.get_all_data()
|
||||
|
||||
if not root in cache_data:
|
||||
cache_data[root] = []
|
||||
|
||||
existing, other = split_matched_and_unmatched(data, cache_data[root])
|
||||
|
||||
def get_newest(entries):
|
||||
result = None
|
||||
for entry in entries:
|
||||
if "_timestamp" in entry and (result is None or ("_timestamp" in entry and result["_timestamp"] < entry["_timestamp"])):
|
||||
result = entry
|
||||
return result
|
||||
|
||||
to_persist = get_newest(existing)
|
||||
if not to_persist:
|
||||
import copy
|
||||
to_persist = copy.deepcopy(data)
|
||||
to_persist["_timestamp"] = time.time()
|
||||
self._logger.info("Adding entry for {} and {!r}".format(root, to_persist))
|
||||
elif self._update_timestamp:
|
||||
to_persist["_timestamp"] = time.time()
|
||||
self._logger.debug("Updating timestamp for {} and {!r}".format(root, data))
|
||||
else:
|
||||
self._logger.debug("Not updating timestamp for {} and {!r}, currently flagged as disabled".format(root, data))
|
||||
|
||||
self.set_data(root, [to_persist] + other)
|
||||
|
||||
def attach_to_app(self, app):
|
||||
app.preemptive_cache = self
|
||||
|
||||
|
||||
def add_non_caching_response_headers(response):
|
||||
response.headers["Cache-Control"] = "no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0"
|
||||
|
|
|
|||
|
|
@ -27,12 +27,12 @@ _valid_id_re = re.compile("[a-z_]+")
|
|||
_valid_div_re = re.compile("[a-zA-Z_-]+")
|
||||
|
||||
@app.route("/")
|
||||
@app.preemptive_cache.recorded(data=lambda: dict(path=request.path, base_url=request.url_root, query_string="l10n={}".format(g.locale.language)) if g.locale else None,
|
||||
unless=lambda: request.url_root in settings().get(["server", "preemptiveCache", "exceptions"]))
|
||||
@util.flask.cached(timeout=-1,
|
||||
refreshif=lambda: util.flask.cache_check_headers() or "_refresh" in request.values,
|
||||
key=lambda: "view:{}:{}".format(request.base_url, g.locale.language if g.locale else "default"),
|
||||
unless_response=util.flask.cache_check_response_headers)
|
||||
@util.flask.preemptively_cached(data=lambda: dict(path=request.path, base_url=request.url_root, query_string="l10n={}".format(g.locale.language)) if g.locale else None,
|
||||
unless=lambda: request.url_root in settings().get(["server", "preemptiveCache", "exceptions"]))
|
||||
def index():
|
||||
#~~ a bunch of settings
|
||||
|
||||
|
|
@ -403,11 +403,11 @@ def robotsTxt():
|
|||
|
||||
|
||||
@app.route("/i18n/<string:locale>/<string:domain>.js")
|
||||
@app.preemptive_cache.recorded(data=lambda: dict(path=request.path, base_url=request.url_root),
|
||||
unless=lambda: request.url_root in settings().get(["server", "preemptiveCache", "exceptions"]))
|
||||
@util.flask.cached(timeout=-1,
|
||||
refreshif=lambda: util.flask.cache_check_headers() or "_refresh" in request.values,
|
||||
key=lambda: "view:{}".format(request.base_url))
|
||||
@util.flask.preemptively_cached(data=lambda: dict(path=request.path, base_url=request.url_root) if g.locale else None,
|
||||
unless=lambda: request.url_root in settings().get(["server", "preemptiveCache", "exceptions"]))
|
||||
def localeJs(locale, domain):
|
||||
messages = dict()
|
||||
plural_expr = None
|
||||
|
|
|
|||
|
|
@ -116,7 +116,8 @@ default_settings = {
|
|||
"critical": 200 * 1024 * 1024, # 200 MB
|
||||
},
|
||||
"preemptiveCache": {
|
||||
"exceptions": []
|
||||
"exceptions": [],
|
||||
"until": 7
|
||||
}
|
||||
},
|
||||
"webcam": {
|
||||
|
|
|
|||
|
|
@ -542,6 +542,48 @@ def dict_contains_keys(keys, dictionary):
|
|||
|
||||
return True
|
||||
|
||||
|
||||
def dict_filter(dictionary, filter_function):
	"""
	Filters a dictionary with the provided filter_function

	Example::

	    >>> data = dict(key1="value1", key2="value2", other_key="other_value", foo="bar", bar="foo")
	    >>> dict_filter(data, lambda k, v: k.startswith("key")) == dict(key1="value1", key2="value2")
	    True
	    >>> dict_filter(data, lambda k, v: v.startswith("value")) == dict(key1="value1", key2="value2")
	    True
	    >>> dict_filter(data, lambda k, v: k == "foo" or v == "foo") == dict(foo="bar", bar="foo")
	    True
	    >>> dict_filter(data, lambda k, v: False) == dict()
	    True
	    >>> dict_filter(data, lambda k, v: True) == data
	    True
	    >>> dict_filter(None, lambda k, v: True)
	    Traceback (most recent call last):
	    ...
	    AssertionError
	    >>> dict_filter(data, None)
	    Traceback (most recent call last):
	    ...
	    AssertionError

	Arguments:
	    dictionary (dict): The dictionary to filter
	    filter_function (callable): The filter function to apply, called with key and
	        value of an entry in the dictionary, must return ``True`` for values to
	        keep and ``False`` for values to strip

	Returns:
	    dict: A shallow copy of the provided dictionary, stripped of the key-value-pairs
	        for which the ``filter_function`` returned ``False``
	"""
	# Input validation via assert is part of the documented contract (see the
	# doctests above), so keep it rather than raising TypeError.
	assert isinstance(dictionary, dict)
	assert callable(filter_function)

	# Build the filtered shallow copy with a dict comprehension.
	return {key: value for key, value in dictionary.items() if filter_function(key, value)}
|
||||
|
||||
|
||||
class Object(object):
	"""
	Simple attribute container.

	Unlike bare ``object()`` instances (which have no ``__dict__``), instances
	of this class accept arbitrary attribute assignment, making it usable as a
	lightweight ad-hoc namespace.
	"""
	pass
|
||||
|
||||
|
|
|
|||
Loading…
Reference in a new issue