Model Cache Bug Fix #24

Merged (8 commits) on Feb 13, 2024
10 changes: 9 additions & 1 deletion gen/generators/basic.py
@@ -14,6 +14,7 @@ def create_basic_generator(
template_dir=None,
has_dependencies=True,
camel_case_filename=False,
ignore_cache=False
):
#
# Dynamically generate a class of the form:
@@ -31,6 +32,7 @@ def init_func(self):
template_dir=template_dir,
has_dependencies=has_dependencies,
camel_case_filename=camel_case_filename,
ignore_cache=ignore_cache
)

cls_name = (
@@ -62,6 +64,7 @@ def add_basic_generator_to_module(
template_dir=None,
has_dependencies=True,
camel_case_filename=False,
ignore_cache=False
):
module_name = module["__name__"]
cls = create_basic_generator(
@@ -71,6 +74,7 @@ def add_basic_generator_to_module(
template_dir=template_dir,
has_dependencies=has_dependencies,
camel_case_filename=camel_case_filename,
ignore_cache=ignore_cache
)
add_class_to_module(cls, module)

@@ -83,6 +87,7 @@ def add_basic_generators_to_module(
template_dir=None,
has_dependencies=True,
camel_case_filename=False,
ignore_cache=False
):
for template in templates:
add_basic_generator_to_module(
@@ -92,6 +97,7 @@ def add_basic_generator_to_module(
template_dir=template_dir,
has_dependencies=has_dependencies,
camel_case_filename=camel_case_filename,
ignore_cache=ignore_cache
)


@@ -138,13 +144,15 @@ def __init__(
template_dir=None,
has_dependencies=True,
camel_case_filename=False,
ignore_cache=False
):
# Set the generator model class and type name:
self.model_cls = model_class
self._model_obj = {}
self.model_type = self.model_cls.__name__.lower()
self.has_dependencies = has_dependencies
self.camel_case_filename = camel_case_filename
self.ignore_cache = ignore_cache

# If a full template filename is not given, then form one using
# the model_type:
@@ -162,7 +170,7 @@ def __init__(
# Cache model object for speed:
def model_object(self, input_filename):
if input_filename not in self._model_obj:
self._model_obj[input_filename] = self.model_cls(input_filename)
self._model_obj[input_filename] = self.model_cls(input_filename, ignore_cache=self.ignore_cache)
return self._model_obj[input_filename]

def input_file_regex(self):
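The change above threads a new ignore_cache flag from the generator factory functions down to the model constructor, while model_object() keeps one loaded model per input file for the lifetime of the generator. A condensed sketch of that pattern (illustrative class name, not a verbatim copy of the Adamant code):

    class caching_generator_sketch:
        def __init__(self, model_class, ignore_cache=False):
            self.model_cls = model_class
            self.ignore_cache = ignore_cache
            self._model_obj = {}  # memo: input filename -> loaded model object

        def model_object(self, input_filename):
            # Build the model at most once per filename; whether the on-disk
            # cache is consulted is delegated to the model class via the
            # forwarded ignore_cache flag.
            if input_filename not in self._model_obj:
                self._model_obj[input_filename] = self.model_cls(
                    input_filename, ignore_cache=self.ignore_cache
                )
            return self._model_obj[input_filename]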
14 changes: 12 additions & 2 deletions gen/generators/component.py
@@ -1,4 +1,4 @@
from generators.basic import add_basic_generators_to_module
from generators.basic import add_basic_generators_to_module, add_basic_generator_to_module
from models import component

# This module contains generators that produce products related
@@ -26,7 +26,6 @@
"component/name_enums.tex",
"component/name_interrupts.tex",
"component/name_requirements.tex",
"component/name_unit_test.tex",
]

component_templates_no_deps = ["component/name.dot", "component/name.tex"]
@@ -41,3 +40,14 @@
module=this_module,
has_dependencies=False,
)
# We cannot use the cached component model for anything to do with unit
# tests, since unit tests are not in the path, and need to be found by
# searching the filesystem each time. By doing this, we will always detect
# any new unit test models that might appear on the filesystem for a
# component.
add_basic_generator_to_module(
component.component,
"component/name_unit_test.tex",
module=this_module,
ignore_cache=True
)
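Registering the unit-test template separately with ignore_cache=True means every build re-runs the filesystem search for unit test models instead of trusting a previously cached component model. A hedged illustration of the difference ("name.component.yaml" is just the placeholder filename used in the comments above):

    from models import component

    # May be served from the on-disk model cache if the cached entry is
    # considered up to date:
    cached_model = component.component("name.component.yaml")

    # Always rebuilds the model from the YAML file, re-discovering any unit
    # test submodels on the filesystem and bypassing the cache entirely:
    fresh_model = component.component("name.component.yaml", ignore_cache=True)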
6 changes: 5 additions & 1 deletion gen/generators/tests.py
@@ -1,6 +1,6 @@
from base_classes.generator_base import generator_base
from generators.basic import basic_generator
from models import tests
from models import tests, component
from util import error
from util import model_loader
import os.path
@@ -134,6 +134,10 @@ def generate(self, input_filename):
component_name, model_types="component"
)
if component_model_path:
# Override the model class. We are going to use the component model
# to generate this output instead of the test model, since it contains
# all the info needed.
self.model_cls = component.component
basic_generator.generate(self, component_model_path)

# Depend on the component model and any commands, data products, events models that the component uses:
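The comment above explains the trick used by the unit-test generator: swap the generator's model class just before delegating to the shared basic_generator.generate(), so the subsequent model_object() call builds a component model for this template. A rough sketch of that delegation (the component-model lookup is not fully visible in this diff, so a hypothetical stand-in is used):

    from generators.basic import basic_generator
    from models import component

    def find_component_model_for_test(input_filename):
        # Hypothetical stand-in for the component model lookup that the real
        # generate() performs through the project's model loader.
        return None

    class unit_test_generator_sketch(basic_generator):
        def generate(self, input_filename):
            component_model_path = find_component_model_for_test(input_filename)
            if component_model_path:
                # Render from the component model; it already carries all the
                # information the unit-test template needs.
                self.model_cls = component.component
                basic_generator.generate(self, component_model_path)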
63 changes: 51 additions & 12 deletions gen/models/base.py
@@ -88,9 +88,26 @@ def render(self, template_file, template_path=None):
)


# We use this metaclass to intercept calls that create a new "base" object. It
# looks for arguments that should only be passed to __new__ (currently just the
# option to ignore the model cache on load) and filters them out before passing
# the remaining arguments on to __init__. This differs slightly from the default
# Python behavior and lets us avoid propagating arguments like "ignore_cache"
# through __init__ for every model object that inherits from base.
#
# This metaclass also extends abc.ABCMeta, so we get its features as well.
class base_meta(abc.ABCMeta):
def __call__(cls, *args, **kwargs):
ignore_cache = kwargs.pop("ignore_cache", False)
instance = cls.__new__(cls, *args, **kwargs, ignore_cache=ignore_cache)
cls.__init__(instance, *args, **kwargs)
return instance


# The model base class. All python models that load yaml files should
# inherit from this class.
class base(renderable_object, metaclass=abc.ABCMeta):
class base(renderable_object, metaclass=base_meta):
#################################################
# Model Caching:
#################################################
@@ -105,7 +122,7 @@ def is_model_cached_this_session(filename):
return False

def is_cached_model_up_to_date(filename):
# See if the model is stored in the database cache stored on disk:
# See if the model is stored in the database cache stored on disk:
with model_cache_database() as db:
# Get the time when we last cached the model from this file:
cache_time_stamp = db.get_model_time_stamp(filename)
@@ -129,13 +146,33 @@ def mark_model_cached_this_session(filename):
with model_cache_database(mode=DATABASE_MODE.READ_WRITE) as db:
db.mark_cached_model_up_to_date_for_session(filename)

def were_new_submodels_created(filename):
cached_submodels = None
with model_cache_database() as db:
cached_submodels = db.get_model_submodels(filename)

if cached_submodels:
_, _, model_name, _, _ = redo_arg.split_model_filename(filename)
new_submodels = model_loader._get_model_file_paths(model_name)
return not (cached_submodels == new_submodels)

# If this model does not have submodels, then return False
return False

# If the model was cached this redo session, then we know it is safe to use
# directly from cache without any additional checking. Dependencies do not
# need to be checked, since this would have been done earlier in this session,
# i.e., milliseconds ago.
if is_model_cached_this_session(filename):
return do_load_from_cache(filename)

# If this model has submodels, we need to make sure a new submodel
# has not been created. For example, if a name.events.yaml for name.component.yaml
# gets created on disk, this means the cached entry for name.component.yaml is
# invalid. This is true for the parent model of any newly created submodel.
if were_new_submodels_created(filename):
return None

# If the model was written from a previous session, then we need to check its
# write timestamp against the file timestamp to determine if the cached entry is
# still valid:
@@ -176,16 +213,24 @@ def save_to_cache(self):
def __new__(cls, filename, *args, **kwargs):
# Try to load the model from the cache:
if filename:
# See if we are requested to ignore the cache for this model
# load:
ignore_cache = kwargs.get("ignore_cache")
full_filename = os.path.abspath(filename)
model = cls.load_from_cache(cls, full_filename)
if model:
model = None
if not ignore_cache:
model = cls.load_from_cache(cls, full_filename)

# If we are not ignoring the cache, and the model was found in
# the cache, then use it; otherwise, create the model from scratch
# by reading in the file.
if not ignore_cache and model:
# Create from cached model:
self = model
self.from_cache = True
self.filename = os.path.basename(filename)
self.full_filename = full_filename
self.do_save_to_cache = True
# import sys
# sys.stderr.write("lcache " + self.filename + "\n")
else:
# Create from scratch:
@@ -194,7 +239,6 @@ def __new__(cls, filename, *args, **kwargs):
self.filename = os.path.basename(filename)
self.full_filename = full_filename
self.do_save_to_cache = True
# import sys
# sys.stderr.write("lfile " + self.filename + "\n")
else:
# Create from scratch. This is usually only called when
@@ -231,6 +275,7 @@ def __init__(self, filename, schema):
self.schema = os.path.splitext(os.path.basename(self.full_schema))[0]
self.time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
self.dependencies = []
self.submodels = None

# Save off some local functions:
self.zip = zip
@@ -325,13 +370,9 @@ def _loadYaml(yaml_text):
import warnings

warnings.simplefilter("ignore", yaml.error.UnsafeLoaderWarning)
# with open(self.full_filename, 'r') as stream:
try:
yml = yaml.YAML(typ='rt')
return yml.load(yaml_text)
# import sys
# sys.stderr.write(str(self.data) + "\n")
# sys.stderr.write(str(type(self.data)) + "\n")
except yaml.YAMLError as exc:
raise ModelException(str(exc))

@@ -358,11 +399,9 @@ def _loadYaml(yaml_text):
tokens = list(jinja_lexer.tokenize(contents))
has_jinja_directives = False
for token in tokens:
# sys.stderr.write(token.type + "\n")
if token.type != "data":
has_jinja_directives = True
break
# sys.stderr.write(self.full_filename + " has jinja directives? " + str(has_jinja_directives) + "\n")

# If there are jinja directives in the file, then lets render them using
# the global project configuration YAML.
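The heart of the change in base.py is base_meta: it lets callers pass ignore_cache to any model constructor while keeping the parameter out of every subclass's __init__ signature, because the metaclass consumes it before __init__ runs. A standalone toy example (not the Adamant classes themselves) that mirrors the mechanism:

    import abc

    class demo_meta(abc.ABCMeta):
        def __call__(cls, *args, **kwargs):
            # Strip the __new__-only argument before __init__ ever sees it:
            ignore_cache = kwargs.pop("ignore_cache", False)
            instance = cls.__new__(cls, *args, **kwargs, ignore_cache=ignore_cache)
            cls.__init__(instance, *args, **kwargs)
            return instance

    class demo_model(metaclass=demo_meta):
        def __new__(cls, filename, ignore_cache=False):
            print("__new__ received ignore_cache =", ignore_cache)
            return super().__new__(cls)

        def __init__(self, filename):  # no ignore_cache parameter needed here
            self.filename = filename

    demo_model("name.component.yaml", ignore_cache=True)
    # prints: __new__ received ignore_cache = True

In the real base class, __new__ is where the cache lookup happens, so that is the place the flag needs to reach.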
4 changes: 2 additions & 2 deletions gen/models/tests.py
@@ -47,8 +47,8 @@ def load(self):
self.description = self.data["description"]
if "preamble" in self.data:
self.preamble = self.data["preamble"]
tests = self.data["tests"]
for t in tests:
the_tests = self.data["tests"]
for t in the_tests:
name = ada.formatType(t["name"])
description = None
if "description" in t:
7 changes: 7 additions & 0 deletions redo/database/model_cache_database.py
@@ -2,6 +2,7 @@
from database.database import DATABASE_MODE
from time import time as curr_time
from os import environ, sep
from util import model_loader

# The purpose of the model cache is to save off YAML file model load
# data structures after they have been read from a file, validated, and
@@ -27,6 +28,9 @@ def store_model(self, model_file, model_object):
self.store(model_file, model_object)
self.store(model_file + "_time@st@@", curr_time())
self.store(model_file + "_sess@id@@", environ["ADAMANT_SESSION_ID"])
if model_object.submodels is not None:
submodel_paths = model_loader._get_model_file_paths(model_object.model_name)
self.store(model_file + "_submod@paths@@", submodel_paths)

# Update the session ID for a model. We use this to indicate that a model has been
# fully validated (i.e., not outdated) for this redo session.
@@ -43,6 +47,9 @@ def get_model_session_id(self, model_file):
def get_model_time_stamp(self, model_file):
return self.try_fetch(model_file + "_time@st@@")

def get_model_submodels(self, model_file):
return self.try_fetch(model_file + "_submod@paths@@")


# Create an empty model cache database file, if one does
# not already exist:
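Together with were_new_submodels_created() in base.py, these hooks record which submodel files existed when a model was cached and compare that list against a fresh filesystem search on the next load, invalidating the cached entry when they differ. A minimal sketch of the round trip, using a plain dict in place of the real key/value database (the key suffix matches the one used above):

    from util import model_loader  # project helper used in the diff above

    fake_db = {}

    def store_model_sketch(model_file, model_object):
        fake_db[model_file] = model_object
        if model_object.submodels is not None:
            # Remember which submodel files existed at cache time:
            fake_db[model_file + "_submod@paths@@"] = model_loader._get_model_file_paths(
                model_object.model_name
            )

    def were_new_submodels_created_sketch(model_file, model_name):
        cached_paths = fake_db.get(model_file + "_submod@paths@@")
        if cached_paths is None:
            # The model had no recorded submodels, so there is nothing to compare.
            return False
        # A newly created (or deleted) submodel changes this list, which marks
        # the cached entry as stale and forces a fresh load from the YAML file.
        return cached_paths != model_loader._get_model_file_paths(model_name)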