diff --git a/gen/generators/basic.py b/gen/generators/basic.py index 751aef20..fd21969d 100644 --- a/gen/generators/basic.py +++ b/gen/generators/basic.py @@ -14,6 +14,7 @@ def create_basic_generator( template_dir=None, has_dependencies=True, camel_case_filename=False, + ignore_cache=False ): # # Dynamically generate a class of the form: @@ -31,6 +32,7 @@ def init_func(self): template_dir=template_dir, has_dependencies=has_dependencies, camel_case_filename=camel_case_filename, + ignore_cache=ignore_cache ) cls_name = ( @@ -62,6 +64,7 @@ def add_basic_generator_to_module( template_dir=None, has_dependencies=True, camel_case_filename=False, + ignore_cache=False ): module_name = module["__name__"] cls = create_basic_generator( @@ -71,6 +74,7 @@ def add_basic_generator_to_module( template_dir=template_dir, has_dependencies=has_dependencies, camel_case_filename=camel_case_filename, + ignore_cache=ignore_cache ) add_class_to_module(cls, module) @@ -83,6 +87,7 @@ def add_basic_generators_to_module( template_dir=None, has_dependencies=True, camel_case_filename=False, + ignore_cache=False ): for template in templates: add_basic_generator_to_module( @@ -92,6 +97,7 @@ def add_basic_generators_to_module( template_dir=template_dir, has_dependencies=has_dependencies, camel_case_filename=camel_case_filename, + ignore_cache=ignore_cache ) @@ -138,6 +144,7 @@ def __init__( template_dir=None, has_dependencies=True, camel_case_filename=False, + ignore_cache=False ): # Set the generator model class and type name: self.model_cls = model_class @@ -145,6 +152,7 @@ def __init__( self.model_type = self.model_cls.__name__.lower() self.has_dependencies = has_dependencies self.camel_case_filename = camel_case_filename + self.ignore_cache = ignore_cache # If a full template filename is not given, then form one using # the model_type: @@ -162,7 +170,7 @@ def __init__( # Cache model object for speed: def model_object(self, input_filename): if input_filename not in self._model_obj: - 
self._model_obj[input_filename] = self.model_cls(input_filename) + self._model_obj[input_filename] = self.model_cls(input_filename, ignore_cache=self.ignore_cache) return self._model_obj[input_filename] def input_file_regex(self): diff --git a/gen/generators/component.py b/gen/generators/component.py index 6de437cb..7d46c03b 100644 --- a/gen/generators/component.py +++ b/gen/generators/component.py @@ -1,4 +1,4 @@ -from generators.basic import add_basic_generators_to_module +from generators.basic import add_basic_generators_to_module, add_basic_generator_to_module from models import component # This module contains generators that produce products related @@ -26,7 +26,6 @@ "component/name_enums.tex", "component/name_interrupts.tex", "component/name_requirements.tex", - "component/name_unit_test.tex", ] component_templates_no_deps = ["component/name.dot", "component/name.tex"] @@ -41,3 +40,14 @@ module=this_module, has_dependencies=False, ) +# We cannot use the cached component model for anything to do with unit +# tests, since unit tests are not in the path, and need to be found by +# searching the filesystem each time. By doing this, we will always detect +# any new unit tests models that might appear on the filesystem for a +# component. +add_basic_generator_to_module( + component.component, + "component/name_unit_test.tex", + module=this_module, + ignore_cache=True +) diff --git a/gen/generators/tests.py b/gen/generators/tests.py index 084f8c7a..a7dc9e2b 100644 --- a/gen/generators/tests.py +++ b/gen/generators/tests.py @@ -1,6 +1,6 @@ from base_classes.generator_base import generator_base from generators.basic import basic_generator -from models import tests +from models import tests, component from util import error from util import model_loader import os.path @@ -134,6 +134,10 @@ def generate(self, input_filename): component_name, model_types="component" ) if component_model_path: + # Override the model class. 
We are going to use the component model + # to generate this output instead of the test model, since it contains + # all the info needed. + self.model_cls = component.component basic_generator.generate(self, component_model_path) # Depend on the component model and any commands, data products, events models that the component uses: diff --git a/gen/models/base.py b/gen/models/base.py index cc2c133d..dca0c7ac 100644 --- a/gen/models/base.py +++ b/gen/models/base.py @@ -88,9 +88,26 @@ def render(self, template_file, template_path=None): ) +# We use this meta class to intercept calls to create a new "base" object. It +# looks for arguments that should only be passed to __new__, currently this only +# includes an option to ignore the model cache on load, and filters those arguments +# before passing the remaining arguments to __init__. This differs slightly from the +# default python behavior, and allows us to not propagate arguments like "ignore_cache" +# through the __init__ for all model objects that inherit from base. +# +# This meta class also extends the abc.ABCMeta class, so we get the features from that +# as well. +class base_meta(abc.ABCMeta): + def __call__(cls, *args, **kwargs): + ignore_cache = kwargs.pop("ignore_cache", False) + instance = cls.__new__(cls, *args, **kwargs, ignore_cache=ignore_cache) + cls.__init__(instance, *args, **kwargs) + return instance + + # The model base class. All python models that load yaml files should # inherit from this class. 
-class base(renderable_object, metaclass=abc.ABCMeta): +class base(renderable_object, metaclass=base_meta): ################################################# # Model Caching: ################################################# @@ -105,7 +122,7 @@ def is_model_cached_this_session(filename): return False def is_cached_model_up_to_date(filename): - # See if the model is stored in the database cache stored on disk: + # See if the model is stored in the database cache stored on disk: with model_cache_database() as db: # Get the time when we last cached the model from this file: cache_time_stamp = db.get_model_time_stamp(filename) @@ -129,6 +146,19 @@ def mark_model_cached_this_session(filename): with model_cache_database(mode=DATABASE_MODE.READ_WRITE) as db: db.mark_cached_model_up_to_date_for_session(filename) + def were_new_submodels_created(filename): + cached_submodels = None + with model_cache_database() as db: + cached_submodels = db.get_model_submodels(filename) + + if cached_submodels: + _, _, model_name, _, _ = redo_arg.split_model_filename(filename) + new_submodels = model_loader._get_model_file_paths(model_name) + return not (cached_submodels == new_submodels) + + # If this model does not have submodels, then return False + return False + # If the model was cached this redo session, then we know it is safe to use # directly from cache without any additional checking. Dependencies do not # need to be checked, since this would have been done earlier in this session, @@ -136,6 +166,13 @@ def mark_model_cached_this_session(filename): if is_model_cached_this_session(filename): return do_load_from_cache(filename) + # If this model has submodels, we need to make sure a new submodel + # has not been created. For example, if a name.events.yaml for name.component.yaml + # gets created on disk, this means the cached entry for name.component.yaml is + # invalid. This is true for the parent model of any newly created submodel. 
+ if were_new_submodels_created(filename): + return None + # If the model was written from a previous session, then we need to check its # write timestamp against the file timestamp to determine if the cached entry is # still valid: @@ -176,16 +213,24 @@ def save_to_cache(self): def __new__(cls, filename, *args, **kwargs): # Try to load the model from the cache: if filename: + # See if we are requested to ignore the cache for this model + # load: + ignore_cache = kwargs.get("ignore_cache") full_filename = os.path.abspath(filename) - model = cls.load_from_cache(cls, full_filename) - if model: + model = None + if not ignore_cache: + model = cls.load_from_cache(cls, full_filename) + + # If we are not ignoring the cache, and the model was found in + # the cache, then use it, otherwise create the model from scratch + # by reading in the file. + if not ignore_cache and model: # Create from cached model: self = model self.from_cache = True self.filename = os.path.basename(filename) self.full_filename = full_filename self.do_save_to_cache = True - # import sys # sys.stderr.write("lcache " + self.filename + "\n") else: # Create from scratch: @@ -194,7 +239,6 @@ def __new__(cls, filename, *args, **kwargs): self.filename = os.path.basename(filename) self.full_filename = full_filename self.do_save_to_cache = True - # import sys # sys.stderr.write("lfile " + self.filename + "\n") else: # Create from scratch. 
This is usually only called when @@ -231,6 +275,7 @@ def __init__(self, filename, schema): self.schema = os.path.splitext(os.path.basename(self.full_schema))[0] self.time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M") self.dependencies = [] + self.submodels = None # Save off some local functions: self.zip = zip @@ -325,13 +370,9 @@ def _loadYaml(yaml_text): import warnings warnings.simplefilter("ignore", yaml.error.UnsafeLoaderWarning) - # with open(self.full_filename, 'r') as stream: try: yml = yaml.YAML(typ='rt') return yml.load(yaml_text) - # import sys - # sys.stderr.write(str(self.data) + "\n") - # sys.stderr.write(str(type(self.data)) + "\n") except yaml.YAMLError as exc: raise ModelException(str(exc)) @@ -358,11 +399,9 @@ def _loadYaml(yaml_text): tokens = list(jinja_lexer.tokenize(contents)) has_jinja_directives = False for token in tokens: - # sys.stderr.write(token.type + "\n") if token.type != "data": has_jinja_directives = True break - # sys.stderr.write(self.full_filename + " has jinja directives? " + str(has_jinja_directives) + "\n") # If there are jinja directives in the file, then lets render them using # the global project configuration YAML. 
diff --git a/gen/models/tests.py b/gen/models/tests.py index 5d0078dd..46cff652 100644 --- a/gen/models/tests.py +++ b/gen/models/tests.py @@ -47,8 +47,8 @@ def load(self): self.description = self.data["description"] if "preamble" in self.data: self.preamble = self.data["preamble"] - tests = self.data["tests"] - for t in tests: + the_tests = self.data["tests"] + for t in the_tests: name = ada.formatType(t["name"]) description = None if "description" in t: diff --git a/redo/database/model_cache_database.py b/redo/database/model_cache_database.py index 0f52b1ae..8607c1ea 100644 --- a/redo/database/model_cache_database.py +++ b/redo/database/model_cache_database.py @@ -2,6 +2,7 @@ from database.database import DATABASE_MODE from time import time as curr_time from os import environ, sep +from util import model_loader # The purpose of the model cache is to save off YAML file model load # data structures after they have been read from a file, validated, and @@ -27,6 +28,9 @@ def store_model(self, model_file, model_object): self.store(model_file, model_object) self.store(model_file + "_time@st@@", curr_time()) self.store(model_file + "_sess@id@@", environ["ADAMANT_SESSION_ID"]) + if model_object.submodels is not None: + submodel_paths = model_loader._get_model_file_paths(model_object.model_name) + self.store(model_file + "_submod@paths@@", submodel_paths) # Update the session ID for a model. We use this to indicate that a model has been # fully validated (ie. not outdated) for this redo session. @@ -43,6 +47,9 @@ def get_model_session_id(self, model_file): def get_model_time_stamp(self, model_file): return self.try_fetch(model_file + "_time@st@@") + def get_model_submodels(self, model_file): + return self.try_fetch(model_file + "_submod@paths@@") + # Create an empty model cache database file, if one does # not already exist: