From 3d4e7649cc0a32cd4b4b10e4ae5a5aee3d930d2d Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Wed, 8 Oct 2025 09:40:14 -0700 Subject: [PATCH 01/20] Added new exceptions based on RuntimeError inventory results --- activitysim/core/exceptions.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/activitysim/core/exceptions.py b/activitysim/core/exceptions.py index 29d8f03a1a..fbf33785a7 100644 --- a/activitysim/core/exceptions.py +++ b/activitysim/core/exceptions.py @@ -56,3 +56,23 @@ class ReadOnlyError(IOError): class MissingInputTableDefinition(RuntimeError): """An input table definition was expected but not found.""" + + +class SettingsConfigurationError(RuntimeError): + """An error in the system configuration (possibly in settings.yaml) was found.""" + + +class ModelConfigurationError(RuntimeError): + """An error in the model configuration was found.""" + + +class InvalidTravelError(RuntimeError): + """Travel behavior could not be completed in a valid way.""" + + +class TableSliceError(RuntimeError): + """An error occurred trying to slice a table.""" + + +class InputPopulationError(RuntimeError): + """An issue with the input population was found.""" \ No newline at end of file From 962cd6052e00743e15d1fe02e61538e96898402e Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Wed, 8 Oct 2025 10:25:53 -0700 Subject: [PATCH 02/20] Changed RuntimeErrors in abm\models --- activitysim/abm/models/joint_tour_participation.py | 5 +++-- activitysim/abm/models/location_choice.py | 3 ++- activitysim/abm/models/parking_location_choice.py | 3 ++- activitysim/abm/models/trip_destination.py | 5 +++-- activitysim/abm/models/trip_purpose.py | 3 ++- activitysim/abm/models/trip_scheduling.py | 5 +++-- activitysim/abm/models/util/cdap.py | 7 ++++--- activitysim/abm/models/util/probabilistic_scheduling.py | 3 ++- activitysim/abm/models/util/vectorize_tour_scheduling.py | 2 +- 9 files changed, 22 insertions(+), 14 deletions(-) diff --git 
a/activitysim/abm/models/joint_tour_participation.py b/activitysim/abm/models/joint_tour_participation.py index 47bc2b8ff9..e0d4d37fb3 100644 --- a/activitysim/abm/models/joint_tour_participation.py +++ b/activitysim/abm/models/joint_tour_participation.py @@ -21,6 +21,7 @@ from activitysim.core.configuration.base import ComputeSettings, PreprocessorSettings from activitysim.core.configuration.logit import LogitComponentSettings from activitysim.core.util import assign_in_place, reindex +from activitysim.core.exceptions import InvalidTravelError logger = logging.getLogger(__name__) @@ -218,11 +219,11 @@ def participants_chooser( non_choice_col = [col for col in probs.columns if col != choice_col][0] probs[non_choice_col] = 1 - probs[choice_col] if iter > MAX_ITERATIONS + 1: - raise RuntimeError( + raise InvalidTravelError( f"{num_tours_remaining} tours could not be satisfied even with forcing participation" ) else: - raise RuntimeError( + raise InvalidTravelError( f"{num_tours_remaining} tours could not be satisfied after {iter} iterations" ) diff --git a/activitysim/abm/models/location_choice.py b/activitysim/abm/models/location_choice.py index e21653cf92..7f032a8ae6 100644 --- a/activitysim/abm/models/location_choice.py +++ b/activitysim/abm/models/location_choice.py @@ -18,6 +18,7 @@ from activitysim.core.interaction_sample import interaction_sample from activitysim.core.interaction_sample_simulate import interaction_sample_simulate from activitysim.core.util import reindex +from activitysim.core.exceptions import DuplicateWorkflowTableError """ The school/workplace location model predicts the zones in which various people will @@ -1125,7 +1126,7 @@ def iterate_location_choice( assert len(save_sample_df.index.get_level_values(0).unique()) == len(choices_df) # lest they try to put school and workplace samples into the same table if state.is_table(sample_table_name): - raise RuntimeError( + raise DuplicateWorkflowTableError( "dest choice sample table %s already 
exists" % sample_table_name ) state.extend_table(sample_table_name, save_sample_df) diff --git a/activitysim/abm/models/parking_location_choice.py b/activitysim/abm/models/parking_location_choice.py index 075bf6174d..65aef85ecc 100644 --- a/activitysim/abm/models/parking_location_choice.py +++ b/activitysim/abm/models/parking_location_choice.py @@ -23,6 +23,7 @@ from activitysim.core.interaction_sample_simulate import interaction_sample_simulate from activitysim.core.tracing import print_elapsed_time from activitysim.core.util import assign_in_place, drop_unused_columns +from activitysim.core.exceptions import DuplicateWorkflowTableError logger = logging.getLogger(__name__) @@ -500,7 +501,7 @@ def parking_location( # lest they try to put tour samples into the same table if state.is_table(sample_table_name): - raise RuntimeError("sample table %s already exists" % sample_table_name) + raise DuplicateWorkflowTableError("sample table %s already exists" % sample_table_name) state.extend_table(sample_table_name, save_sample_df) expressions.annotate_tables( diff --git a/activitysim/abm/models/trip_destination.py b/activitysim/abm/models/trip_destination.py index 6584134efb..d33f2efa48 100644 --- a/activitysim/abm/models/trip_destination.py +++ b/activitysim/abm/models/trip_destination.py @@ -35,6 +35,7 @@ from activitysim.core.skim_dictionary import DataFrameMatrix from activitysim.core.tracing import print_elapsed_time from activitysim.core.util import assign_in_place, reindex +from activitysim.core.exceptions import InvalidTravelError, DuplicateWorkflowTableError logger = logging.getLogger(__name__) @@ -1664,7 +1665,7 @@ def trip_destination( # testing feature t0 make sure at least one trip fails so trip_purpose_and_destination model is run if state.settings.testing_fail_trip_destination and not trips_df.failed.any(): if (trips_df.trip_num < trips_df.trip_count).sum() == 0: - raise RuntimeError( + raise InvalidTravelError( "can't honor 'testing_fail_trip_destination' 
setting because no intermediate trips" ) @@ -1745,7 +1746,7 @@ def trip_destination( # lest they try to put tour samples into the same table if state.is_table(sample_table_name): - raise RuntimeError("sample table %s already exists" % sample_table_name) + raise DuplicateWorkflowTableError("sample table %s already exists" % sample_table_name) state.extend_table(sample_table_name, save_sample_df) expressions.annotate_tables( diff --git a/activitysim/abm/models/trip_purpose.py b/activitysim/abm/models/trip_purpose.py index 695882938d..73ccaaf8c0 100644 --- a/activitysim/abm/models/trip_purpose.py +++ b/activitysim/abm/models/trip_purpose.py @@ -22,6 +22,7 @@ ) from activitysim.core.configuration.base import PreprocessorSettings, PydanticReadable from activitysim.core.util import reindex +from activitysim.core.exceptions import InvalidTravelError logger = logging.getLogger(__name__) @@ -134,7 +135,7 @@ def choose_intermediate_trip_purpose( state.tracing.write_csv( unmatched_choosers, file_name=file_name, transpose=False ) - raise RuntimeError( + raise InvalidTravelError( "Some trips could not be matched to probs based on join columns %s." 
% probs_join_cols ) diff --git a/activitysim/abm/models/trip_scheduling.py b/activitysim/abm/models/trip_scheduling.py index 0e45d463dc..18595f84cd 100644 --- a/activitysim/abm/models/trip_scheduling.py +++ b/activitysim/abm/models/trip_scheduling.py @@ -18,6 +18,7 @@ from activitysim.core import chunk, config, estimation, expressions, tracing, workflow from activitysim.core.configuration.base import PreprocessorSettings, PydanticReadable from activitysim.core.util import reindex +from activitysim.core.exceptions import InvalidTravelError, PipelineError logger = logging.getLogger(__name__) @@ -615,7 +616,7 @@ def trip_scheduling( logger.info("%s %s failed", trace_label_i, failed.sum()) if (failed.sum() > 0) & (model_settings.scheduling_mode == "relative"): - raise RuntimeError("failed trips with relative scheduling mode") + raise InvalidTravelError("failed trips with relative scheduling mode") if not is_last_iteration: # boolean series of trips whose leg scheduling failed @@ -653,7 +654,7 @@ def trip_scheduling( ) if failfix != FAILFIX_DROP_AND_CLEANUP: - raise RuntimeError( + raise PipelineError( "%s setting '%s' not enabled in settings" % (FAILFIX, FAILFIX_DROP_AND_CLEANUP) ) diff --git a/activitysim/abm/models/util/cdap.py b/activitysim/abm/models/util/cdap.py index 3b4c41466e..b456d61e98 100644 --- a/activitysim/abm/models/util/cdap.py +++ b/activitysim/abm/models/util/cdap.py @@ -10,6 +10,7 @@ from activitysim.core import chunk, logit, simulate, tracing, workflow from activitysim.core.configuration.base import ComputeSettings +from activitysim.core.exceptions import ModelConfigurationError logger = logging.getLogger(__name__) @@ -48,7 +49,7 @@ def add_pn(col, pnum): elif isinstance(col, (list, tuple)): return [c if c == _hh_id_ else "%s_p%s" % (c, pnum) for c in col] else: - raise RuntimeError("add_pn col not list or str") + raise TypeError("add_pn col not list or str") def assign_cdap_rank( @@ -270,7 +271,7 @@ def 
preprocess_interaction_coefficients(interaction_coefficients): "Error in cdap_interaction_coefficients at row %s. Expect only M, N, or H!" % coefficients[~coefficients["activity"].isin(["M", "N", "H"])].index.values ) - raise RuntimeError(msg) + raise ModelConfigurationError(msg) coefficients["cardinality"] = ( coefficients["interaction_ptypes"].astype(str).str.len() @@ -470,7 +471,7 @@ def build_cdap_spec( continue if not (0 <= row.cardinality <= MAX_INTERACTION_CARDINALITY): - raise RuntimeError( + raise ModelConfigurationError( "Bad row cardinality %d for %s" % (row.cardinality, row.slug) ) diff --git a/activitysim/abm/models/util/probabilistic_scheduling.py b/activitysim/abm/models/util/probabilistic_scheduling.py index cdaf64da5a..193a1b703d 100644 --- a/activitysim/abm/models/util/probabilistic_scheduling.py +++ b/activitysim/abm/models/util/probabilistic_scheduling.py @@ -8,6 +8,7 @@ import pandas as pd from activitysim.core import chunk, logit, tracing, workflow +from activitysim.core.exceptions import InvalidTravelError logger = logging.getLogger(__name__) @@ -210,7 +211,7 @@ def _postprocess_scheduling_choices( if scheduling_mode == "relative": if failed.any(): - RuntimeError( + InvalidTravelError( f"Failed trips in realtive mode for {failed.sum()} trips: {choosers[failed]}" ) diff --git a/activitysim/abm/models/util/vectorize_tour_scheduling.py b/activitysim/abm/models/util/vectorize_tour_scheduling.py index d4593c21fa..0ec73c3662 100644 --- a/activitysim/abm/models/util/vectorize_tour_scheduling.py +++ b/activitysim/abm/models/util/vectorize_tour_scheduling.py @@ -82,7 +82,7 @@ def skims_for_logsums( elif isinstance(destination_for_tour_purpose, dict): dest_col_name = destination_for_tour_purpose.get(tour_purpose) else: - raise RuntimeError( + raise TypeError( f"expected string or dict DESTINATION_FOR_TOUR_PURPOSE model_setting for {tour_purpose}" ) From 8a58872c2e87a63a174088e24f919d29ed2145c5 Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Wed, 8 Oct 
2025 11:23:13 -0700 Subject: [PATCH 03/20] Corrected name of SystemConfigurationError --- activitysim/core/exceptions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/activitysim/core/exceptions.py b/activitysim/core/exceptions.py index fbf33785a7..d4970d12bc 100644 --- a/activitysim/core/exceptions.py +++ b/activitysim/core/exceptions.py @@ -58,7 +58,7 @@ class MissingInputTableDefinition(RuntimeError): """An input table definition was expected but not found.""" -class SettingsConfigurationError(RuntimeError): +class SystemConfigurationError(RuntimeError): """An error in the system configuration (possibly in settings.yaml) was found.""" From 07fb9e614bfb4ca1896343cd1914ec82ef735ffe Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Wed, 8 Oct 2025 11:23:31 -0700 Subject: [PATCH 04/20] Updated RuntimeErrors in abm\tables --- activitysim/abm/tables/households.py | 3 ++- activitysim/abm/tables/persons.py | 7 ++++--- activitysim/abm/tables/shadow_pricing.py | 21 +++++++++++---------- 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/activitysim/abm/tables/households.py b/activitysim/abm/tables/households.py index 9f121e7082..c0c33dcbcf 100644 --- a/activitysim/abm/tables/households.py +++ b/activitysim/abm/tables/households.py @@ -11,6 +11,7 @@ from activitysim.abm.tables.util import simple_table_join from activitysim.core import tracing, workflow from activitysim.core.input import read_input_table +from activitysim.core.exceptions import MissingInputTableDefinition logger = logging.getLogger(__name__) @@ -45,7 +46,7 @@ def households(state: workflow.State) -> pd.DataFrame: ) if df.shape[0] == 0: - raise RuntimeError("No override households found in store") + raise MissingInputTableDefinition("No override households found in store") # if we are tracing hh exclusively elif _trace_hh_id and households_sample_size == 1: diff --git a/activitysim/abm/tables/persons.py b/activitysim/abm/tables/persons.py index d5ab67fb57..24526d36f3 100644 
--- a/activitysim/abm/tables/persons.py +++ b/activitysim/abm/tables/persons.py @@ -10,6 +10,7 @@ from activitysim.abm.tables.util import simple_table_join from activitysim.core import workflow from activitysim.core.input import read_input_table +from activitysim.core.exceptions import InputPopulationError logger = logging.getLogger(__name__) @@ -55,7 +56,7 @@ def persons(state: workflow.State) -> pd.DataFrame: f"{persons_without_households.sum()} persons out of {len(df)} without households\n" f"{pd.Series({'person_id': persons_without_households.index.values})}" ) - raise RuntimeError( + raise InputPopulationError( f"{persons_without_households.sum()} persons with bad household_id" ) @@ -67,7 +68,7 @@ def persons(state: workflow.State) -> pd.DataFrame: f"{households_without_persons.sum()} households out of {len(households.index)} without persons\n" f"{pd.Series({'household_id': households_without_persons.index.values})}" ) - raise RuntimeError( + raise InputPopulationError( f"{households_without_persons.sum()} households with no persons" ) @@ -107,5 +108,5 @@ def persons_merged( left_on="person_id", ) if n_persons != len(persons): - raise RuntimeError("number of persons changed") + raise InputPopulationError("number of persons changed") return persons diff --git a/activitysim/abm/tables/shadow_pricing.py b/activitysim/abm/tables/shadow_pricing.py index 586924efc9..5d58e56864 100644 --- a/activitysim/abm/tables/shadow_pricing.py +++ b/activitysim/abm/tables/shadow_pricing.py @@ -18,6 +18,7 @@ from activitysim.core.configuration import PydanticReadable from activitysim.core.configuration.logit import TourLocationComponentSettings from activitysim.core.input import read_input_table +from activitysim.core.exceptions import SystemConfigurationError, MissingNameError logger = logging.getLogger(__name__) @@ -181,7 +182,7 @@ def __init__( logger.warning( "deprecated combination of multiprocessing and not fail_fast" ) - raise RuntimeError( + raise SystemConfigurationError( 
"Shadow pricing requires fail_fast setting in multiprocessing mode" ) @@ -904,7 +905,7 @@ def update_shadow_prices(self, state): self.sampled_persons = sampled_persons else: - raise RuntimeError("unknown SHADOW_PRICE_METHOD %s" % shadow_price_method) + raise SystemConfigurationError("unknown SHADOW_PRICE_METHOD %s, method must be one of 'ctramp', 'daysim', or 'simulation'" % shadow_price_method) def dest_size_terms(self, segment): assert segment in self.segment_ids @@ -922,8 +923,8 @@ def dest_size_terms(self, segment): elif shadow_price_method == "simulation": utility_adjustment = self.shadow_prices[segment] else: - raise RuntimeError( - "unknown SHADOW_PRICE_METHOD %s" % shadow_price_method + raise SystemConfigurationError( + "unknown SHADOW_PRICE_METHOD %s, method must be one of 'ctramp', 'daysim', or 'simulation'" % shadow_price_method ) size_terms = pd.DataFrame( @@ -1036,7 +1037,7 @@ def buffers_for_shadow_pricing(shadow_pricing_info): if np.issubdtype(dtype, np.int64): typecode = ctypes.c_int64 else: - raise RuntimeError( + raise TypeError( "buffer_for_shadow_pricing unrecognized dtype %s" % dtype ) @@ -1085,7 +1086,7 @@ def buffers_for_shadow_pricing_choice(state, shadow_pricing_choice_info): if np.issubdtype(dtype, np.int64): typecode = ctypes.c_int64 else: - raise RuntimeError( + raise TypeError( "buffer_for_shadow_pricing unrecognized dtype %s" % dtype ) @@ -1145,12 +1146,12 @@ def shadow_price_data_from_buffers_choice( block_shapes = shadow_pricing_info["block_shapes"] if model_selector not in block_shapes: - raise RuntimeError( + raise MissingNameError( "Model selector %s not in shadow_pricing_info" % model_selector ) if block_name(model_selector + "_choice") not in data_buffers: - raise RuntimeError( + raise MissingNameError( "Block %s not in data_buffers" % block_name(model_selector + "_choice") ) @@ -1195,12 +1196,12 @@ def shadow_price_data_from_buffers(data_buffers, shadow_pricing_info, model_sele block_shapes = shadow_pricing_info["block_shapes"] 
if model_selector not in block_shapes: - raise RuntimeError( + raise MissingNameError( "Model selector %s not in shadow_pricing_info" % model_selector ) if block_name(model_selector) not in data_buffers: - raise RuntimeError("Block %s not in data_buffers" % block_name(model_selector)) + raise MissingNameError("Block %s not in data_buffers" % block_name(model_selector)) shape = block_shapes[model_selector] data = data_buffers[block_name(model_selector)] From dc60c3860cfd2362fbf314e7c34a91530e7cd0d2 Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Wed, 8 Oct 2025 12:11:59 -0700 Subject: [PATCH 05/20] Added SubprocessError --- activitysim/core/exceptions.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/activitysim/core/exceptions.py b/activitysim/core/exceptions.py index d4970d12bc..a5f4c47694 100644 --- a/activitysim/core/exceptions.py +++ b/activitysim/core/exceptions.py @@ -75,4 +75,7 @@ class TableSliceError(RuntimeError): class InputPopulationError(RuntimeError): - """An issue with the input population was found.""" \ No newline at end of file + """An issue with the input population was found.""" + +class SubprocessError(RuntimeError): + """An error occurred in a subprocess.""" \ No newline at end of file From 182a8cf61f817f21e00e9d938466fb418cabf04b Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Wed, 8 Oct 2025 12:12:16 -0700 Subject: [PATCH 06/20] Updated RuntimeErrors in core directory --- activitysim/core/config.py | 5 +- activitysim/core/configuration/filesystem.py | 4 +- activitysim/core/estimation.py | 9 +-- activitysim/core/input.py | 6 +- activitysim/core/logit.py | 7 ++- activitysim/core/mp_tasks.py | 61 ++++++++++---------- activitysim/core/pathbuilder_cache.py | 5 +- activitysim/core/random.py | 3 +- activitysim/core/simulate.py | 9 +-- activitysim/core/skim_dict_factory.py | 3 +- activitysim/core/test/_tools.py | 4 +- activitysim/core/timetable.py | 3 +- activitysim/core/tracing.py | 4 +- activitysim/core/util.py | 2 +- 
activitysim/core/workflow/checkpoint.py | 15 ++--- activitysim/core/workflow/runner.py | 4 +- activitysim/core/workflow/state.py | 12 ++-- activitysim/core/workflow/tracing.py | 5 +- 18 files changed, 88 insertions(+), 73 deletions(-) diff --git a/activitysim/core/config.py b/activitysim/core/config.py index 730c383157..a88bc6233b 100644 --- a/activitysim/core/config.py +++ b/activitysim/core/config.py @@ -7,6 +7,7 @@ from activitysim.core import workflow from activitysim.core.configuration.base import PydanticBase from activitysim.core.configuration.logit import LogitComponentSettings +from activitysim.core.exceptions import ModelConfigurationError # ActivitySim # See full license in LICENSE.txt. @@ -123,13 +124,13 @@ def get_logit_model_settings( if logit_type not in ["NL", "MNL"]: logger.error("Unrecognized logit type '%s'" % logit_type) - raise RuntimeError("Unrecognized logit type '%s'" % logit_type) + raise ModelConfigurationError("Unrecognized logit type '%s'. Logit type must be 'NL' for nested logit or 'MNL' for multinomial logit" % logit_type) if logit_type == "NL": nests = model_settings.get("NESTS", None) if nests is None: logger.error("No NEST found in model spec for NL model type") - raise RuntimeError("No NEST found in model spec for NL model type") + raise ModelConfigurationError("No NEST found in model spec for NL model type") return nests diff --git a/activitysim/core/configuration/filesystem.py b/activitysim/core/configuration/filesystem.py index ba2049e31c..b766a81f27 100644 --- a/activitysim/core/configuration/filesystem.py +++ b/activitysim/core/configuration/filesystem.py @@ -16,7 +16,7 @@ from activitysim.core.configuration.base import PydanticBase from activitysim.core.configuration.logit import LogitComponentSettings -from activitysim.core.exceptions import SettingsFileNotFoundError +from activitysim.core.exceptions import SettingsFileNotFoundError, SystemConfigurationError from activitysim.core.util import parse_suffix_args, 
suffix_tables_in_settings logger = logging.getLogger(__name__) @@ -768,7 +768,7 @@ def backfill_settings(settings, backfill): logger.error( f"Unexpected additional settings: {additional_settings}" ) - raise RuntimeError( + raise SystemConfigurationError( "'include_settings' must appear alone in settings file." ) diff --git a/activitysim/core/estimation.py b/activitysim/core/estimation.py index 5b29de6960..abfc36f537 100644 --- a/activitysim/core/estimation.py +++ b/activitysim/core/estimation.py @@ -16,6 +16,7 @@ from activitysim.core.configuration.base import PydanticBase from activitysim.core.util import reindex from activitysim.core.yaml_tools import safe_dump +from activitysim.core.exceptions import DuplicateWorkflowTableError, DuplicateLoadableObjectError logger = logging.getLogger("estimation") @@ -433,7 +434,7 @@ def write_table( def cache_table(df, table_name, append): if table_name in self.tables and not append: - raise RuntimeError( + raise DuplicateWorkflowTableError( "cache_table %s append=False and table exists" % (table_name,) ) if table_name in self.tables: @@ -450,7 +451,7 @@ def write_table(df, table_name, index, append, bundle_directory, filetype): # check if file exists file_exists = os.path.isfile(file_path) if file_exists and not append: - raise RuntimeError( + raise DuplicateLoadableObjectError( "write_table %s append=False and file exists: %s" % (table_name, file_path) ) @@ -470,7 +471,7 @@ def write_table(df, table_name, index, append, bundle_directory, filetype): elif filetype == "pkl": self.write_pickle(df, file_path, index, append) else: - raise RuntimeError( + raise IOError( f"Unsupported filetype: {filetype}, allowed options are csv, parquet, pkl" ) @@ -564,7 +565,7 @@ def write_omnibus_table(self): self.write_pickle(df, file_path, index=True, append=False) else: - raise RuntimeError(f"Unsupported filetype: {filetype}") + raise IOError(f"Unsupported filetype: {filetype}") self.debug("wrote_omnibus_choosers: %s" % file_path) diff --git 
a/activitysim/core/input.py b/activitysim/core/input.py index 734679996a..6ac0794ce6 100644 --- a/activitysim/core/input.py +++ b/activitysim/core/input.py @@ -10,7 +10,7 @@ from activitysim.core import util, workflow from activitysim.core.configuration import InputTable -from activitysim.core.exceptions import MissingInputTableDefinition +from activitysim.core.exceptions import MissingInputTableDefinition, ModelConfigurationError logger = logging.getLogger(__name__) @@ -204,7 +204,7 @@ def read_from_table_info(table_info: InputTable, state): f"index_col '{index_col}' specified in configs but not in {tablename} table!" ) logger.error(f"{tablename} columns are: {list(df.columns)}") - raise RuntimeError(f"index_col '{index_col}' not in {tablename} table!") + raise ModelConfigurationError(f"index_col '{index_col}' not in {tablename} table!") if keep_columns: logger.debug("keeping columns: %s" % keep_columns) @@ -214,7 +214,7 @@ def read_from_table_info(table_info: InputTable, state): f"{list(set(keep_columns).difference(set(df.columns)))}" ) logger.error(f"{tablename} table has columns: {list(df.columns)}") - raise RuntimeError(f"Required columns missing from {tablename} table") + raise ModelConfigurationError(f"Required columns missing from {tablename} table") df = df[keep_columns] diff --git a/activitysim/core/logit.py b/activitysim/core/logit.py index 034d970ca6..83bc9f7aee 100644 --- a/activitysim/core/logit.py +++ b/activitysim/core/logit.py @@ -11,6 +11,7 @@ from activitysim.core import tracing, workflow from activitysim.core.choosing import choice_maker from activitysim.core.configuration.logit import LogitNestSpec +from activitysim.core.exceptions import InvalidTravelError, ModelConfigurationError logger = logging.getLogger(__name__) @@ -83,7 +84,7 @@ def report_bad_choices( logger.warning(row_msg) if raise_error: - raise RuntimeError(msg_with_count) + raise InvalidTravelError(msg_with_count) def utils_to_logsums(utils, exponentiated=False, 
allow_zero_probs=False): @@ -469,7 +470,7 @@ def validate_nest_spec(nest_spec: dict | LogitNestSpec, trace_label: str): # nest.print() if duplicates: - raise RuntimeError( + raise ModelConfigurationError( f"validate_nest_spec:duplicate nest key/s '{duplicates}' in nest spec - {trace_label}" ) @@ -568,7 +569,7 @@ def each_nest(nest_spec: dict | LogitNestSpec, type=None, post_order=False): Nest object with info about the current node (nest or leaf) """ if type is not None and type not in Nest.nest_types(): - raise RuntimeError("Unknown nest type '%s' in call to each_nest" % type) + raise ModelConfigurationError("Unknown nest type '%s' in call to each_nest" % type) if isinstance(nest_spec, dict): nest_spec = LogitNestSpec.model_validate(nest_spec) diff --git a/activitysim/core/mp_tasks.py b/activitysim/core/mp_tasks.py index 7d31134bc0..27d26f255d 100644 --- a/activitysim/core/mp_tasks.py +++ b/activitysim/core/mp_tasks.py @@ -26,6 +26,7 @@ NON_TABLE_COLUMNS, ParquetStore, ) +from activitysim.core.exceptions import * logger = logging.getLogger(__name__) @@ -440,7 +441,7 @@ def build_slice_rules(state: workflow.State, slice_info, pipeline_tables): tables[table_name] = pipeline_tables[table_name] if primary_slicer not in tables: - raise RuntimeError("primary slice table '%s' not in pipeline" % primary_slicer) + raise SystemConfigurationError("primary slice table '%s' not in pipeline" % primary_slicer) # allow wildcard 'True' to avoid slicing (or coalescing) any tables no explicitly listed in slice_info.tables # populationsim uses slice.except wildcards to avoid listing control tables (etc) that should not be sliced, @@ -532,7 +533,7 @@ def apportion_pipeline(state: workflow.State, sub_proc_names, step_info): """ slice_info = step_info.get("slice", None) if slice_info is None: - raise RuntimeError("missing slice_info.slice") + raise SystemConfigurationError("missing slice_info.slice") multiprocess_step_name = step_info.get("name", None) pipeline_file_name = 
state.get_injectable("pipeline_file_name") @@ -542,14 +543,14 @@ def apportion_pipeline(state: workflow.State, sub_proc_names, step_info): "last_checkpoint_in_previous_multiprocess_step", None ) if last_checkpoint_in_previous_multiprocess_step is None: - raise RuntimeError("missing last_checkpoint_in_previous_multiprocess_step") + raise CheckpointNameNotFoundError("missing last_checkpoint_in_previous_multiprocess_step") state.checkpoint.restore(resume_after=last_checkpoint_in_previous_multiprocess_step) # ensure all tables are in the pipeline checkpointed_tables = state.checkpoint.list_tables() for table_name in slice_info["tables"]: if table_name not in checkpointed_tables: - raise RuntimeError(f"slicer table {table_name} not found in pipeline") + raise StateAccessError(f"slicer table {table_name} not found in pipeline") checkpoints_df = state.checkpoint.get_inventory() @@ -601,7 +602,7 @@ def apportion_pipeline(state: workflow.State, sub_proc_names, step_info): if rule["slice_by"] is not None and num_sub_procs > len(df): # almost certainly a configuration error - raise RuntimeError( + raise SystemConfigurationError( f"apportion_pipeline: multiprocess step {multiprocess_step_name} " f"slice table {table_name} has fewer rows {df.shape} " f"than num_processes ({num_sub_procs})." 
@@ -634,7 +635,7 @@ def apportion_pipeline(state: workflow.State, sub_proc_names, step_info): # don't slice mirrored tables sliced_tables[table_name] = df else: - raise RuntimeError( + raise TableSliceError( "Unrecognized slice rule '%s' for table %s" % (rule["slice_by"], table_name) ) @@ -678,7 +679,7 @@ def apportion_pipeline(state: workflow.State, sub_proc_names, step_info): if rule["slice_by"] is not None and num_sub_procs > len(df): # almost certainly a configuration error - raise RuntimeError( + raise SystemConfigurationError( f"apportion_pipeline: multiprocess step {multiprocess_step_name} " f"slice table {table_name} has fewer rows {df.shape} " f"than num_processes ({num_sub_procs})." @@ -711,7 +712,7 @@ def apportion_pipeline(state: workflow.State, sub_proc_names, step_info): # don't slice mirrored tables sliced_tables[table_name] = df else: - raise RuntimeError( + raise TableSliceError( "Unrecognized slice rule '%s' for table %s" % (rule["slice_by"], table_name) ) @@ -970,7 +971,7 @@ def adjust_chunk_size_for_shared_memory(chunk_size, data_buffers, num_processes) ) if adjusted_chunk_size <= 0: - raise RuntimeError( + raise SystemConfigurationError( f"adjust_chunk_size_for_shared_memory: chunk_size too small for shared memory. 
" f"adjusted_chunk_size: {adjusted_chunk_size}" ) @@ -1366,7 +1367,7 @@ def check_proc_status(state: workflow.State): state, f"error terminating process {op.name}: {e}", ) - raise RuntimeError("Process %s failed" % (p.name,)) + raise SubprocessError("Process %s failed" % (p.name,)) step_name = step_info["name"] @@ -1531,7 +1532,7 @@ def run_sub_task(state: workflow.State, p): if p.exitcode: error(state, f"Process {p.name} returned exitcode {p.exitcode}") - raise RuntimeError("Process %s returned exitcode %s" % (p.name, p.exitcode)) + raise SubprocessError("Process %s returned exitcode %s" % (p.name, p.exitcode)) def drop_breadcrumb(state: workflow.State, step_name, crumb, value=True): @@ -1596,7 +1597,7 @@ def run_multiprocess(state: workflow.State, injectables): run_list = get_run_list(state) if not run_list["multiprocess"]: - raise RuntimeError( + raise SubprocessError( "run_multiprocess called but multiprocess flag is %s" % run_list["multiprocess"] ) @@ -1719,7 +1720,7 @@ def find_breadcrumb(crumb, default=None): ) if len(completed) != num_processes: - raise RuntimeError( + raise SubprocessError( "%s processes failed in step %s" % (num_processes - len(completed), step_name) ) @@ -1787,7 +1788,7 @@ def get_breadcrumbs(state: workflow.State, run_list): # - can't resume multiprocess without breadcrumbs file if not breadcrumbs: error(state, f"empty breadcrumbs for resume_after '{resume_after}'") - raise RuntimeError("empty breadcrumbs for resume_after '%s'" % resume_after) + raise CheckpointNameNotFoundError("empty breadcrumbs for resume_after '%s'" % resume_after) # if resume_after is specified by name if resume_after != LAST_CHECKPOINT: @@ -1808,7 +1809,7 @@ def get_breadcrumbs(state: workflow.State, run_list): if resume_step_name not in previous_steps: error(state, f"resume_after model '{resume_after}' not in breadcrumbs") - raise RuntimeError( + raise CheckpointNameNotFoundError( "resume_after model '%s' not in breadcrumbs" % resume_after ) @@ -1826,7 +1827,7 @@ 
def get_breadcrumbs(state: workflow.State, run_list): multiprocess_step_names = [step["name"] for step in run_list["multiprocess_steps"]] if list(breadcrumbs.keys()) != multiprocess_step_names[: len(breadcrumbs)]: - raise RuntimeError( + raise CheckpointNameNotFoundError( "last run steps don't match run list: %s" % list(breadcrumbs.keys()) ) @@ -1911,15 +1912,15 @@ def get_run_list(state: workflow.State): } if not models or not isinstance(models, list): - raise RuntimeError("No models list in settings file") + raise SystemConfigurationError("No models list in settings file") if resume_after == models[-1]: - raise RuntimeError( + raise SystemConfigurationError( "resume_after '%s' is last model in models list" % resume_after ) if multiprocess: if not multiprocess_steps: - raise RuntimeError( + raise SystemConfigurationError( "multiprocess setting is %s but no multiprocess_steps setting" % multiprocess ) @@ -1935,15 +1936,15 @@ def get_run_list(state: workflow.State): # - validate step name name = step.get("name", None) if not name: - raise RuntimeError( + raise SystemConfigurationError( "missing name for step %s" " in multiprocess_steps" % istep ) if name in step_names: - raise RuntimeError( + raise SystemConfigurationError( "duplicate step name %s" " in multiprocess_steps" % name ) if name in models: - raise RuntimeError( + raise SystemConfigurationError( f"multiprocess_steps step name '{name}' cannot also be a model name" ) @@ -1953,7 +1954,7 @@ def get_run_list(state: workflow.State): num_processes = step.get("num_processes", 0) or 0 if not isinstance(num_processes, int) or num_processes < 0: - raise RuntimeError( + raise SystemConfigurationError( "bad value (%s) for num_processes for step %s" " in multiprocess_steps" % (num_processes, name) ) @@ -1975,7 +1976,7 @@ def get_run_list(state: workflow.State): if num_processes == 0: num_processes = 1 if num_processes > 1: - raise RuntimeError( + raise SystemConfigurationError( "num_processes > 1 but no slice info for 
step %s" " in multiprocess_steps" % name ) @@ -2004,19 +2005,19 @@ def get_run_list(state: workflow.State): slice = step.get("slice", None) if slice: if "tables" not in slice: - raise RuntimeError( + raise SystemConfigurationError( "missing tables list for step %s" " in multiprocess_steps" % istep ) start = step.get(start_tag, None) if not name: - raise RuntimeError( + raise SystemConfigurationError( "missing %s tag for step '%s' (%s)" " in multiprocess_steps" % (start_tag, name, istep) ) if start not in models: - raise RuntimeError( + raise SystemConfigurationError( "%s tag '%s' for step '%s' (%s) not in models list" % (start_tag, start, name, istep) ) @@ -2024,14 +2025,14 @@ def get_run_list(state: workflow.State): starts[istep] = models.index(start) if istep == 0 and starts[istep] != 0: - raise RuntimeError( + raise SystemConfigurationError( "%s tag '%s' for first step '%s' (%s)" " is not first model in models list" % (start_tag, start, name, istep) ) if istep > 0 and starts[istep] <= starts[istep - 1]: - raise RuntimeError( + raise SystemConfigurationError( "%s tag '%s' for step '%s' (%s)" " falls before that of prior step in models list" % (start_tag, start, name, istep) @@ -2048,7 +2049,7 @@ def get_run_list(state: workflow.State): step_models = models[starts[istep] : starts[istep + 1]] if step_models[-1][0] == LAST_CHECKPOINT: - raise RuntimeError( + raise CheckpointNameNotFoundError( "Final model '%s' in step %s models list not checkpointed" % (step_models[-1], name) ) diff --git a/activitysim/core/pathbuilder_cache.py b/activitysim/core/pathbuilder_cache.py index a59ca17ec4..6ed3a7061e 100644 --- a/activitysim/core/pathbuilder_cache.py +++ b/activitysim/core/pathbuilder_cache.py @@ -14,6 +14,7 @@ import psutil from activitysim.core import config, los, util +from activitysim.core.exceptions import StateAccessError, TableTypeError logger = logging.getLogger(__name__) @@ -177,7 +178,7 @@ def open(self): f"TVPBCache.open {self.cache_tag} read fully_populated 
data array from mmap file" ) else: - raise RuntimeError( + raise StateAccessError( f"Pathbuilder cache not found. Did you forget to run initialize tvpb?" f"Expected cache file: {self.cache_path}" ) @@ -253,7 +254,7 @@ def allocate_data_buffer(self, shared=False): elif dtype_name == "float32": typecode = "f" else: - raise RuntimeError( + raise TableTypeError( "allocate_data_buffer unrecognized dtype %s" % dtype_name ) diff --git a/activitysim/core/random.py b/activitysim/core/random.py index 9960e98e27..713f2e1492 100644 --- a/activitysim/core/random.py +++ b/activitysim/core/random.py @@ -10,6 +10,7 @@ import pandas as pd from activitysim.core.util import reindex +from activitysim.core.exceptions import DuplicateLoadableObjectError from .tracing import print_elapsed_time @@ -528,7 +529,7 @@ def set_base_seed(self, seed=None): """ if self.step_name is not None or self.channels: - raise RuntimeError("Can only call set_base_seed before the first step.") + raise DuplicateLoadableObjectError("Can only call set_base_seed before the first step.") assert len(list(self.channels.keys())) == 0 diff --git a/activitysim/core/simulate.py b/activitysim/core/simulate.py index 6cd4a51f62..479023451d 100644 --- a/activitysim/core/simulate.py +++ b/activitysim/core/simulate.py @@ -40,6 +40,7 @@ SPEC_EXPRESSION_NAME, SPEC_LABEL_NAME, ) +from activitysim.core.exceptions import ModelConfigurationError logger = logging.getLogger(__name__) @@ -187,13 +188,13 @@ def read_model_coefficients( f"duplicate coefficients in {file_path}\n" f"{coefficients[coefficients.index.duplicated(keep=False)]}" ) - raise RuntimeError(f"duplicate coefficients in {file_path}") + raise ModelConfigurationError(f"duplicate coefficients in {file_path}") if coefficients.value.isnull().any(): logger.warning( f"null coefficients in {file_path}\n{coefficients[coefficients.value.isnull()]}" ) - raise RuntimeError(f"null coefficients in {file_path}") + raise ModelConfigurationError(f"null coefficients in {file_path}") 
return coefficients @@ -250,7 +251,7 @@ def spec_for_segment( try: assert (spec.astype(float) == spec).all(axis=None) except (ValueError, AssertionError): - raise RuntimeError( + raise ModelConfigurationError( f"No coefficient file specified for {spec_file_name} " f"but not all spec column values are numeric" ) from None @@ -395,7 +396,7 @@ def get_segment_coefficients( FutureWarning, ) else: - raise RuntimeError("No COEFFICIENTS setting in model_settings") + raise ModelConfigurationError("No COEFFICIENTS setting in model_settings") if legacy: constants = config.get_model_constants(model_settings) diff --git a/activitysim/core/skim_dict_factory.py b/activitysim/core/skim_dict_factory.py index e0bd23ef3e..2ca90c4c66 100644 --- a/activitysim/core/skim_dict_factory.py +++ b/activitysim/core/skim_dict_factory.py @@ -14,6 +14,7 @@ import openmatrix as omx from activitysim.core import skim_dictionary, util +from activitysim.core.exceptions import TableTypeError logger = logging.getLogger(__name__) @@ -462,7 +463,7 @@ def allocate_skim_buffer(self, skim_info, shared=False): elif dtype_name == "float32": typecode = "f" else: - raise RuntimeError( + raise TableTypeError( "allocate_skim_buffer unrecognized dtype %s" % dtype_name ) diff --git a/activitysim/core/test/_tools.py b/activitysim/core/test/_tools.py index 618b467bb9..3169b771c2 100644 --- a/activitysim/core/test/_tools.py +++ b/activitysim/core/test/_tools.py @@ -7,6 +7,8 @@ import pandas as pd +from activitysim.core.exceptions import PipelineError + def run_if_exists(filename): import pytest @@ -176,6 +178,6 @@ def progressive_checkpoint_test( # generate the reference pipeline if it did not exist if not ref_target.exists(): state.checkpoint.store.make_zip_archive(ref_target) - raise RuntimeError( + raise PipelineError( f"Reference pipeline {ref_target} did not exist, so it was created." 
) diff --git a/activitysim/core/timetable.py b/activitysim/core/timetable.py index 5743aeef0a..e37ec36efc 100644 --- a/activitysim/core/timetable.py +++ b/activitysim/core/timetable.py @@ -10,6 +10,7 @@ import pandas as pd from activitysim.core import chunk, configuration, workflow +from activitysim.core.exceptions import DuplicateWorkflowTableError logger = logging.getLogger(__name__) @@ -463,7 +464,7 @@ def replace_table(self, state: workflow.State): % self.windows_table_name, level=logging.ERROR, ) - raise RuntimeError("Attempt to replace_table while in transaction") + raise DuplicateWorkflowTableError("Attempt to replace_table while in transaction") # get windows_df from bottleneck function in case updates to self.person_window # do not write through to pandas dataframe diff --git a/activitysim/core/tracing.py b/activitysim/core/tracing.py index 88bf0fc167..82815aa97f 100644 --- a/activitysim/core/tracing.py +++ b/activitysim/core/tracing.py @@ -11,6 +11,8 @@ import numpy as np import pandas as pd +from activitysim.core.exceptions import TableSliceError + # Configurations ASIM_LOGGER = "activitysim" CSV_FILE_TYPE = "csv" @@ -247,7 +249,7 @@ def slice_ids(df, ids, column=None): except KeyError: # this happens if specified slicer column is not in df # df = df[0:0] - raise RuntimeError("slice_ids slicer column '%s' not in dataframe" % column) + raise TableSliceError("slice_ids slicer column '%s' not in dataframe" % column) return df diff --git a/activitysim/core/util.py b/activitysim/core/util.py index 429d3eee01..c3f0385152 100644 --- a/activitysim/core/util.py +++ b/activitysim/core/util.py @@ -298,7 +298,7 @@ def quick_loc_series(loc_list, target_series): elif isinstance(loc_list, np.ndarray) or isinstance(loc_list, list): left_df = pd.DataFrame({left_on: loc_list}) else: - raise RuntimeError( + raise TypeError( "quick_loc_series loc_list of unexpected type %s" % type(loc_list) ) diff --git a/activitysim/core/workflow/checkpoint.py 
b/activitysim/core/workflow/checkpoint.py index d5a7286359..c3fe7d8ae7 100644 --- a/activitysim/core/workflow/checkpoint.py +++ b/activitysim/core/workflow/checkpoint.py @@ -18,6 +18,7 @@ TableNameNotFound, ) from activitysim.core.workflow.accessor import FromState, StateAccessor +from activitysim.core.exceptions import CheckpointNameNotFoundError, PipelineError logger = logging.getLogger(__name__) @@ -565,7 +566,7 @@ def open_store( """ if self._checkpoint_store is not None: - raise RuntimeError("Pipeline store is already open!") + raise PipelineError("Pipeline store is already open!") if pipeline_file_name is None: pipeline_file_path = self.default_pipeline_file_path() @@ -775,7 +776,7 @@ def load(self, checkpoint_name: str, store=None): msg = f"Couldn't find checkpoint '{checkpoint_name}' in checkpoints" print(checkpoints[CHECKPOINT_NAME]) logger.error(msg) - raise RuntimeError(msg) from None + raise CheckpointNameNotFoundError(msg) from None # convert pandas dataframe back to array of checkpoint dicts checkpoints = checkpoints.to_dict(orient="records") @@ -1201,7 +1202,7 @@ def load_dataframe(self, table_name, checkpoint_name=None): if table_name not in self.last_checkpoint and self._obj.is_table(table_name): if checkpoint_name is not None: - raise RuntimeError( + raise CheckpointNameNotFoundError( f"checkpoint.dataframe: checkpoint_name ({checkpoint_name!r}) not " f"supported for non-checkpointed table {table_name!r}" ) @@ -1211,10 +1212,10 @@ def load_dataframe(self, table_name, checkpoint_name=None): # if there is no checkpoint name given, do not attempt to read from store if checkpoint_name is None: if table_name not in self.last_checkpoint: - raise RuntimeError("table '%s' never checkpointed." % table_name) + raise CheckpointNameNotFoundError("table '%s' never checkpointed." % table_name) if not self.last_checkpoint[table_name]: - raise RuntimeError("table '%s' was dropped." % table_name) + raise CheckpointNameNotFoundError("table '%s' was dropped." 
% table_name) return self._obj.get_dataframe(table_name) @@ -1224,13 +1225,13 @@ def load_dataframe(self, table_name, checkpoint_name=None): None, ) if checkpoint is None: - raise RuntimeError("checkpoint '%s' not in checkpoints." % checkpoint_name) + raise CheckpointNameNotFoundError("checkpoint '%s' not in checkpoints." % checkpoint_name) # find the checkpoint that table was written to store last_checkpoint_name = checkpoint.get(table_name, None) if not last_checkpoint_name: - raise RuntimeError( + raise CheckpointNameNotFoundError( "table '%s' not in checkpoint '%s'." % (table_name, checkpoint_name) ) diff --git a/activitysim/core/workflow/runner.py b/activitysim/core/workflow/runner.py index 8eba53cbef..a45b731000 100644 --- a/activitysim/core/workflow/runner.py +++ b/activitysim/core/workflow/runner.py @@ -7,7 +7,7 @@ from datetime import timedelta from activitysim.core import tracing -from activitysim.core.exceptions import DuplicateWorkflowNameError +from activitysim.core.exceptions import DuplicateWorkflowNameError, TableSliceError from activitysim.core.workflow.accessor import FromState, StateAccessor from activitysim.core.workflow.checkpoint import ( CHECKPOINT_NAME, @@ -265,7 +265,7 @@ def _pre_run_step(self, model_name: str) -> bool | None: if model_name in checkpointed_models: if self._obj.settings.duplicate_step_execution == "error": checkpointed_model_bullets = "\n - ".join(checkpointed_models) - raise RuntimeError( + raise TableSliceError( f"Checkpointed Models:\n - {checkpointed_model_bullets}\n" f"Cannot run model '{model_name}' more than once" ) diff --git a/activitysim/core/workflow/state.py b/activitysim/core/workflow/state.py index 6178f24884..2a428f78a0 100644 --- a/activitysim/core/workflow/state.py +++ b/activitysim/core/workflow/state.py @@ -20,7 +20,7 @@ import activitysim.core.random from activitysim.core.configuration import FileSystem, NetworkSettings, Settings -from activitysim.core.exceptions import StateAccessError +from 
activitysim.core.exceptions import StateAccessError, CheckpointNameNotFoundError from activitysim.core.workflow.checkpoint import LAST_CHECKPOINT, Checkpoints from activitysim.core.workflow.chunking import Chunking from activitysim.core.workflow.dataset import Datasets @@ -1017,7 +1017,7 @@ def get_table(self, table_name, checkpoint_name=None): table_name ): if checkpoint_name is not None: - raise RuntimeError( + raise CheckpointNameNotFoundError( f"get_table: checkpoint_name ({checkpoint_name!r}) not " f"supported for non-checkpointed table {table_name!r}" ) @@ -1027,10 +1027,10 @@ def get_table(self, table_name, checkpoint_name=None): # if they want current version of table, no need to read from pipeline store if checkpoint_name is None: if table_name not in self.checkpoint.last_checkpoint: - raise RuntimeError("table '%s' never checkpointed." % table_name) + raise CheckpointNameNotFoundError("table '%s' never checkpointed." % table_name) if not self.checkpoint.last_checkpoint[table_name]: - raise RuntimeError("table '%s' was dropped." % table_name) + raise CheckpointNameNotFoundError("table '%s' was dropped." % table_name) return self._context.get(table_name) @@ -1044,13 +1044,13 @@ def get_table(self, table_name, checkpoint_name=None): None, ) if checkpoint is None: - raise RuntimeError("checkpoint '%s' not in checkpoints." % checkpoint_name) + raise CheckpointNameNotFoundError("checkpoint '%s' not in checkpoints." % checkpoint_name) # find the checkpoint that table was written to store last_checkpoint_name = checkpoint.get(table_name, None) if not last_checkpoint_name: - raise RuntimeError( + raise CheckpointNameNotFoundError( "table '%s' not in checkpoint '%s'." 
% (table_name, checkpoint_name) ) diff --git a/activitysim/core/workflow/tracing.py b/activitysim/core/workflow/tracing.py index 580c6fad9b..f987d32133 100644 --- a/activitysim/core/workflow/tracing.py +++ b/activitysim/core/workflow/tracing.py @@ -21,6 +21,7 @@ from activitysim.core import tracing from activitysim.core.test import assert_equal, assert_frame_substantively_equal from activitysim.core.workflow.accessor import FromState, StateAccessor +from activitysim.core.exceptions import TableSliceError logger = logging.getLogger(__name__) @@ -548,7 +549,7 @@ def interaction_trace_rows(self, interaction_df, choosers, sample_size=None): targets = traceable_table_ids["proto_tours"] else: print(choosers.columns) - raise RuntimeError( + raise TableSliceError( "interaction_trace_rows don't know how to slice index '%s'" % choosers.index.name ) @@ -629,7 +630,7 @@ def get_trace_target(self, df: pd.DataFrame, slicer: str, column: Any = None): column = slicer if column is None and df.index.name != slicer: - raise RuntimeError( + raise TableSliceError( "bad slicer '%s' for df with index '%s'" % (slicer, df.index.name) ) From aa78a076a4c1f14993494ea581b760ab6e662e1d Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Wed, 8 Oct 2025 12:12:52 -0700 Subject: [PATCH 07/20] Renamed `TableSliceError` to `TableSlicingError` --- activitysim/core/exceptions.py | 2 +- activitysim/core/mp_tasks.py | 4 ++-- activitysim/core/tracing.py | 4 ++-- activitysim/core/workflow/runner.py | 4 ++-- activitysim/core/workflow/tracing.py | 6 +++--- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/activitysim/core/exceptions.py b/activitysim/core/exceptions.py index a5f4c47694..1e877cd35b 100644 --- a/activitysim/core/exceptions.py +++ b/activitysim/core/exceptions.py @@ -70,7 +70,7 @@ class InvalidTravelError(RuntimeError): """Travel behavior could not be completed in a valid way.""" -class TableSliceError(RuntimeError): +class TableSlicingError(RuntimeError): """An error occurred trying 
to slice a table.""" diff --git a/activitysim/core/mp_tasks.py b/activitysim/core/mp_tasks.py index 27d26f255d..c89a14cd6e 100644 --- a/activitysim/core/mp_tasks.py +++ b/activitysim/core/mp_tasks.py @@ -635,7 +635,7 @@ def apportion_pipeline(state: workflow.State, sub_proc_names, step_info): # don't slice mirrored tables sliced_tables[table_name] = df else: - raise TableSliceError( + raise TableSlicingError( "Unrecognized slice rule '%s' for table %s" % (rule["slice_by"], table_name) ) @@ -712,7 +712,7 @@ def apportion_pipeline(state: workflow.State, sub_proc_names, step_info): # don't slice mirrored tables sliced_tables[table_name] = df else: - raise TableSliceError( + raise TableSlicingError( "Unrecognized slice rule '%s' for table %s" % (rule["slice_by"], table_name) ) diff --git a/activitysim/core/tracing.py b/activitysim/core/tracing.py index 82815aa97f..270c8d6d95 100644 --- a/activitysim/core/tracing.py +++ b/activitysim/core/tracing.py @@ -11,7 +11,7 @@ import numpy as np import pandas as pd -from activitysim.core.exceptions import TableSliceError +from activitysim.core.exceptions import TableSlicingError # Configurations ASIM_LOGGER = "activitysim" @@ -249,7 +249,7 @@ def slice_ids(df, ids, column=None): except KeyError: # this happens if specified slicer column is not in df # df = df[0:0] - raise TableSliceError("slice_ids slicer column '%s' not in dataframe" % column) + raise TableSlicingError("slice_ids slicer column '%s' not in dataframe" % column) return df diff --git a/activitysim/core/workflow/runner.py b/activitysim/core/workflow/runner.py index a45b731000..639ffc0ef9 100644 --- a/activitysim/core/workflow/runner.py +++ b/activitysim/core/workflow/runner.py @@ -7,7 +7,7 @@ from datetime import timedelta from activitysim.core import tracing -from activitysim.core.exceptions import DuplicateWorkflowNameError, TableSliceError +from activitysim.core.exceptions import DuplicateWorkflowNameError, TableSlicingError from activitysim.core.workflow.accessor 
import FromState, StateAccessor from activitysim.core.workflow.checkpoint import ( CHECKPOINT_NAME, @@ -265,7 +265,7 @@ def _pre_run_step(self, model_name: str) -> bool | None: if model_name in checkpointed_models: if self._obj.settings.duplicate_step_execution == "error": checkpointed_model_bullets = "\n - ".join(checkpointed_models) - raise TableSliceError( + raise TableSlicingError( f"Checkpointed Models:\n - {checkpointed_model_bullets}\n" f"Cannot run model '{model_name}' more than once" ) diff --git a/activitysim/core/workflow/tracing.py b/activitysim/core/workflow/tracing.py index f987d32133..2669e5b9b8 100644 --- a/activitysim/core/workflow/tracing.py +++ b/activitysim/core/workflow/tracing.py @@ -21,7 +21,7 @@ from activitysim.core import tracing from activitysim.core.test import assert_equal, assert_frame_substantively_equal from activitysim.core.workflow.accessor import FromState, StateAccessor -from activitysim.core.exceptions import TableSliceError +from activitysim.core.exceptions import TableSlicingError logger = logging.getLogger(__name__) @@ -549,7 +549,7 @@ def interaction_trace_rows(self, interaction_df, choosers, sample_size=None): targets = traceable_table_ids["proto_tours"] else: print(choosers.columns) - raise TableSliceError( + raise TableSlicingError( "interaction_trace_rows don't know how to slice index '%s'" % choosers.index.name ) @@ -630,7 +630,7 @@ def get_trace_target(self, df: pd.DataFrame, slicer: str, column: Any = None): column = slicer if column is None and df.index.name != slicer: - raise TableSliceError( + raise TableSlicingError( "bad slicer '%s' for df with index '%s'" % (slicer, df.index.name) ) From a9d57a23348377fbffa400822164cca24088a3ff Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Wed, 8 Oct 2025 12:51:37 -0700 Subject: [PATCH 08/20] Classified RuntimeErrors in input and settings checkers as ModelConfigurationErrors --- activitysim/abm/models/input_checker.py | 3 ++- activitysim/abm/models/settings_checker.py | 3 ++- 
2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/activitysim/abm/models/input_checker.py b/activitysim/abm/models/input_checker.py index 568da851e1..68274ba694 100644 --- a/activitysim/abm/models/input_checker.py +++ b/activitysim/abm/models/input_checker.py @@ -13,6 +13,7 @@ from activitysim.core import workflow from activitysim.core.input import read_input_table +from activitysim.core.exceptions import ModelConfigurationError logger = logging.getLogger(__name__) file_logger = logger.getChild("logfile") @@ -468,6 +469,6 @@ def input_checker(state: workflow.State): if input_check_failure: logger.error("Run is killed due to input checker failure!!") - raise RuntimeError( + raise ModelConfigurationError( "Encountered error in input checker, see input_checker.log for details" ) diff --git a/activitysim/abm/models/settings_checker.py b/activitysim/abm/models/settings_checker.py index 907e7e9995..77be268b13 100644 --- a/activitysim/abm/models/settings_checker.py +++ b/activitysim/abm/models/settings_checker.py @@ -21,6 +21,7 @@ eval_nest_coefficients, read_model_coefficient_template, ) +from activitysim.core.exceptions import ModelConfigurationError # import model settings from activitysim.abm.models.accessibility import AccessibilitySettings @@ -760,7 +761,7 @@ def check_model_settings( for e in all_errors: logger.error(f"\t{str(e)}") file_logger.error(f"\t{str(e)}") - raise RuntimeError( + raise ModelConfigurationError( f"Encountered one or more errors in settings checker. See f{log_file} for details." ) msg = f"Setting Checker Complete. No runtime errors were raised. Check f{log_file} for warnings. These *may* prevent model from successfully running." 
From c44d07124c84ab8db507b514cbabb6931658c8ca Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Wed, 8 Oct 2025 12:59:31 -0700 Subject: [PATCH 09/20] Created `SegmentedSpecificationError` for when there's an issue with a spec table that's created for a specific segment --- activitysim/abm/models/trip_departure_choice.py | 3 ++- activitysim/core/exceptions.py | 5 ++++- activitysim/core/interaction_sample.py | 3 ++- activitysim/core/interaction_sample_simulate.py | 3 ++- activitysim/core/interaction_simulate.py | 3 ++- 5 files changed, 12 insertions(+), 5 deletions(-) diff --git a/activitysim/abm/models/trip_departure_choice.py b/activitysim/abm/models/trip_departure_choice.py index 0e4dd05d9d..7b34f8e742 100644 --- a/activitysim/abm/models/trip_departure_choice.py +++ b/activitysim/abm/models/trip_departure_choice.py @@ -27,6 +27,7 @@ from activitysim.core.skim_dataset import SkimDataset from activitysim.core.skim_dictionary import SkimDict from activitysim.core.util import reindex +from activitysim.core.exceptions import SegmentedSpecificationError logger = logging.getLogger(__name__) @@ -219,7 +220,7 @@ def choose_tour_leg_pattern( ) if len(spec.columns) > 1: - raise RuntimeError("spec must have only one column") + raise SegmentedSpecificationError("spec must have only one column") # - join choosers and alts # in vanilla interaction_simulate interaction_df is cross join of choosers and alternatives diff --git a/activitysim/core/exceptions.py b/activitysim/core/exceptions.py index 1e877cd35b..7b6b3bd8da 100644 --- a/activitysim/core/exceptions.py +++ b/activitysim/core/exceptions.py @@ -78,4 +78,7 @@ class InputPopulationError(RuntimeError): """An issue with the input population was found.""" class SubprocessError(RuntimeError): - """An error occurred in a subprocess.""" \ No newline at end of file + """An error occurred in a subprocess.""" + +class SegmentedSpecificationError(RuntimeError): + """An error was caused by creating an invalid spec table for a segmented 
model component.""" \ No newline at end of file diff --git a/activitysim/core/interaction_sample.py b/activitysim/core/interaction_sample.py index 8db19a416d..52116638ba 100644 --- a/activitysim/core/interaction_sample.py +++ b/activitysim/core/interaction_sample.py @@ -20,6 +20,7 @@ from activitysim.core.configuration.base import ComputeSettings from activitysim.core.skim_dataset import DatasetWrapper from activitysim.core.skim_dictionary import SkimWrapper +from activitysim.core.exceptions import SegmentedSpecificationError logger = logging.getLogger(__name__) @@ -219,7 +220,7 @@ def _interaction_sample( ) if len(spec.columns) > 1: - raise RuntimeError("spec must have only one column") + raise SegmentedSpecificationError("spec must have only one column") # if using skims, copy index into the dataframe, so it will be # available as the "destination" for set_skim_wrapper_targets diff --git a/activitysim/core/interaction_sample_simulate.py b/activitysim/core/interaction_sample_simulate.py index 9cdea3292d..7ba9e66ea2 100644 --- a/activitysim/core/interaction_sample_simulate.py +++ b/activitysim/core/interaction_sample_simulate.py @@ -10,6 +10,7 @@ from activitysim.core import chunk, interaction_simulate, logit, tracing, util, workflow from activitysim.core.configuration.base import ComputeSettings from activitysim.core.simulate import set_skim_wrapper_targets +from activitysim.core.exceptions import SegmentedSpecificationError logger = logging.getLogger(__name__) @@ -115,7 +116,7 @@ def _interaction_sample_simulate( ) if len(spec.columns) > 1: - raise RuntimeError("spec must have only one column") + raise SegmentedSpecificationError("spec must have only one column") # if using skims, copy index into the dataframe, so it will be # available as the "destination" for the skims dereference below diff --git a/activitysim/core/interaction_simulate.py b/activitysim/core/interaction_simulate.py index d0af58e77c..6b776c6d94 100644 --- 
a/activitysim/core/interaction_simulate.py +++ b/activitysim/core/interaction_simulate.py @@ -16,6 +16,7 @@ from activitysim.core import chunk, logit, simulate, timing, tracing, util, workflow from activitysim.core.configuration.base import ComputeSettings from activitysim.core.fast_eval import fast_eval +from activitysim.core.exceptions import SegmentedSpecificationError logger = logging.getLogger(__name__) @@ -722,7 +723,7 @@ def _interaction_simulate( ) if len(spec.columns) > 1: - raise RuntimeError("spec must have only one column") + raise SegmentedSpecificationError("spec must have only one column") sample_size = sample_size or len(alternatives) From 92951cf48203dc8934331c9522eb4385a798e4ed Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Mon, 13 Oct 2025 16:02:02 -0700 Subject: [PATCH 10/20] Added TableIndexError --- activitysim/core/exceptions.py | 8 +++++++- activitysim/core/logit.py | 6 +++--- activitysim/core/random.py | 4 ++-- activitysim/core/skim_dictionary.py | 4 ++-- 4 files changed, 14 insertions(+), 8 deletions(-) diff --git a/activitysim/core/exceptions.py b/activitysim/core/exceptions.py index 7b6b3bd8da..6a711dc998 100644 --- a/activitysim/core/exceptions.py +++ b/activitysim/core/exceptions.py @@ -77,8 +77,14 @@ class TableSlicingError(RuntimeError): class InputPopulationError(RuntimeError): """An issue with the input population was found.""" + class SubprocessError(RuntimeError): """An error occurred in a subprocess.""" + class SegmentedSpecificationError(RuntimeError): - """An error was caused by creating an invalid spec table for a segmented model component.""" \ No newline at end of file + """An error was caused by creating an invalid spec table for a segmented model component.""" + + +class TableIndexError(RuntimeError): + """An error related to the index of a table in the pipeline.""" \ No newline at end of file diff --git a/activitysim/core/logit.py b/activitysim/core/logit.py index 83bc9f7aee..9d289bf2ef 100644 --- 
a/activitysim/core/logit.py +++ b/activitysim/core/logit.py @@ -11,7 +11,7 @@ from activitysim.core import tracing, workflow from activitysim.core.choosing import choice_maker from activitysim.core.configuration.logit import LogitNestSpec -from activitysim.core.exceptions import InvalidTravelError, ModelConfigurationError +from activitysim.core.exceptions import InvalidTravelError, ModelConfigurationError, TableIndexError logger = logging.getLogger(__name__) @@ -361,11 +361,11 @@ def interaction_dataset( """ if not choosers.index.is_unique: - raise RuntimeError( + raise TableIndexError( "ERROR: choosers index is not unique, " "sample will not work correctly" ) if not alternatives.index.is_unique: - raise RuntimeError( + raise TableIndexError( "ERROR: alternatives index is not unique, " "sample will not work correctly" ) diff --git a/activitysim/core/random.py b/activitysim/core/random.py index 713f2e1492..41450449e9 100644 --- a/activitysim/core/random.py +++ b/activitysim/core/random.py @@ -10,7 +10,7 @@ import pandas as pd from activitysim.core.util import reindex -from activitysim.core.exceptions import DuplicateLoadableObjectError +from activitysim.core.exceptions import DuplicateLoadableObjectError, TableIndexError from .tracing import print_elapsed_time @@ -396,7 +396,7 @@ def get_channel_for_df(self, df): channel_name = self.index_to_channel.get(df.index.name, None) if channel_name is None: - raise RuntimeError("No channel with index name '%s'" % df.index.name) + raise TableIndexError("No channel with index name '%s'" % df.index.name) return self.channels[channel_name] # step handling diff --git a/activitysim/core/skim_dictionary.py b/activitysim/core/skim_dictionary.py index 020002e252..dd31983e28 100644 --- a/activitysim/core/skim_dictionary.py +++ b/activitysim/core/skim_dictionary.py @@ -10,7 +10,7 @@ import pandas as pd from activitysim.core import workflow -from activitysim.core.exceptions import StateAccessError +from activitysim.core.exceptions 
import StateAccessError, TableIndexError logger = logging.getLogger(__name__) @@ -907,7 +907,7 @@ def get(self, row_ids, col_ids): not_in_skim = not_in_skim.values logger.warning(f"row_ids: {row_ids[not_in_skim]}") logger.warning(f"col_ids: {col_ids[not_in_skim]}") - raise RuntimeError( + raise TableIndexError( f"DataFrameMatrix: {not_in_skim.sum()} row_ids of {len(row_ids)} not in skim." ) From 0dadec19def5720ea2091ab44c3c4cb0e0b58240 Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Mon, 13 Oct 2025 16:09:54 -0700 Subject: [PATCH 11/20] Classified RuntimeError in initialize_tours as InputPopulationError --- activitysim/abm/models/initialize_tours.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/activitysim/abm/models/initialize_tours.py b/activitysim/abm/models/initialize_tours.py index da69e8d227..4ec9a1e97a 100644 --- a/activitysim/abm/models/initialize_tours.py +++ b/activitysim/abm/models/initialize_tours.py @@ -11,6 +11,7 @@ from activitysim.core.configuration import PydanticReadable from activitysim.core.configuration.base import PreprocessorSettings from activitysim.core.input import read_input_table +from activitysim.core.exceptions import InputPopulationError logger = logging.getLogger(__name__) @@ -140,7 +141,7 @@ def initialize_tours( f"{tours_without_persons.sum()} tours out of {len(persons)} without persons\n" f"{pd.Series({'person_id': tours_without_persons.index.values})}" ) - raise RuntimeError(f"{tours_without_persons.sum()} tours with bad person_id") + raise InputPopulationError(f"{tours_without_persons.sum()} tours with bad person_id") if trace_hh_id: state.tracing.trace_df(tours, label="initialize_tours", warn_if_empty=True) From ce0cb70e3c2830d50f763221994e3a1988caecc8 Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Mon, 13 Oct 2025 16:28:33 -0700 Subject: [PATCH 12/20] Created EstimationDataError --- activitysim/core/estimation.py | 8 ++++---- activitysim/core/exceptions.py | 6 +++++- 2 files changed, 9 insertions(+), 5 
deletions(-) diff --git a/activitysim/core/estimation.py b/activitysim/core/estimation.py index abfc36f537..835142cee4 100644 --- a/activitysim/core/estimation.py +++ b/activitysim/core/estimation.py @@ -16,7 +16,7 @@ from activitysim.core.configuration.base import PydanticBase from activitysim.core.util import reindex from activitysim.core.yaml_tools import safe_dump -from activitysim.core.exceptions import DuplicateWorkflowTableError, DuplicateLoadableObjectError +from activitysim.core.exceptions import DuplicateWorkflowTableError, DuplicateLoadableObjectError, EstimationDataError logger = logging.getLogger("estimation") @@ -537,7 +537,7 @@ def write_omnibus_table(self): elif "household_id" in df.columns: df.set_index("household_id", inplace=True) else: - RuntimeError( + EstimationDataError( f"No index column found in omnibus table {omnibus_table}: {df}" ) @@ -946,7 +946,7 @@ def get_survey_values(self, model_values, table_name, column_names): % (missing_columns, table_name) ) print("survey table columns: %s" % (survey_df.columns,)) - raise RuntimeError( + raise EstimationDataError( "missing columns (%s) in survey table %s" % (missing_columns, table_name) ) @@ -999,7 +999,7 @@ def get_survey_values(self, model_values, table_name, column_names): logger.error( "couldn't get_survey_values for %s in %s\n" % (c, table_name) ) - raise RuntimeError( + raise EstimationDataError( "couldn't get_survey_values for %s in %s\n" % (c, table_name) ) diff --git a/activitysim/core/exceptions.py b/activitysim/core/exceptions.py index 6a711dc998..b8b2b62d47 100644 --- a/activitysim/core/exceptions.py +++ b/activitysim/core/exceptions.py @@ -87,4 +87,8 @@ class SegmentedSpecificationError(RuntimeError): class TableIndexError(RuntimeError): - """An error related to the index of a table in the pipeline.""" \ No newline at end of file + """An error related to the index of a table in the pipeline.""" + + +class EstimationDataError(RuntimeError): + """An error related to estimation 
data.""" \ No newline at end of file From 11d0c10f509b928502fe1dc94f98d07ec7b37040 Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Tue, 14 Oct 2025 14:52:47 -0700 Subject: [PATCH 13/20] Improved clarity of a couple error messages --- activitysim/abm/models/util/cdap.py | 2 +- activitysim/core/chunk.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/activitysim/abm/models/util/cdap.py b/activitysim/abm/models/util/cdap.py index b456d61e98..849007bbc2 100644 --- a/activitysim/abm/models/util/cdap.py +++ b/activitysim/abm/models/util/cdap.py @@ -472,7 +472,7 @@ def build_cdap_spec( if not (0 <= row.cardinality <= MAX_INTERACTION_CARDINALITY): raise ModelConfigurationError( - "Bad row cardinality %d for %s" % (row.cardinality, row.slug) + "Bad row cardinality %d for %s. Try checking that all interaction terms include 3 or fewer person types." % (row.cardinality, row.slug) ) # for all other interaction rules, we need to generate a row in the spec for each diff --git a/activitysim/core/chunk.py b/activitysim/core/chunk.py index 7f09187f1f..32d00fd15c 100644 --- a/activitysim/core/chunk.py +++ b/activitysim/core/chunk.py @@ -1078,7 +1078,7 @@ def ledger(self): if mem_monitor is not None: if not mem_monitor.is_alive(): logger.error(f"mem_monitor for {self.trace_label} died!") - raise RuntimeError("bug") + raise RuntimeError(f"mem_monitor for {self.trace_label} died!") if stop_snooping is not None: stop_snooping.set() From faa3a5b48dc954dbb39f1ff83ac3dd7ec45e47ac Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Tue, 14 Oct 2025 15:34:55 -0700 Subject: [PATCH 14/20] Blacken code --- .../abm/models/disaggregate_accessibility.py | 14 +++--- activitysim/abm/models/initialize_tours.py | 4 +- activitysim/abm/models/location_choice.py | 6 +-- .../abm/models/parking_location_choice.py | 4 +- .../abm/models/trip_departure_choice.py | 6 +-- activitysim/abm/models/trip_destination.py | 4 +- activitysim/abm/models/trip_scheduling.py | 12 +++--
.../abm/models/trip_scheduling_choice.py | 6 +-- activitysim/abm/models/util/cdap.py | 9 ++-- .../models/util/school_escort_tours_trips.py | 48 +++++++++---------- .../abm/models/util/tour_scheduling.py | 6 +-- activitysim/abm/models/vehicle_allocation.py | 6 +-- activitysim/abm/models/vehicle_type_choice.py | 6 +-- .../abm/tables/disaggregate_accessibility.py | 6 +-- activitysim/abm/tables/shadow_pricing.py | 20 ++++---- activitysim/core/config.py | 11 +++-- activitysim/core/configuration/filesystem.py | 5 +- activitysim/core/configuration/logit.py | 6 +-- activitysim/core/estimation.py | 6 ++- activitysim/core/exceptions.py | 2 +- activitysim/core/input.py | 13 +++-- .../core/interaction_sample_simulate.py | 6 +-- activitysim/core/logit.py | 10 +++- activitysim/core/los.py | 6 +-- activitysim/core/mp_tasks.py | 12 +++-- activitysim/core/random.py | 4 +- activitysim/core/simulate.py | 14 +++--- activitysim/core/skim_dictionary.py | 1 - activitysim/core/timetable.py | 4 +- activitysim/core/tracing.py | 4 +- activitysim/core/workflow/checkpoint.py | 12 +++-- activitysim/core/workflow/state.py | 12 +++-- .../estimation/test/test_larch_estimation.py | 4 +- .../scripts/three_zone_example_data.py | 16 ++++--- .../scripts/two_zone_example_data.py | 9 ++-- .../three_zone_example_data.py | 16 ++++--- .../two_zone_example_data.py | 9 ++-- .../production_semcog/data_model/enums.py | 1 + .../data_model/input_checks.py | 1 + .../parking_location_choice_at_university.py | 20 ++++---- .../university_location_zone_override.py | 6 +-- .../data_model/enums.py | 1 + .../data_model/input_checks.py | 1 + .../data_model/input_checks_pydantic_dev.py | 1 + activitysim/workflows/steps/cmd/__init__.py | 1 + activitysim/workflows/steps/cmd/dsl.py | 1 + .../steps/contrast/data_inventory.py | 2 +- .../workflows/steps/contrast/runtime.py | 14 ++++-- activitysim/workflows/steps/main.py | 1 + activitysim/workflows/steps/progression.py | 2 +- test/cdap/test_cdap.py | 6 +-- 51 files changed, 238 
insertions(+), 159 deletions(-) diff --git a/activitysim/abm/models/disaggregate_accessibility.py b/activitysim/abm/models/disaggregate_accessibility.py index df229c6ad7..af768b0e11 100644 --- a/activitysim/abm/models/disaggregate_accessibility.py +++ b/activitysim/abm/models/disaggregate_accessibility.py @@ -100,9 +100,9 @@ class DisaggregateAccessibilitySettings(PydanticReadable, extra="forbid"): BASE_RANDOM_SEED: int = 0 add_size_tables: bool = True zone_id_names: dict[str, str] = {"index_col": "zone_id"} - ORIGIN_SAMPLE_METHOD: Literal[ - None, "full", "uniform", "uniform-taz", "kmeans" - ] = None + ORIGIN_SAMPLE_METHOD: Literal[None, "full", "uniform", "uniform-taz", "kmeans"] = ( + None + ) """ The method in which origins are sampled. @@ -622,9 +622,11 @@ def create_proto_pop(self): # Create ID columns, defaults to "%tablename%_id" hhid, perid, tourid = ( - self.params[x]["index_col"] - if len(self.params[x]["index_col"]) > 0 - else x + "_id" + ( + self.params[x]["index_col"] + if len(self.params[x]["index_col"]) > 0 + else x + "_id" + ) for x in klist ) diff --git a/activitysim/abm/models/initialize_tours.py b/activitysim/abm/models/initialize_tours.py index 4ec9a1e97a..e1b1ed7308 100644 --- a/activitysim/abm/models/initialize_tours.py +++ b/activitysim/abm/models/initialize_tours.py @@ -141,7 +141,9 @@ def initialize_tours( f"{tours_without_persons.sum()} tours out of {len(persons)} without persons\n" f"{pd.Series({'person_id': tours_without_persons.index.values})}" ) - raise InputPopulationError(f"{tours_without_persons.sum()} tours with bad person_id") + raise InputPopulationError( + f"{tours_without_persons.sum()} tours with bad person_id" + ) if trace_hh_id: state.tracing.trace_df(tours, label="initialize_tours", warn_if_empty=True) diff --git a/activitysim/abm/models/location_choice.py b/activitysim/abm/models/location_choice.py index 7f032a8ae6..85c64e393f 100644 --- a/activitysim/abm/models/location_choice.py +++ 
b/activitysim/abm/models/location_choice.py @@ -1014,9 +1014,9 @@ def iterate_location_choice( logger.debug(f"{trace_label} max_iterations: {max_iterations}") - save_sample_df = ( - choices_df - ) = None # initialize to None, will be populated in first iteration + save_sample_df = choices_df = ( + None # initialize to None, will be populated in first iteration + ) for iteration in range(1, max_iterations + 1): persons_merged_df_ = persons_merged_df.copy() diff --git a/activitysim/abm/models/parking_location_choice.py b/activitysim/abm/models/parking_location_choice.py index 65aef85ecc..32f3aabee2 100644 --- a/activitysim/abm/models/parking_location_choice.py +++ b/activitysim/abm/models/parking_location_choice.py @@ -501,7 +501,9 @@ def parking_location( # lest they try to put tour samples into the same table if state.is_table(sample_table_name): - raise DuplicateWorkflowTableError("sample table %s already exists" % sample_table_name) + raise DuplicateWorkflowTableError( + "sample table %s already exists" % sample_table_name + ) state.extend_table(sample_table_name, save_sample_df) expressions.annotate_tables( diff --git a/activitysim/abm/models/trip_departure_choice.py b/activitysim/abm/models/trip_departure_choice.py index 7b34f8e742..bfb32139aa 100644 --- a/activitysim/abm/models/trip_departure_choice.py +++ b/activitysim/abm/models/trip_departure_choice.py @@ -132,9 +132,9 @@ def build_patterns(trips, time_windows): ] possible_windows = np.unique(possible_windows, axis=1).transpose() filler = np.full((possible_windows.shape[0], max_trip_count), np.nan) - filler[ - : possible_windows.shape[0], : possible_windows.shape[1] - ] = possible_windows + filler[: possible_windows.shape[0], : possible_windows.shape[1]] = ( + possible_windows + ) patterns.append(filler) pattern_sizes.append(filler.shape[0]) diff --git a/activitysim/abm/models/trip_destination.py b/activitysim/abm/models/trip_destination.py index d33f2efa48..8cdf8c3692 100644 --- 
a/activitysim/abm/models/trip_destination.py +++ b/activitysim/abm/models/trip_destination.py @@ -1746,7 +1746,9 @@ def trip_destination( # lest they try to put tour samples into the same table if state.is_table(sample_table_name): - raise DuplicateWorkflowTableError("sample table %s already exists" % sample_table_name) + raise DuplicateWorkflowTableError( + "sample table %s already exists" % sample_table_name + ) state.extend_table(sample_table_name, save_sample_df) expressions.annotate_tables( diff --git a/activitysim/abm/models/trip_scheduling.py b/activitysim/abm/models/trip_scheduling.py index 18595f84cd..0ce4f2ffff 100644 --- a/activitysim/abm/models/trip_scheduling.py +++ b/activitysim/abm/models/trip_scheduling.py @@ -585,9 +585,11 @@ def trip_scheduling( i = 0 while (i < max_iterations) and not trips_chunk.empty: # only chunk log first iteration since memory use declines with each iteration - with chunk.chunk_log( - state, trace_label - ) if i == 0 else chunk.chunk_log_skip(): + with ( + chunk.chunk_log(state, trace_label) + if i == 0 + else chunk.chunk_log_skip() + ): i += 1 is_last_iteration = i == max_iterations @@ -616,7 +618,9 @@ def trip_scheduling( logger.info("%s %s failed", trace_label_i, failed.sum()) if (failed.sum() > 0) & (model_settings.scheduling_mode == "relative"): - raise InvalidTravelError("failed trips with relative scheduling mode") + raise InvalidTravelError( + "failed trips with relative scheduling mode" + ) if not is_last_iteration: # boolean series of trips whose leg scheduling failed diff --git a/activitysim/abm/models/trip_scheduling_choice.py b/activitysim/abm/models/trip_scheduling_choice.py index 510d4ece8d..033c9b2b60 100644 --- a/activitysim/abm/models/trip_scheduling_choice.py +++ b/activitysim/abm/models/trip_scheduling_choice.py @@ -251,9 +251,9 @@ def run_trip_scheduling_choice( tours.loc[tours[HAS_OB_STOPS] != tours[HAS_IB_STOPS], NUM_ALTERNATIVES] = ( tours[TOUR_DURATION_COLUMN] + 1 ) - tours.loc[ - tours[HAS_OB_STOPS] 
& tours[HAS_IB_STOPS], NUM_ALTERNATIVES - ] = tours.apply(lambda x: alt_sizes[1, x.duration], axis=1) + tours.loc[tours[HAS_OB_STOPS] & tours[HAS_IB_STOPS], NUM_ALTERNATIVES] = ( + tours.apply(lambda x: alt_sizes[1, x.duration], axis=1) + ) # If no intermediate stops on the tour, then then main leg duration # equals the tour duration and the intermediate durations are zero diff --git a/activitysim/abm/models/util/cdap.py b/activitysim/abm/models/util/cdap.py index 849007bbc2..6d377b04ff 100644 --- a/activitysim/abm/models/util/cdap.py +++ b/activitysim/abm/models/util/cdap.py @@ -150,9 +150,9 @@ def assign_cdap_rank( ) # tag the backfilled persons - persons.loc[ - others[others.cdap_rank == RANK_UNASSIGNED].index, "cdap_rank" - ] = RANK_BACKFILL + persons.loc[others[others.cdap_rank == RANK_UNASSIGNED].index, "cdap_rank"] = ( + RANK_BACKFILL + ) del others # assign person number in cdapPersonArray preference order @@ -472,7 +472,8 @@ def build_cdap_spec( if not (0 <= row.cardinality <= MAX_INTERACTION_CARDINALITY): raise ModelConfigurationError( - "Bad row cardinality %d for %s. Try checking that all interaction terms include 3 or fewer person types." % (row.cardinality, row.slug) + "Bad row cardinality %d for %s. Try checking that all interaction terms include 3 or fewer person types." 
+ % (row.cardinality, row.slug) ) # for all other interaction rules, we need to generate a row in the spec for each diff --git a/activitysim/abm/models/util/school_escort_tours_trips.py b/activitysim/abm/models/util/school_escort_tours_trips.py index 665844023f..902cb12325 100644 --- a/activitysim/abm/models/util/school_escort_tours_trips.py +++ b/activitysim/abm/models/util/school_escort_tours_trips.py @@ -165,15 +165,15 @@ def join_attributes(df, column_names): ).sum(axis=1) # school_destinations, school_starts, school_ends, and school_tour_ids are concatenated - bundles.loc[ - filtered_bundles.index, "school_destinations" - ] = join_attributes( - filtered_bundles, - [ - f"school_destination_child{first_child}", - f"school_destination_child{second_child}", - f"school_destination_child{third_child}", - ], + bundles.loc[filtered_bundles.index, "school_destinations"] = ( + join_attributes( + filtered_bundles, + [ + f"school_destination_child{first_child}", + f"school_destination_child{second_child}", + f"school_destination_child{third_child}", + ], + ) ) bundles.loc[filtered_bundles.index, "school_starts"] = join_attributes( @@ -194,15 +194,15 @@ def join_attributes(df, column_names): ], ) - bundles.loc[ - filtered_bundles.index, "school_tour_ids" - ] = join_attributes( - filtered_bundles, - [ - f"school_tour_id_child{first_child}", - f"school_tour_id_child{second_child}", - f"school_tour_id_child{third_child}", - ], + bundles.loc[filtered_bundles.index, "school_tour_ids"] = ( + join_attributes( + filtered_bundles, + [ + f"school_tour_id_child{first_child}", + f"school_tour_id_child{second_child}", + f"school_tour_id_child{third_child}", + ], + ) ) bundles.drop(columns=["first_child", "second_child", "third_child"], inplace=True) @@ -399,9 +399,9 @@ def create_chauf_escort_trips(bundles): ~chauf_trips["primary_purpose"].isna() ), f"Missing tour purpose for {chauf_trips[chauf_trips['primary_purpose'].isna()]}" - chauf_trips.loc[ - chauf_trips["purpose"] == "home", 
"trip_num" - ] = 999 # trips home are always last + chauf_trips.loc[chauf_trips["purpose"] == "home", "trip_num"] = ( + 999 # trips home are always last + ) chauf_trips.sort_values( by=["household_id", "tour_id", "outbound", "trip_num"], ascending=[True, True, False, True], @@ -559,9 +559,9 @@ def create_escortee_trips(bundles): id_cols = ["household_id", "person_id", "tour_id"] escortee_trips[id_cols] = escortee_trips[id_cols].astype("int64") - escortee_trips.loc[ - escortee_trips["purpose"] == "home", "trip_num" - ] = 999 # trips home are always last + escortee_trips.loc[escortee_trips["purpose"] == "home", "trip_num"] = ( + 999 # trips home are always last + ) escortee_trips.sort_values( by=["household_id", "tour_id", "outbound", "trip_num"], ascending=[True, True, False, True], diff --git a/activitysim/abm/models/util/tour_scheduling.py b/activitysim/abm/models/util/tour_scheduling.py index 0a7c6675d1..9e73b19635 100644 --- a/activitysim/abm/models/util/tour_scheduling.py +++ b/activitysim/abm/models/util/tour_scheduling.py @@ -79,9 +79,9 @@ def run_tour_scheduling( specs[spec_segment_name] = simulate.eval_coefficients( state, model_spec, coefficients_df, estimator ) - compute_settings[ - spec_segment_name - ] = spec_settings.compute_settings.subcomponent_settings(spec_segment_name) + compute_settings[spec_segment_name] = ( + spec_settings.compute_settings.subcomponent_settings(spec_segment_name) + ) if estimator: estimators[spec_segment_name] = estimator # add to local list diff --git a/activitysim/abm/models/vehicle_allocation.py b/activitysim/abm/models/vehicle_allocation.py index a3f04037c0..2d99e8aab4 100644 --- a/activitysim/abm/models/vehicle_allocation.py +++ b/activitysim/abm/models/vehicle_allocation.py @@ -255,9 +255,9 @@ def vehicle_allocation( # set choice for non-household vehicle option choices["choice"] = choices["choice"].astype(veh_choice_dtype) - choices.loc[ - choices["alt_choice"] == alts_from_spec[-1], "choice" - ] = alts_from_spec[-1] + 
choices.loc[choices["alt_choice"] == alts_from_spec[-1], "choice"] = ( + alts_from_spec[-1] + ) # creating a column for choice of each occupancy level tours_veh_occup_col = f"vehicle_occup_{occup}" diff --git a/activitysim/abm/models/vehicle_type_choice.py b/activitysim/abm/models/vehicle_type_choice.py index 5347b5bb79..f096e45dd8 100644 --- a/activitysim/abm/models/vehicle_type_choice.py +++ b/activitysim/abm/models/vehicle_type_choice.py @@ -566,9 +566,9 @@ class VehicleTypeChoiceSettings(LogitComponentSettings, extra="forbid"): PROBS_SPEC: str | None = None combinatorial_alts: dict | None = None alts_preprocessor: PreprocessorSettings | None = None - SIMULATION_TYPE: Literal[ - "simple_simulate", "interaction_simulate" - ] = "interaction_simulate" + SIMULATION_TYPE: Literal["simple_simulate", "interaction_simulate"] = ( + "interaction_simulate" + ) COLS_TO_INCLUDE_IN_VEHICLE_TABLE: list[str] = [] COLS_TO_INCLUDE_IN_CHOOSER_TABLE: list[str] = [] diff --git a/activitysim/abm/tables/disaggregate_accessibility.py b/activitysim/abm/tables/disaggregate_accessibility.py index 7828e1c4c0..a13f77ff01 100644 --- a/activitysim/abm/tables/disaggregate_accessibility.py +++ b/activitysim/abm/tables/disaggregate_accessibility.py @@ -190,9 +190,9 @@ def disaggregate_accessibility(state: workflow.State) -> pd.DataFrame: ) # Copy home_zone_id in proto-table to match the temporary 'nearest_zone_id' - proto_accessibility_df[ - "nearest_accessibility_zone_id" - ] = proto_accessibility_df.home_zone_id + proto_accessibility_df["nearest_accessibility_zone_id"] = ( + proto_accessibility_df.home_zone_id + ) # Set up the useful columns exact_cols = merging_params.get("by", []) diff --git a/activitysim/abm/tables/shadow_pricing.py b/activitysim/abm/tables/shadow_pricing.py index 5d58e56864..fa2832181c 100644 --- a/activitysim/abm/tables/shadow_pricing.py +++ b/activitysim/abm/tables/shadow_pricing.py @@ -905,7 +905,10 @@ def update_shadow_prices(self, state): self.sampled_persons = 
sampled_persons else: - raise SystemConfigurationError("unknown SHADOW_PRICE_METHOD %s, method must be one of 'ctramp', 'daysim', or 'simulation'" % shadow_price_method) + raise SystemConfigurationError( + "unknown SHADOW_PRICE_METHOD %s, method must be one of 'ctramp', 'daysim', or 'simulation'" + % shadow_price_method + ) def dest_size_terms(self, segment): assert segment in self.segment_ids @@ -924,7 +927,8 @@ def dest_size_terms(self, segment): utility_adjustment = self.shadow_prices[segment] else: raise SystemConfigurationError( - "unknown SHADOW_PRICE_METHOD %s, method must be one of 'ctramp', 'daysim', or 'simulation'" % shadow_price_method + "unknown SHADOW_PRICE_METHOD %s, method must be one of 'ctramp', 'daysim', or 'simulation'" + % shadow_price_method ) size_terms = pd.DataFrame( @@ -1037,9 +1041,7 @@ def buffers_for_shadow_pricing(shadow_pricing_info): if np.issubdtype(dtype, np.int64): typecode = ctypes.c_int64 else: - raise TypeError( - "buffer_for_shadow_pricing unrecognized dtype %s" % dtype - ) + raise TypeError("buffer_for_shadow_pricing unrecognized dtype %s" % dtype) shared_data_buffer = multiprocessing.Array(typecode, buffer_size) @@ -1086,9 +1088,7 @@ def buffers_for_shadow_pricing_choice(state, shadow_pricing_choice_info): if np.issubdtype(dtype, np.int64): typecode = ctypes.c_int64 else: - raise TypeError( - "buffer_for_shadow_pricing unrecognized dtype %s" % dtype - ) + raise TypeError("buffer_for_shadow_pricing unrecognized dtype %s" % dtype) shared_data_buffer = multiprocessing.Array(typecode, buffer_size) @@ -1201,7 +1201,9 @@ def shadow_price_data_from_buffers(data_buffers, shadow_pricing_info, model_sele ) if block_name(model_selector) not in data_buffers: - raise MissingNameError("Block %s not in data_buffers" % block_name(model_selector)) + raise MissingNameError( + "Block %s not in data_buffers" % block_name(model_selector) + ) shape = block_shapes[model_selector] data = data_buffers[block_name(model_selector)] diff --git 
a/activitysim/core/config.py b/activitysim/core/config.py index a88bc6233b..e395769160 100644 --- a/activitysim/core/config.py +++ b/activitysim/core/config.py @@ -101,7 +101,7 @@ def get_model_constants(model_settings): def get_logit_model_settings( - model_settings: LogitComponentSettings | dict[str, Any] | None + model_settings: LogitComponentSettings | dict[str, Any] | None, ): """ Read nest spec (for nested logit) from model settings file @@ -124,13 +124,18 @@ def get_logit_model_settings( if logit_type not in ["NL", "MNL"]: logger.error("Unrecognized logit type '%s'" % logit_type) - raise ModelConfigurationError("Unrecognized logit type '%s'. Logit type must be 'NL' for nested logit or 'MNL' for multinomial logit" % logit_type) + raise ModelConfigurationError( + "Unrecognized logit type '%s'. Logit type must be 'NL' for nested logit or 'MNL' for multinomial logit" + % logit_type + ) if logit_type == "NL": nests = model_settings.get("NESTS", None) if nests is None: logger.error("No NEST found in model spec for NL model type") - raise ModelConfigurationError("No NEST found in model spec for NL model type") + raise ModelConfigurationError( + "No NEST found in model spec for NL model type" + ) return nests diff --git a/activitysim/core/configuration/filesystem.py b/activitysim/core/configuration/filesystem.py index b766a81f27..75bd761365 100644 --- a/activitysim/core/configuration/filesystem.py +++ b/activitysim/core/configuration/filesystem.py @@ -16,7 +16,10 @@ from activitysim.core.configuration.base import PydanticBase from activitysim.core.configuration.logit import LogitComponentSettings -from activitysim.core.exceptions import SettingsFileNotFoundError, SystemConfigurationError +from activitysim.core.exceptions import ( + SettingsFileNotFoundError, + SystemConfigurationError, +) from activitysim.core.util import parse_suffix_args, suffix_tables_in_settings logger = logging.getLogger(__name__) diff --git a/activitysim/core/configuration/logit.py 
b/activitysim/core/configuration/logit.py index a97143f2dd..0416a96482 100644 --- a/activitysim/core/configuration/logit.py +++ b/activitysim/core/configuration/logit.py @@ -278,7 +278,7 @@ class TourModeComponentSettings(TemplatedLogitComponentSettings, extra="forbid") COMPUTE_TRIP_MODE_CHOICE_LOGSUMS: bool = False tvpb_mode_path_types: dict[str, Any] | None = None FORCE_ESCORTEE_CHAUFFEUR_MODE_MATCH: bool = True - nontour_preprocessor: PreprocessorSettings | list[ - PreprocessorSettings - ] | None = None + nontour_preprocessor: PreprocessorSettings | list[PreprocessorSettings] | None = ( + None + ) LOGSUM_CHOOSER_COLUMNS: list[str] = [] diff --git a/activitysim/core/estimation.py b/activitysim/core/estimation.py index 835142cee4..bbbe376ee4 100644 --- a/activitysim/core/estimation.py +++ b/activitysim/core/estimation.py @@ -16,7 +16,11 @@ from activitysim.core.configuration.base import PydanticBase from activitysim.core.util import reindex from activitysim.core.yaml_tools import safe_dump -from activitysim.core.exceptions import DuplicateWorkflowTableError, DuplicateLoadableObjectError, EstimationDataError +from activitysim.core.exceptions import ( + DuplicateWorkflowTableError, + DuplicateLoadableObjectError, + EstimationDataError, +) logger = logging.getLogger("estimation") diff --git a/activitysim/core/exceptions.py b/activitysim/core/exceptions.py index b8b2b62d47..6eb70412d3 100644 --- a/activitysim/core/exceptions.py +++ b/activitysim/core/exceptions.py @@ -91,4 +91,4 @@ class TableIndexError(RuntimeError): class EstimationDataError(RuntimeError): - """An error related to estimation data.""" \ No newline at end of file + """An error related to estimation data.""" diff --git a/activitysim/core/input.py b/activitysim/core/input.py index 6ac0794ce6..ecac76b07b 100644 --- a/activitysim/core/input.py +++ b/activitysim/core/input.py @@ -10,7 +10,10 @@ from activitysim.core import util, workflow from activitysim.core.configuration import InputTable -from 
activitysim.core.exceptions import MissingInputTableDefinition, ModelConfigurationError +from activitysim.core.exceptions import ( + MissingInputTableDefinition, + ModelConfigurationError, +) logger = logging.getLogger(__name__) @@ -204,7 +207,9 @@ def read_from_table_info(table_info: InputTable, state): f"index_col '{index_col}' specified in configs but not in {tablename} table!" ) logger.error(f"{tablename} columns are: {list(df.columns)}") - raise ModelConfigurationError(f"index_col '{index_col}' not in {tablename} table!") + raise ModelConfigurationError( + f"index_col '{index_col}' not in {tablename} table!" + ) if keep_columns: logger.debug("keeping columns: %s" % keep_columns) @@ -214,7 +219,9 @@ def read_from_table_info(table_info: InputTable, state): f"{list(set(keep_columns).difference(set(df.columns)))}" ) logger.error(f"{tablename} table has columns: {list(df.columns)}") - raise ModelConfigurationError(f"Required columns missing from {tablename} table") + raise ModelConfigurationError( + f"Required columns missing from {tablename} table" + ) df = df[keep_columns] diff --git a/activitysim/core/interaction_sample_simulate.py b/activitysim/core/interaction_sample_simulate.py index 7ba9e66ea2..e153c150cd 100644 --- a/activitysim/core/interaction_sample_simulate.py +++ b/activitysim/core/interaction_sample_simulate.py @@ -177,9 +177,9 @@ def _interaction_sample_simulate( if log_alt_losers: # logit.interaction_dataset adds ALT_CHOOSER_ID column if log_alt_losers is True # to enable detection of zero_prob-driving utils (e.g. 
-999 for all alts in a chooser) - interaction_df[ - interaction_simulate.ALT_CHOOSER_ID - ] = interaction_df.index.values + interaction_df[interaction_simulate.ALT_CHOOSER_ID] = ( + interaction_df.index.values + ) chunk_sizer.log_df(trace_label, "interaction_df", interaction_df) diff --git a/activitysim/core/logit.py b/activitysim/core/logit.py index 9d289bf2ef..105e18fecc 100644 --- a/activitysim/core/logit.py +++ b/activitysim/core/logit.py @@ -11,7 +11,11 @@ from activitysim.core import tracing, workflow from activitysim.core.choosing import choice_maker from activitysim.core.configuration.logit import LogitNestSpec -from activitysim.core.exceptions import InvalidTravelError, ModelConfigurationError, TableIndexError +from activitysim.core.exceptions import ( + InvalidTravelError, + ModelConfigurationError, + TableIndexError, +) logger = logging.getLogger(__name__) @@ -569,7 +573,9 @@ def each_nest(nest_spec: dict | LogitNestSpec, type=None, post_order=False): Nest object with info about the current node (nest or leaf) """ if type is not None and type not in Nest.nest_types(): - raise ModelConfigurationError("Unknown nest type '%s' in call to each_nest" % type) + raise ModelConfigurationError( + "Unknown nest type '%s' in call to each_nest" % type + ) if isinstance(nest_spec, dict): nest_spec = LogitNestSpec.model_validate(nest_spec) diff --git a/activitysim/core/los.py b/activitysim/core/los.py index 5ac90f930e..c4a6793c82 100644 --- a/activitysim/core/los.py +++ b/activitysim/core/los.py @@ -698,9 +698,9 @@ def allocate_shared_skim_buffers(self): if self.zone_system == THREE_ZONE: assert self.tvpb is not None - skim_buffers[ - self.tvpb.tap_cache.cache_tag - ] = self.tvpb.tap_cache.allocate_data_buffer(shared=True) + skim_buffers[self.tvpb.tap_cache.cache_tag] = ( + self.tvpb.tap_cache.allocate_data_buffer(shared=True) + ) return skim_buffers diff --git a/activitysim/core/mp_tasks.py b/activitysim/core/mp_tasks.py index c89a14cd6e..42b1ab444c 100644 --- 
a/activitysim/core/mp_tasks.py +++ b/activitysim/core/mp_tasks.py @@ -441,7 +441,9 @@ def build_slice_rules(state: workflow.State, slice_info, pipeline_tables): tables[table_name] = pipeline_tables[table_name] if primary_slicer not in tables: - raise SystemConfigurationError("primary slice table '%s' not in pipeline" % primary_slicer) + raise SystemConfigurationError( + "primary slice table '%s' not in pipeline" % primary_slicer + ) # allow wildcard 'True' to avoid slicing (or coalescing) any tables no explicitly listed in slice_info.tables # populationsim uses slice.except wildcards to avoid listing control tables (etc) that should not be sliced, @@ -543,7 +545,9 @@ def apportion_pipeline(state: workflow.State, sub_proc_names, step_info): "last_checkpoint_in_previous_multiprocess_step", None ) if last_checkpoint_in_previous_multiprocess_step is None: - raise CheckpointNameNotFoundError("missing last_checkpoint_in_previous_multiprocess_step") + raise CheckpointNameNotFoundError( + "missing last_checkpoint_in_previous_multiprocess_step" + ) state.checkpoint.restore(resume_after=last_checkpoint_in_previous_multiprocess_step) # ensure all tables are in the pipeline @@ -1788,7 +1792,9 @@ def get_breadcrumbs(state: workflow.State, run_list): # - can't resume multiprocess without breadcrumbs file if not breadcrumbs: error(state, f"empty breadcrumbs for resume_after '{resume_after}'") - raise CheckpointNameNotFoundError("empty breadcrumbs for resume_after '%s'" % resume_after) + raise CheckpointNameNotFoundError( + "empty breadcrumbs for resume_after '%s'" % resume_after + ) # if resume_after is specified by name if resume_after != LAST_CHECKPOINT: diff --git a/activitysim/core/random.py b/activitysim/core/random.py index 41450449e9..37b1976403 100644 --- a/activitysim/core/random.py +++ b/activitysim/core/random.py @@ -529,7 +529,9 @@ def set_base_seed(self, seed=None): """ if self.step_name is not None or self.channels: - raise DuplicateLoadableObjectError("Can only 
call set_base_seed before the first step.") + raise DuplicateLoadableObjectError( + "Can only call set_base_seed before the first step." + ) assert len(list(self.channels.keys())) == 0 diff --git a/activitysim/core/simulate.py b/activitysim/core/simulate.py index 479023451d..fffd246319 100644 --- a/activitysim/core/simulate.py +++ b/activitysim/core/simulate.py @@ -1012,9 +1012,7 @@ def set_skim_wrapper_targets(df, skims): skims = ( skims if isinstance(skims, list) - else skims.values() - if isinstance(skims, dict) - else [skims] + else skims.values() if isinstance(skims, dict) else [skims] ) # assume any object in skims can be treated as a skim @@ -1639,11 +1637,11 @@ def list_of_skims(skims): return ( skims if isinstance(skims, list) - else skims.values() - if isinstance(skims, dict) - else [skims] - if skims is not None - else [] + else ( + skims.values() + if isinstance(skims, dict) + else [skims] if skims is not None else [] + ) ) return [ diff --git a/activitysim/core/skim_dictionary.py b/activitysim/core/skim_dictionary.py index dd31983e28..59692e3d37 100644 --- a/activitysim/core/skim_dictionary.py +++ b/activitysim/core/skim_dictionary.py @@ -400,7 +400,6 @@ def wrap_3d(self, orig_key, dest_key, dim3_key): class SkimWrapper(object): - """ A SkimWrapper object is an access wrapper around a SkimDict of multiple skim objects, where each object is identified by a key. 
diff --git a/activitysim/core/timetable.py b/activitysim/core/timetable.py index e37ec36efc..cedb3d2673 100644 --- a/activitysim/core/timetable.py +++ b/activitysim/core/timetable.py @@ -464,7 +464,9 @@ def replace_table(self, state: workflow.State): % self.windows_table_name, level=logging.ERROR, ) - raise DuplicateWorkflowTableError("Attempt to replace_table while in transaction") + raise DuplicateWorkflowTableError( + "Attempt to replace_table while in transaction" + ) # get windows_df from bottleneck function in case updates to self.person_window # do not write through to pandas dataframe diff --git a/activitysim/core/tracing.py b/activitysim/core/tracing.py index 270c8d6d95..83c9f876aa 100644 --- a/activitysim/core/tracing.py +++ b/activitysim/core/tracing.py @@ -249,7 +249,9 @@ def slice_ids(df, ids, column=None): except KeyError: # this happens if specified slicer column is not in df # df = df[0:0] - raise TableSlicingError("slice_ids slicer column '%s' not in dataframe" % column) + raise TableSlicingError( + "slice_ids slicer column '%s' not in dataframe" % column + ) return df diff --git a/activitysim/core/workflow/checkpoint.py b/activitysim/core/workflow/checkpoint.py index c3fe7d8ae7..7391e1c9b9 100644 --- a/activitysim/core/workflow/checkpoint.py +++ b/activitysim/core/workflow/checkpoint.py @@ -1212,10 +1212,14 @@ def load_dataframe(self, table_name, checkpoint_name=None): # if there is no checkpoint name given, do not attempt to read from store if checkpoint_name is None: if table_name not in self.last_checkpoint: - raise CheckpointNameNotFoundError("table '%s' never checkpointed." % table_name) + raise CheckpointNameNotFoundError( + "table '%s' never checkpointed." % table_name + ) if not self.last_checkpoint[table_name]: - raise CheckpointNameNotFoundError("table '%s' was dropped." % table_name) + raise CheckpointNameNotFoundError( + "table '%s' was dropped." 
% table_name + ) return self._obj.get_dataframe(table_name) @@ -1225,7 +1229,9 @@ def load_dataframe(self, table_name, checkpoint_name=None): None, ) if checkpoint is None: - raise CheckpointNameNotFoundError("checkpoint '%s' not in checkpoints." % checkpoint_name) + raise CheckpointNameNotFoundError( + "checkpoint '%s' not in checkpoints." % checkpoint_name + ) # find the checkpoint that table was written to store last_checkpoint_name = checkpoint.get(table_name, None) diff --git a/activitysim/core/workflow/state.py b/activitysim/core/workflow/state.py index 2a428f78a0..9f7dcd4d6f 100644 --- a/activitysim/core/workflow/state.py +++ b/activitysim/core/workflow/state.py @@ -1027,10 +1027,14 @@ def get_table(self, table_name, checkpoint_name=None): # if they want current version of table, no need to read from pipeline store if checkpoint_name is None: if table_name not in self.checkpoint.last_checkpoint: - raise CheckpointNameNotFoundError("table '%s' never checkpointed." % table_name) + raise CheckpointNameNotFoundError( + "table '%s' never checkpointed." % table_name + ) if not self.checkpoint.last_checkpoint[table_name]: - raise CheckpointNameNotFoundError("table '%s' was dropped." % table_name) + raise CheckpointNameNotFoundError( + "table '%s' was dropped." % table_name + ) return self._context.get(table_name) @@ -1044,7 +1048,9 @@ def get_table(self, table_name, checkpoint_name=None): None, ) if checkpoint is None: - raise CheckpointNameNotFoundError("checkpoint '%s' not in checkpoints." % checkpoint_name) + raise CheckpointNameNotFoundError( + "checkpoint '%s' not in checkpoints." 
% checkpoint_name + ) # find the checkpoint that table was written to store last_checkpoint_name = checkpoint.get(table_name, None) diff --git a/activitysim/estimation/test/test_larch_estimation.py b/activitysim/estimation/test/test_larch_estimation.py index 2d2427c41b..e688b1098a 100644 --- a/activitysim/estimation/test/test_larch_estimation.py +++ b/activitysim/estimation/test/test_larch_estimation.py @@ -44,7 +44,7 @@ def _regression_check(dataframe_regression, df, basename=None, rtol=None): # pandas 1.3 handles int8 dtypes as actual numbers, so holdfast needs to be dropped manually # we're dropping it not adding to the regression check so older pandas will also work. basename=basename, - default_tolerance=dict(atol=1e-6, rtol=rtol) + default_tolerance=dict(atol=1e-6, rtol=rtol), # can set a little loose, as there is sometimes a little variance in these # results when switching backend implementations. We're checking all # the parameters and the log likelihood, so modest variance in individual @@ -129,7 +129,7 @@ def test_location_model( dataframe_regression.check( size_spec, basename=f"test_loc_{name}_{method}_size_spec", - default_tolerance=dict(atol=1e-6, rtol=5e-2) + default_tolerance=dict(atol=1e-6, rtol=5e-2), # set a little loose, as there is sometimes a little variance in these # results when switching backend implementations. 
) diff --git a/activitysim/examples/placeholder_multiple_zone/scripts/three_zone_example_data.py b/activitysim/examples/placeholder_multiple_zone/scripts/three_zone_example_data.py index 7291c65337..1ce3861681 100644 --- a/activitysim/examples/placeholder_multiple_zone/scripts/three_zone_example_data.py +++ b/activitysim/examples/placeholder_multiple_zone/scripts/three_zone_example_data.py @@ -149,13 +149,15 @@ tap_df.to_csv(os.path.join(output_data, "tap.csv"), index=False) # create taz_z3 and tap skims -with omx.open_file( - os.path.join(input_data, "skims.omx"), "r" -) as ur_skims, omx.open_file( - os.path.join(output_data, "taz_skims.omx"), "w" -) as output_taz_skims_file, omx.open_file( - os.path.join(output_data, "tap_skims.omx"), "w" -) as output_tap_skims_file: +with ( + omx.open_file(os.path.join(input_data, "skims.omx"), "r") as ur_skims, + omx.open_file( + os.path.join(output_data, "taz_skims.omx"), "w" + ) as output_taz_skims_file, + omx.open_file( + os.path.join(output_data, "tap_skims.omx"), "w" + ) as output_tap_skims_file, +): for skim_name in ur_skims.list_matrices(): ur_skim = ur_skims[skim_name][:] new_skim = ur_skim[taz_zone_indexes, :][:, taz_zone_indexes] diff --git a/activitysim/examples/placeholder_multiple_zone/scripts/two_zone_example_data.py b/activitysim/examples/placeholder_multiple_zone/scripts/two_zone_example_data.py index a4c6c46fcb..20c7bccf53 100644 --- a/activitysim/examples/placeholder_multiple_zone/scripts/two_zone_example_data.py +++ b/activitysim/examples/placeholder_multiple_zone/scripts/two_zone_example_data.py @@ -104,11 +104,10 @@ # ### Create taz skims -with omx.open_file( - os.path.join(input_data, "skims.omx"), "r" -) as skims_file, omx.open_file( - os.path.join(output_data, "taz_skims.omx"), "w" -) as output_skims_file: +with ( + omx.open_file(os.path.join(input_data, "skims.omx"), "r") as skims_file, + omx.open_file(os.path.join(output_data, "taz_skims.omx"), "w") as output_skims_file, +): skims = 
skims_file.list_matrices() num_zones = skims_file.shape()[0] diff --git a/activitysim/examples/placeholder_multiple_zone/three_zone_example_data.py b/activitysim/examples/placeholder_multiple_zone/three_zone_example_data.py index 0a43cce220..54224087d6 100644 --- a/activitysim/examples/placeholder_multiple_zone/three_zone_example_data.py +++ b/activitysim/examples/placeholder_multiple_zone/three_zone_example_data.py @@ -143,13 +143,15 @@ tap_df.to_csv(os.path.join(output_data, "tap.csv"), index=False) # create taz_z3 and tap skims -with omx.open_file( - os.path.join(input_data, "skims.omx"), "r" -) as ur_skims, omx.open_file( - os.path.join(output_data, "taz_skims.omx"), "w" -) as output_taz_skims_file, omx.open_file( - os.path.join(output_data, "tap_skims.omx"), "w" -) as output_tap_skims_file: +with ( + omx.open_file(os.path.join(input_data, "skims.omx"), "r") as ur_skims, + omx.open_file( + os.path.join(output_data, "taz_skims.omx"), "w" + ) as output_taz_skims_file, + omx.open_file( + os.path.join(output_data, "tap_skims.omx"), "w" + ) as output_tap_skims_file, +): for skim_name in ur_skims.list_matrices(): ur_skim = ur_skims[skim_name][:] new_skim = ur_skim[taz_zone_indexes, :][:, taz_zone_indexes] diff --git a/activitysim/examples/placeholder_multiple_zone/two_zone_example_data.py b/activitysim/examples/placeholder_multiple_zone/two_zone_example_data.py index fbb26e2aaa..a6f5750a0f 100644 --- a/activitysim/examples/placeholder_multiple_zone/two_zone_example_data.py +++ b/activitysim/examples/placeholder_multiple_zone/two_zone_example_data.py @@ -101,11 +101,10 @@ # ### Create taz skims -with omx.open_file( - os.path.join(input_data, "skims.omx"), "r" -) as skims_file, omx.open_file( - os.path.join(output_data, "taz_skims.omx"), "w" -) as output_skims_file: +with ( + omx.open_file(os.path.join(input_data, "skims.omx"), "r") as skims_file, + omx.open_file(os.path.join(output_data, "taz_skims.omx"), "w") as output_skims_file, +): skims = 
skims_file.list_matrices() num_zones = skims_file.shape()[0] diff --git a/activitysim/examples/production_semcog/data_model/enums.py b/activitysim/examples/production_semcog/data_model/enums.py index 71c4369248..8ae8f9eb4c 100644 --- a/activitysim/examples/production_semcog/data_model/enums.py +++ b/activitysim/examples/production_semcog/data_model/enums.py @@ -3,6 +3,7 @@ Instructions: modify these enumerated variables as needed for your ActivitySim implementation. """ + from enum import IntEnum diff --git a/activitysim/examples/production_semcog/data_model/input_checks.py b/activitysim/examples/production_semcog/data_model/input_checks.py index b9b1f338b1..ce93a86e0c 100644 --- a/activitysim/examples/production_semcog/data_model/input_checks.py +++ b/activitysim/examples/production_semcog/data_model/input_checks.py @@ -3,6 +3,7 @@ Instructions: customize these example values for your own ActivitySim implementation """ + from typing import List, Optional import os, sys, logging diff --git a/activitysim/examples/production_semcog/extensions/parking_location_choice_at_university.py b/activitysim/examples/production_semcog/extensions/parking_location_choice_at_university.py index 49edf8d375..94ec7eaf4f 100644 --- a/activitysim/examples/production_semcog/extensions/parking_location_choice_at_university.py +++ b/activitysim/examples/production_semcog/extensions/parking_location_choice_at_university.py @@ -159,10 +159,10 @@ def parking_location_choice_at_university( tours_nearest_lot = tour_choosers.primary_purpose.isin( nearest_lot_tour_purposes ) & tour_choosers.destination.isin(all_univ_zones) - tour_choosers.loc[ - tours_nearest_lot, "univ_parking_zone_id" - ] = tour_choosers.loc[tours_nearest_lot, "destination"].map( - closest_parking_df["closest_parking_zone"] + tour_choosers.loc[tours_nearest_lot, "univ_parking_zone_id"] = ( + tour_choosers.loc[tours_nearest_lot, "destination"].map( + closest_parking_df["closest_parking_zone"] + ) ) logger.info( @@ -172,12 
+172,12 @@ def parking_location_choice_at_university( ) # Overriding school_zone_id in persons table - trips.loc[ - trips.index.isin(trip_choosers.index), "parked_at_university" - ] = trip_choosers["parked_at_university"] - tours.loc[ - tours.index.isin(tour_choosers.index), "univ_parking_zone_id" - ] = tour_choosers["univ_parking_zone_id"] + trips.loc[trips.index.isin(trip_choosers.index), "parked_at_university"] = ( + trip_choosers["parked_at_university"] + ) + tours.loc[tours.index.isin(tour_choosers.index), "univ_parking_zone_id"] = ( + tour_choosers["univ_parking_zone_id"] + ) state.add_table("trips", trips) state.add_table("tours", tours) diff --git a/activitysim/examples/production_semcog/extensions/university_location_zone_override.py b/activitysim/examples/production_semcog/extensions/university_location_zone_override.py index 37c1b96e7e..698c2d6fb0 100644 --- a/activitysim/examples/production_semcog/extensions/university_location_zone_override.py +++ b/activitysim/examples/production_semcog/extensions/university_location_zone_override.py @@ -140,9 +140,9 @@ def university_location_zone_override( # saving original zone if desired original_zone_col_name = model_settings["ORIGINAL_ZONE_COL_NAME"] if original_zone_col_name is not None: - persons.loc[ - persons.index.isin(choosers.index), original_zone_col_name - ] = choosers[original_zone_col_name] + persons.loc[persons.index.isin(choosers.index), original_zone_col_name] = ( + choosers[original_zone_col_name] + ) state.add_table("persons", persons) diff --git a/activitysim/examples/prototype_mtc_extended/data_model/enums.py b/activitysim/examples/prototype_mtc_extended/data_model/enums.py index 124647e5db..b28960501a 100644 --- a/activitysim/examples/prototype_mtc_extended/data_model/enums.py +++ b/activitysim/examples/prototype_mtc_extended/data_model/enums.py @@ -3,6 +3,7 @@ Instructions: modify these enumerated variables as needed for your ActivitySim implementation. 
""" + from enum import IntEnum diff --git a/activitysim/examples/prototype_mtc_extended/data_model/input_checks.py b/activitysim/examples/prototype_mtc_extended/data_model/input_checks.py index 1cc63e5836..f87767ab69 100644 --- a/activitysim/examples/prototype_mtc_extended/data_model/input_checks.py +++ b/activitysim/examples/prototype_mtc_extended/data_model/input_checks.py @@ -3,6 +3,7 @@ Instructions: customize these example values for your own ActivitySim implementation """ + from __future__ import annotations import csv diff --git a/activitysim/examples/prototype_mtc_extended/data_model/input_checks_pydantic_dev.py b/activitysim/examples/prototype_mtc_extended/data_model/input_checks_pydantic_dev.py index b171119066..f2c3ddb997 100644 --- a/activitysim/examples/prototype_mtc_extended/data_model/input_checks_pydantic_dev.py +++ b/activitysim/examples/prototype_mtc_extended/data_model/input_checks_pydantic_dev.py @@ -3,6 +3,7 @@ Instructions: customize these example values for your own ActivitySim implementation """ + from typing import List, Optional import os, sys, logging diff --git a/activitysim/workflows/steps/cmd/__init__.py b/activitysim/workflows/steps/cmd/__init__.py index e62b19e851..1463609e30 100644 --- a/activitysim/workflows/steps/cmd/__init__.py +++ b/activitysim/workflows/steps/cmd/__init__.py @@ -4,6 +4,7 @@ environment,variable expansion, and expansion of ~ to a user’s home directory. 
""" + import logging from .dsl import CmdStep diff --git a/activitysim/workflows/steps/cmd/dsl.py b/activitysim/workflows/steps/cmd/dsl.py index 1d05a7d3b6..7337eab3ca 100644 --- a/activitysim/workflows/steps/cmd/dsl.py +++ b/activitysim/workflows/steps/cmd/dsl.py @@ -1,4 +1,5 @@ """pypyr step yaml definition for commands - domain specific language.""" + import logging import os import shlex diff --git a/activitysim/workflows/steps/contrast/data_inventory.py b/activitysim/workflows/steps/contrast/data_inventory.py index a3ff172a31..70b5f4a104 100644 --- a/activitysim/workflows/steps/contrast/data_inventory.py +++ b/activitysim/workflows/steps/contrast/data_inventory.py @@ -63,7 +63,7 @@ def run_step(context: Context) -> None: 1 ), index=dtypes_table.index, - ).apply(lambda x: "" if x else "\u2B05") + ).apply(lambda x: "" if x else "\u2b05") report << dtypes_table with report: diff --git a/activitysim/workflows/steps/contrast/runtime.py b/activitysim/workflows/steps/contrast/runtime.py index 8c37163305..3744d43e8e 100644 --- a/activitysim/workflows/steps/contrast/runtime.py +++ b/activitysim/workflows/steps/contrast/runtime.py @@ -67,7 +67,9 @@ def relabel_source(x): ) if len(include_runs) == 1: - result = c.mark_bar(size=6,).encode( + result = c.mark_bar( + size=6, + ).encode( x=alt.X("seconds:Q", stack=None), y=alt.Y("model_name", type="nominal", sort=None), color="source", @@ -80,7 +82,10 @@ def relabel_source(x): ) elif len(include_runs) == 2: - result = c.mark_bar(yOffset=-3, size=6,).transform_filter( + result = c.mark_bar( + yOffset=-3, + size=6, + ).transform_filter( (alt.datum.source == relabel_source(include_runs[0])) ).encode( x=alt.X("seconds:Q", stack=None), @@ -107,7 +112,10 @@ def relabel_source(x): ) elif len(include_runs) == 3: - result = c.mark_bar(yOffset=-5, size=4,).transform_filter( + result = c.mark_bar( + yOffset=-5, + size=4, + ).transform_filter( (alt.datum.source == relabel_source(include_runs[0])) ).encode( x=alt.X("seconds:Q", 
stack=None), diff --git a/activitysim/workflows/steps/main.py b/activitysim/workflows/steps/main.py index ccefaa825b..9859254d2b 100644 --- a/activitysim/workflows/steps/main.py +++ b/activitysim/workflows/steps/main.py @@ -1,4 +1,5 @@ """Naive custom loader without any error handling.""" + from __future__ import annotations import os diff --git a/activitysim/workflows/steps/progression.py b/activitysim/workflows/steps/progression.py index c0832dc46c..d77513bfa8 100644 --- a/activitysim/workflows/steps/progression.py +++ b/activitysim/workflows/steps/progression.py @@ -96,7 +96,7 @@ def update_progress_overall(description, formatting=""): def reset_progress_step(*args, description="", prefix="", **kwargs): if not os.environ.get("NO_RICH", False): - print(f"\u23F1 {time.strftime('%I:%M:%S %p')} - {description}") + print(f"\u23f1 {time.strftime('%I:%M:%S %p')} - {description}") progress.reset(progress_step, *args, description=prefix + description, **kwargs) else: print("╭" + "─" * (len(description) + 2) + "╮") diff --git a/test/cdap/test_cdap.py b/test/cdap/test_cdap.py index 38e2657769..b26078c904 100644 --- a/test/cdap/test_cdap.py +++ b/test/cdap/test_cdap.py @@ -162,9 +162,9 @@ def test_cdap_from_pipeline(reconnect_pipeline: workflow.State, caplog): lambda x: x[:-1].upper() if x.endswith("0") else x.upper() ) household_df["cdap_activity"] = household_df.apply( - lambda x: x["cdap_activity"] + "J" - if x["has_joint_tour"] == 1 - else x["cdap_activity"], + lambda x: ( + x["cdap_activity"] + "J" if x["has_joint_tour"] == 1 else x["cdap_activity"] + ), axis=1, ) From e31a231d256e8eb9df4c16196239eec4d4e7962a Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Tue, 14 Oct 2025 15:52:56 -0700 Subject: [PATCH 15/20] Updated code to reflect older version of Black (22.12.0) --- .../abm/models/disaggregate_accessibility.py | 6 +-- activitysim/abm/models/location_choice.py | 6 +-- .../abm/models/trip_departure_choice.py | 6 +-- .../abm/models/trip_scheduling_choice.py | 6 +-- 
activitysim/abm/models/util/cdap.py | 6 +-- .../models/util/school_escort_tours_trips.py | 48 +++++++++---------- .../abm/models/util/tour_scheduling.py | 6 +-- activitysim/abm/models/vehicle_allocation.py | 6 +-- activitysim/abm/models/vehicle_type_choice.py | 6 +-- .../abm/tables/disaggregate_accessibility.py | 6 +-- activitysim/core/configuration/logit.py | 6 +-- .../core/interaction_sample_simulate.py | 6 +-- activitysim/core/los.py | 6 +-- activitysim/core/simulate.py | 8 +++- .../parking_location_choice_at_university.py | 20 ++++---- .../university_location_zone_override.py | 6 +-- .../workflows/steps/contrast/runtime.py | 14 ++---- 17 files changed, 82 insertions(+), 86 deletions(-) diff --git a/activitysim/abm/models/disaggregate_accessibility.py b/activitysim/abm/models/disaggregate_accessibility.py index af768b0e11..aa2703e19c 100644 --- a/activitysim/abm/models/disaggregate_accessibility.py +++ b/activitysim/abm/models/disaggregate_accessibility.py @@ -100,9 +100,9 @@ class DisaggregateAccessibilitySettings(PydanticReadable, extra="forbid"): BASE_RANDOM_SEED: int = 0 add_size_tables: bool = True zone_id_names: dict[str, str] = {"index_col": "zone_id"} - ORIGIN_SAMPLE_METHOD: Literal[None, "full", "uniform", "uniform-taz", "kmeans"] = ( - None - ) + ORIGIN_SAMPLE_METHOD: Literal[ + None, "full", "uniform", "uniform-taz", "kmeans" + ] = None """ The method in which origins are sampled. 
diff --git a/activitysim/abm/models/location_choice.py b/activitysim/abm/models/location_choice.py index 85c64e393f..7f032a8ae6 100644 --- a/activitysim/abm/models/location_choice.py +++ b/activitysim/abm/models/location_choice.py @@ -1014,9 +1014,9 @@ def iterate_location_choice( logger.debug(f"{trace_label} max_iterations: {max_iterations}") - save_sample_df = choices_df = ( - None # initialize to None, will be populated in first iteration - ) + save_sample_df = ( + choices_df + ) = None # initialize to None, will be populated in first iteration for iteration in range(1, max_iterations + 1): persons_merged_df_ = persons_merged_df.copy() diff --git a/activitysim/abm/models/trip_departure_choice.py b/activitysim/abm/models/trip_departure_choice.py index bfb32139aa..7b34f8e742 100644 --- a/activitysim/abm/models/trip_departure_choice.py +++ b/activitysim/abm/models/trip_departure_choice.py @@ -132,9 +132,9 @@ def build_patterns(trips, time_windows): ] possible_windows = np.unique(possible_windows, axis=1).transpose() filler = np.full((possible_windows.shape[0], max_trip_count), np.nan) - filler[: possible_windows.shape[0], : possible_windows.shape[1]] = ( - possible_windows - ) + filler[ + : possible_windows.shape[0], : possible_windows.shape[1] + ] = possible_windows patterns.append(filler) pattern_sizes.append(filler.shape[0]) diff --git a/activitysim/abm/models/trip_scheduling_choice.py b/activitysim/abm/models/trip_scheduling_choice.py index 033c9b2b60..510d4ece8d 100644 --- a/activitysim/abm/models/trip_scheduling_choice.py +++ b/activitysim/abm/models/trip_scheduling_choice.py @@ -251,9 +251,9 @@ def run_trip_scheduling_choice( tours.loc[tours[HAS_OB_STOPS] != tours[HAS_IB_STOPS], NUM_ALTERNATIVES] = ( tours[TOUR_DURATION_COLUMN] + 1 ) - tours.loc[tours[HAS_OB_STOPS] & tours[HAS_IB_STOPS], NUM_ALTERNATIVES] = ( - tours.apply(lambda x: alt_sizes[1, x.duration], axis=1) - ) + tours.loc[ + tours[HAS_OB_STOPS] & tours[HAS_IB_STOPS], NUM_ALTERNATIVES + ] = 
tours.apply(lambda x: alt_sizes[1, x.duration], axis=1) # If no intermediate stops on the tour, then then main leg duration # equals the tour duration and the intermediate durations are zero diff --git a/activitysim/abm/models/util/cdap.py b/activitysim/abm/models/util/cdap.py index 6d377b04ff..21f42de827 100644 --- a/activitysim/abm/models/util/cdap.py +++ b/activitysim/abm/models/util/cdap.py @@ -150,9 +150,9 @@ def assign_cdap_rank( ) # tag the backfilled persons - persons.loc[others[others.cdap_rank == RANK_UNASSIGNED].index, "cdap_rank"] = ( - RANK_BACKFILL - ) + persons.loc[ + others[others.cdap_rank == RANK_UNASSIGNED].index, "cdap_rank" + ] = RANK_BACKFILL del others # assign person number in cdapPersonArray preference order diff --git a/activitysim/abm/models/util/school_escort_tours_trips.py b/activitysim/abm/models/util/school_escort_tours_trips.py index 902cb12325..665844023f 100644 --- a/activitysim/abm/models/util/school_escort_tours_trips.py +++ b/activitysim/abm/models/util/school_escort_tours_trips.py @@ -165,15 +165,15 @@ def join_attributes(df, column_names): ).sum(axis=1) # school_destinations, school_starts, school_ends, and school_tour_ids are concatenated - bundles.loc[filtered_bundles.index, "school_destinations"] = ( - join_attributes( - filtered_bundles, - [ - f"school_destination_child{first_child}", - f"school_destination_child{second_child}", - f"school_destination_child{third_child}", - ], - ) + bundles.loc[ + filtered_bundles.index, "school_destinations" + ] = join_attributes( + filtered_bundles, + [ + f"school_destination_child{first_child}", + f"school_destination_child{second_child}", + f"school_destination_child{third_child}", + ], ) bundles.loc[filtered_bundles.index, "school_starts"] = join_attributes( @@ -194,15 +194,15 @@ def join_attributes(df, column_names): ], ) - bundles.loc[filtered_bundles.index, "school_tour_ids"] = ( - join_attributes( - filtered_bundles, - [ - f"school_tour_id_child{first_child}", - 
f"school_tour_id_child{second_child}", - f"school_tour_id_child{third_child}", - ], - ) + bundles.loc[ + filtered_bundles.index, "school_tour_ids" + ] = join_attributes( + filtered_bundles, + [ + f"school_tour_id_child{first_child}", + f"school_tour_id_child{second_child}", + f"school_tour_id_child{third_child}", + ], ) bundles.drop(columns=["first_child", "second_child", "third_child"], inplace=True) @@ -399,9 +399,9 @@ def create_chauf_escort_trips(bundles): ~chauf_trips["primary_purpose"].isna() ), f"Missing tour purpose for {chauf_trips[chauf_trips['primary_purpose'].isna()]}" - chauf_trips.loc[chauf_trips["purpose"] == "home", "trip_num"] = ( - 999 # trips home are always last - ) + chauf_trips.loc[ + chauf_trips["purpose"] == "home", "trip_num" + ] = 999 # trips home are always last chauf_trips.sort_values( by=["household_id", "tour_id", "outbound", "trip_num"], ascending=[True, True, False, True], @@ -559,9 +559,9 @@ def create_escortee_trips(bundles): id_cols = ["household_id", "person_id", "tour_id"] escortee_trips[id_cols] = escortee_trips[id_cols].astype("int64") - escortee_trips.loc[escortee_trips["purpose"] == "home", "trip_num"] = ( - 999 # trips home are always last - ) + escortee_trips.loc[ + escortee_trips["purpose"] == "home", "trip_num" + ] = 999 # trips home are always last escortee_trips.sort_values( by=["household_id", "tour_id", "outbound", "trip_num"], ascending=[True, True, False, True], diff --git a/activitysim/abm/models/util/tour_scheduling.py b/activitysim/abm/models/util/tour_scheduling.py index 9e73b19635..0a7c6675d1 100644 --- a/activitysim/abm/models/util/tour_scheduling.py +++ b/activitysim/abm/models/util/tour_scheduling.py @@ -79,9 +79,9 @@ def run_tour_scheduling( specs[spec_segment_name] = simulate.eval_coefficients( state, model_spec, coefficients_df, estimator ) - compute_settings[spec_segment_name] = ( - spec_settings.compute_settings.subcomponent_settings(spec_segment_name) - ) + compute_settings[ + spec_segment_name + ] = 
spec_settings.compute_settings.subcomponent_settings(spec_segment_name) if estimator: estimators[spec_segment_name] = estimator # add to local list diff --git a/activitysim/abm/models/vehicle_allocation.py b/activitysim/abm/models/vehicle_allocation.py index 2d99e8aab4..a3f04037c0 100644 --- a/activitysim/abm/models/vehicle_allocation.py +++ b/activitysim/abm/models/vehicle_allocation.py @@ -255,9 +255,9 @@ def vehicle_allocation( # set choice for non-household vehicle option choices["choice"] = choices["choice"].astype(veh_choice_dtype) - choices.loc[choices["alt_choice"] == alts_from_spec[-1], "choice"] = ( - alts_from_spec[-1] - ) + choices.loc[ + choices["alt_choice"] == alts_from_spec[-1], "choice" + ] = alts_from_spec[-1] # creating a column for choice of each occupancy level tours_veh_occup_col = f"vehicle_occup_{occup}" diff --git a/activitysim/abm/models/vehicle_type_choice.py b/activitysim/abm/models/vehicle_type_choice.py index f096e45dd8..5347b5bb79 100644 --- a/activitysim/abm/models/vehicle_type_choice.py +++ b/activitysim/abm/models/vehicle_type_choice.py @@ -566,9 +566,9 @@ class VehicleTypeChoiceSettings(LogitComponentSettings, extra="forbid"): PROBS_SPEC: str | None = None combinatorial_alts: dict | None = None alts_preprocessor: PreprocessorSettings | None = None - SIMULATION_TYPE: Literal["simple_simulate", "interaction_simulate"] = ( - "interaction_simulate" - ) + SIMULATION_TYPE: Literal[ + "simple_simulate", "interaction_simulate" + ] = "interaction_simulate" COLS_TO_INCLUDE_IN_VEHICLE_TABLE: list[str] = [] COLS_TO_INCLUDE_IN_CHOOSER_TABLE: list[str] = [] diff --git a/activitysim/abm/tables/disaggregate_accessibility.py b/activitysim/abm/tables/disaggregate_accessibility.py index a13f77ff01..7828e1c4c0 100644 --- a/activitysim/abm/tables/disaggregate_accessibility.py +++ b/activitysim/abm/tables/disaggregate_accessibility.py @@ -190,9 +190,9 @@ def disaggregate_accessibility(state: workflow.State) -> pd.DataFrame: ) # Copy home_zone_id in 
proto-table to match the temporary 'nearest_zone_id' - proto_accessibility_df["nearest_accessibility_zone_id"] = ( - proto_accessibility_df.home_zone_id - ) + proto_accessibility_df[ + "nearest_accessibility_zone_id" + ] = proto_accessibility_df.home_zone_id # Set up the useful columns exact_cols = merging_params.get("by", []) diff --git a/activitysim/core/configuration/logit.py b/activitysim/core/configuration/logit.py index 0416a96482..a97143f2dd 100644 --- a/activitysim/core/configuration/logit.py +++ b/activitysim/core/configuration/logit.py @@ -278,7 +278,7 @@ class TourModeComponentSettings(TemplatedLogitComponentSettings, extra="forbid") COMPUTE_TRIP_MODE_CHOICE_LOGSUMS: bool = False tvpb_mode_path_types: dict[str, Any] | None = None FORCE_ESCORTEE_CHAUFFEUR_MODE_MATCH: bool = True - nontour_preprocessor: PreprocessorSettings | list[PreprocessorSettings] | None = ( - None - ) + nontour_preprocessor: PreprocessorSettings | list[ + PreprocessorSettings + ] | None = None LOGSUM_CHOOSER_COLUMNS: list[str] = [] diff --git a/activitysim/core/interaction_sample_simulate.py b/activitysim/core/interaction_sample_simulate.py index e153c150cd..7ba9e66ea2 100644 --- a/activitysim/core/interaction_sample_simulate.py +++ b/activitysim/core/interaction_sample_simulate.py @@ -177,9 +177,9 @@ def _interaction_sample_simulate( if log_alt_losers: # logit.interaction_dataset adds ALT_CHOOSER_ID column if log_alt_losers is True # to enable detection of zero_prob-driving utils (e.g. 
-999 for all alts in a chooser) - interaction_df[interaction_simulate.ALT_CHOOSER_ID] = ( - interaction_df.index.values - ) + interaction_df[ + interaction_simulate.ALT_CHOOSER_ID + ] = interaction_df.index.values chunk_sizer.log_df(trace_label, "interaction_df", interaction_df) diff --git a/activitysim/core/los.py b/activitysim/core/los.py index c4a6793c82..5ac90f930e 100644 --- a/activitysim/core/los.py +++ b/activitysim/core/los.py @@ -698,9 +698,9 @@ def allocate_shared_skim_buffers(self): if self.zone_system == THREE_ZONE: assert self.tvpb is not None - skim_buffers[self.tvpb.tap_cache.cache_tag] = ( - self.tvpb.tap_cache.allocate_data_buffer(shared=True) - ) + skim_buffers[ + self.tvpb.tap_cache.cache_tag + ] = self.tvpb.tap_cache.allocate_data_buffer(shared=True) return skim_buffers diff --git a/activitysim/core/simulate.py b/activitysim/core/simulate.py index fffd246319..24d472ea72 100644 --- a/activitysim/core/simulate.py +++ b/activitysim/core/simulate.py @@ -1012,7 +1012,9 @@ def set_skim_wrapper_targets(df, skims): skims = ( skims if isinstance(skims, list) - else skims.values() if isinstance(skims, dict) else [skims] + else skims.values() + if isinstance(skims, dict) + else [skims] ) # assume any object in skims can be treated as a skim @@ -1640,7 +1642,9 @@ def list_of_skims(skims): else ( skims.values() if isinstance(skims, dict) - else [skims] if skims is not None else [] + else [skims] + if skims is not None + else [] ) ) diff --git a/activitysim/examples/production_semcog/extensions/parking_location_choice_at_university.py b/activitysim/examples/production_semcog/extensions/parking_location_choice_at_university.py index 94ec7eaf4f..49edf8d375 100644 --- a/activitysim/examples/production_semcog/extensions/parking_location_choice_at_university.py +++ b/activitysim/examples/production_semcog/extensions/parking_location_choice_at_university.py @@ -159,10 +159,10 @@ def parking_location_choice_at_university( tours_nearest_lot = 
tour_choosers.primary_purpose.isin( nearest_lot_tour_purposes ) & tour_choosers.destination.isin(all_univ_zones) - tour_choosers.loc[tours_nearest_lot, "univ_parking_zone_id"] = ( - tour_choosers.loc[tours_nearest_lot, "destination"].map( - closest_parking_df["closest_parking_zone"] - ) + tour_choosers.loc[ + tours_nearest_lot, "univ_parking_zone_id" + ] = tour_choosers.loc[tours_nearest_lot, "destination"].map( + closest_parking_df["closest_parking_zone"] ) logger.info( @@ -172,12 +172,12 @@ def parking_location_choice_at_university( ) # Overriding school_zone_id in persons table - trips.loc[trips.index.isin(trip_choosers.index), "parked_at_university"] = ( - trip_choosers["parked_at_university"] - ) - tours.loc[tours.index.isin(tour_choosers.index), "univ_parking_zone_id"] = ( - tour_choosers["univ_parking_zone_id"] - ) + trips.loc[ + trips.index.isin(trip_choosers.index), "parked_at_university" + ] = trip_choosers["parked_at_university"] + tours.loc[ + tours.index.isin(tour_choosers.index), "univ_parking_zone_id" + ] = tour_choosers["univ_parking_zone_id"] state.add_table("trips", trips) state.add_table("tours", tours) diff --git a/activitysim/examples/production_semcog/extensions/university_location_zone_override.py b/activitysim/examples/production_semcog/extensions/university_location_zone_override.py index 698c2d6fb0..37c1b96e7e 100644 --- a/activitysim/examples/production_semcog/extensions/university_location_zone_override.py +++ b/activitysim/examples/production_semcog/extensions/university_location_zone_override.py @@ -140,9 +140,9 @@ def university_location_zone_override( # saving original zone if desired original_zone_col_name = model_settings["ORIGINAL_ZONE_COL_NAME"] if original_zone_col_name is not None: - persons.loc[persons.index.isin(choosers.index), original_zone_col_name] = ( - choosers[original_zone_col_name] - ) + persons.loc[ + persons.index.isin(choosers.index), original_zone_col_name + ] = choosers[original_zone_col_name] 
state.add_table("persons", persons) diff --git a/activitysim/workflows/steps/contrast/runtime.py b/activitysim/workflows/steps/contrast/runtime.py index 3744d43e8e..8c37163305 100644 --- a/activitysim/workflows/steps/contrast/runtime.py +++ b/activitysim/workflows/steps/contrast/runtime.py @@ -67,9 +67,7 @@ def relabel_source(x): ) if len(include_runs) == 1: - result = c.mark_bar( - size=6, - ).encode( + result = c.mark_bar(size=6,).encode( x=alt.X("seconds:Q", stack=None), y=alt.Y("model_name", type="nominal", sort=None), color="source", @@ -82,10 +80,7 @@ def relabel_source(x): ) elif len(include_runs) == 2: - result = c.mark_bar( - yOffset=-3, - size=6, - ).transform_filter( + result = c.mark_bar(yOffset=-3, size=6,).transform_filter( (alt.datum.source == relabel_source(include_runs[0])) ).encode( x=alt.X("seconds:Q", stack=None), @@ -112,10 +107,7 @@ def relabel_source(x): ) elif len(include_runs) == 3: - result = c.mark_bar( - yOffset=-5, - size=4, - ).transform_filter( + result = c.mark_bar(yOffset=-5, size=4,).transform_filter( (alt.datum.source == relabel_source(include_runs[0])) ).encode( x=alt.X("seconds:Q", stack=None), From 61e1d1111e34203e41db308abeefcfbd93efe22a Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Tue, 14 Oct 2025 17:28:52 -0700 Subject: [PATCH 16/20] Updated error messages to look for in activitysim.core tests --- activitysim/core/test/test_pipeline.py | 11 ++++++----- activitysim/core/test/test_random.py | 3 ++- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/activitysim/core/test/test_pipeline.py b/activitysim/core/test/test_pipeline.py index dfa6d770a8..12f31dbc66 100644 --- a/activitysim/core/test/test_pipeline.py +++ b/activitysim/core/test/test_pipeline.py @@ -9,6 +9,7 @@ import tables from activitysim.core import workflow +from activitysim.core.exceptions import CheckpointNameNotFoundError from activitysim.core.test.extensions import steps # set the max households for all tests (this is to limit memory use on travis) 
@@ -70,17 +71,17 @@ def test_pipeline_run(state): state.checkpoint.load_dataframe("table1", checkpoint_name="step3") # try to get a table from a step before it was checkpointed - with pytest.raises(RuntimeError) as excinfo: + with pytest.raises(CheckpointNameNotFoundError) as excinfo: state.checkpoint.load_dataframe("table2", checkpoint_name="step1") assert "not in checkpoint 'step1'" in str(excinfo.value) # try to get a non-existant table - with pytest.raises(RuntimeError) as excinfo: + with pytest.raises(CheckpointNameNotFoundError) as excinfo: state.checkpoint.load_dataframe("bogus") assert "never checkpointed" in str(excinfo.value) # try to get an existing table from a non-existant checkpoint - with pytest.raises(RuntimeError) as excinfo: + with pytest.raises(CheckpointNameNotFoundError) as excinfo: state.checkpoint.load_dataframe("table1", checkpoint_name="bogus") assert "not in checkpoints" in str(excinfo.value) @@ -111,12 +112,12 @@ def test_pipeline_checkpoint_drop(state): state.checkpoint.load_dataframe("table1") - with pytest.raises(RuntimeError) as excinfo: + with pytest.raises(CheckpointNameNotFoundError) as excinfo: state.checkpoint.load_dataframe("table2") # assert "never checkpointed" in str(excinfo.value) # can't get a dropped table from current checkpoint - with pytest.raises(RuntimeError) as excinfo: + with pytest.raises(CheckpointNameNotFoundError) as excinfo: state.checkpoint.load_dataframe("table3") # assert "was dropped" in str(excinfo.value) diff --git a/activitysim/core/test/test_random.py b/activitysim/core/test/test_random.py index 63809278c1..bcbc602685 100644 --- a/activitysim/core/test/test_random.py +++ b/activitysim/core/test/test_random.py @@ -8,6 +8,7 @@ import pytest from activitysim.core import random +from activitysim.core.exceptions import DuplicateLoadableObjectError def test_basic(): @@ -27,7 +28,7 @@ def test_basic(): assert "Arrays are not almost equal" in str(excinfo.value) # second call should return something different - 
with pytest.raises(RuntimeError) as excinfo: + with pytest.raises(DuplicateLoadableObjectError) as excinfo: rng.set_base_seed(1) assert "call set_base_seed before the first step" in str(excinfo.value) From 2e127ef64129684e93d452514ab21a96a285fef9 Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Tue, 14 Oct 2025 17:49:41 -0700 Subject: [PATCH 17/20] Updated error to look for in abm.text --- activitysim/abm/test/test_pipeline/test_pipeline.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/activitysim/abm/test/test_pipeline/test_pipeline.py b/activitysim/abm/test/test_pipeline/test_pipeline.py index 70bc26f4b1..848eb5f50c 100644 --- a/activitysim/abm/test/test_pipeline/test_pipeline.py +++ b/activitysim/abm/test/test_pipeline/test_pipeline.py @@ -13,6 +13,10 @@ import pytest from activitysim.core import random, tracing, workflow +from activitysim.core.exceptions import ( + CheckpointNameNotFoundError, + SystemConfigurationError, +) # set the max households for all tests (this is to limit memory use on travis) HOUSEHOLDS_SAMPLE_SIZE = 50 @@ -190,12 +194,12 @@ def test_mini_pipeline_run(): regress_mini_location_choice_logsums(state) # try to get a non-existant table - with pytest.raises(RuntimeError) as excinfo: + with pytest.raises(CheckpointNameNotFoundError) as excinfo: state.checkpoint.load_dataframe("bogus") assert "never checkpointed" in str(excinfo.value) # try to get an existing table from a non-existant checkpoint - with pytest.raises(RuntimeError) as excinfo: + with pytest.raises(CheckpointNameNotFoundError) as excinfo: state.checkpoint.load_dataframe("households", checkpoint_name="bogus") assert "not in checkpoints" in str(excinfo.value) @@ -235,7 +239,7 @@ def test_mini_pipeline_run2(): regress_mini_auto(state) # try to run a model already in pipeline - with pytest.raises(RuntimeError) as excinfo: + with pytest.raises(SystemConfigurationError) as excinfo: state.run.by_name("auto_ownership_simulate") assert "run model 
'auto_ownership_simulate' more than once" in str(excinfo.value) From dde2b6fe35d7496058d4a3afd3fd78ef69c59db6 Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Tue, 14 Oct 2025 18:01:30 -0700 Subject: [PATCH 18/20] Changed SystemConfigurationError to TableSlicingError in test --- activitysim/abm/test/test_pipeline/test_pipeline.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/activitysim/abm/test/test_pipeline/test_pipeline.py b/activitysim/abm/test/test_pipeline/test_pipeline.py index 848eb5f50c..9f76ee8282 100644 --- a/activitysim/abm/test/test_pipeline/test_pipeline.py +++ b/activitysim/abm/test/test_pipeline/test_pipeline.py @@ -13,10 +13,7 @@ import pytest from activitysim.core import random, tracing, workflow -from activitysim.core.exceptions import ( - CheckpointNameNotFoundError, - SystemConfigurationError, -) +from activitysim.core.exceptions import CheckpointNameNotFoundError, TableSlicingError # set the max households for all tests (this is to limit memory use on travis) HOUSEHOLDS_SAMPLE_SIZE = 50 @@ -239,7 +236,7 @@ def test_mini_pipeline_run2(): regress_mini_auto(state) # try to run a model already in pipeline - with pytest.raises(SystemConfigurationError) as excinfo: + with pytest.raises(TableSlicingError) as excinfo: state.run.by_name("auto_ownership_simulate") assert "run model 'auto_ownership_simulate' more than once" in str(excinfo.value) From e1ee1934713b3ac866f27ab73345d377ed9c0708 Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Tue, 14 Oct 2025 18:15:19 -0700 Subject: [PATCH 19/20] Reclasified error when a step is run more than once from TableSlicingError to DuplicateWorkflowNameError --- activitysim/abm/test/test_pipeline/test_pipeline.py | 7 +++++-- activitysim/core/workflow/runner.py | 4 ++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/activitysim/abm/test/test_pipeline/test_pipeline.py b/activitysim/abm/test/test_pipeline/test_pipeline.py index 9f76ee8282..b245e97f5e 100644 --- 
a/activitysim/abm/test/test_pipeline/test_pipeline.py +++ b/activitysim/abm/test/test_pipeline/test_pipeline.py @@ -13,7 +13,10 @@ import pytest from activitysim.core import random, tracing, workflow -from activitysim.core.exceptions import CheckpointNameNotFoundError, TableSlicingError +from activitysim.core.exceptions import ( + CheckpointNameNotFoundError, + DuplicateWorkflowNameError, +) # set the max households for all tests (this is to limit memory use on travis) HOUSEHOLDS_SAMPLE_SIZE = 50 @@ -236,7 +239,7 @@ def test_mini_pipeline_run2(): regress_mini_auto(state) # try to run a model already in pipeline - with pytest.raises(TableSlicingError) as excinfo: + with pytest.raises(DuplicateWorkflowNameError) as excinfo: state.run.by_name("auto_ownership_simulate") assert "run model 'auto_ownership_simulate' more than once" in str(excinfo.value) diff --git a/activitysim/core/workflow/runner.py b/activitysim/core/workflow/runner.py index 639ffc0ef9..79ecd0ed4f 100644 --- a/activitysim/core/workflow/runner.py +++ b/activitysim/core/workflow/runner.py @@ -7,7 +7,7 @@ from datetime import timedelta from activitysim.core import tracing -from activitysim.core.exceptions import DuplicateWorkflowNameError, TableSlicingError +from activitysim.core.exceptions import DuplicateWorkflowNameError from activitysim.core.workflow.accessor import FromState, StateAccessor from activitysim.core.workflow.checkpoint import ( CHECKPOINT_NAME, @@ -265,7 +265,7 @@ def _pre_run_step(self, model_name: str) -> bool | None: if model_name in checkpointed_models: if self._obj.settings.duplicate_step_execution == "error": checkpointed_model_bullets = "\n - ".join(checkpointed_models) - raise TableSlicingError( + raise DuplicateWorkflowNameError( f"Checkpointed Models:\n - {checkpointed_model_bullets}\n" f"Cannot run model '{model_name}' more than once" ) From 542c3ae7947ec4efd021985189597c616ca08cf3 Mon Sep 17 00:00:00 2001 From: JoeJimFlood Date: Tue, 14 Oct 2025 18:45:25 -0700 Subject: [PATCH 
20/20] Renamed InputPopulationError to InputTableError for clarity as it can be raised if tours are being input --- activitysim/abm/models/initialize_tours.py | 6 ++---- activitysim/abm/tables/persons.py | 8 ++++---- activitysim/core/exceptions.py | 2 +- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/activitysim/abm/models/initialize_tours.py b/activitysim/abm/models/initialize_tours.py index e1b1ed7308..79b0263ced 100644 --- a/activitysim/abm/models/initialize_tours.py +++ b/activitysim/abm/models/initialize_tours.py @@ -10,8 +10,8 @@ from activitysim.core import expressions, tracing, workflow from activitysim.core.configuration import PydanticReadable from activitysim.core.configuration.base import PreprocessorSettings +from activitysim.core.exceptions import InputTableError from activitysim.core.input import read_input_table -from activitysim.core.exceptions import InputPopulationError logger = logging.getLogger(__name__) @@ -141,9 +141,7 @@ def initialize_tours( f"{tours_without_persons.sum()} tours out of {len(persons)} without persons\n" f"{pd.Series({'person_id': tours_without_persons.index.values})}" ) - raise InputPopulationError( - f"{tours_without_persons.sum()} tours with bad person_id" - ) + raise InputTableError(f"{tours_without_persons.sum()} tours with bad person_id") if trace_hh_id: state.tracing.trace_df(tours, label="initialize_tours", warn_if_empty=True) diff --git a/activitysim/abm/tables/persons.py b/activitysim/abm/tables/persons.py index 24526d36f3..2c7716ddcc 100644 --- a/activitysim/abm/tables/persons.py +++ b/activitysim/abm/tables/persons.py @@ -9,8 +9,8 @@ from activitysim.abm.tables.util import simple_table_join from activitysim.core import workflow +from activitysim.core.exceptions import InputTableError from activitysim.core.input import read_input_table -from activitysim.core.exceptions import InputPopulationError logger = logging.getLogger(__name__) @@ -56,7 +56,7 @@ def persons(state: workflow.State) -> 
pd.DataFrame: f"{persons_without_households.sum()} persons out of {len(df)} without households\n" f"{pd.Series({'person_id': persons_without_households.index.values})}" ) - raise InputPopulationError( + raise InputTableError( f"{persons_without_households.sum()} persons with bad household_id" ) @@ -68,7 +68,7 @@ def persons(state: workflow.State) -> pd.DataFrame: f"{households_without_persons.sum()} households out of {len(households.index)} without persons\n" f"{pd.Series({'household_id': households_without_persons.index.values})}" ) - raise InputPopulationError( + raise InputTableError( f"{households_without_persons.sum()} households with no persons" ) @@ -108,5 +108,5 @@ def persons_merged( left_on="person_id", ) if n_persons != len(persons): - raise InputPopulationError("number of persons changed") + raise InputTableError("number of persons changed") return persons diff --git a/activitysim/core/exceptions.py b/activitysim/core/exceptions.py index 6eb70412d3..878ed6cfc9 100644 --- a/activitysim/core/exceptions.py +++ b/activitysim/core/exceptions.py @@ -74,7 +74,7 @@ class TableSlicingError(RuntimeError): """An error occurred trying to slice a table.""" -class InputPopulationError(RuntimeError): +class InputTableError(RuntimeError): """An issue with the input population was found."""