Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
605cf8b
Downgraded logging of which expression is being evaluated from "info"…
JoeJimFlood Oct 16, 2025
922dfcf
Downgraded column list before and after dropping unused columns from …
JoeJimFlood Oct 16, 2025
034bfb8
Downgraded reporting of model component results from "INFO" to "DEBUG"
JoeJimFlood Oct 16, 2025
39cc386
Downgraded a few log messages in settings checker from "INFO" to "DEBUG"
JoeJimFlood Oct 16, 2025
7d4d710
Changed default value of `level` argument in flow.TimeLogger.summary(…
JoeJimFlood Oct 17, 2025
4e467ae
Downgraded logging statements when merging the choosers and alternati…
JoeJimFlood Oct 17, 2025
1716eb6
Downgraded runtimes for flow evaluations from "INFO" to "DEBUG"
JoeJimFlood Oct 17, 2025
eeec281
Downgraded "sharrow_enabled is True/False" from "INFO" to "DEBUG" as …
JoeJimFlood Oct 17, 2025
0639deb
Downgraded utility data types reporting from "INFO" to "DEBUG"
JoeJimFlood Oct 17, 2025
709025d
Downgraded all instances of logger.info to logger.debug in activitysi…
JoeJimFlood Oct 17, 2025
cedbe00
Downgraded line reporting how many rows are being run for eval_intera…
JoeJimFlood Oct 18, 2025
ae496a1
Downgraded several (primarily repetitive) statements in activitysim.a…
JoeJimFlood Oct 18, 2025
0d7824f
Downgraded messages reporting how long each of the sampling, logsums, …
JoeJimFlood Oct 18, 2025
7fcbe3e
Downgraded more detailed messages from "INFO" to "DEBUG" in trip dest…
JoeJimFlood Oct 18, 2025
b7d929c
Reclassified some logging statements
JoeJimFlood Oct 18, 2025
c1f0215
Swapped logging statement classifications to get desired statement pr…
JoeJimFlood Oct 18, 2025
c1c483b
Downgraded statement from "INFO" to "DEBUG" that wasn't providing muc…
JoeJimFlood Oct 18, 2025
efebd64
blacken
JoeJimFlood Oct 18, 2025
81d81f8
Added documentation on logging levels
JoeJimFlood Oct 20, 2025
fca54bf
Merge branch 'main' into logging-cleanup
jpn-- Oct 28, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 7 additions & 7 deletions activitysim/abm/models/settings_checker.py
Original file line number Diff line number Diff line change
Expand Up @@ -322,7 +322,7 @@ def try_load_model_settings(
) -> tuple[PydanticBase | None, Exception | None]:

msg = f"Attempting to load model settings for {model_name} via {model_settings_class.__name__} and {model_settings_file}"
logger.info(msg)
logger.debug(msg)
file_logger.info(msg)

try:
Expand All @@ -348,7 +348,7 @@ class InputCheckerSettings(PydanticBase):
)
result = model_settings, None
msg = f"Successfully loaded model settings from {model_settings_file}"
logger.info(msg)
logger.debug(msg)
except Exception as e:
result = None, e
return result
Expand All @@ -358,12 +358,12 @@ def try_load_spec(
model_name: str, model_settings: PydanticBase, spec_file: str, state: State
) -> tuple[DataFrame | None, Exception | None]:
msg = f"Attempting to load SPEC for {model_name} via {model_settings.__class__.__name__}"
logger.info(msg)
logger.debug(msg)
file_logger.info(msg)
try:
result = state.filesystem.read_model_spec(spec_file), None
msg = f"Successfully loaded model SPEC from {spec_file}"
logger.info(msg)
logger.debug(msg)
file_logger.info(msg)
except Exception as e:
# always return a dataframe
Expand All @@ -375,13 +375,13 @@ def try_load_coefs(
model_name: str, model_settings: PydanticBase, coefs_file: str, state: State
) -> tuple[DataFrame, Optional[Exception]]:
msg = f"Attempting to load COEFFICIENTS for {model_name} via {model_settings.__class__.__name__}"
logger.info(msg)
logger.debug(msg)
file_logger.info(msg)

try:
result = state.filesystem.read_model_coefficients(file_name=coefs_file), None
msg = f"Successfully loaded model Coefficients from {coefs_file}"
logger.info(msg)
logger.debug(msg)
file_logger.info(msg)
except Exception as e:
result = None, e
Expand Down Expand Up @@ -423,7 +423,7 @@ def try_eval_spec_coefs(
else:
result = eval_coefficients(state, spec, coefs, estimator=None), None
msg = f"Successfully evaluated coefficients for {model_name}"
logger.info(msg)
logger.debug(msg)
file_logger.info(msg)
except Exception as e:
result = None, e
Expand Down
18 changes: 11 additions & 7 deletions activitysim/abm/models/trip_destination.py
Original file line number Diff line number Diff line change
Expand Up @@ -820,7 +820,7 @@ def compute_logsums(
adds od_logsum and dp_logsum columns to trips (in place)
"""
trace_label = tracing.extend_trace_label(trace_label, "compute_logsums")
logger.info("Running %s with %d samples", trace_label, destination_sample.shape[0])
logger.debug("Running %s with %d samples", trace_label, destination_sample.shape[0])

# chunk usage is uniform so better to combine
chunk_tag = "trip_destination.compute_logsums"
Expand Down Expand Up @@ -977,7 +977,7 @@ def trip_destination_simulate(

alt_dest_col_name = model_settings.ALT_DEST_COL_NAME

logger.info("Running trip_destination_simulate with %d trips", len(trips))
logger.debug("Running trip_destination_simulate with %d trips", len(trips))

skims = skim_hotel.sample_skims(presample=False)

Expand Down Expand Up @@ -1102,7 +1102,7 @@ def choose_trip_destination(
)
trips = trips[~dropped_trips]

t0 = print_elapsed_time("%s.trip_destination_sample" % trace_label, t0)
t0 = print_elapsed_time("%s.trip_destination_sample" % trace_label, t0, debug=True)

if trips.empty:
return pd.Series(index=trips.index).to_frame("choice"), None
Expand All @@ -1124,7 +1124,7 @@ def choose_trip_destination(
destination_sample["od_logsum"] = 0.0
destination_sample["dp_logsum"] = 0.0

t0 = print_elapsed_time("%s.compute_logsums" % trace_label, t0)
t0 = print_elapsed_time("%s.compute_logsums" % trace_label, t0, debug=True)

destinations = trip_destination_simulate(
state,
Expand Down Expand Up @@ -1155,7 +1155,9 @@ def choose_trip_destination(
else:
destination_sample = None

t0 = print_elapsed_time("%s.trip_destination_simulate" % trace_label, t0)
t0 = print_elapsed_time(
"%s.trip_destination_simulate" % trace_label, t0, debug=True
)

return destinations, destination_sample

Expand Down Expand Up @@ -1484,7 +1486,9 @@ def run_trip_destination(
else:
None

logger.info("Running %s with %d trips", nth_trace_label, nth_trips.shape[0])
logger.debug(
"Running %s with %d trips", nth_trace_label, nth_trips.shape[0]
)

# - choose destination for nth_trips, segmented by primary_purpose
choices_list = []
Expand Down Expand Up @@ -1649,7 +1653,7 @@ def trip_destination(
estimator.write_table(state.get_dataframe("land_use"), "landuse", append=False)
estimator.write_model_settings(model_settings, model_settings_file_name)

logger.info("Running %s with %d trips", trace_label, trips_df.shape[0])
logger.debug("Running %s with %d trips", trace_label, trips_df.shape[0])

trips_df, save_sample_df = run_trip_destination(
state,
Expand Down
10 changes: 5 additions & 5 deletions activitysim/abm/models/util/canonical_ids.py
Original file line number Diff line number Diff line change
Expand Up @@ -255,7 +255,7 @@ def canonical_tours(state: workflow.State):
)
non_mandatory_channels = enumerate_tour_types(non_mandatory_tour_flavors)

logger.info(f"Non-Mandatory tour flavors used are {non_mandatory_tour_flavors}")
logger.debug(f"Non-Mandatory tour flavors used are {non_mandatory_tour_flavors}")

# ---- mandatory_channels
mtf_model_settings_file_name = "mandatory_tour_frequency.yaml"
Expand All @@ -271,7 +271,7 @@ def canonical_tours(state: workflow.State):
)
mandatory_channels = enumerate_tour_types(mandatory_tour_flavors)

logger.info(f"Mandatory tour flavors used are {mandatory_tour_flavors}")
logger.debug(f"Mandatory tour flavors used are {mandatory_tour_flavors}")

# ---- atwork_subtour_channels
atwork_model_settings_file_name = "atwork_subtour_frequency.yaml"
Expand All @@ -288,7 +288,7 @@ def canonical_tours(state: workflow.State):
)
atwork_subtour_channels = enumerate_tour_types(atwork_subtour_flavors)

logger.info(f"Atwork subtour flavors used are {atwork_subtour_flavors}")
logger.debug(f"Atwork subtour flavors used are {atwork_subtour_flavors}")

# we need to distinguish between subtours of different work tours
# (e.g. eat1_1 is eat subtour for parent work tour 1 and eat1_2 is for work tour 2)
Expand Down Expand Up @@ -317,7 +317,7 @@ def canonical_tours(state: workflow.State):
joint_tour_flavors = determine_flavors_from_alts_file(
jtf_alts, provided_joint_flavors, default_joint_flavors
)
logger.info(f"Joint tour flavors used are {joint_tour_flavors}")
logger.debug(f"Joint tour flavors used are {joint_tour_flavors}")

joint_tour_channels = enumerate_tour_types(joint_tour_flavors)
joint_tour_channels = ["j_%s" % c for c in joint_tour_channels]
Expand All @@ -343,7 +343,7 @@ def canonical_tours(state: workflow.State):
school_escort_flavors = {"escort": 2 * num_escortees}
school_escort_channels = enumerate_tour_types(school_escort_flavors)
school_escort_channels = ["se_%s" % c for c in school_escort_channels]
logger.info(f"School escort tour flavors used are {school_escort_flavors}")
logger.debug(f"School escort tour flavors used are {school_escort_flavors}")

sub_channels = sub_channels + school_escort_channels

Expand Down
14 changes: 7 additions & 7 deletions activitysim/abm/models/util/tour_destination.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,18 +92,18 @@ def _destination_sample(
coefficients_file_name=model_settings.COEFFICIENTS,
)

logger.info("running %s with %d tours", trace_label, len(choosers))
logger.debug("running %s with %d tours", trace_label, len(choosers))

sample_size = model_settings.SAMPLE_SIZE
if estimator and model_settings.ESTIMATION_SAMPLE_SIZE >= 0:
sample_size = model_settings.ESTIMATION_SAMPLE_SIZE
logger.info(
logger.debug(
f"Estimation mode for {trace_label} using sample size of {sample_size}"
)

if state.settings.disable_destination_sampling:
sample_size = 0
logger.info(
logger.debug(
f"SAMPLE_SIZE set to 0 for {trace_label} because disable_destination_sampling is set"
)

Expand Down Expand Up @@ -562,7 +562,7 @@ def destination_presample(
trace_label = tracing.extend_trace_label(trace_label, "presample")
chunk_tag = "tour_destination.presample"

logger.info(f"{trace_label} location_presample")
logger.debug(f"{trace_label} location_presample")

alt_dest_col_name = model_settings.ALT_DEST_COL_NAME
assert DEST_TAZ != alt_dest_col_name
Expand Down Expand Up @@ -652,7 +652,7 @@ def run_destination_sample(
)

if pre_sample_taz:
logger.info(
logger.debug(
"Running %s destination_presample with %d tours" % (trace_label, len(tours))
)

Expand Down Expand Up @@ -740,7 +740,7 @@ def run_destination_logsums(
how="left",
)

logger.info("Running %s with %s rows", trace_label, len(choosers))
logger.debug("Running %s with %s rows", trace_label, len(choosers))

state.tracing.dump_df(DUMP, persons_merged, trace_label, "persons_merged")
state.tracing.dump_df(DUMP, choosers, trace_label, "choosers")
Expand Down Expand Up @@ -832,7 +832,7 @@ def run_destination_simulate(

constants = model_settings.CONSTANTS

logger.info("Running tour_destination_simulate with %d persons", len(choosers))
logger.debug("Running tour_destination_simulate with %d persons", len(choosers))

# create wrapper with keys for this lookup - in this case there is a home_zone_id in the choosers
# and a zone_id in the alternatives which get merged during interaction
Expand Down
12 changes: 6 additions & 6 deletions activitysim/abm/models/util/tour_od.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,15 +155,15 @@ def _od_sample(
else:
alt_col_name = alt_od_col_name

logger.info("running %s with %d tours", trace_label, len(choosers))
logger.debug("running %s with %d tours", trace_label, len(choosers))

sample_size = model_settings.SAMPLE_SIZE
if state.settings.disable_destination_sampling or (
estimator and estimator.want_unsampled_alternatives
):
# FIXME interaction_sample will return unsampled complete alternatives
# with probs and pick_count
logger.info(
logger.debug(
(
"Estimation mode for %s using unsampled alternatives "
"short_circuit_choices"
Expand Down Expand Up @@ -607,7 +607,7 @@ def od_presample(
trace_label = tracing.extend_trace_label(trace_label, "presample")
chunk_tag = "tour_od.presample"

logger.info(f"{trace_label} od_presample")
logger.debug(f"{trace_label} od_presample")

alt_od_col_name = get_od_id_col(ORIG_MAZ, DEST_TAZ)

Expand Down Expand Up @@ -711,7 +711,7 @@ def run_od_sample(
)

if pre_sample_taz:
logger.info(
logger.debug(
"Running %s destination_presample with %d tours" % (trace_label, len(tours))
)

Expand Down Expand Up @@ -780,7 +780,7 @@ def run_od_logsums(
choosers[origin_id_col].astype(str) + "_" + choosers[dest_id_col].astype(str)
)

logger.info("Running %s with %s rows", trace_label, len(choosers))
logger.debug("Running %s with %s rows", trace_label, len(choosers))

state.tracing.dump_df(DUMP, choosers, trace_label, "choosers")

Expand Down Expand Up @@ -989,7 +989,7 @@ def run_od_simulate(

constants = model_settings.CONSTANTS

logger.info("Running tour_destination_simulate with %d persons", len(choosers))
logger.debug("Running tour_destination_simulate with %d persons", len(choosers))

# create wrapper with keys for this lookup - in this case there is an origin ID
# column and a destination ID columns in the alternatives table.
Expand Down
2 changes: 1 addition & 1 deletion activitysim/abm/models/util/tour_scheduling.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ def run_tour_scheduling(
if estimators:
timetable.begin_transaction(list(estimators.values()))

logger.info(f"Running {trace_label} with %d tours", len(chooser_tours))
logger.debug(f"Running {trace_label} with %d tours", len(chooser_tours))
choices = vts.vectorize_tour_scheduling(
state,
chooser_tours,
Expand Down
12 changes: 6 additions & 6 deletions activitysim/abm/models/util/vectorize_tour_scheduling.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,7 @@ def _compute_logsums(
mandatory=False,
)
choosers = alt_tdd.join(tours_merged, how="left", rsuffix="_chooser")
logger.info(
logger.debug(
f"{trace_label} compute_logsums for {choosers.shape[0]} choosers {alt_tdd.shape[0]} alts"
)

Expand Down Expand Up @@ -194,7 +194,7 @@ def _compute_logsums(

if preprocessor_settings:
simulate.set_skim_wrapper_targets(choosers, skims)
logger.info(
logger.debug(
f"{trace_label} start preprocessing prior to compute_logsums for {choosers.shape[0]} choosers {alt_tdd.shape[0]} alts"
)
expressions.assign_columns(
Expand All @@ -204,7 +204,7 @@ def _compute_logsums(
locals_dict=locals_dict,
trace_label=trace_label,
)
logger.info(
logger.debug(
f"{trace_label} end preprocessing prior to compute_logsums for {choosers.shape[0]} choosers {alt_tdd.shape[0]} alts"
)

Expand Down Expand Up @@ -426,7 +426,7 @@ def compute_tour_scheduling_logsums(
)
chunk_sizer.log_df(trace_label, "deduped_alt_tdds", deduped_alt_tdds)

logger.info(
logger.debug(
f"{trace_label} compute_logsums "
f"deduped_alt_tdds reduced number of rows by "
f"{round(100 * (len(alt_tdd) - len(deduped_alt_tdds)) / len(alt_tdd), 2)}% "
Expand Down Expand Up @@ -758,7 +758,7 @@ def _schedule_tours(

"""

logger.info(
logger.debug(
"%s schedule_tours running %d tour choices" % (tour_trace_label, len(tours))
)

Expand Down Expand Up @@ -908,7 +908,7 @@ def schedule_tours(
logger.info("schedule_tours %s tours not monotonic_increasing - sorting df")
tours = tours.sort_index()

logger.info(
logger.debug(
"%s schedule_tours running %d tour choices" % (tour_trace_label, len(tours))
)

Expand Down
2 changes: 1 addition & 1 deletion activitysim/core/assign.py
Original file line number Diff line number Diff line change
Expand Up @@ -360,7 +360,7 @@ def rng_lognormal(random_draws, mu, sigma, broadcast=True, scale=False):
)

if trace_label:
logger.info(f"{trace_label}.assign_variables {target} = {expression}")
logger.debug(f"{trace_label}.assign_variables {target} = {expression}")

if is_temp_singular(target) or is_throwaway(target):
try:
Expand Down
Loading