Skip to content

Commit

Permalink
506 add trajectory visualization+timepoints tests (#520)
Browse files Browse the repository at this point in the history
* adding in test_timepoints
* add parametrized inputs
* use different step sizes
  • Loading branch information
marjoleinpnnl authored Mar 15, 2024
1 parent b5ed105 commit 3703778
Showing 1 changed file with 51 additions and 28 deletions.
79 changes: 51 additions & 28 deletions tests/visuals/test_plots.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,37 +26,40 @@ def make_nice_labels(labels):
}


def create_distributions(
    logging_step_size=20,
    time_unit="twenty",
    start_time=0.0,
    end_time=100.0,
    num_samples=30,
):
    """Sample the SEIRHD petrinet model and convert the result for plotting.

    Parameters
    ----------
    logging_step_size : int, default 20
        Interval between logged timepoints.
    time_unit : str or None, default "twenty"
        Label applied to the time column of the output (``None`` renders as
        "unknown" downstream).
    start_time : float, default 0.0
        Simulation start; also the origin of the generated timepoints.
    end_time : float, default 100.0
        Simulation end (exclusive bound of the generated timepoints).
    num_samples : int, default 30
        Number of sample trajectories to draw.

    Returns
    -------
    The dataframe produced by ``convert_to_output_format`` for the drawn
    sample, with timepoints matching pyciemss's own logging formula.
    """
    model_1_path = (
        "https://raw.githubusercontent.com/DARPA-ASKEM/simulation-integration"
        "/main/data/models/SEIRHD_NPI_Type1_petrinet.json"
    )
    sample = pyciemss.sample(
        model_1_path,
        end_time,
        logging_step_size,
        num_samples,
        time_unit=time_unit,
        start_time=start_time,
        solver_method="euler",
        solver_options={"step_size": 1e-2},
    )["unprocessed_result"]

    return convert_to_output_format(
        sample,
        # using same time point formula as in 'logging_times' from the
        # pyciemss interfaces formula 'sample'
        timepoints=np.arange(
            start_time + logging_step_size, end_time, logging_step_size
        ),
        time_unit=time_unit,
    )


class TestTrajectory:
    @staticmethod
    @pytest.fixture
    def distributions():
        """Fixture: sampled SEIRHD model output converted to plotting format.

        NOTE(review): this span appears to render BOTH sides of a diff — the
        inline sampling code below looks like the pre-change body, and the
        final ``create_distributions(...)`` call (unreachable as written,
        since it follows a ``return``) looks like the post-change body.
        Confirm against the committed file before relying on either path.
        """
        model_1_path = (
            "https://raw.githubusercontent.com/DARPA-ASKEM/simulation-integration"
            "/main/data/models/SEIRHD_NPI_Type1_petrinet.json"
        )
        start_time = 0.0
        end_time = 100.0
        logging_step_size = 1
        num_samples = 30
        sample = pyciemss.sample(
            model_1_path,
            end_time,
            logging_step_size,
            num_samples,
            start_time=start_time,
            solver_method="euler",
            solver_options={"step_size": 0.1},
        )["unprocessed_result"]

        # Infer the number of logged timepoints from any multi-dimensional
        # array in the sample (last matching entry wins).
        for e in sample.values():
            if len(e.shape) > 1:
                num_timepoints = e.shape[1]

        return convert_to_output_format(
            sample,
            timepoints=np.linspace(start_time, end_time, num_timepoints),
            time_unit="notional",
        )
        # Unreachable: see NOTE(review) in the docstring above.
        return create_distributions(logging_step_size=1, time_unit="notional")

@staticmethod
@pytest.fixture
Expand All @@ -83,6 +86,26 @@ def test_base(self, distributions):
df = pd.DataFrame(vega.find_named(schema["data"], "distributions")["values"])
assert {"trajectory", "timepoint", "lower", "upper"} == set(df.columns)

@pytest.mark.parametrize("logging_step_size", [5, 1])
@pytest.mark.parametrize("time_unit", ["five", None])
@pytest.mark.parametrize("end_time", [80, 100])
def test_timepoints(self, logging_step_size, time_unit, end_time):
# distribution will create timepoint from logging_step_size, and start and end time
new_distribution = create_distributions(
logging_step_size=logging_step_size, time_unit=time_unit
)
label = "timepoint_unknown" if time_unit is None else f"timepoint_{time_unit}"
# check if new time label is correct
assert label in new_distribution.columns

schema = plots.trajectories(new_distribution)
df = pd.DataFrame(vega.find_named(schema["data"], "distributions")["values"])
new_timepoints = [
float(x) for x in np.arange(logging_step_size, end_time, logging_step_size)
]
# check timepoint created match the input logging_step_size and start and end time
assert df.timepoint[: len(new_timepoints)].tolist() == new_timepoints

def test_markers(self, distributions):
schema = plots.trajectories(
distributions,
Expand Down Expand Up @@ -224,7 +247,7 @@ def test_traces(self, distributions, traces):
)

shown_traces = pd.DataFrame(vega.find_named(schema["data"], "traces")["values"])
plots.save_schema(schema, "_schema.json")
plots.save_schema(schema, "_schema.vg.json")

assert sorted(traces.columns.unique()) == sorted(
shown_traces["trajectory"].unique()
Expand Down

0 comments on commit 3703778

Please sign in to comment.