3 changes: 2 additions & 1 deletion .vscode/extensions.json
@@ -5,13 +5,14 @@
"DevSoft.svg-viewer-vscode",
"eamodio.gitlens",
"exiasr.hadolint",
"Gruntfuggly.todo-tree",
"ms-azuretools.vscode-containers",
"ms-python.black-formatter",
"ms-python.pylint",
"ms-python.python",
"njpwerner.autodocstring",
"samuelcolvin.jinjahtml",
"timonwong.shellcheck",
"vscode-icons-team.vscode-icons",
"vscode-icons-team.vscode-icons"
]
}
@@ -220,50 +220,153 @@ def test_response_surface_modeling(
create_function_from_project(page, our_project["uuid"])

# 3. start a RSM with that function
service_keys = [
"mmux-vite-app-moga-write",
"mmux-vite-app-sumo-write",
"mmux-vite-app-uq-write",
]

with log_context(
logging.INFO,
f"Waiting for {service_key} to be responsive (waiting for {_DEFAULT_RESPONSE_TO_WAIT_FOR})",
):
project_data = create_project_from_service_dashboard(
ServiceType.DYNAMIC, service_key, None, service_version
)
assert "workbench" in project_data, "Expected workbench to be in project data!"
assert isinstance(
project_data["workbench"], dict
), "Expected workbench to be a dict!"
node_ids: list[str] = list(project_data["workbench"])
assert len(node_ids) == 1, "Expected 1 node in the workbench!"
for service_key in service_keys:
with log_context(
logging.INFO,
f"Waiting for {service_key} to be responsive (waiting for {_DEFAULT_RESPONSE_TO_WAIT_FOR})",
):
project_data = create_project_from_service_dashboard(
ServiceType.DYNAMIC, service_key, None, service_version
)
assert (
"workbench" in project_data
), "Expected workbench to be in project data!"
assert isinstance(
project_data["workbench"], dict
), "Expected workbench to be a dict!"
node_ids: list[str] = list(project_data["workbench"])
assert len(node_ids) == 1, "Expected 1 node in the workbench!"

wait_for_service_running(
page=page,
node_id=node_ids[0],
websocket=log_in_and_out,
timeout=_WAITING_FOR_SERVICE_TO_START,
press_start_button=False,
product_url=product_url,
is_service_legacy=is_service_legacy,
)

wait_for_service_running(
page=page,
node_id=node_ids[0],
websocket=log_in_and_out,
timeout=_WAITING_FOR_SERVICE_TO_START,
press_start_button=False,
product_url=product_url,
is_service_legacy=is_service_legacy,
)
service_iframe = page.frame_locator("iframe")
with log_context(logging.INFO, "Waiting for the RSM to be ready..."):
service_iframe.get_by_role("grid").wait_for(
state="visible", timeout=_WAITING_FOR_SERVICE_TO_APPEAR
)

service_iframe = page.frame_locator("iframe")
with log_context(logging.INFO, "Waiting for the RSM to be ready..."):
service_iframe.get_by_role("grid").wait_for(
state="visible", timeout=_WAITING_FOR_SERVICE_TO_APPEAR
)
# select the function
with log_context(logging.INFO, "Selected test function..."):
service_iframe.get_by_role("button", name="SELECT").nth(0).click()

page.wait_for_timeout(10000)
with log_context(logging.INFO, "Filling the input parameters..."):
min_test_id = "Mean" if "uq" in service_key.lower() else "Min"
min_inputs = service_iframe.locator(
f'[mmux-testid="input-block-{min_test_id}"] input[type="number"]'
)
count_min = min_inputs.count()

# # select the function
# service_iframe.get_by_role("gridcell", name=_FUNCTION_NAME).click()
for i in range(count_min):
input_field = min_inputs.nth(i)
input_field.fill(str(i + 1))
logging.info(f"Filled {min_test_id} input {i} with value {i + 1}")
assert input_field.input_value() == str(i + 1)

# # Find the first input field (textbox) in the iframe
# min_input_field = service_iframe.get_by_role("textbox").nth(0)
# min_input_field.fill("1")
# max_input_field = service_iframe.get_by_role("textbox").nth(1)
# max_input_field.fill("10")
max_test_id = "Standard Deviation" if "uq" in service_key.lower() else "Max"
max_inputs = service_iframe.locator(
f'[mmux-testid="input-block-{max_test_id}"] input[type="number"]'
)
count_max = max_inputs.count()

for i in range(count_max):
input_field = max_inputs.nth(i)
input_field.fill(str((i + 1) * 10))
logging.info(
f"Filled {max_test_id} input {i} with value {(i + 1) * 10}"
)
assert input_field.input_value() == str((i + 1) * 10)

page.wait_for_timeout(1000)
page.keyboard.press("Tab")
page.wait_for_timeout(1000)

if "moga" in service_key.lower():
Member:
MINOR: highlight these keywords by adding them as constants, e.g. EXPECTED_SERVICE_KEY.
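A possible sketch of that suggestion follows; the constant and helper names are illustrative, not taken from the PR:

```python
# Sketch only: module-level constants for the service-key keywords the test
# currently hard-codes inline. Names are illustrative suggestions.
_UQ_SERVICE_KEYWORD = "uq"  # service whose input blocks are Mean / Standard Deviation
_MOGA_SERVICE_KEYWORD = "moga"  # service that needs the extra output-parameter step


def _min_block_test_id(service_key: str) -> str:
    """Return the mmux-testid suffix of the first input block for the given service."""
    return "Mean" if _UQ_SERVICE_KEYWORD in service_key.lower() else "Min"


def _max_block_test_id(service_key: str) -> str:
    """Return the mmux-testid suffix of the second input block for the given service."""
    return "Standard Deviation" if _UQ_SERVICE_KEYWORD in service_key.lower() else "Max"
```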

with log_context(logging.INFO, "Filling the output parameters..."):
output_plus_button = service_iframe.locator(
'[mmux-testid="add-output-var-btn"]'
)

output_plus_button.click()

output_confirm_button = service_iframe.locator(
'[mmux-testid="confirm-add-output-btn"]'
)
output_confirm_button.click()

# Click the next button
with log_context(logging.INFO, "Clicking Next to go to the next step..."):
service_iframe.locator('[mmux-testid="next-button"]').click()

with log_context(logging.INFO, "Starting the sampling..."):
service_iframe.locator('[mmux-testid="extend-sampling-btn"]').click()
service_iframe.locator('[mmux-testid="new-sampling-campaign-btn"]').click()
sampling_input = service_iframe.locator(
'[mmux-testid="lhs-number-of-sampling-points-input"] input[type="number"]'
)
sampling_input.fill("40")
sampling_input.press("Enter")
service_iframe.locator('[mmux-testid="run-sampling-btn"]').click()

# # click on next
# service_iframe.get_by_role("button", name="Next").click()
with log_context(logging.INFO, "Waiting for the sampling to launch..."):
toast = service_iframe.locator("div.Toastify__toast").filter(
has_text="Sampling started running successfully, please wait for completion."
)
toast.wait_for(state="visible", timeout=120000) # waits up to 120 seconds

with log_context(logging.INFO, "Waiting for the sampling to complete..."):

def all_completed():
status_cells = service_iframe.locator(
'div[role="gridcell"][data-field="status"]'
)
total = status_cells.count()
if total == 0:
return False
for i in range(total):
text = (status_cells.nth(i).text_content() or "").lower().strip()
logging.info(f"STATUS CELL TEXT {i}: {text}")
if text != "complete":
return False
return True

while not all_completed():
logging.info("⏳ Waiting for all status cells to be completed...")
page.wait_for_timeout(3000)
service_iframe.locator(
'[mmux-testid="refresh-job-collections-btn"]'
).click()

service_iframe.locator(
'[mmux-testid="select-all-successful-jobs-btn"] '
).click()

plotly_graph = service_iframe.locator(".js-plotly-plot")
plotly_graph.wait_for(state="visible", timeout=300000)
Contributor:
I am a bit confused here: you wait for the graph to show up, but do you check that the created sample is included?

Author:
The function is created specifically for the test, so it starts out empty with no jobs. We then launch a sampling campaign, which updates the UI as the jobs become ready. Since the runner is a jsonifier and we have timeouts, more than five of the jobs consistently finish by the time the launch is done, so the plot is displayed.

The plot can only be displayed once more than 5 jobs are listed and finished.

If we want to wait for every job to finish, we can either wait longer before refreshing or implement a refresh mechanism that retries every 10 seconds until all of them are complete.

Do you require the test to also check for failed job executions? We can fail the test if any job fails.
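A minimal sketch of such a refresh loop, which would also fail fast on a failed job, might look like the following; the helper name, the 10-second interval, and the "failed" status string are assumptions, while the selectors are the ones already used in this test:

```python
import logging

from playwright.sync_api import FrameLocator, Page


def _wait_for_all_jobs_to_complete(
    service_iframe: FrameLocator, page: Page, poll_interval_ms: int = 10_000
) -> None:
    """Sketch: refresh the job table until every status cell reads 'complete'.

    Fails immediately if any job reports 'failed' (that status string is an assumption).
    """
    while True:
        status_cells = service_iframe.locator(
            'div[role="gridcell"][data-field="status"]'
        )
        statuses = [
            (status_cells.nth(i).text_content() or "").strip().lower()
            for i in range(status_cells.count())
        ]
        assert "failed" not in statuses, f"A sampling job failed: {statuses}"
        if statuses and all(status == "complete" for status in statuses):
            return
        logging.info("Jobs not yet complete: %s", statuses)
        page.wait_for_timeout(poll_interval_ms)
        service_iframe.locator('[mmux-testid="refresh-job-collections-btn"]').click()
```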

Contributor:
I'm not saying every job has to finish, but I think it would be best to at least check that the jobs are listed.

Author:
So I finally replaced the simple wait-for-the-graph approach with one that checks the table until every status turns to complete, refreshing it until that happens. After that it selects all the new values and waits for the plot to appear.

page.wait_for_timeout(2000)

# # then we wait a long time
# page.wait_for_timeout(1 * MINUTE)
with (
log_context(logging.INFO, "Go back to dashboard"),
page.expect_response(
re.compile(r"/projects\?.+")
) as list_projects_response,
):
page.get_by_test_id("dashboardBtn").click()
page.get_by_test_id("confirmDashboardBtn").click()
assert (
list_projects_response.value.ok
), f"Failed to list projects: {list_projects_response.value.status}"
page.wait_for_timeout(2000)