
Commit c48e07a

[promptflow][BugFix] Print error message when run status is failed (#1051)
# Description

If the run status is failed, print the error message; otherwise, the user cannot know why the run failed in the SDK experience.

SDK experience:

<img width="1112" alt="image" src="https://github.com/microsoft/promptflow/assets/38847871/8441b759-da46-4268-aadf-26a72ea8c670">

CLI experience:

<img width="1115" alt="image" src="https://github.com/microsoft/promptflow/assets/38847871/702a1596-0a0b-45b1-b40e-549d0614b927">

# All Promptflow Contribution checklist:

- [x] **The pull request does not introduce [breaking changes].**
- [ ] **CHANGELOG is updated for new features, bug fixes or other significant changes.**
- [x] **I have read the [contribution guidelines](../CONTRIBUTING.md).**
- [ ] **Create an issue and link to the pull request to get dedicated review from the promptflow team. Learn more: [suggested workflow](../CONTRIBUTING.md#suggested-workflow).**

## General Guidelines and Best Practices

- [x] Title of the pull request is clear and informative.
- [x] There are a small number of commits, each of which has an informative message. This means that previously merged commits do not appear in the history of the PR. For more information on cleaning up the commits in your PR, [see this page](https://github.com/Azure/azure-powershell/blob/master/documentation/development-docs/cleaning-up-commits.md).

### Testing Guidelines

- [x] Pull request includes test coverage for the included changes.
1 parent ba1fefe · commit c48e07a
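To make the new behavior concrete, here is a minimal usage sketch of the SDK experience described above. It assumes a locally installed promptflow, a local `PFClient`, and a flow that fails at runtime; the flow and data paths below are placeholders, not files from this repository.

```python
# Minimal sketch of the SDK experience, assuming a locally installed promptflow
# and a flow that fails at runtime. Paths below are placeholders.
from promptflow import PFClient

pf = PFClient()

run = pf.run(
    flow="flows/failed_flow",              # placeholder: any flow that raises during execution
    data="data/webClassification3.jsonl",  # placeholder input data
)

# With this change, streaming a failed run prints the stored error message
# (in red) after the run summary instead of finishing silently.
pf.stream(run.name)
```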

File tree: 3 files changed, +33 −20 lines

src/promptflow/promptflow/_sdk/operations/_local_storage_operations.py (+2 −2)

@@ -430,13 +430,13 @@ def _prepare_folder(path: Union[str, Path]) -> Path:

     @staticmethod
     def _outputs_padding(df: pd.DataFrame, expected_rows: int) -> pd.DataFrame:
+        if len(df) == expected_rows:
+            return df
         missing_lines = []
         lines_set = set(df[LINE_NUMBER].values)
         for i in range(expected_rows):
             if i not in lines_set:
                 missing_lines.append({LINE_NUMBER: i})
-        if len(missing_lines) == 0:
-            return df
         df_to_append = pd.DataFrame(missing_lines)
         res = pd.concat([df, df_to_append], ignore_index=True)
         res = res.sort_values(by=LINE_NUMBER, ascending=True)
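For readers outside the diff context, a standalone sketch of the padding logic is shown below. It assumes `LINE_NUMBER` names the line-number column of the outputs DataFrame (a stand-in for the SDK's internal constant); the newly added early return skips the scan entirely when no padding is needed.

```python
import pandas as pd

LINE_NUMBER = "line_number"  # assumption: stands in for the SDK's LINE_NUMBER constant


def outputs_padding(df: pd.DataFrame, expected_rows: int) -> pd.DataFrame:
    """Pad the outputs table so every expected line number appears exactly once."""
    if len(df) == expected_rows:
        # Fast path added in this commit: every expected line already has a row.
        return df
    lines_set = set(df[LINE_NUMBER].values)
    missing_lines = [{LINE_NUMBER: i} for i in range(expected_rows) if i not in lines_set]
    df_to_append = pd.DataFrame(missing_lines)
    res = pd.concat([df, df_to_append], ignore_index=True)
    return res.sort_values(by=LINE_NUMBER, ascending=True)


# Example: only lines 0 and 2 produced output; lines 1 and 3 get empty padding rows.
partial = pd.DataFrame([{LINE_NUMBER: 0, "output": "a"}, {LINE_NUMBER: 2, "output": "b"}])
print(outputs_padding(partial, expected_rows=4))
```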

src/promptflow/promptflow/_sdk/operations/_run_operations.py (+5 −2)

@@ -22,7 +22,7 @@
 )
 from promptflow._sdk._errors import InvalidRunStatusError, RunExistsError, RunNotFoundError, RunOperationParameterError
 from promptflow._sdk._orm import RunInfo as ORMRun
-from promptflow._sdk._utils import incremental_print, safe_parse_object_list
+from promptflow._sdk._utils import incremental_print, print_red_error, safe_parse_object_list
 from promptflow._sdk._visualize_functions import dump_html, generate_html_string
 from promptflow._sdk.entities import Run
 from promptflow._sdk.operations._local_storage_operations import LocalStorageOperations
@@ -140,7 +140,10 @@ def stream(self, name: Union[str, Run]) -> Run:
             available_logs = local_storage.logger.get_logs()
             incremental_print(available_logs, printed, file_handler)
             self._print_run_summary(run)
-            # won't print error here, put it in run dict
+            # print error message when run is failed
+            if run.status == RunStatus.FAILED:
+                error_message = local_storage.load_exception()["message"]
+                print_red_error(error_message)
         except KeyboardInterrupt:
             error_message = "The output streaming for the run was interrupted, but the run is still executing."
             print(error_message)
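The reduced sketch below isolates the new failure branch in `stream()`. The real `print_red_error` lives in `promptflow._sdk._utils`, so the ANSI-based stand-in here is only an assumption for illustration, and `load_exception()` is assumed to return the dict that `LocalStorageOperations` persists (the tests below check its `message` and `code` fields).

```python
# Reduced sketch of the new branch in stream(); not the full method from the SDK.

def print_red_error_stand_in(message: str) -> None:
    # Assumption: print_red_error renders the message in red; plain ANSI codes are used here.
    print(f"\033[91m{message}\033[0m")


def print_error_if_failed(run, local_storage) -> None:
    # Only failed runs get their persisted error message echoed to the console.
    if run.status == "Failed":  # the SDK compares against RunStatus.FAILED
        error_message = local_storage.load_exception()["message"]
        print_red_error_stand_in(error_message)
```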

src/promptflow/tests/sdk_cli_test/e2etests/test_flow_run.py (+26 −16)

@@ -58,6 +58,23 @@ def create_run_against_run(client, run: Run) -> Run:
     )


+def assert_run_with_invalid_column_mapping(client: PFClient, run: Run, capfd: pytest.CaptureFixture) -> None:
+    assert run.status == RunStatus.FAILED
+
+    expected_error_message = "The input for batch run is incorrect. Couldn't find these mapping relations"
+
+    client.stream(run.name)
+    out, _ = capfd.readouterr()
+    assert expected_error_message in out
+
+    local_storage = LocalStorageOperations(run)
+    assert os.path.exists(local_storage._exception_path)
+
+    exception = local_storage.load_exception()
+    assert expected_error_message in exception["message"]
+    assert exception["code"] == "BulkRunException"
+
+
 @pytest.mark.usefixtures("use_secrets_config_file", "setup_local_connection", "install_custom_tool_pkg")
 @pytest.mark.sdk_test
 @pytest.mark.e2etest
@@ -321,7 +338,7 @@ def test_run_reference_failed_run(self, pf):
         with pytest.raises(RunNotFoundError):
             pf.runs.get(name=run_name)

-    def test_referenced_output_not_exist(self, pf):
+    def test_referenced_output_not_exist(self, pf: PFClient, capfd: pytest.CaptureFixture) -> None:
         # failed run won't generate output
         failed_run = pf.run(
             flow=f"{FLOWS_DIR}/failed_flow",
@@ -336,13 +353,7 @@ def test_referenced_output_not_exist(self, pf):
             flow=f"{FLOWS_DIR}/failed_flow",
             column_mapping={"text": "${run.outputs.text}"},
         )
-
-        local_storage = LocalStorageOperations(run)
-        assert os.path.exists(local_storage._exception_path)
-
-        exception = local_storage.load_exception()
-        assert "The input for batch run is incorrect. Couldn't find these mapping relations" in exception["message"]
-        assert exception["code"] == "BulkRunException"
+        assert_run_with_invalid_column_mapping(pf, run, capfd)

     def test_connection_overwrite_file(self, local_client, local_aoai_connection):
         run = create_yaml_run(
@@ -650,7 +661,12 @@ def test_flow_bulk_run_with_additional_includes(self, azure_open_ai_connection:
         additional_includes = _get_additional_includes(snapshot_path / "flow.dag.yaml")
         assert not additional_includes

-    def test_input_mapping_source_not_found_error(self, azure_open_ai_connection: AzureOpenAIConnection, pf):
+    def test_input_mapping_source_not_found_error(
+        self,
+        azure_open_ai_connection: AzureOpenAIConnection,
+        pf: PFClient,
+        capfd: pytest.CaptureFixture,
+    ):
         # input_mapping source not found error won't create run
         name = str(uuid.uuid4())
         data_path = f"{DATAS_DIR}/webClassification3.jsonl"
@@ -660,13 +676,7 @@ def test_input_mapping_source_not_found_error(self, azure_open_ai_connection: Az
             column_mapping={"not_exist": "${data.not_exist_key}"},
             name=name,
         )
-
-        local_storage = LocalStorageOperations(run)
-        assert os.path.exists(local_storage._exception_path)
-
-        exception = local_storage.load_exception()
-        assert "The input for batch run is incorrect. Couldn't find these mapping relations" in exception["message"]
-        assert exception["code"] == "BulkRunException"
+        assert_run_with_invalid_column_mapping(pf, run, capfd)

     def test_input_mapping_with_dict(self, azure_open_ai_connection: AzureOpenAIConnection, pf):
         data_path = f"{DATAS_DIR}/webClassification3.jsonl"
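The test refactor above moves the duplicated assertions into `assert_run_with_invalid_column_mapping`, which relies on pytest's `capfd` fixture to capture what `client.stream(...)` writes to stdout. A minimal, self-contained sketch of that capture pattern follows; the `emit` helper is hypothetical and only stands in for code that prints to stdout.

```python
import pytest


def emit(message: str) -> None:
    # Hypothetical stand-in for code that writes to stdout, like PFClient.stream().
    print(message)


def test_captures_streamed_error(capfd: pytest.CaptureFixture) -> None:
    emit("The input for batch run is incorrect. Couldn't find these mapping relations")
    out, _ = capfd.readouterr()  # capfd captures output written at the file-descriptor level
    assert "Couldn't find these mapping relations" in out
```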
