Push on main #13150
3 errors, 22 fail, 2 skipped, 152 pass in 5m 14s
179 tests 152 ✅ 5m 14s ⏱️
1 suites 2 💤
1 files 22 ❌ 3 🔥
Results for commit 3af352f.
Annotations
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
tests.executor.e2etests.test_batch_server with error
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
collection failure
ImportError while importing test module '/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_batch_server.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/_pytest/python.py:520: in importtestmodule
mod = import_path(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/_pytest/pathlib.py:584: in import_path
importlib.import_module(module_name)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/importlib/__init__.py:127: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
<frozen importlib._bootstrap>:1030: in _gcd_import
???
<frozen importlib._bootstrap>:1007: in _find_and_load
???
<frozen importlib._bootstrap>:986: in _find_and_load_unlocked
???
<frozen importlib._bootstrap>:680: in _load_unlocked
???
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/_pytest/assertion/rewrite.py:178: in exec_module
exec(co, module.__dict__)
src/promptflow/tests/executor/e2etests/test_batch_server.py:9: in <module>
from promptflow._proxy import AbstractExecutorProxy, ProxyFactory
E ImportError: cannot import name 'AbstractExecutorProxy' from 'promptflow._proxy' (/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/_proxy/__init__.py)
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
tests.executor.e2etests.test_csharp_executor_proxy with error
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
collection failure
ImportError while importing test module '/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_csharp_executor_proxy.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/_pytest/python.py:520: in importtestmodule
mod = import_path(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/_pytest/pathlib.py:584: in import_path
importlib.import_module(module_name)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/importlib/__init__.py:127: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
<frozen importlib._bootstrap>:1030: in _gcd_import
???
<frozen importlib._bootstrap>:1007: in _find_and_load
???
<frozen importlib._bootstrap>:986: in _find_and_load_unlocked
???
<frozen importlib._bootstrap>:680: in _load_unlocked
???
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/_pytest/assertion/rewrite.py:178: in exec_module
exec(co, module.__dict__)
src/promptflow/tests/executor/e2etests/test_csharp_executor_proxy.py:12: in <module>
from promptflow._proxy._csharp_executor_proxy import CSharpExecutorProxy
E ModuleNotFoundError: No module named 'promptflow._proxy._csharp_executor_proxy'
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
tests.executor.e2etests.test_image with error
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
collection failure
ImportError while importing test module '/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_image.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/_pytest/python.py:520: in importtestmodule
mod = import_path(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/_pytest/pathlib.py:584: in import_path
importlib.import_module(module_name)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/importlib/__init__.py:127: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
<frozen importlib._bootstrap>:1030: in _gcd_import
???
<frozen importlib._bootstrap>:1007: in _find_and_load
???
<frozen importlib._bootstrap>:986: in _find_and_load_unlocked
???
<frozen importlib._bootstrap>:680: in _load_unlocked
???
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/_pytest/assertion/rewrite.py:178: in exec_module
exec(co, module.__dict__)
src/promptflow/tests/executor/e2etests/test_image.py:8: in <module>
from promptflow._utils.multimedia_utils import MIME_PATTERN, BasicMultimediaProcessor, ImageProcessor
E ImportError: cannot import name 'BasicMultimediaProcessor' from 'promptflow._utils.multimedia_utils' (/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/_utils/multimedia_utils.py)
Check warning on line 0 in tests.executor.e2etests.test_traces.TestExecutorTraces
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_executor_openai_api_flow[openai_chat_api_flow-inputs0] (tests.executor.e2etests.test_traces.TestExecutorTraces) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 1s]
Raw output
assert False is True
self = <executor.e2etests.test_traces.TestExecutorTraces object at 0x7efc81d3c820>
flow_folder = 'openai_chat_api_flow'
inputs = {'chat_history': [], 'question': 'What is the capital of the United States of America?', 'stream': False}
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
@pytest.mark.parametrize(
"flow_folder, inputs",
[
("openai_chat_api_flow", get_chat_input(False)),
("openai_chat_api_flow", get_chat_input(True)),
("openai_completion_api_flow", get_completion_input(False)),
("openai_completion_api_flow", get_completion_input(True)),
("llm_tool", {"topic": "Hello", "stream": False}),
("llm_tool", {"topic": "Hello", "stream": True}),
],
)
def test_executor_openai_api_flow(self, flow_folder, inputs, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
assert "total_tokens" in flow_result.run_info.system_metrics
assert flow_result.run_info.system_metrics["total_tokens"] > 0
get_traced = False
for api_call in flow_result.run_info.api_calls:
get_traced = get_traced or self.validate_openai_apicall(serialize(api_call))
> assert get_traced is True
E assert False is True
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:216: AssertionError
Check warning on line 0 in tests.executor.e2etests.test_traces.TestExecutorTraces
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_executor_openai_api_flow[openai_chat_api_flow-inputs1] (tests.executor.e2etests.test_traces.TestExecutorTraces) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 1s]
Raw output
assert False is True
self = <executor.e2etests.test_traces.TestExecutorTraces object at 0x7efc81bc9100>
flow_folder = 'openai_chat_api_flow'
inputs = {'chat_history': [], 'question': 'What is the capital of the United States of America?', 'stream': True}
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
@pytest.mark.parametrize(
"flow_folder, inputs",
[
("openai_chat_api_flow", get_chat_input(False)),
("openai_chat_api_flow", get_chat_input(True)),
("openai_completion_api_flow", get_completion_input(False)),
("openai_completion_api_flow", get_completion_input(True)),
("llm_tool", {"topic": "Hello", "stream": False}),
("llm_tool", {"topic": "Hello", "stream": True}),
],
)
def test_executor_openai_api_flow(self, flow_folder, inputs, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
assert "total_tokens" in flow_result.run_info.system_metrics
assert flow_result.run_info.system_metrics["total_tokens"] > 0
get_traced = False
for api_call in flow_result.run_info.api_calls:
get_traced = get_traced or self.validate_openai_apicall(serialize(api_call))
> assert get_traced is True
E assert False is True
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:216: AssertionError
Check warning on line 0 in tests.executor.e2etests.test_traces.TestExecutorTraces
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_executor_openai_api_flow[openai_completion_api_flow-inputs2] (tests.executor.e2etests.test_traces.TestExecutorTraces) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert False is True
self = <executor.e2etests.test_traces.TestExecutorTraces object at 0x7efc81bc93a0>
flow_folder = 'openai_completion_api_flow'
inputs = {'prompt': 'What is the capital of the United States of America?', 'stream': False}
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
@pytest.mark.parametrize(
"flow_folder, inputs",
[
("openai_chat_api_flow", get_chat_input(False)),
("openai_chat_api_flow", get_chat_input(True)),
("openai_completion_api_flow", get_completion_input(False)),
("openai_completion_api_flow", get_completion_input(True)),
("llm_tool", {"topic": "Hello", "stream": False}),
("llm_tool", {"topic": "Hello", "stream": True}),
],
)
def test_executor_openai_api_flow(self, flow_folder, inputs, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
assert "total_tokens" in flow_result.run_info.system_metrics
assert flow_result.run_info.system_metrics["total_tokens"] > 0
get_traced = False
for api_call in flow_result.run_info.api_calls:
get_traced = get_traced or self.validate_openai_apicall(serialize(api_call))
> assert get_traced is True
E assert False is True
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:216: AssertionError
Check warning on line 0 in tests.executor.e2etests.test_traces.TestExecutorTraces
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_executor_openai_api_flow[openai_completion_api_flow-inputs3] (tests.executor.e2etests.test_traces.TestExecutorTraces) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert False is True
self = <executor.e2etests.test_traces.TestExecutorTraces object at 0x7efc81bc95b0>
flow_folder = 'openai_completion_api_flow'
inputs = {'prompt': 'What is the capital of the United States of America?', 'stream': True}
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
@pytest.mark.parametrize(
"flow_folder, inputs",
[
("openai_chat_api_flow", get_chat_input(False)),
("openai_chat_api_flow", get_chat_input(True)),
("openai_completion_api_flow", get_completion_input(False)),
("openai_completion_api_flow", get_completion_input(True)),
("llm_tool", {"topic": "Hello", "stream": False}),
("llm_tool", {"topic": "Hello", "stream": True}),
],
)
def test_executor_openai_api_flow(self, flow_folder, inputs, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
assert "total_tokens" in flow_result.run_info.system_metrics
assert flow_result.run_info.system_metrics["total_tokens"] > 0
get_traced = False
for api_call in flow_result.run_info.api_calls:
get_traced = get_traced or self.validate_openai_apicall(serialize(api_call))
> assert get_traced is True
E assert False is True
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:216: AssertionError
Check warning on line 0 in tests.executor.e2etests.test_traces.TestExecutorTraces
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_executor_openai_api_flow[llm_tool-inputs4] (tests.executor.e2etests.test_traces.TestExecutorTraces) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert False is True
self = <executor.e2etests.test_traces.TestExecutorTraces object at 0x7efc81bc9520>
flow_folder = 'llm_tool', inputs = {'stream': False, 'topic': 'Hello'}
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
@pytest.mark.parametrize(
"flow_folder, inputs",
[
("openai_chat_api_flow", get_chat_input(False)),
("openai_chat_api_flow", get_chat_input(True)),
("openai_completion_api_flow", get_completion_input(False)),
("openai_completion_api_flow", get_completion_input(True)),
("llm_tool", {"topic": "Hello", "stream": False}),
("llm_tool", {"topic": "Hello", "stream": True}),
],
)
def test_executor_openai_api_flow(self, flow_folder, inputs, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
assert "total_tokens" in flow_result.run_info.system_metrics
assert flow_result.run_info.system_metrics["total_tokens"] > 0
get_traced = False
for api_call in flow_result.run_info.api_calls:
get_traced = get_traced or self.validate_openai_apicall(serialize(api_call))
> assert get_traced is True
E assert False is True
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:216: AssertionError
Check warning on line 0 in tests.executor.e2etests.test_traces.TestExecutorTraces
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_executor_openai_api_flow[llm_tool-inputs5] (tests.executor.e2etests.test_traces.TestExecutorTraces) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert False is True
self = <executor.e2etests.test_traces.TestExecutorTraces object at 0x7efc81bc96a0>
flow_folder = 'llm_tool', inputs = {'stream': True, 'topic': 'Hello'}
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
@pytest.mark.parametrize(
"flow_folder, inputs",
[
("openai_chat_api_flow", get_chat_input(False)),
("openai_chat_api_flow", get_chat_input(True)),
("openai_completion_api_flow", get_completion_input(False)),
("openai_completion_api_flow", get_completion_input(True)),
("llm_tool", {"topic": "Hello", "stream": False}),
("llm_tool", {"topic": "Hello", "stream": True}),
],
)
def test_executor_openai_api_flow(self, flow_folder, inputs, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
assert "total_tokens" in flow_result.run_info.system_metrics
assert flow_result.run_info.system_metrics["total_tokens"] > 0
get_traced = False
for api_call in flow_result.run_info.api_calls:
get_traced = get_traced or self.validate_openai_apicall(serialize(api_call))
> assert get_traced is True
E assert False is True
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:216: AssertionError
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_otel_trace[flow_with_trace-inputs0-5] (tests.executor.e2etests.test_traces.TestOTelTracer) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 4s]
Raw output
Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
Stacktrace:
Traceback (most recent call last):
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
func(*args, **kwargs)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 416, in assert_otel_traces
self.validate_span_list(span_list, line_run_id, expected_span_length)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
AssertionError: Got 0 spans.
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81ec8a90>
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
flow_file = 'flow_with_trace', inputs = {'user_id': 1}, expected_span_length = 5
@pytest.mark.parametrize(
"flow_file, inputs, expected_span_length",
[
("flow_with_trace", {"user_id": 1}, 5),
("flow_with_trace_async", {"user_id": 1}, 5),
],
)
def test_otel_trace(
self,
dev_connections,
flow_file,
inputs,
expected_span_length,
):
> execute_function_in_subprocess(
self.assert_otel_traces, dev_connections, flow_file, inputs, expected_span_length
)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:404:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces of <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81ec8a90>>
args = ({'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_ba...***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}, 'flow_with_trace', {'user_id': 1}, 5)
kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7efc8c0d3430>
error_queue = <multiprocessing.queues.Queue object at 0x7efc7f75d460>
process = <MockSpawnProcess name='MockSpawnProcess-1' pid=2606 parent=2329 stopped exitcode=0>
err = "AssertionError('Got 0 spans.')"
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...ist\n assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."\nAssertionError: Got 0 spans.\n'
def execute_function_in_subprocess(func, *args, **kwargs):
"""
Execute a function in a new process and return any exception that occurs.
Replace pickle with dill for better serialization capabilities.
"""
ctx = get_context("spawn")
error_queue = ctx.Queue()
process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
process.start()
process.join() # Wait for the process to finish
if not error_queue.empty():
err, stacktrace_str = error_queue.get()
> raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
E Stacktrace:
E Traceback (most recent call last):
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E func(*args, **kwargs)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 416, in assert_otel_traces
E self.validate_span_list(span_list, line_run_id, expected_span_length)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
E assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
E AssertionError: Got 0 spans.
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_otel_trace[flow_with_trace_async-inputs1-5] (tests.executor.e2etests.test_traces.TestOTelTracer) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 4s]
Raw output
Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
Stacktrace:
Traceback (most recent call last):
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
func(*args, **kwargs)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 416, in assert_otel_traces
self.validate_span_list(span_list, line_run_id, expected_span_length)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
AssertionError: Got 0 spans.
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81d64070>
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
flow_file = 'flow_with_trace_async', inputs = {'user_id': 1}
expected_span_length = 5
@pytest.mark.parametrize(
"flow_file, inputs, expected_span_length",
[
("flow_with_trace", {"user_id": 1}, 5),
("flow_with_trace_async", {"user_id": 1}, 5),
],
)
def test_otel_trace(
self,
dev_connections,
flow_file,
inputs,
expected_span_length,
):
> execute_function_in_subprocess(
self.assert_otel_traces, dev_connections, flow_file, inputs, expected_span_length
)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:404:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces of <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81d64070>>
args = ({'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_ba...***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}, 'flow_with_trace_async', {'user_id': 1}, 5)
kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7efc8c0d3430>
error_queue = <multiprocessing.queues.Queue object at 0x7efc7fcfaaf0>
process = <MockSpawnProcess name='MockSpawnProcess-2' pid=2627 parent=2329 stopped exitcode=0>
err = "AssertionError('Got 0 spans.')"
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...ist\n assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."\nAssertionError: Got 0 spans.\n'
def execute_function_in_subprocess(func, *args, **kwargs):
"""
Execute a function in a new process and return any exception that occurs.
Replace pickle with dill for better serialization capabilities.
"""
ctx = get_context("spawn")
error_queue = ctx.Queue()
process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
process.start()
process.join() # Wait for the process to finish
if not error_queue.empty():
err, stacktrace_str = error_queue.get()
> raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
E Stacktrace:
E Traceback (most recent call last):
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E func(*args, **kwargs)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 416, in assert_otel_traces
E self.validate_span_list(span_list, line_run_id, expected_span_length)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
E assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
E AssertionError: Got 0 spans.
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_otel_trace_with_llm[openai_chat_api_flow-inputs0-False-3] (tests.executor.e2etests.test_traces.TestOTelTracer) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 3s]
Raw output
Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
Stacktrace:
Traceback (most recent call last):
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
func(*args, **kwargs)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 498, in assert_otel_traces_with_llm
self.validate_span_list(span_list, line_run_id, expected_span_length)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
AssertionError: Got 0 spans.
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81ec8eb0>
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
flow_file = 'openai_chat_api_flow'
inputs = {'chat_history': [], 'question': 'What is the capital of the United States of America?', 'stream': False}
is_stream = False, expected_span_length = 3
@pytest.mark.parametrize(
"flow_file, inputs, is_stream, expected_span_length",
[
("openai_chat_api_flow", get_chat_input(False), False, 3),
("openai_chat_api_flow", get_chat_input(True), True, 4),
("openai_completion_api_flow", get_completion_input(False), False, 3),
("openai_completion_api_flow", get_completion_input(True), True, 4),
("llm_tool", {"topic": "Hello", "stream": False}, False, 4),
("flow_with_async_llm_tasks", get_flow_sample_inputs("flow_with_async_llm_tasks"), False, 6),
],
)
def test_otel_trace_with_llm(
self,
dev_connections,
flow_file,
inputs,
is_stream,
expected_span_length,
):
> execute_function_in_subprocess(
self.assert_otel_traces_with_llm,
dev_connections,
flow_file,
inputs,
is_stream,
expected_span_length,
)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:481:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces_with_llm of <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81ec8eb0>>
args = ({'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_ba...', {'chat_history': [], 'question': 'What is the capital of the United States of America?', 'stream': False}, False, 3)
kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7efc8c0d3430>
error_queue = <multiprocessing.queues.Queue object at 0x7efc7fee4df0>
process = <MockSpawnProcess name='MockSpawnProcess-4' pid=2799 parent=2329 stopped exitcode=0>
err = "AssertionError('Got 0 spans.')"
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...ist\n assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."\nAssertionError: Got 0 spans.\n'
def execute_function_in_subprocess(func, *args, **kwargs):
"""
Execute a function in a new process and return any exception that occurs.
Replace pickle with dill for better serialization capabilities.
"""
ctx = get_context("spawn")
error_queue = ctx.Queue()
process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
process.start()
process.join() # Wait for the process to finish
if not error_queue.empty():
err, stacktrace_str = error_queue.get()
> raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
E Stacktrace:
E Traceback (most recent call last):
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E func(*args, **kwargs)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 498, in assert_otel_traces_with_llm
E self.validate_span_list(span_list, line_run_id, expected_span_length)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
E assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
E AssertionError: Got 0 spans.
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_otel_trace_with_llm[openai_chat_api_flow-inputs1-True-4] (tests.executor.e2etests.test_traces.TestOTelTracer) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 3s]
Raw output
Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
Stacktrace:
Traceback (most recent call last):
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
func(*args, **kwargs)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 498, in assert_otel_traces_with_llm
self.validate_span_list(span_list, line_run_id, expected_span_length)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
AssertionError: Got 0 spans.
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81f45d00>
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
flow_file = 'openai_chat_api_flow'
inputs = {'chat_history': [], 'question': 'What is the capital of the United States of America?', 'stream': True}
is_stream = True, expected_span_length = 4
@pytest.mark.parametrize(
"flow_file, inputs, is_stream, expected_span_length",
[
("openai_chat_api_flow", get_chat_input(False), False, 3),
("openai_chat_api_flow", get_chat_input(True), True, 4),
("openai_completion_api_flow", get_completion_input(False), False, 3),
("openai_completion_api_flow", get_completion_input(True), True, 4),
("llm_tool", {"topic": "Hello", "stream": False}, False, 4),
("flow_with_async_llm_tasks", get_flow_sample_inputs("flow_with_async_llm_tasks"), False, 6),
],
)
def test_otel_trace_with_llm(
self,
dev_connections,
flow_file,
inputs,
is_stream,
expected_span_length,
):
> execute_function_in_subprocess(
self.assert_otel_traces_with_llm,
dev_connections,
flow_file,
inputs,
is_stream,
expected_span_length,
)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:481:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces_with_llm of <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81f45d00>>
args = ({'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_ba...ow', {'chat_history': [], 'question': 'What is the capital of the United States of America?', 'stream': True}, True, 4)
kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7efc8c0d3430>
error_queue = <multiprocessing.queues.Queue object at 0x7efc7f75d1f0>
process = <MockSpawnProcess name='MockSpawnProcess-5' pid=2880 parent=2329 stopped exitcode=0>
err = "AssertionError('Got 0 spans.')"
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...ist\n assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."\nAssertionError: Got 0 spans.\n'
def execute_function_in_subprocess(func, *args, **kwargs):
"""
Execute a function in a new process and return any exception that occurs.
Replace pickle with dill for better serialization capabilities.
"""
ctx = get_context("spawn")
error_queue = ctx.Queue()
process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
process.start()
process.join() # Wait for the process to finish
if not error_queue.empty():
err, stacktrace_str = error_queue.get()
> raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
E Stacktrace:
E Traceback (most recent call last):
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E func(*args, **kwargs)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 498, in assert_otel_traces_with_llm
E self.validate_span_list(span_list, line_run_id, expected_span_length)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
E assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
E AssertionError: Got 0 spans.
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_otel_trace_with_llm[openai_completion_api_flow-inputs2-False-3] (tests.executor.e2etests.test_traces.TestOTelTracer) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 3s]
Raw output
Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
Stacktrace:
Traceback (most recent call last):
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
func(*args, **kwargs)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 498, in assert_otel_traces_with_llm
self.validate_span_list(span_list, line_run_id, expected_span_length)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
AssertionError: Got 0 spans.
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81eda9d0>
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
flow_file = 'openai_completion_api_flow'
inputs = {'prompt': 'What is the capital of the United States of America?', 'stream': False}
is_stream = False, expected_span_length = 3
@pytest.mark.parametrize(
"flow_file, inputs, is_stream, expected_span_length",
[
("openai_chat_api_flow", get_chat_input(False), False, 3),
("openai_chat_api_flow", get_chat_input(True), True, 4),
("openai_completion_api_flow", get_completion_input(False), False, 3),
("openai_completion_api_flow", get_completion_input(True), True, 4),
("llm_tool", {"topic": "Hello", "stream": False}, False, 4),
("flow_with_async_llm_tasks", get_flow_sample_inputs("flow_with_async_llm_tasks"), False, 6),
],
)
def test_otel_trace_with_llm(
self,
dev_connections,
flow_file,
inputs,
is_stream,
expected_span_length,
):
> execute_function_in_subprocess(
self.assert_otel_traces_with_llm,
dev_connections,
flow_file,
inputs,
is_stream,
expected_span_length,
)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:481:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces_with_llm of <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81eda9d0>>
args = ({'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_ba...ai_completion_api_flow', {'prompt': 'What is the capital of the United States of America?', 'stream': False}, False, 3)
kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7efc8c0d3430>
error_queue = <multiprocessing.queues.Queue object at 0x7efc7f34b880>
process = <MockSpawnProcess name='MockSpawnProcess-6' pid=2910 parent=2329 stopped exitcode=0>
err = "AssertionError('Got 0 spans.')"
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...ist\n assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."\nAssertionError: Got 0 spans.\n'
def execute_function_in_subprocess(func, *args, **kwargs):
"""
Execute a function in a new process and return any exception that occurs.
Replace pickle with dill for better serialization capabilities.
"""
ctx = get_context("spawn")
error_queue = ctx.Queue()
process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
process.start()
process.join() # Wait for the process to finish
if not error_queue.empty():
err, stacktrace_str = error_queue.get()
> raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
E Stacktrace:
E Traceback (most recent call last):
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E func(*args, **kwargs)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 498, in assert_otel_traces_with_llm
E self.validate_span_list(span_list, line_run_id, expected_span_length)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
E assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
E AssertionError: Got 0 spans.
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_otel_trace_with_llm[openai_completion_api_flow-inputs3-True-4] (tests.executor.e2etests.test_traces.TestOTelTracer) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 3s]
Raw output
Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
Stacktrace:
Traceback (most recent call last):
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
func(*args, **kwargs)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 498, in assert_otel_traces_with_llm
self.validate_span_list(span_list, line_run_id, expected_span_length)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
AssertionError: Got 0 spans.
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81eda6d0>
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
flow_file = 'openai_completion_api_flow'
inputs = {'prompt': 'What is the capital of the United States of America?', 'stream': True}
is_stream = True, expected_span_length = 4
@pytest.mark.parametrize(
"flow_file, inputs, is_stream, expected_span_length",
[
("openai_chat_api_flow", get_chat_input(False), False, 3),
("openai_chat_api_flow", get_chat_input(True), True, 4),
("openai_completion_api_flow", get_completion_input(False), False, 3),
("openai_completion_api_flow", get_completion_input(True), True, 4),
("llm_tool", {"topic": "Hello", "stream": False}, False, 4),
("flow_with_async_llm_tasks", get_flow_sample_inputs("flow_with_async_llm_tasks"), False, 6),
],
)
def test_otel_trace_with_llm(
self,
dev_connections,
flow_file,
inputs,
is_stream,
expected_span_length,
):
> execute_function_in_subprocess(
self.assert_otel_traces_with_llm,
dev_connections,
flow_file,
inputs,
is_stream,
expected_span_length,
)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:481:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces_with_llm of <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81eda6d0>>
args = ({'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_ba...enai_completion_api_flow', {'prompt': 'What is the capital of the United States of America?', 'stream': True}, True, 4)
kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7efc8c0d3430>
error_queue = <multiprocessing.queues.Queue object at 0x7efc7fe81d30>
process = <MockSpawnProcess name='MockSpawnProcess-7' pid=2982 parent=2329 stopped exitcode=0>
err = "AssertionError('Got 0 spans.')"
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...ist\n assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."\nAssertionError: Got 0 spans.\n'
def execute_function_in_subprocess(func, *args, **kwargs):
"""
Execute a function in a new process and return any exception that occurs.
Replace pickle with dill for better serialization capabilities.
"""
ctx = get_context("spawn")
error_queue = ctx.Queue()
process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
process.start()
process.join() # Wait for the process to finish
if not error_queue.empty():
err, stacktrace_str = error_queue.get()
> raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
E Stacktrace:
E Traceback (most recent call last):
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E func(*args, **kwargs)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 498, in assert_otel_traces_with_llm
E self.validate_span_list(span_list, line_run_id, expected_span_length)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
E assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
E AssertionError: Got 0 spans.
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_otel_trace_with_llm[llm_tool-inputs4-False-4] (tests.executor.e2etests.test_traces.TestOTelTracer) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 3s]
Raw output
Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
Stacktrace:
Traceback (most recent call last):
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
func(*args, **kwargs)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 498, in assert_otel_traces_with_llm
self.validate_span_list(span_list, line_run_id, expected_span_length)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
AssertionError: Got 0 spans.
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81eda2b0>
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
flow_file = 'llm_tool', inputs = {'stream': False, 'topic': 'Hello'}
is_stream = False, expected_span_length = 4
@pytest.mark.parametrize(
"flow_file, inputs, is_stream, expected_span_length",
[
("openai_chat_api_flow", get_chat_input(False), False, 3),
("openai_chat_api_flow", get_chat_input(True), True, 4),
("openai_completion_api_flow", get_completion_input(False), False, 3),
("openai_completion_api_flow", get_completion_input(True), True, 4),
("llm_tool", {"topic": "Hello", "stream": False}, False, 4),
("flow_with_async_llm_tasks", get_flow_sample_inputs("flow_with_async_llm_tasks"), False, 6),
],
)
def test_otel_trace_with_llm(
self,
dev_connections,
flow_file,
inputs,
is_stream,
expected_span_length,
):
> execute_function_in_subprocess(
self.assert_otel_traces_with_llm,
dev_connections,
flow_file,
inputs,
is_stream,
expected_span_length,
)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:481:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces_with_llm of <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81eda2b0>>
args = ({'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_ba...type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}, 'llm_tool', {'stream': False, 'topic': 'Hello'}, False, 4)
kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7efc8c0d3430>
error_queue = <multiprocessing.queues.Queue object at 0x7efc7fd18be0>
process = <MockSpawnProcess name='MockSpawnProcess-8' pid=3030 parent=2329 stopped exitcode=0>
err = "AssertionError('Got 0 spans.')"
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...ist\n assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."\nAssertionError: Got 0 spans.\n'
def execute_function_in_subprocess(func, *args, **kwargs):
"""
Execute a function in a new process and return any exception that occurs.
Replace pickle with dill for better serialization capabilities.
"""
ctx = get_context("spawn")
error_queue = ctx.Queue()
process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
process.start()
process.join() # Wait for the process to finish
if not error_queue.empty():
err, stacktrace_str = error_queue.get()
> raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
E Stacktrace:
E Traceback (most recent call last):
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E func(*args, **kwargs)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 498, in assert_otel_traces_with_llm
E self.validate_span_list(span_list, line_run_id, expected_span_length)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
E assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
E AssertionError: Got 0 spans.
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_otel_trace_with_llm[flow_with_async_llm_tasks-inputs5-False-6] (tests.executor.e2etests.test_traces.TestOTelTracer) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 4s]
Raw output
Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
Stacktrace:
Traceback (most recent call last):
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
func(*args, **kwargs)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 498, in assert_otel_traces_with_llm
self.validate_span_list(span_list, line_run_id, expected_span_length)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
AssertionError: Got 0 spans.
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81eda3a0>
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
flow_file = 'flow_with_async_llm_tasks'
inputs = {'chat_history': [], 'models': ['gpt-35-turbo', 'gpt-35-turbo'], 'question': 'What is the capital of the United States of America?'}
is_stream = False, expected_span_length = 6
@pytest.mark.parametrize(
"flow_file, inputs, is_stream, expected_span_length",
[
("openai_chat_api_flow", get_chat_input(False), False, 3),
("openai_chat_api_flow", get_chat_input(True), True, 4),
("openai_completion_api_flow", get_completion_input(False), False, 3),
("openai_completion_api_flow", get_completion_input(True), True, 4),
("llm_tool", {"topic": "Hello", "stream": False}, False, 4),
("flow_with_async_llm_tasks", get_flow_sample_inputs("flow_with_async_llm_tasks"), False, 6),
],
)
def test_otel_trace_with_llm(
self,
dev_connections,
flow_file,
inputs,
is_stream,
expected_span_length,
):
> execute_function_in_subprocess(
self.assert_otel_traces_with_llm,
dev_connections,
flow_file,
inputs,
is_stream,
expected_span_length,
)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:481:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces_with_llm of <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81eda3a0>>
args = ({'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_ba...dels': ['gpt-35-turbo', 'gpt-35-turbo'], 'question': 'What is the capital of the United States of America?'}, False, 6)
kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7efc8c0d3430>
error_queue = <multiprocessing.queues.Queue object at 0x7efc7fd18dc0>
process = <MockSpawnProcess name='MockSpawnProcess-9' pid=3158 parent=2329 stopped exitcode=0>
err = "AssertionError('Got 0 spans.')"
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...ist\n assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."\nAssertionError: Got 0 spans.\n'
def execute_function_in_subprocess(func, *args, **kwargs):
"""
Execute a function in a new process and return any exception that occurs.
Replace pickle with dill for better serialization capabilities.
"""
ctx = get_context("spawn")
error_queue = ctx.Queue()
process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
process.start()
process.join() # Wait for the process to finish
if not error_queue.empty():
err, stacktrace_str = error_queue.get()
> raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
E Stacktrace:
E Traceback (most recent call last):
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E func(*args, **kwargs)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 498, in assert_otel_traces_with_llm
E self.validate_span_list(span_list, line_run_id, expected_span_length)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
E assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
E AssertionError: Got 0 spans.
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_otel_trace_with_embedding[openai_embedding_api_flow-inputs0-3] (tests.executor.e2etests.test_traces.TestOTelTracer) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 4s]
Raw output
Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
Stacktrace:
Traceback (most recent call last):
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
func(*args, **kwargs)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 535, in assert_otel_traces_with_embedding
self.validate_span_list(span_list, line_run_id, expected_span_length)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
AssertionError: Got 0 spans.
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81eda100>
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
flow_file = 'openai_embedding_api_flow', inputs = {'input': 'Hello'}
expected_span_length = 3
@pytest.mark.parametrize(
"flow_file, inputs, expected_span_length",
[
("openai_embedding_api_flow", {"input": "Hello"}, 3),
# [9906] is the tokenized version of "Hello"
("openai_embedding_api_flow_with_token", {"input": [9906]}, 3),
],
)
def test_otel_trace_with_embedding(
self,
dev_connections,
flow_file,
inputs,
expected_span_length,
):
> execute_function_in_subprocess(
self.assert_otel_traces_with_embedding,
dev_connections,
flow_file,
inputs,
expected_span_length,
)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:519:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces_with_embedding of <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81eda100>>
args = ({'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_ba..., 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}, 'openai_embedding_api_flow', {'input': 'Hello'}, 3)
kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7efc8c0d3430>
error_queue = <multiprocessing.queues.Queue object at 0x7efc80fad7c0>
process = <MockSpawnProcess name='MockSpawnProcess-10' pid=3257 parent=2329 stopped exitcode=0>
err = "AssertionError('Got 0 spans.')"
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...ist\n assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."\nAssertionError: Got 0 spans.\n'
def execute_function_in_subprocess(func, *args, **kwargs):
"""
Execute a function in a new process and return any exception that occurs.
Replace pickle with dill for better serialization capabilities.
"""
ctx = get_context("spawn")
error_queue = ctx.Queue()
process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
process.start()
process.join() # Wait for the process to finish
if not error_queue.empty():
err, stacktrace_str = error_queue.get()
> raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
E Stacktrace:
E Traceback (most recent call last):
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E func(*args, **kwargs)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 535, in assert_otel_traces_with_embedding
E self.validate_span_list(span_list, line_run_id, expected_span_length)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
E assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
E AssertionError: Got 0 spans.
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_otel_trace_with_embedding[openai_embedding_api_flow_with_token-inputs1-3] (tests.executor.e2etests.test_traces.TestOTelTracer) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 4s]
Raw output
Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
Stacktrace:
Traceback (most recent call last):
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
func(*args, **kwargs)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 535, in assert_otel_traces_with_embedding
self.validate_span_list(span_list, line_run_id, expected_span_length)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
AssertionError: Got 0 spans.
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81d6beb0>
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': '***REDACTED***', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
flow_file = 'openai_embedding_api_flow_with_token', inputs = {'input': [9906]}
expected_span_length = 3
@pytest.mark.parametrize(
"flow_file, inputs, expected_span_length",
[
("openai_embedding_api_flow", {"input": "Hello"}, 3),
# [9906] is the tokenized version of "Hello"
("openai_embedding_api_flow_with_token", {"input": [9906]}, 3),
],
)
def test_otel_trace_with_embedding(
self,
dev_connections,
flow_file,
inputs,
expected_span_length,
):
> execute_function_in_subprocess(
self.assert_otel_traces_with_embedding,
dev_connections,
flow_file,
inputs,
expected_span_length,
)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:519:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces_with_embedding of <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81d6beb0>>
args = ({'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_ba...e': 'azure', 'api_version': '2023-07-01-preview'}}, ...}, 'openai_embedding_api_flow_with_token', {'input': [9906]}, 3)
kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7efc8c0d3430>
error_queue = <multiprocessing.queues.Queue object at 0x7efc7f7f73a0>
process = <MockSpawnProcess name='MockSpawnProcess-11' pid=3640 parent=2329 stopped exitcode=0>
err = "AssertionError('Got 0 spans.')"
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...ist\n assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."\nAssertionError: Got 0 spans.\n'
def execute_function_in_subprocess(func, *args, **kwargs):
"""
Execute a function in a new process and return any exception that occurs.
Replace pickle with dill for better serialization capabilities.
"""
ctx = get_context("spawn")
error_queue = ctx.Queue()
process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
process.start()
process.join() # Wait for the process to finish
if not error_queue.empty():
err, stacktrace_str = error_queue.get()
> raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
E Stacktrace:
E Traceback (most recent call last):
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E func(*args, **kwargs)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 535, in assert_otel_traces_with_embedding
E self.validate_span_list(span_list, line_run_id, expected_span_length)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 667, in validate_span_list
E assert len(span_list) == expected_span_length, f"Got {len(span_list)} spans."
E AssertionError: Got 0 spans.
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_flow_with_traced_function (tests.executor.e2etests.test_traces.TestOTelTracer) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 4s]
Raw output
Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
Stacktrace:
Traceback (most recent call last):
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
func(*args, **kwargs)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 562, in assert_otel_traces_run_flow_then_traced_function
assert len(span_list) == 7, f"Got {len(span_list)} spans." # 4 + 1 + 2 spans in total
AssertionError: Got 0 spans.
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81d6b280>
def test_flow_with_traced_function(self):
> execute_function_in_subprocess(self.assert_otel_traces_run_flow_then_traced_function)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:551:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces_run_flow_then_traced_function of <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81d6b280>>
args = (), kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7efc8c0d3430>
error_queue = <multiprocessing.queues.Queue object at 0x7efc7f7bc280>
process = <MockSpawnProcess name='MockSpawnProcess-12' pid=3974 parent=2329 stopped exitcode=0>
err = "AssertionError('Got 0 spans.')"
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...assert len(span_list) == 7, f"Got {len(span_list)} spans." # 4 + 1 + 2 spans in total\nAssertionError: Got 0 spans.\n'
def execute_function_in_subprocess(func, *args, **kwargs):
"""
Execute a function in a new process and return any exception that occurs.
Replace pickle with dill for better serialization capabilities.
"""
ctx = get_context("spawn")
error_queue = ctx.Queue()
process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
process.start()
process.join() # Wait for the process to finish
if not error_queue.empty():
err, stacktrace_str = error_queue.get()
> raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E Exception: An error occurred in the subprocess: AssertionError('Got 0 spans.')
E Stacktrace:
E Traceback (most recent call last):
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E func(*args, **kwargs)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 562, in assert_otel_traces_run_flow_then_traced_function
E assert len(span_list) == 7, f"Got {len(span_list)} spans." # 4 + 1 + 2 spans in total
E AssertionError: Got 0 spans.
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_otel_trace_with_batch (tests.executor.e2etests.test_traces.TestOTelTracer) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 13s]
Raw output
Exception: An error occurred in the subprocess: AttributeError("'FlowRunInfo' object has no attribute 'otel_trace_id'")
Stacktrace:
Traceback (most recent call last):
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
func(*args, **kwargs)
File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 658, in assert_otel_traces_with_batch
assert f"0x{int(run_info.otel_trace_id, 16):032x}" in trace_ids
AttributeError: 'FlowRunInfo' object has no attribute 'otel_trace_id'
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81d6b8e0>
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': 'ac21d4a97d044b28990a740132f40d35', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
def test_otel_trace_with_batch(self, dev_connections):
flow_file = "flow_with_trace"
> execute_function_in_subprocess(self.assert_otel_traces_with_batch, dev_connections, flow_file)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:601:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces_with_batch of <executor.e2etests.test_traces.TestOTelTracer object at 0x7efc81d6b8e0>>
args = ({'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_ba...ac21d4a97d044b28990a740132f40d35', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}, 'flow_with_trace')
kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7efc8c0d3430>
error_queue = <multiprocessing.queues.Queue object at 0x7efc7f3391f0>
process = <MockSpawnProcess name='MockSpawnProcess-13' pid=4292 parent=2329 stopped exitcode=0>
err = 'AttributeError("\'FlowRunInfo\' object has no attribute \'otel_trace_id\'")'
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...fo.otel_trace_id, 16):032x}" in trace_ids\nAttributeError: \'FlowRunInfo\' object has no attribute \'otel_trace_id\'\n'
def execute_function_in_subprocess(func, *args, **kwargs):
"""
Execute a function in a new process and return any exception that occurs.
Replace pickle with dill for better serialization capabilities.
"""
ctx = get_context("spawn")
error_queue = ctx.Queue()
process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
process.start()
process.join() # Wait for the process to finish
if not error_queue.empty():
err, stacktrace_str = error_queue.get()
> raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E Exception: An error occurred in the subprocess: AttributeError("'FlowRunInfo' object has no attribute 'otel_trace_id'")
E Stacktrace:
E Traceback (most recent call last):
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E func(*args, **kwargs)
E File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 658, in assert_otel_traces_with_batch
E assert f"0x{int(run_info.otel_trace_id, 16):032x}" in trace_ids
E AttributeError: 'FlowRunInfo' object has no attribute 'otel_trace_id'
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
Check warning on line 0 in tests.executor.e2etests.test_eager_flow.TestEagerFlow
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_batch_run[basic_callable_class-inputs_mapping2-<lambda>-init_kwargs2] (tests.executor.e2etests.test_eager_flow.TestEagerFlow) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
promptflow._core.tool_meta_generator.PythonLoadError: Failed to load python function 'MyFlow' from file 'simple_callable_class'.
self = <executor.e2etests.test_eager_flow.TestEagerFlow object at 0x7f42becb9490>
flow_folder = 'basic_callable_class'
inputs_mapping = {'func_input': '${data.func_input}'}
ensure_output = <function TestEagerFlow.<lambda> at 0x7f42bed91700>
init_kwargs = {'obj_input': 'obj_input'}
@pytest.mark.parametrize(
"flow_folder, inputs_mapping, ensure_output, init_kwargs",
[
(
"dummy_flow_with_trace",
{"text": "${data.text}", "models": "${data.models}"},
lambda x: "output" in x and x["output"] == "dummy_output",
None,
),
(
"flow_with_dataclass_output",
{"text": "${data.text}", "models": "${data.models}"},
lambda x: x["text"] == "text" and isinstance(x["models"], list),
None,
),
(
"basic_callable_class",
{"func_input": "${data.func_input}"},
lambda x: x["obj_input"] == "obj_input" and x["func_input"] == "func_input",
{"obj_input": "obj_input"},
),
],
)
def test_batch_run(self, flow_folder, inputs_mapping, ensure_output, init_kwargs):
batch_engine = BatchEngine(
get_yaml_file(flow_folder, root=EAGER_FLOW_ROOT),
get_flow_folder(flow_folder, root=EAGER_FLOW_ROOT),
init_kwargs=init_kwargs,
)
input_dirs = {"data": get_flow_inputs_file(flow_folder, root=EAGER_FLOW_ROOT)}
output_dir = Path(mkdtemp())
> batch_result = batch_engine.run(input_dirs, inputs_mapping, output_dir)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_eager_flow.py:75:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/batch/_batch_engine.py:219: in run
raise e
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/batch/_batch_engine.py:166: in run
self._executor_proxy = ProxyFactory().create_executor_proxy(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/_proxy/_proxy_factory.py:46: in create_executor_proxy
return async_run_allowing_running_loop(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/_utils/async_utils.py:40: in async_run_allowing_running_loop
return asyncio.run(async_func(*args, **kwargs))
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/asyncio/runners.py:44: in run
return loop.run_until_complete(main)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/asyncio/base_events.py:647: in run_until_complete
return future.result()
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/batch/_python_executor_proxy.py:59: in create
flow_executor = FlowExecutor.create(flow_file, connections, working_dir, storage=storage, raise_ex=False)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/flow_executor.py:207: in create
return ScriptExecutor(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/_script_executor.py:41: in __init__
self._initialize_function()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <promptflow.executor._script_executor.ScriptExecutor object at 0x7f42a0bf2430>
def _initialize_function(self):
module_name, func_name = self._parse_flow_file()
module = importlib.import_module(module_name)
func = getattr(module, func_name, None)
if func is None or not inspect.isfunction(func):
> raise PythonLoadError(
message_format="Failed to load python function '{func_name}' from file '{module_name}'.",
func_name=func_name,
module_name=module_name,
)
E promptflow._core.tool_meta_generator.PythonLoadError: Failed to load python function 'MyFlow' from file 'simple_callable_class'.
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/_script_executor.py:133: PythonLoadError
Check warning on line 0 in tests.executor.e2etests.test_eager_flow.TestEagerFlow
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_batch_run_with_init_multiple_workers[1-<lambda>] (tests.executor.e2etests.test_eager_flow.TestEagerFlow) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
promptflow._core.tool_meta_generator.PythonLoadError: Failed to load python function 'MyFlow' from file 'simple_callable_class'.
self = <executor.e2etests.test_eager_flow.TestEagerFlow object at 0x7f42bebe24c0>
worker_count = 1
ensure_output = <function TestEagerFlow.<lambda> at 0x7f42bed918b0>
@pytest.mark.parametrize(
"worker_count, ensure_output",
[
# batch run with 1 worker
# obj id in each line run should be the same
(
1,
lambda outputs: len(outputs) == 4 and outputs[0]["obj_id"] == outputs[1]["obj_id"],
),
# batch run with 2 workers
(
2,
# there will be at most 2 instances be created.
lambda outputs: len(outputs) == 4 and len(set([o["obj_id"] for o in outputs])) <= 2,
),
],
)
def test_batch_run_with_init_multiple_workers(self, worker_count, ensure_output):
flow_folder = "basic_callable_class"
init_kwargs = {"obj_input": "obj_input"}
input_dirs = {"data": get_flow_inputs_file(flow_folder, root=EAGER_FLOW_ROOT)}
output_dir = Path(mkdtemp())
batch_engine = BatchEngine(
get_yaml_file(flow_folder, root=EAGER_FLOW_ROOT),
get_flow_folder(flow_folder, root=EAGER_FLOW_ROOT),
init_kwargs=init_kwargs,
worker_count=worker_count,
)
> batch_engine.run(input_dirs, {"func_input": "${data.func_input}"}, output_dir)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_eager_flow.py:126:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/batch/_batch_engine.py:219: in run
raise e
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/batch/_batch_engine.py:166: in run
self._executor_proxy = ProxyFactory().create_executor_proxy(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/_proxy/_proxy_factory.py:46: in create_executor_proxy
return async_run_allowing_running_loop(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/_utils/async_utils.py:40: in async_run_allowing_running_loop
return asyncio.run(async_func(*args, **kwargs))
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/asyncio/runners.py:44: in run
return loop.run_until_complete(main)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/asyncio/base_events.py:647: in run_until_complete
return future.result()
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/batch/_python_executor_proxy.py:59: in create
flow_executor = FlowExecutor.create(flow_file, connections, working_dir, storage=storage, raise_ex=False)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/flow_executor.py:207: in create
return ScriptExecutor(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/_script_executor.py:41: in __init__
self._initialize_function()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <promptflow.executor._script_executor.ScriptExecutor object at 0x7f42bdb25a30>
def _initialize_function(self):
module_name, func_name = self._parse_flow_file()
module = importlib.import_module(module_name)
func = getattr(module, func_name, None)
if func is None or not inspect.isfunction(func):
> raise PythonLoadError(
message_format="Failed to load python function '{func_name}' from file '{module_name}'.",
func_name=func_name,
module_name=module_name,
)
E promptflow._core.tool_meta_generator.PythonLoadError: Failed to load python function 'MyFlow' from file 'simple_callable_class'.
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/_script_executor.py:133: PythonLoadError
Check warning on line 0 in tests.executor.e2etests.test_eager_flow.TestEagerFlow
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_batch_run_with_init_multiple_workers[2-<lambda>] (tests.executor.e2etests.test_eager_flow.TestEagerFlow) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
promptflow._core.tool_meta_generator.PythonLoadError: Failed to load python function 'MyFlow' from file 'simple_callable_class'.
self = <executor.e2etests.test_eager_flow.TestEagerFlow object at 0x7f42bebe20d0>
worker_count = 2
ensure_output = <function TestEagerFlow.<lambda> at 0x7f42bed91940>
@pytest.mark.parametrize(
"worker_count, ensure_output",
[
# batch run with 1 worker
# obj id in each line run should be the same
(
1,
lambda outputs: len(outputs) == 4 and outputs[0]["obj_id"] == outputs[1]["obj_id"],
),
# batch run with 2 workers
(
2,
# there will be at most 2 instances be created.
lambda outputs: len(outputs) == 4 and len(set([o["obj_id"] for o in outputs])) <= 2,
),
],
)
def test_batch_run_with_init_multiple_workers(self, worker_count, ensure_output):
flow_folder = "basic_callable_class"
init_kwargs = {"obj_input": "obj_input"}
input_dirs = {"data": get_flow_inputs_file(flow_folder, root=EAGER_FLOW_ROOT)}
output_dir = Path(mkdtemp())
batch_engine = BatchEngine(
get_yaml_file(flow_folder, root=EAGER_FLOW_ROOT),
get_flow_folder(flow_folder, root=EAGER_FLOW_ROOT),
init_kwargs=init_kwargs,
worker_count=worker_count,
)
> batch_engine.run(input_dirs, {"func_input": "${data.func_input}"}, output_dir)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_eager_flow.py:126:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/batch/_batch_engine.py:219: in run
raise e
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/batch/_batch_engine.py:166: in run
self._executor_proxy = ProxyFactory().create_executor_proxy(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/_proxy/_proxy_factory.py:46: in create_executor_proxy
return async_run_allowing_running_loop(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/_utils/async_utils.py:40: in async_run_allowing_running_loop
return asyncio.run(async_func(*args, **kwargs))
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/asyncio/runners.py:44: in run
return loop.run_until_complete(main)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/asyncio/base_events.py:647: in run_until_complete
return future.result()
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/batch/_python_executor_proxy.py:59: in create
flow_executor = FlowExecutor.create(flow_file, connections, working_dir, storage=storage, raise_ex=False)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/flow_executor.py:207: in create
return ScriptExecutor(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/_script_executor.py:41: in __init__
self._initialize_function()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <promptflow.executor._script_executor.ScriptExecutor object at 0x7f42bdb26c10>
def _initialize_function(self):
module_name, func_name = self._parse_flow_file()
module = importlib.import_module(module_name)
func = getattr(module, func_name, None)
if func is None or not inspect.isfunction(func):
> raise PythonLoadError(
message_format="Failed to load python function '{func_name}' from file '{module_name}'.",
func_name=func_name,
module_name=module_name,
)
E promptflow._core.tool_meta_generator.PythonLoadError: Failed to load python function 'MyFlow' from file 'simple_callable_class'.
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/_script_executor.py:133: PythonLoadError
Check warning on line 0 in tests.executor.e2etests.test_batch_engine.TestBatch
github-actions / Executor E2E Test Result [main](https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:main++)
test_batch_resume_aggregation_with_image[eval_flow_with_image_resume-eval_flow_with_image_resume_default_20240305_111258_103000] (tests.executor.e2etests.test_batch_engine.TestBatch) failed
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 6s]
Raw output
assert 2 == 1
self = <executor.e2etests.test_batch_engine.TestBatch object at 0x7fa14ad8c880>
flow_folder = 'eval_flow_with_image_resume'
resume_from_run_name = 'eval_flow_with_image_resume_default_20240305_111258_103000'
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas....com/', 'api_key': 'ac21d4a97d044b28990a740132f40d35', 'api_type': 'azure', 'api_version': '2023-07-01-preview'}}, ...}
@pytest.mark.parametrize(
"flow_folder, resume_from_run_name",
[("eval_flow_with_image_resume", "eval_flow_with_image_resume_default_20240305_111258_103000")],
)
def test_batch_resume_aggregation_with_image(self, flow_folder, resume_from_run_name, dev_connections):
run_storage = LocalStorageOperations(Run(flow="eval_flow_with_image_resume"))
batch_engine = BatchEngine(
get_yaml_file(flow_folder),
get_flow_folder(flow_folder),
connections=dev_connections,
storage=run_storage,
)
input_dirs = {"data": get_flow_inputs_file(flow_folder, file_name="data.jsonl")}
output_dir = Path(mkdtemp())
inputs_mapping = {"input_image": "${data.input_image}"}
run_folder = RUNS_ROOT / resume_from_run_name
mock_resume_from_run = MockRun(resume_from_run_name, run_folder)
resume_from_run_storage = LocalStorageOperations(mock_resume_from_run)
resume_from_run_output_dir = resume_from_run_storage.outputs_folder
resume_run_id = mock_resume_from_run.name + "_resume"
resume_run_batch_results = batch_engine.run(
input_dirs,
inputs_mapping,
output_dir,
resume_run_id,
resume_from_run_storage=resume_from_run_storage,
resume_from_run_output_dir=resume_from_run_output_dir,
)
nlines = 3
assert resume_run_batch_results.total_lines == nlines
assert resume_run_batch_results.completed_lines == nlines
assert resume_run_batch_results.metrics == {"image_count": 3}
jsonl_files = glob.glob(os.path.join(run_storage._run_infos_folder, "*.jsonl"))
for file_path in jsonl_files:
contents = load_jsonl(file_path)
for content in contents:
assert content["run_info"]["root_run_id"] == resume_run_id
status_summary = {f"__pf__.nodes.{k}": v for k, v in resume_run_batch_results.node_status.items()}
assert status_summary["__pf__.nodes.flip_image.completed"] == 3
> assert status_summary["__pf__.nodes.count_image.completed"] == 1
E assert 2 == 1
/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_batch_engine.py:512: AssertionError