Skip to content

Commit 04811b5

Browse files
authored
fix(openai): set user messages as prompts, not completions (#2781)
1 parent eb60d42 commit 04811b5

File tree

2 files changed

+61
-29
lines changed

2 files changed

+61
-29
lines changed

packages/opentelemetry-instrumentation-openai/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py

+22-14
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,7 @@ def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):
106106
start_time=run.get("start_time"),
107107
)
108108

109-
i = 0
109+
prompt_index = 0
110110
if assistants.get(run["assistant_id"]) is not None or Config.enrich_assistant:
111111
if Config.enrich_assistant:
112112
assistant = model_as_dict(
@@ -131,27 +131,35 @@ def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):
131131
SpanAttributes.LLM_RESPONSE_MODEL,
132132
assistant["model"],
133133
)
134-
_set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system")
134+
_set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", "system")
135135
_set_span_attribute(
136136
span,
137-
f"{SpanAttributes.LLM_PROMPTS}.{i}.content",
137+
f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
138138
assistant["instructions"],
139139
)
140-
i += 1
141-
_set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system")
140+
prompt_index += 1
141+
_set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", "system")
142142
_set_span_attribute(
143-
span, f"{SpanAttributes.LLM_PROMPTS}.{i}.content", run["instructions"]
143+
span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content", run["instructions"]
144144
)
145+
prompt_index += 1
145146

146-
for i, msg in enumerate(messages):
147-
prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{i}"
147+
completion_index = 0
148+
for msg in messages:
149+
prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}"
148150
content = msg.get("content")
149151

150-
_set_span_attribute(span, f"{prefix}.role", msg.get("role"))
151-
_set_span_attribute(
152-
span, f"{prefix}.content", content[0].get("text").get("value")
153-
)
154-
_set_span_attribute(span, f"gen_ai.response.{i}.id", msg.get("id"))
152+
message_content = content[0].get("text").get("value")
153+
message_role = msg.get("role")
154+
if message_role in ["user", "system"]:
155+
_set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", message_role)
156+
_set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content", message_content)
157+
prompt_index += 1
158+
else:
159+
_set_span_attribute(span, f"{prefix}.role", msg.get("role"))
160+
_set_span_attribute(span, f"{prefix}.content", message_content)
161+
_set_span_attribute(span, f"gen_ai.response.{completion_index}.id", msg.get("id"))
162+
completion_index += 1
155163

156164
if run.get("usage"):
157165
usage_dict = model_as_dict(run.get("usage"))
@@ -223,7 +231,7 @@ def runs_create_and_stream_wrapper(tracer, wrapped, instance, args, kwargs):
223231
)
224232

225233
kwargs["event_handler"] = EventHandleWrapper(
226-
original_handler=kwargs["event_handler"], span=span
234+
original_handler=kwargs["event_handler"], span=span,
227235
)
228236

229237
response = wrapped(*args, **kwargs)

packages/opentelemetry-instrumentation-openai/tests/traces/test_assistant.py

+39-15
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,12 @@ def assistant(openai_client):
1818
@pytest.mark.vcr
1919
def test_new_assistant(exporter, openai_client, assistant):
2020
thread = openai_client.beta.threads.create()
21+
user_message = "I need to solve the equation `3x + 11 = 14`. Can you help me?"
2122

2223
openai_client.beta.threads.messages.create(
2324
thread_id=thread.id,
2425
role="user",
25-
content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
26+
content=user_message,
2627
)
2728

2829
run = openai_client.beta.threads.runs.create(
@@ -69,33 +70,40 @@ def test_new_assistant(exporter, openai_client, assistant):
6970
== "Please address the user as Jane Doe. The user has a premium account."
7071
)
7172
assert open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.1.role"] == "system"
73+
assert open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.2.role"] == "user"
74+
assert open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.2.content"] == user_message
7275
assert open_ai_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] == 145
7376
assert open_ai_span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] == 155
7477
assert open_ai_span.attributes[SpanAttributes.LLM_SYSTEM] == "openai"
7578

76-
for idx, message in enumerate(messages.data):
79+
completion_index = 0
80+
for message in messages.data:
81+
if message.role in ["user", "system"]:
82+
continue
7783
assert (
78-
open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{idx}.content"]
84+
open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}.content"]
7985
== message.content[0].text.value
8086
)
8187
assert (
82-
open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{idx}.role"]
88+
open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}.role"]
8389
== message.role
8490
)
8591
assert (
86-
open_ai_span.attributes[f"gen_ai.response.{idx}.id"]
92+
open_ai_span.attributes[f"gen_ai.response.{completion_index}.id"]
8793
== message.id
8894
)
95+
completion_index += 1
8996

9097

9198
@pytest.mark.vcr
9299
def test_new_assistant_with_polling(exporter, openai_client, assistant):
93100
thread = openai_client.beta.threads.create()
101+
user_message = "I need to solve the equation `3x + 11 = 14`. Can you help me?"
94102

95103
openai_client.beta.threads.messages.create(
96104
thread_id=thread.id,
97105
role="user",
98-
content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
106+
content=user_message,
99107
)
100108

101109
run = openai_client.beta.threads.runs.create_and_poll(
@@ -128,30 +136,40 @@ def test_new_assistant_with_polling(exporter, openai_client, assistant):
128136
== "Please address the user as Jane Doe. The user has a premium account."
129137
)
130138
assert open_ai_span.attributes["gen_ai.prompt.1.role"] == "system"
139+
assert open_ai_span.attributes["gen_ai.prompt.2.role"] == "user"
140+
assert open_ai_span.attributes["gen_ai.prompt.2.content"] == user_message
131141
assert open_ai_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] == 374
132142
assert open_ai_span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] == 86
133143
assert open_ai_span.attributes[SpanAttributes.LLM_SYSTEM] == "openai"
134144

135-
for idx, message in enumerate(messages.data):
145+
completion_index = 0
146+
for message in messages.data:
147+
if message.role in ["user", "system"]:
148+
continue
136149
assert (
137-
open_ai_span.attributes[f"gen_ai.completion.{idx}.content"]
150+
open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}.content"]
138151
== message.content[0].text.value
139152
)
140-
assert open_ai_span.attributes[f"gen_ai.completion.{idx}.role"] == message.role
141153
assert (
142-
open_ai_span.attributes[f"gen_ai.response.{idx}.id"]
154+
open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}.role"]
155+
== message.role
156+
)
157+
assert (
158+
open_ai_span.attributes[f"gen_ai.response.{completion_index}.id"]
143159
== message.id
144160
)
161+
completion_index += 1
145162

146163

147164
@pytest.mark.vcr
148165
def test_existing_assistant(exporter, openai_client):
149166
thread = openai_client.beta.threads.create()
167+
user_message = "I need to solve the equation `3x + 11 = 14`. Can you help me?"
150168

151169
openai_client.beta.threads.messages.create(
152170
thread_id=thread.id,
153171
role="user",
154-
content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
172+
content=user_message,
155173
)
156174

157175
run = openai_client.beta.threads.runs.create(
@@ -197,23 +215,29 @@ def test_existing_assistant(exporter, openai_client):
197215
== "Please address the user as Jane Doe. The user has a premium account."
198216
)
199217
assert open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.1.role"] == "system"
218+
assert open_ai_span.attributes["gen_ai.prompt.2.role"] == "user"
219+
assert open_ai_span.attributes["gen_ai.prompt.2.content"] == user_message
200220
assert open_ai_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] == 639
201221
assert open_ai_span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] == 170
202222
assert open_ai_span.attributes[SpanAttributes.LLM_SYSTEM] == "openai"
203223

204-
for idx, message in enumerate(messages.data):
224+
completion_index = 0
225+
for message in messages.data:
226+
if message.role in ["user", "system"]:
227+
continue
205228
assert (
206-
open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{idx}.content"]
229+
open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}.content"]
207230
== message.content[0].text.value
208231
)
209232
assert (
210-
open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{idx}.role"]
233+
open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}.role"]
211234
== message.role
212235
)
213236
assert (
214-
open_ai_span.attributes[f"gen_ai.response.{idx}.id"]
237+
open_ai_span.attributes[f"gen_ai.response.{completion_index}.id"]
215238
== message.id
216239
)
240+
completion_index += 1
217241

218242

219243
@pytest.mark.vcr

0 commit comments

Comments (0)