@@ -18,11 +18,12 @@ def assistant(openai_client):
 @pytest.mark.vcr
 def test_new_assistant(exporter, openai_client, assistant):
     thread = openai_client.beta.threads.create()
+    user_message = "I need to solve the equation `3x + 11 = 14`. Can you help me?"
 
     openai_client.beta.threads.messages.create(
         thread_id=thread.id,
         role="user",
-        content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
+        content=user_message,
     )
 
     run = openai_client.beta.threads.runs.create(
@@ -69,33 +70,40 @@ def test_new_assistant(exporter, openai_client, assistant):
         == "Please address the user as Jane Doe. The user has a premium account."
     )
     assert open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.1.role"] == "system"
+    assert open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.2.role"] == "user"
+    assert open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.2.content"] == user_message
     assert open_ai_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] == 145
     assert open_ai_span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] == 155
     assert open_ai_span.attributes[SpanAttributes.LLM_SYSTEM] == "openai"
 
-    for idx, message in enumerate(messages.data):
+    completion_index = 0
+    for message in messages.data:
+        if message.role in ["user", "system"]:
+            continue
         assert (
-            open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{idx}.content"]
+            open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}.content"]
             == message.content[0].text.value
         )
         assert (
-            open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{idx}.role"]
+            open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}.role"]
             == message.role
         )
         assert (
-            open_ai_span.attributes[f"gen_ai.response.{idx}.id"]
+            open_ai_span.attributes[f"gen_ai.response.{completion_index}.id"]
             == message.id
         )
+        completion_index += 1
 
 
 @pytest.mark.vcr
 def test_new_assistant_with_polling(exporter, openai_client, assistant):
     thread = openai_client.beta.threads.create()
+    user_message = "I need to solve the equation `3x + 11 = 14`. Can you help me?"
 
     openai_client.beta.threads.messages.create(
         thread_id=thread.id,
         role="user",
-        content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
+        content=user_message,
     )
 
     run = openai_client.beta.threads.runs.create_and_poll(
@@ -128,30 +136,40 @@ def test_new_assistant_with_polling(exporter, openai_client, assistant):
         == "Please address the user as Jane Doe. The user has a premium account."
     )
     assert open_ai_span.attributes["gen_ai.prompt.1.role"] == "system"
+    assert open_ai_span.attributes["gen_ai.prompt.2.role"] == "user"
+    assert open_ai_span.attributes["gen_ai.prompt.2.content"] == user_message
     assert open_ai_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] == 374
     assert open_ai_span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] == 86
     assert open_ai_span.attributes[SpanAttributes.LLM_SYSTEM] == "openai"
 
-    for idx, message in enumerate(messages.data):
+    completion_index = 0
+    for message in messages.data:
+        if message.role in ["user", "system"]:
+            continue
         assert (
-            open_ai_span.attributes[f"gen_ai.completion.{idx}.content"]
+            open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}.content"]
             == message.content[0].text.value
         )
-        assert open_ai_span.attributes[f"gen_ai.completion.{idx}.role"] == message.role
         assert (
-            open_ai_span.attributes[f"gen_ai.response.{idx}.id"]
+            open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}.role"]
+            == message.role
+        )
+        assert (
+            open_ai_span.attributes[f"gen_ai.response.{completion_index}.id"]
             == message.id
         )
+        completion_index += 1
 
 
 @pytest.mark.vcr
 def test_existing_assistant(exporter, openai_client):
     thread = openai_client.beta.threads.create()
+    user_message = "I need to solve the equation `3x + 11 = 14`. Can you help me?"
 
     openai_client.beta.threads.messages.create(
         thread_id=thread.id,
         role="user",
-        content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
+        content=user_message,
     )
 
     run = openai_client.beta.threads.runs.create(
@@ -197,23 +215,29 @@ def test_existing_assistant(exporter, openai_client):
         == "Please address the user as Jane Doe. The user has a premium account."
     )
     assert open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.1.role"] == "system"
+    assert open_ai_span.attributes["gen_ai.prompt.2.role"] == "user"
+    assert open_ai_span.attributes["gen_ai.prompt.2.content"] == user_message
     assert open_ai_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] == 639
     assert open_ai_span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] == 170
     assert open_ai_span.attributes[SpanAttributes.LLM_SYSTEM] == "openai"
 
-    for idx, message in enumerate(messages.data):
+    completion_index = 0
+    for message in messages.data:
+        if message.role in ["user", "system"]:
+            continue
         assert (
-            open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{idx}.content"]
+            open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}.content"]
             == message.content[0].text.value
         )
         assert (
-            open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{idx}.role"]
+            open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}.role"]
             == message.role
         )
         assert (
-            open_ai_span.attributes[f"gen_ai.response.{idx}.id"]
+            open_ai_span.attributes[f"gen_ai.response.{completion_index}.id"]
             == message.id
         )
+        completion_index += 1
 
 
 @pytest.mark.vcr