Skip to content

Commit 01751d1

Browse files
authored
feat: add support for parse via responses (#256)
1 parent 4426dd9 commit 01751d1

File tree

5 files changed

+205
-1
lines changed

5 files changed

+205
-1
lines changed

CHANGELOG.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,7 @@
1+
## 4.7.0 - 2025-06-10
2+
3+
- feat: add support for parse endpoint in responses API (no longer beta)
4+
15
## 4.6.2 - 2025-06-09
26

37
- fix: replace `import posthog` with direct method imports

posthog/ai/openai/openai.py

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -230,6 +230,42 @@ def _capture_streaming_event(
230230
groups=posthog_groups,
231231
)
232232

233+
def parse(
    self,
    posthog_distinct_id: Optional[str] = None,
    posthog_trace_id: Optional[str] = None,
    posthog_properties: Optional[Dict[str, Any]] = None,
    posthog_privacy_mode: bool = False,
    posthog_groups: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
):
    """
    Invoke OpenAI's `responses.parse` endpoint and record the usage in PostHog.

    Args:
        posthog_distinct_id: Optional ID to associate with the usage event.
        posthog_trace_id: Optional trace UUID for linking events.
        posthog_properties: Optional dictionary of extra properties to include in the event.
        posthog_privacy_mode: Whether to anonymize the input and output.
        posthog_groups: Optional dictionary of groups to associate with the event.
        **kwargs: Any additional parameters forwarded to the OpenAI Responses Parse API.

    Returns:
        The response from OpenAI's responses.parse call.
    """
    # Delegate to the shared helper: it calls the wrapped `parse` method
    # with **kwargs and emits a PostHog analytics event for the call.
    ph_client = self._client._ph_client
    return call_llm_and_track_usage(
        posthog_distinct_id,
        ph_client,
        "openai",
        posthog_trace_id,
        posthog_properties,
        posthog_privacy_mode,
        posthog_groups,
        self._client.base_url,
        self._original.parse,
        **kwargs,
    )
268+
233269

234270
class WrappedChat:
235271
"""Wrapper for OpenAI chat that tracks usage in PostHog."""

posthog/ai/openai/openai_async.py

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -230,6 +230,42 @@ async def _capture_streaming_event(
230230
groups=posthog_groups,
231231
)
232232

233+
async def parse(
    self,
    posthog_distinct_id: Optional[str] = None,
    posthog_trace_id: Optional[str] = None,
    posthog_properties: Optional[Dict[str, Any]] = None,
    posthog_privacy_mode: bool = False,
    posthog_groups: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
):
    """
    Invoke OpenAI's async `responses.parse` endpoint and record the usage in PostHog.

    Args:
        posthog_distinct_id: Optional ID to associate with the usage event.
        posthog_trace_id: Optional trace UUID for linking events.
        posthog_properties: Optional dictionary of extra properties to include in the event.
        posthog_privacy_mode: Whether to anonymize the input and output.
        posthog_groups: Optional dictionary of groups to associate with the event.
        **kwargs: Any additional parameters forwarded to the OpenAI Responses Parse API.

    Returns:
        The response from OpenAI's responses.parse call.
    """
    # Delegate to the shared async helper: it awaits the wrapped `parse`
    # method with **kwargs and emits a PostHog analytics event for the call.
    ph_client = self._client._ph_client
    return await call_llm_and_track_usage_async(
        posthog_distinct_id,
        ph_client,
        "openai",
        posthog_trace_id,
        posthog_properties,
        posthog_privacy_mode,
        posthog_groups,
        self._client.base_url,
        self._original.parse,
        **kwargs,
    )
268+
233269

234270
class WrappedChat:
235271
"""Async wrapper for OpenAI chat that tracks usage in PostHog."""

posthog/test/ai/openai/test_openai.py

Lines changed: 128 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,11 @@
2626
ResponseOutputMessage,
2727
ResponseOutputText,
2828
ResponseUsage,
29+
ParsedResponse,
30+
)
31+
from openai.types.responses.parsed_response import (
32+
ParsedResponseOutputMessage,
33+
ParsedResponseOutputText,
2934
)
3035

3136
from posthog.ai.openai import OpenAI
@@ -115,6 +120,59 @@ def mock_openai_response_with_responses_api():
115120
)
116121

117122

123+
@pytest.fixture
def mock_parsed_response():
    """Canned ParsedResponse mimicking a successful `responses.parse` call.

    The structured output (a small "event" object) appears three times by
    design: as the raw JSON text, as the per-output `parsed` payload, and as
    the top-level `output_parsed` — tests can assert against any of them.
    """
    return ParsedResponse(
        id="test",
        model="gpt-4o-2024-08-06",
        object="response",
        created_at=1741476542,
        status="completed",
        error=None,
        incomplete_details=None,
        instructions=None,
        max_output_tokens=None,
        tools=[],
        tool_choice="auto",
        output=[
            ParsedResponseOutputMessage(
                id="msg_123",
                type="message",
                role="assistant",
                status="completed",
                content=[
                    ParsedResponseOutputText(
                        type="output_text",
                        # Raw model text; `parsed` below is its decoded form.
                        text='{"name": "Science Fair", "date": "Friday", "participants": ["Alice", "Bob"]}',
                        annotations=[],
                        parsed={
                            "name": "Science Fair",
                            "date": "Friday",
                            "participants": ["Alice", "Bob"],
                        },
                    )
                ],
            )
        ],
        output_parsed={
            "name": "Science Fair",
            "date": "Friday",
            "participants": ["Alice", "Bob"],
        },
        parallel_tool_calls=True,
        previous_response_id=None,
        # Token counts drive the $ai_input_tokens / $ai_output_tokens /
        # $ai_reasoning_tokens assertions in test_responses_parse.
        usage=ResponseUsage(
            input_tokens=15,
            output_tokens=20,
            input_tokens_details={"prompt_tokens": 15, "cached_tokens": 0},
            output_tokens_details={"reasoning_tokens": 5},
            total_tokens=35,
        ),
        user=None,
        metadata={},
    )
174+
175+
118176
@pytest.fixture
119177
def mock_embedding_response():
120178
return CreateEmbeddingResponse(
@@ -646,3 +704,73 @@ def test_responses_api(mock_client, mock_openai_response_with_responses_api):
646704
assert props["$ai_http_status"] == 200
647705
assert props["foo"] == "bar"
648706
assert isinstance(props["$ai_latency"], float)
707+
708+
709+
def test_responses_parse(mock_client, mock_parsed_response):
    """The wrapped `responses.parse` returns OpenAI's result unchanged and
    captures exactly one $ai_generation PostHog event with the expected
    provider/model, input/output payloads, token counts and custom props."""
    # Patch the underlying OpenAI SDK method so no network call is made.
    with patch(
        "openai.resources.responses.Responses.parse",
        return_value=mock_parsed_response,
    ):
        client = OpenAI(api_key="test-key", posthog_client=mock_client)
        response = client.responses.parse(
            model="gpt-4o-2024-08-06",
            input=[
                {"role": "system", "content": "Extract the event information."},
                {
                    "role": "user",
                    "content": "Alice and Bob are going to a science fair on Friday.",
                },
            ],
            # Structured-output schema for the "event" object the model returns.
            text={
                "format": {
                    "type": "json_schema",
                    "json_schema": {
                        "name": "event",
                        "schema": {
                            "type": "object",
                            "properties": {
                                "name": {"type": "string"},
                                "date": {"type": "string"},
                                "participants": {
                                    "type": "array",
                                    "items": {"type": "string"},
                                },
                            },
                            "required": ["name", "date", "participants"],
                        },
                    },
                }
            },
            # posthog_* kwargs are consumed by the wrapper, not sent to OpenAI.
            posthog_distinct_id="test-id",
            posthog_properties={"foo": "bar"},
        )

        # NOTE(review): source dump lost indentation — asserts are placed
        # inside the `with` block to match the sibling test_responses_api;
        # confirm against the repository. Behavior is identical either way.
        assert response == mock_parsed_response
        assert mock_client.capture.call_count == 1

        # capture() is called with keyword args; inspect the event payload.
        call_args = mock_client.capture.call_args[1]
        props = call_args["properties"]

        assert call_args["distinct_id"] == "test-id"
        assert call_args["event"] == "$ai_generation"
        assert props["$ai_provider"] == "openai"
        assert props["$ai_model"] == "gpt-4o-2024-08-06"
        # Input is recorded verbatim (privacy mode was not enabled).
        assert props["$ai_input"] == [
            {"role": "system", "content": "Extract the event information."},
            {
                "role": "user",
                "content": "Alice and Bob are going to a science fair on Friday.",
            },
        ]
        # Output choices carry the raw JSON text from the mocked response.
        assert props["$ai_output_choices"] == [
            {
                "role": "assistant",
                "content": '{"name": "Science Fair", "date": "Friday", "participants": ["Alice", "Bob"]}',
            }
        ]
        # Token counts come from the fixture's ResponseUsage (15/20/5).
        assert props["$ai_input_tokens"] == 15
        assert props["$ai_output_tokens"] == 20
        assert props["$ai_reasoning_tokens"] == 5
        assert props["$ai_http_status"] == 200
        # Custom posthog_properties are merged into the event properties.
        assert props["foo"] == "bar"
        assert isinstance(props["$ai_latency"], float)

posthog/version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
VERSION = "4.6.2"
1+
VERSION = "4.7.0"
22

33
if __name__ == "__main__":
44
print(VERSION, end="") # noqa: T201

0 commit comments

Comments
 (0)