Skip to content

Commit 2816b08

Browse files
authored
Add TLMResponses API (#99)
1 parent 149fc88 commit 2816b08

File tree

5 files changed

+360
-8
lines changed

5 files changed

+360
-8
lines changed

CHANGELOG.md

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
77

88
## [Unreleased]
99

10+
## [1.1.22] - 2025-07-29
11+
12+
### Added
13+
14+
- Added `TLMResponses` module, providing support for trust scoring with OpenAI Responses objects
15+
1016
## [1.1.21] - 2025-07-28
1117

1218
### Changed
@@ -285,7 +291,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
285291
- Release of the Cleanlab TLM Python client.
286292

287293

288-
[Unreleased]: https://github.com/cleanlab/cleanlab-tlm/compare/v1.1.21...HEAD
294+
[Unreleased]: https://github.com/cleanlab/cleanlab-tlm/compare/v1.1.22...HEAD
295+
[1.1.22]: https://github.com/cleanlab/cleanlab-tlm/compare/v1.1.21...v1.1.22
289296
[1.1.21]: https://github.com/cleanlab/cleanlab-tlm/compare/v1.1.20...v1.1.21
290297
[1.1.20]: https://github.com/cleanlab/cleanlab-tlm/compare/v1.1.19...v1.1.20
291298
[1.1.19]: https://github.com/cleanlab/cleanlab-tlm/compare/v1.1.18...v1.1.19

src/cleanlab_tlm/__about__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
# SPDX-License-Identifier: MIT
2-
__version__ = "1.1.21"
2+
__version__ = "1.1.22"

src/cleanlab_tlm/utils/chat.py

Lines changed: 67 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,16 @@
44
OpenAI's chat models.
55
"""
66

7+
from __future__ import annotations
8+
79
import json
810
import warnings
911
from typing import TYPE_CHECKING, Any, Literal, Optional, Union, cast
1012

1113
if TYPE_CHECKING:
1214
from openai.types.chat import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageParam
15+
from openai.types.responses import Response
16+
1317

1418
# Define message prefixes
1519
_SYSTEM_PREFIX = "System: "
@@ -208,7 +212,7 @@ def _find_index_after_first_system_block(messages: list[dict[str, Any]]) -> int:
208212

209213

210214
def _form_prompt_responses_api(
211-
messages: list[dict[str, Any]],
215+
messages: list[dict[str, Any]] | str,
212216
tools: Optional[list[dict[str, Any]]] = None,
213217
**responses_api_kwargs: Any,
214218
) -> str:
@@ -226,7 +230,9 @@ def _form_prompt_responses_api(
226230
Returns:
227231
str: A formatted string representing the chat history as a single prompt.
228232
"""
229-
messages = messages.copy()
233+
234+
messages = [{"role": "user", "content": messages}] if isinstance(messages, str) else messages.copy()
235+
230236
output = ""
231237

232238
# Find the index after the first consecutive block of system messages
@@ -301,7 +307,7 @@ def _form_prompt_responses_api(
301307

302308

303309
def _form_prompt_chat_completions_api(
304-
messages: list["ChatCompletionMessageParam"],
310+
messages: list[ChatCompletionMessageParam],
305311
tools: Optional[list[dict[str, Any]]] = None,
306312
) -> str:
307313
"""
@@ -443,7 +449,7 @@ def form_prompt_string(
443449
)
444450

445451

446-
def form_response_string_chat_completions(response: "ChatCompletion") -> str:
452+
def form_response_string_chat_completions(response: ChatCompletion) -> str:
447453
"""Form a single string representing the response, out of the raw response object returned by OpenAI's Chat Completions API.
448454
449455
This function extracts the assistant's response message from a ChatCompletion object
@@ -468,7 +474,9 @@ def form_response_string_chat_completions(response: "ChatCompletion") -> str:
468474
return form_response_string_chat_completions_api(response_msg)
469475

470476

471-
def form_response_string_chat_completions_api(response: Union[dict[str, Any], "ChatCompletionMessage"]) -> str:
477+
def form_response_string_chat_completions_api(
478+
response: Union[dict[str, Any], ChatCompletionMessage],
479+
) -> str:
472480
"""
473481
Form a single string representing the response, out of an assistant response message dictionary in Chat Completions API format.
474482
@@ -517,6 +525,60 @@ def form_response_string_chat_completions_api(response: Union[dict[str, Any], "C
517525
return str(content)
518526

519527

528+
def form_response_string_responses_api(response: Response) -> str:
    """
    Format a Response object from the OpenAI Responses API into a single string.

    Walks the ``output`` list of the Response, joining any message text and any
    function (tool) calls into one string. Tool calls are rendered as XML tags
    wrapping JSON content, matching the format produced by `form_prompt_string`.

    Args:
        response (Response): A Response object from the OpenAI Responses API
            containing output elements with message content and/or function calls.

    Returns:
        str: A formatted string containing the response content and any tool calls.
            Tool calls are formatted as XML tags containing JSON with function
            name and arguments.

    Raises:
        ImportError: If openai is not installed.
    """
    # openai is an optional dependency; import lazily so the module loads without it.
    try:
        from openai.types.responses.response_output_text import ResponseOutputText
    except ImportError as e:
        raise ImportError("OpenAI is a required dependency. Please install it with `pip install openai`.") from e

    parts: list[str] = []

    for item in response.output:
        if item.type == "message":
            # Keep only textual content; other content types are ignored.
            texts = [segment.text for segment in item.content if isinstance(segment, ResponseOutputText)]
            parts.append("\n".join(texts))
        elif item.type == "function_call":
            # A malformed call (bad JSON arguments, missing fields) is skipped
            # with a warning rather than failing the whole response.
            try:
                call_payload = {
                    "name": item.name,
                    "arguments": (json.loads(item.arguments) if item.arguments else {}),
                    "call_id": item.call_id,
                }
            except (AttributeError, TypeError, json.JSONDecodeError) as e:
                warnings.warn(
                    f"Error formatting tool call in response: {e}. Skipping this tool call.",
                    UserWarning,
                    stacklevel=2,
                )
            else:
                parts.append(f"{_TOOL_CALL_TAG_START}\n{json.dumps(call_payload, indent=2)}\n{_TOOL_CALL_TAG_END}")
        else:
            # Unknown output kinds are skipped with a warning, not an error.
            warnings.warn(
                f"Unexpected output type: {item.type}. Skipping this output.",
                UserWarning,
                stacklevel=2,
            )

    return "\n".join(parts)
580+
581+
520582
def _response_to_dict(response: Any) -> dict[str, Any]:
521583
# `response` should be a Union[dict[str, Any], ChatCompletionMessage], but last isinstance check wouldn't be reachable
522584
if isinstance(response, dict):

src/cleanlab_tlm/utils/chat_completions.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ class TLMChatCompletion(BaseTLM):
3030
by passing in the inputs to OpenAI's Chat Completions API and the ChatCompletion response object.
3131
3232
Args:
33-
quality_preset ({"base", "low", "medium"}, default = "medium"): an optional preset configuration to control
33+
quality_preset ({"base", "low", "medium", "high", "best"}, default = "medium"): an optional preset configuration to control
3434
the quality of TLM trustworthiness scores vs. latency/costs.
3535
3636
api_key (str, optional): Cleanlab TLM API key. If not provided, will attempt to read from CLEANLAB_API_KEY environment variable.

0 commit comments

Comments
 (0)