Skip to content

Commit d264014

Browse files
committed
refactor: Extract _set_finish_reason helper to eliminate code duplication
Extract finish_reason mapping logic into a reusable helper function to address code duplication feedback from Gemini Code Assist review on PR #3698. Changes: - Added _set_finish_reason(response, finish_reason) helper function - Replaced three duplicate mapping blocks with a single helper call: * Non-streaming path (line ~880) * Streaming tool-call path (line ~1387) * Streaming text-only path (line ~1409) - Preserved all existing comments and behavior - Improved maintainability - single source of truth for mapping logic Addresses: https://github.com/google/adk-python/pull/3698#discussion_r18xxxxx
1 parent 22a627b commit d264014

File tree

1 file changed

+22
-21
lines changed

1 file changed

+22
-21
lines changed

src/google/adk/models/lite_llm.py

Lines changed: 22 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -176,6 +176,25 @@ def _infer_mime_type_from_uri(uri: str) -> Optional[str]:
176176
return None
177177

178178

179+
def _set_finish_reason(
    response: types.LlmResponse, finish_reason: Any
) -> None:
  """Sets the finish reason on the LlmResponse, mapping from string if necessary.

  Args:
    response: The LlmResponse object to update.
    finish_reason: The finish reason value, either a FinishReason enum or a
      string that needs to be mapped.
  """
  # FinishReason enums (e.g. from Gemini via LiteLLM) pass through unchanged;
  # any other value is normalized to a lowercase string and looked up in the
  # mapping table, falling back to OTHER for unrecognized reasons.
  if isinstance(finish_reason, types.FinishReason):
    response.finish_reason = finish_reason
    return
  reason_key = str(finish_reason).lower()
  response.finish_reason = _FINISH_REASON_MAPPING.get(
      reason_key, types.FinishReason.OTHER
  )
196+
197+
179198
def _decode_inline_text_data(raw_bytes: bytes) -> str:
180199
"""Decodes inline file bytes that represent textual content."""
181200
try:
@@ -1081,13 +1100,7 @@ def _model_response_to_generate_content_response(
10811100
if finish_reason:
10821101
# If LiteLLM already provides a FinishReason enum (e.g., for Gemini), use
10831102
# it directly. Otherwise, map the finish_reason string to the enum.
1084-
if isinstance(finish_reason, types.FinishReason):
1085-
llm_response.finish_reason = finish_reason
1086-
else:
1087-
finish_reason_str = str(finish_reason).lower()
1088-
llm_response.finish_reason = _FINISH_REASON_MAPPING.get(
1089-
finish_reason_str, types.FinishReason.OTHER
1090-
)
1103+
_set_finish_reason(llm_response, finish_reason)
10911104
if response.get("usage", None):
10921105
llm_response.usage_metadata = types.GenerateContentResponseUsageMetadata(
10931106
prompt_token_count=response["usage"].get("prompt_tokens", 0),
@@ -1668,13 +1681,7 @@ async def generate_content_async(
16681681
# to ensure consistent behavior across both streaming and non-streaming modes.
16691682
# Without this, Claude and other models via LiteLLM would hit stop conditions
16701683
# that the agent couldn't properly handle.
1671-
if isinstance(finish_reason, types.FinishReason):
1672-
aggregated_llm_response_with_tool_call.finish_reason = finish_reason
1673-
else:
1674-
finish_reason_str = str(finish_reason).lower()
1675-
aggregated_llm_response_with_tool_call.finish_reason = _FINISH_REASON_MAPPING.get(
1676-
finish_reason_str, types.FinishReason.OTHER
1677-
)
1684+
_set_finish_reason(aggregated_llm_response_with_tool_call, finish_reason)
16781685
text = ""
16791686
reasoning_parts = []
16801687
function_calls.clear()
@@ -1696,13 +1703,7 @@ async def generate_content_async(
16961703
# to ensure consistent behavior across both streaming and non-streaming modes.
16971704
# Without this, Claude and other models via LiteLLM would hit stop conditions
16981705
# that the agent couldn't properly handle.
1699-
if isinstance(finish_reason, types.FinishReason):
1700-
aggregated_llm_response.finish_reason = finish_reason
1701-
else:
1702-
finish_reason_str = str(finish_reason).lower()
1703-
aggregated_llm_response.finish_reason = _FINISH_REASON_MAPPING.get(
1704-
finish_reason_str, types.FinishReason.OTHER
1705-
)
1706+
_set_finish_reason(aggregated_llm_response, finish_reason)
17061707
text = ""
17071708
reasoning_parts = []
17081709

0 commit comments

Comments (0)