diff --git a/lib/langchain/llm/response/google_gemini_response.rb b/lib/langchain/llm/response/google_gemini_response.rb
index 3a974d663..4f102a7c6 100644
--- a/lib/langchain/llm/response/google_gemini_response.rb
+++ b/lib/langchain/llm/response/google_gemini_response.rb
@@ -15,7 +15,7 @@ def role
     end
 
     def tool_calls
-      if raw_response.dig("candidates", 0, "content") && raw_response.dig("candidates", 0, "content", "parts", 0).has_key?("functionCall")
+      if raw_response.dig("candidates", 0, "content", "parts", 0)&.has_key?("functionCall")
         raw_response.dig("candidates", 0, "content", "parts")
       else
         []
diff --git a/spec/fixtures/llm/google_gemini/chat_with_tool_calls_max_tokens.json b/spec/fixtures/llm/google_gemini/chat_with_tool_calls_max_tokens.json
new file mode 100644
index 000000000..c86261b35
--- /dev/null
+++ b/spec/fixtures/llm/google_gemini/chat_with_tool_calls_max_tokens.json
@@ -0,0 +1,11 @@
+{
+  "candidates": [
+    {
+      "content": {
+        "role": "model"
+      },
+      "finishReason": "MAX_TOKENS",
+      "index": 0
+    }
+  ]
+}
diff --git a/spec/lib/langchain/llm/response/google_gemini_response_spec.rb b/spec/lib/langchain/llm/response/google_gemini_response_spec.rb
index ec94c927b..74147cd15 100644
--- a/spec/lib/langchain/llm/response/google_gemini_response_spec.rb
+++ b/spec/lib/langchain/llm/response/google_gemini_response_spec.rb
@@ -25,6 +25,17 @@
     it "returns tool_calls" do
       expect(response.tool_calls).to eq([{"functionCall" => {"name" => "calculator__execute", "args" => {"input" => "2+2"}}}])
     end
+
+    context "when output has reached max tokens" do
+      let(:raw_response) do
+        JSON.parse File.read("spec/fixtures/llm/google_gemini/chat_with_tool_calls_max_tokens.json")
+      end
+      let(:response) { described_class.new(raw_response) }
+
+      it "returns an empty array" do
+        expect(response.tool_calls).to eq([])
+      end
+    end
   end
 
   describe "#embeddings" do