Skip to content

Commit ab0f906

Browse files
authored
Merge pull request #1485 from expphoto/feat/llm-no-temp-gpt5-only
feat(llm): stop sending temperature to non-supporting models
2 parents 58bbd6e + c14a1ed commit ab0f906

File tree

2 files changed

+17
-7
lines changed

2 files changed

+17
-7
lines changed

backend/chat/chat.py

Lines changed: 14 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -31,13 +31,20 @@ def create_agent(self):
3131
cfg = Config()
3232

3333
# Retrieve LLM using get_llm with settings from config
34-
provider = get_llm(
35-
llm_provider=cfg.smart_llm_provider,
36-
model=cfg.smart_llm_model,
37-
temperature=0.35,
38-
max_tokens=cfg.smart_token_limit,
39-
**self.config.llm_kwargs
40-
).llm
34+
# Avoid passing temperature for models that do not support it
35+
from gpt_researcher.llm_provider.generic.base import NO_SUPPORT_TEMPERATURE_MODELS
36+
37+
llm_init_kwargs = {
38+
"llm_provider": cfg.smart_llm_provider,
39+
"model": cfg.smart_llm_model,
40+
**self.config.llm_kwargs,
41+
}
42+
43+
if cfg.smart_llm_model not in NO_SUPPORT_TEMPERATURE_MODELS:
44+
llm_init_kwargs["temperature"] = 0.35
45+
llm_init_kwargs["max_tokens"] = cfg.smart_token_limit
46+
47+
provider = get_llm(**llm_init_kwargs).llm
4148

4249
# If vector_store is not initialized, process documents and add to vector_store
4350
if not self.vector_store:

gpt_researcher/llm_provider/generic/base.py

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -47,6 +47,9 @@
4747
"o3-2025-04-16",
4848
"o4-mini",
4949
"o4-mini-2025-04-16",
50+
# GPT-5 family: OpenAI enforces default temperature only
51+
"gpt-5",
52+
"gpt-5-mini",
5053
]
5154

5255
SUPPORT_REASONING_EFFORT_MODELS = [

Comments (0)