1 parent f57ac88 commit 8421740
gpt_researcher/utils/llm.py
@@ -54,7 +54,7 @@ async def create_chat_completion(
         raise ValueError("Model cannot be None")
     if max_tokens is not None and max_tokens > 32001:
         raise ValueError(
-            f"Max tokens cannot be more than 16,000, but got {max_tokens}")
+            f"Max tokens cannot be more than 32,000, but got {max_tokens}")
 
     # Get the provider from supported providers
     provider_kwargs = {'model': model}
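
For context, a minimal sketch of how the updated guard behaves in isolation. The names max_tokens and create_chat_completion come from the hunk above; validate_max_tokens is a hypothetical helper used here only for illustration, not part of gpt_researcher.

from typing import Optional

def validate_max_tokens(max_tokens: Optional[int]) -> None:
    # Mirrors the check inside create_chat_completion after this commit:
    # values above the cap are rejected before any request is made.
    if max_tokens is not None and max_tokens > 32001:
        raise ValueError(
            f"Max tokens cannot be more than 32,000, but got {max_tokens}")

validate_max_tokens(16000)     # passes: well under the new cap
validate_max_tokens(32001)     # passes: the comparison is strictly greater-than
# validate_max_tokens(32002)   # would raise ValueError with the updated message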