We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
2 parents 571d8ad + 8421740 — commit e3f2e3a (Copy full SHA for e3f2e3a)
gpt_researcher/utils/llm.py
@@ -54,7 +54,7 @@ async def create_chat_completion(
54
raise ValueError("Model cannot be None")
55
if max_tokens is not None and max_tokens > 32001:
56
raise ValueError(
57
- f"Max tokens cannot be more than 16,000, but got {max_tokens}")
+ f"Max tokens cannot be more than 32,000, but got {max_tokens}")
58
59
# Get the provider from supported providers
60
provider_kwargs = {'model': model}
0 commit comments