diff --git a/.changeset/gorgeous-rocks-grin.md b/.changeset/gorgeous-rocks-grin.md
new file mode 100644
index 000000000000..d8e5984d4945
--- /dev/null
+++ b/.changeset/gorgeous-rocks-grin.md
@@ -0,0 +1,5 @@
+---
+'@ai-sdk/togetherai': patch
+---
+
+chore (provider/togetherai): Incorporate the latest model ids.
diff --git a/content/providers/01-ai-sdk-providers/24-togetherai.mdx b/content/providers/01-ai-sdk-providers/24-togetherai.mdx
index 5e4049641c7d..67fcdbea9946 100644
--- a/content/providers/01-ai-sdk-providers/24-togetherai.mdx
+++ b/content/providers/01-ai-sdk-providers/24-togetherai.mdx
@@ -107,6 +107,6 @@ The Together.ai provider also supports [completion models](https://docs.together
 <Note>
   The table above lists popular models. Please see the [Together.ai
   docs](https://docs.together.ai/docs/serverless-models) for a full list of
-  available models. The table above lists popular models. You can also pass any
-  available provider model ID as a string if needed.
+  available models. You can also pass any available provider model ID as a
+  string if needed.
 </Note>
diff --git a/packages/togetherai/src/togetherai-chat-settings.ts b/packages/togetherai/src/togetherai-chat-settings.ts
index 29c4d60f6247..5f3d43555476 100644
--- a/packages/togetherai/src/togetherai-chat-settings.ts
+++ b/packages/togetherai/src/togetherai-chat-settings.ts
@@ -2,36 +2,37 @@ import { OpenAICompatibleChatSettings } from '@ai-sdk/openai-compatible';
 
 // https://docs.together.ai/docs/serverless-models#chat-models
 export type TogetherAIChatModelId =
-  | 'databricks/dbrx-instruct'
-  | 'deepseek-ai/deepseek-llm-67b-chat'
+  | 'meta-llama/Llama-3.3-70B-Instruct-Turbo'
+  | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo'
+  | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
+  | 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo'
+  | 'meta-llama/Meta-Llama-3-8B-Instruct-Turbo'
+  | 'meta-llama/Meta-Llama-3-70B-Instruct-Turbo'
+  | 'meta-llama/Llama-3.2-3B-Instruct-Turbo'
+  | 'meta-llama/Meta-Llama-3-8B-Instruct-Lite'
+  | 'meta-llama/Meta-Llama-3-70B-Instruct-Lite'
+  | 'meta-llama/Llama-3-8b-chat-hf'
+  | 'meta-llama/Llama-3-70b-chat-hf'
+  | 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF'
+  | 'Qwen/Qwen2.5-Coder-32B-Instruct'
+  | 'Qwen/QwQ-32B-Preview'
+  | 'microsoft/WizardLM-2-8x22B'
   | 'google/gemma-2-27b-it'
   | 'google/gemma-2-9b-it'
+  | 'databricks/dbrx-instruct'
+  | 'deepseek-ai/deepseek-llm-67b-chat'
   | 'google/gemma-2b-it'
   | 'Gryphe/MythoMax-L2-13b'
   | 'meta-llama/Llama-2-13b-chat-hf'
-  | 'meta-llama/Llama-3-70b-chat-hf'
-  | 'meta-llama/Llama-3-8b-chat-hf'
-  | 'meta-llama/Llama-3.2-3B-Instruct-Turbo'
-  | 'meta-llama/Meta-Llama-3-70B-Instruct-Lite'
-  | 'meta-llama/Meta-Llama-3-70B-Instruct-Turbo'
-  | 'meta-llama/Meta-Llama-3-8B-Instruct-Lite'
-  | 'meta-llama/Meta-Llama-3-8B-Instruct-Turbo'
-  | 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo'
-  | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
-  | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo'
-  | 'microsoft/WizardLM-2-8x22B'
   | 'mistralai/Mistral-7B-Instruct-v0.1'
   | 'mistralai/Mistral-7B-Instruct-v0.2'
   | 'mistralai/Mistral-7B-Instruct-v0.3'
-  | 'mistralai/Mixtral-8x22B-Instruct-v0.1'
   | 'mistralai/Mixtral-8x7B-Instruct-v0.1'
+  | 'mistralai/Mixtral-8x22B-Instruct-v0.1'
   | 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO'
-  | 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF'
-  | 'Qwen/Qwen2-72B-Instruct'
-  | 'Qwen/Qwen2.5-72B-Instruct-Turbo'
   | 'Qwen/Qwen2.5-7B-Instruct-Turbo'
-  | 'Qwen/Qwen2.5-Coder-32B-Instruct'
-  | 'togethercomputer/StripedHyena-Nous-7B'
+  | 'Qwen/Qwen2.5-72B-Instruct-Turbo'
+  | 'Qwen/Qwen2-72B-Instruct'
   | 'upstage/SOLAR-10.7B-Instruct-v1.0'
   | (string & {});
 
diff --git a/packages/togetherai/src/togetherai-completion-settings.ts b/packages/togetherai/src/togetherai-completion-settings.ts
index 16fc86425161..534d3703749f 100644
--- a/packages/togetherai/src/togetherai-completion-settings.ts
+++ b/packages/togetherai/src/togetherai-completion-settings.ts
@@ -2,6 +2,10 @@ import { OpenAICompatibleCompletionSettings } from '@ai-sdk/openai-compatible';
 
 // https://docs.together.ai/docs/serverless-models#language-models
 export type TogetherAICompletionModelId =
+  | 'meta-llama/Llama-2-70b-hf'
+  | 'mistralai/Mistral-7B-v0.1'
+  | 'mistralai/Mixtral-8x7B-v0.1'
+  | 'Meta-Llama/Llama-Guard-7b'
   | 'codellama/CodeLlama-34b-Instruct-hf'
   | 'Qwen/Qwen2.5-Coder-32B-Instruct'
   | (string & {});
diff --git a/packages/togetherai/src/togetherai-embedding-settings.ts b/packages/togetherai/src/togetherai-embedding-settings.ts
index 9cc008f3026f..fe1c2d2cd0a0 100644
--- a/packages/togetherai/src/togetherai-embedding-settings.ts
+++ b/packages/togetherai/src/togetherai-embedding-settings.ts
@@ -2,14 +2,14 @@ import { OpenAICompatibleEmbeddingSettings } from '@ai-sdk/openai-compatible';
 
 // https://docs.together.ai/docs/serverless-models#embedding-models
 export type TogetherAIEmbeddingModelId =
-  | 'BAAI/bge-base-en-v1.5'
-  | 'BAAI/bge-large-en-v1.5'
-  | 'bert-base-uncased'
-  | 'sentence-transformers/msmarco-bert-base-dot-v5'
   | 'togethercomputer/m2-bert-80M-2k-retrieval'
   | 'togethercomputer/m2-bert-80M-32k-retrieval'
   | 'togethercomputer/m2-bert-80M-8k-retrieval'
   | 'WhereIsAI/UAE-Large-V1'
+  | 'BAAI/bge-large-en-v1.5'
+  | 'BAAI/bge-base-en-v1.5'
+  | 'sentence-transformers/msmarco-bert-base-dot-v5'
+  | 'bert-base-uncased'
   | (string & {});
 
 export interface TogetherAIEmbeddingSettings
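For context, here is a minimal sketch (not part of this diff) of how these model id unions are consumed, assuming the default `togetherai` provider instance exported by `@ai-sdk/togetherai` and the `generateText` helper from the `ai` package. The `(string & {})` branch at the end of each union is what lets ids outside the listed literals still type-check, while the listed ids keep editor autocomplete:

```ts
import { togetherai } from '@ai-sdk/togetherai';
import { generateText } from 'ai';

// A listed chat model id: autocompleted from TogetherAIChatModelId.
const { text } = await generateText({
  model: togetherai('meta-llama/Llama-3.3-70B-Instruct-Turbo'),
  prompt: 'Summarize the Together.ai serverless model lineup in one sentence.',
});

// An id that is not in the union still type-checks via the `(string & {})` branch.
// 'some-org/some-newer-model' is a hypothetical placeholder, not a real Together.ai model id.
const newerModel = togetherai('some-org/some-newer-model');
```

This is why reordering or extending the literal lists in this patch does not break existing callers: any string remains a valid model id at the type level.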