diff --git a/docs/src/app/api/page.tsx b/docs/src/app/api/page.tsx
index 58f6346e..ddca2671 100644
--- a/docs/src/app/api/page.tsx
+++ b/docs/src/app/api/page.tsx
@@ -54,15 +54,16 @@ print(result.response)`} />
 default: "openai"

LM provider to use for completions.

- "openai", "OpenAI API"], ["anthropic", "Anthropic API"], - ["portkey", "Portkey AI gateway"], - ["openrouter", "OpenRouter"], - ["litellm", "LiteLLM (multi-provider)"], - ["vllm", "Local vLLM server"], + ["vercel", "Vercel AI Gateway"], + ["portkey", "Portkey AI gateway"], + ["openrouter", "OpenRouter"], + ["litellm", "LiteLLM (multi-provider)"], + ["vllm", "Local vLLM server"], ]} /> @@ -81,6 +82,12 @@ backend_kwargs={ "model_name": "gpt-5-mini", } +# Vercel AI Gateway +backend_kwargs={ + "api_key": "...", + "model_name": "openai/gpt-5.2", # Format: creator/model-name +} + # vLLM (local) backend_kwargs={ "base_url": "http://localhost:8000/v1", diff --git a/docs/src/app/backends/page.tsx b/docs/src/app/backends/page.tsx index 6a0708aa..1808bddd 100644 --- a/docs/src/app/backends/page.tsx +++ b/docs/src/app/backends/page.tsx @@ -7,7 +7,7 @@ export default function BackendsPage() {

diff --git a/docs/src/app/backends/page.tsx b/docs/src/app/backends/page.tsx
index 6a0708aa..1808bddd 100644
--- a/docs/src/app/backends/page.tsx
+++ b/docs/src/app/backends/page.tsx
@@ -7,7 +7,7 @@ export default function BackendsPage() {
-RLMs natively support a wide range of language model providers, including OpenAI, Anthropic, Portkey, OpenRouter, and LiteLLM. Additional providers can be supported with minimal effort. The backend_kwargs are named arguments passed directly to the backend client.
+RLMs natively support a wide range of language model providers, including OpenAI, Anthropic, Vercel AI Gateway, Portkey, OpenRouter, and LiteLLM. Additional providers can be supported with minimal effort. The backend_kwargs are named arguments passed directly to the backend client.

@@ -64,6 +64,23 @@ export default function BackendsPage() {
+Vercel AI Gateway
+
+Vercel AI Gateway provides a unified endpoint for accessing multiple AI providers, with built-in caching and analytics.
+
+The gateway's endpoints are OpenAI-compatible. Use the creator/model-name format (e.g., openai/gpt-5.2, anthropic/claude-sonnet-4.5). Cost tracking works correctly through the standard usage API.
+

LiteLLM

 LiteLLM is a universal interface for 100+ model providers, with support for local models and custom endpoints.
diff --git a/docs/src/app/page.tsx b/docs/src/app/page.tsx
index 66d3baf4..b1a8a41a 100644
--- a/docs/src/app/page.tsx
+++ b/docs/src/app/page.tsx
@@ -96,6 +96,7 @@ uv pip install -e . --extra modal`} />
 OpenAI
 Anthropic
+Vercel AI Gateway
 Portkey
@@ -127,6 +128,22 @@ rlm = RLM(
     verbose=False,  # print to logs
 )
+result = rlm.completion("Calculate 2^(2^(2^2)) using Python.")
+print(result.response)`} />
+
+
+
diff --git a/rlm/clients/__init__.py b/rlm/clients/__init__.py
index 71606268..0eb8a226 100644
--- a/rlm/clients/__init__.py
+++ b/rlm/clients/__init__.py
@@ -36,6 +36,11 @@ def get_client(
         backend_kwargs.setdefault("base_url", "https://openrouter.ai/api/v1")
         return OpenAIClient(**backend_kwargs)
+    elif backend == "vercel":
+        from rlm.clients.openai import OpenAIClient
+
+        backend_kwargs.setdefault("base_url", "https://ai-gateway.vercel.sh/v1")
+        return OpenAIClient(**backend_kwargs)
     elif backend == "litellm":
         from rlm.clients.litellm import LiteLLMClient
@@ -46,5 +51,5 @@ def get_client(
         return AnthropicClient(**backend_kwargs)
     else:
         raise ValueError(
-            f"Unknown backend: {backend}. Supported backends: ['openai', 'vllm', 'portkey', 'openrouter', 'litellm', 'anthropic']"
+            f"Unknown backend: {backend}. Supported backends: ['openai', 'vllm', 'portkey', 'openrouter', 'vercel', 'litellm', 'anthropic']"
         )
diff --git a/rlm/core/types.py b/rlm/core/types.py
index ad167628..0807139b 100644
--- a/rlm/core/types.py
+++ b/rlm/core/types.py
@@ -2,7 +2,7 @@
 from types import ModuleType
 from typing import Any, Literal
 
-ClientBackend = Literal["openai", "portkey", "openrouter", "vllm", "litellm", "anthropic"]
+ClientBackend = Literal["openai", "portkey", "openrouter", "vllm", "litellm", "anthropic", "vercel"]
 
 EnvironmentType = Literal["local", "prime", "modal"]
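For illustration, a minimal sketch of how the new branch resolves, assuming get_client accepts the backend name plus a backend_kwargs dict as its body implies; the API key is a placeholder:

    # Hypothetical call exercising the new "vercel" branch of get_client().
    from rlm.clients import get_client

    client = get_client(
        backend="vercel",
        backend_kwargs={
            "api_key": "...",                # placeholder Vercel AI Gateway key
            "model_name": "openai/gpt-5.2",  # creator/model-name format
        },
    )
    # setdefault() fills base_url only when the caller has not passed one, so
    # this resolves to https://ai-gateway.vercel.sh/v1 and reuses the same
    # OpenAI-compatible client as the openrouter branch.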