17 changes: 12 additions & 5 deletions docs/src/app/api/page.tsx
@@ -54,15 +54,16 @@ print(result.response)`} />
<span className="text-xs px-2 py-1 rounded-md bg-blue-100 text-blue-700 font-mono">default: "openai"</span>
</div>
<p className="text-muted-foreground mb-4">LM provider to use for completions.</p>
- <Table
+ <Table
headers={["Value", "Provider"]}
rows={[
[<code key="1" className="text-sm">"openai"</code>, "OpenAI API"],
[<code key="2" className="text-sm">"anthropic"</code>, "Anthropic API"],
[<code key="3" className="text-sm">"portkey"</code>, "Portkey AI gateway"],
[<code key="4" className="text-sm">"openrouter"</code>, "OpenRouter"],
[<code key="5" className="text-sm">"litellm"</code>, "LiteLLM (multi-provider)"],
[<code key="6" className="text-sm">"vllm"</code>, "Local vLLM server"],
[<code key="3" className="text-sm">"vercel"</code>, "Vercel AI Gateway"],
[<code key="4" className="text-sm">"portkey"</code>, "Portkey AI gateway"],
[<code key="5" className="text-sm">"openrouter"</code>, "OpenRouter"],
[<code key="6" className="text-sm">"litellm"</code>, "LiteLLM (multi-provider)"],
[<code key="7" className="text-sm">"vllm"</code>, "Local vLLM server"],
]}
/>
</div>
@@ -81,6 +82,12 @@ backend_kwargs={
"model_name": "gpt-5-mini",
}

+ # Vercel AI Gateway
+ backend_kwargs={
+     "api_key": "...",
+     "model_name": "openai/gpt-5.2", # Format: creator/model-name
+ }
+
# vLLM (local)
backend_kwargs={
"base_url": "http://localhost:8000/v1",
19 changes: 18 additions & 1 deletion docs/src/app/backends/page.tsx
@@ -7,7 +7,7 @@ export default function BackendsPage() {

<p className="text-muted-foreground mb-6">
<p>
- RLMs natively support a wide range of language model providers, including <code>OpenAI</code>, <code>Anthropic</code>, <code>Portkey</code>, <code>OpenRouter</code>, and <code>LiteLLM</code>. Additional providers can be supported with minimal effort. The <code>backend_kwargs</code> are named arguments passed directly to the backend client.
+ RLMs natively support a wide range of language model providers, including <code>OpenAI</code>, <code>Anthropic</code>, <code>Vercel AI Gateway</code>, <code>Portkey</code>, <code>OpenRouter</code>, and <code>LiteLLM</code>. Additional providers can be supported with minimal effort. The <code>backend_kwargs</code> are named arguments passed directly to the backend client.
</p>
</p>

@@ -64,6 +64,23 @@ export default function BackendsPage() {

<hr className="my-8 border-border" />

+ <h2 className="text-2xl font-semibold mb-4">Vercel AI Gateway</h2>
+ <p className="text-muted-foreground mb-4">
+ <a href="https://vercel.com/docs/infrastructure/ai-gateway" className="text-primary underline font-medium" target="_blank" rel="noopener noreferrer">Vercel AI Gateway</a> provides a unified endpoint for accessing multiple AI providers with built-in caching and analytics.
+ </p>
+ <CodeBlock code={`rlm = RLM(
+ backend="vercel",
+ backend_kwargs={
+ "api_key": os.getenv("AI_GATEWAY_API_KEY"),
+ "model_name": "openai/gpt-5.2", # Format: creator/model-name
+ },
+ )`} />
+ <p className="text-muted-foreground mt-4">
+ Vercel AI Gateway provides OpenAI-compatible endpoints for multiple AI providers. Use the <code className="px-1.5 py-0.5 rounded bg-muted text-foreground text-sm">creator/model-name</code> format (e.g., <code className="px-1.5 py-0.5 rounded bg-muted text-foreground text-sm">openai/gpt-5.2</code>, <code className="px-1.5 py-0.5 rounded bg-muted text-foreground text-sm">anthropic/claude-sonnet-4.5</code>). Cost tracking works correctly through the standard usage API.
+ </p>
+
+ <hr className="my-8 border-border" />
+
<h2 className="text-2xl font-semibold mb-4">LiteLLM</h2>
<p className="text-muted-foreground mb-4">
<a href="https://docs.litellm.ai/docs/" className="text-primary underline font-medium" target="_blank" rel="noopener noreferrer">LiteLLM</a> is a universal interface for 100+ model providers, with support for local models and custom endpoints.
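For reference on the "OpenAI-compatible endpoints" and "standard usage API" claims in the new Vercel section above: a minimal sketch of the equivalent raw call, assuming the official openai Python package (the OpenAIClient this PR routes to wraps the same endpoint). The base URL and creator/model-name format come from this diff; everything else is illustrative.

import os
from openai import OpenAI

# The gateway speaks the standard OpenAI chat-completions protocol.
client = OpenAI(
    api_key=os.getenv("AI_GATEWAY_API_KEY"),
    base_url="https://ai-gateway.vercel.sh/v1",  # default added by this PR
)

resp = client.chat.completions.create(
    model="openai/gpt-5.2",  # Format: creator/model-name
    messages=[{"role": "user", "content": "Say hello."}],
)
print(resp.choices[0].message.content)
print(resp.usage)  # prompt/completion token counts; this is what cost tracking reads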
17 changes: 17 additions & 0 deletions docs/src/app/page.tsx
@@ -96,6 +96,7 @@ uv pip install -e . --extra modal`} />
<TabsList>
<TabsTrigger value="openai">OpenAI</TabsTrigger>
<TabsTrigger value="anthropic">Anthropic</TabsTrigger>
<TabsTrigger value="vercel">Vercel AI Gateway</TabsTrigger>
<TabsTrigger value="portkey">Portkey</TabsTrigger>
</TabsList>
<TabsContent value="openai">
@@ -127,6 +128,22 @@ rlm = RLM(
verbose=False, # print to logs
)

result = rlm.completion("Calculate 2^(2^(2^2)) using Python.")
print(result.response)`} />
</TabsContent>
<TabsContent value="vercel">
<CodeBlock code={`import os
from rlm import RLM

rlm = RLM(
backend="vercel",
backend_kwargs={
"api_key": os.getenv("AI_GATEWAY_API_KEY"),
"model_name": "openai/gpt-5.2", # Format: creator/model-name
},
verbose=False, # print to logs
)

result = rlm.completion("Calculate 2^(2^(2^2)) using Python.")
print(result.response)`} />
</TabsContent>
7 changes: 6 additions & 1 deletion rlm/clients/__init__.py
@@ -36,6 +36,11 @@ def get_client(

backend_kwargs.setdefault("base_url", "https://openrouter.ai/api/v1")
return OpenAIClient(**backend_kwargs)
+ elif backend == "vercel":
+     from rlm.clients.openai import OpenAIClient
+
+     backend_kwargs.setdefault("base_url", "https://ai-gateway.vercel.sh/v1")
+     return OpenAIClient(**backend_kwargs)
elif backend == "litellm":
from rlm.clients.litellm import LiteLLMClient

@@ -46,5 +51,5 @@
return AnthropicClient(**backend_kwargs)
else:
raise ValueError(
f"Unknown backend: {backend}. Supported backends: ['openai', 'vllm', 'portkey', 'openrouter', 'litellm', 'anthropic']"
f"Unknown backend: {backend}. Supported backends: ['openai', 'vllm', 'portkey', 'openrouter', 'vercel', 'litellm', 'anthropic']"
)
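For reference, the new branch only defaults base_url and then delegates to the existing OpenAIClient, so the dispatch reduces to the sketch below. This assumes get_client takes the backend name plus a backend_kwargs dict, as the hunk above suggests (the full signature is collapsed in this view).

from rlm.clients import get_client

# "vercel" resolves to OpenAIClient pointed at the gateway.
# setdefault means an explicit base_url in backend_kwargs still wins.
client = get_client(
    backend="vercel",
    backend_kwargs={
        "api_key": "...",                # gateway key
        "model_name": "openai/gpt-5.2",  # Format: creator/model-name
    },
)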
2 changes: 1 addition & 1 deletion rlm/core/types.py
@@ -2,7 +2,7 @@
from types import ModuleType
from typing import Any, Literal

- ClientBackend = Literal["openai", "portkey", "openrouter", "vllm", "litellm", "anthropic"]
+ ClientBackend = Literal["openai", "portkey", "openrouter", "vllm", "litellm", "anthropic", "vercel"]
EnvironmentType = Literal["local", "prime", "modal"]


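Since ClientBackend is a typing.Literal, extending it here is what makes the new backend name pass static type checks; a minimal illustration (checker-only, no runtime behavior change):

from rlm.core.types import ClientBackend

backend: ClientBackend = "vercel"    # accepted after this change
# backend: ClientBackend = "gemini"  # mypy/pyright would reject this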