Skip to content
Merged
Show file tree
Hide file tree
Changes from 8 commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
dccc37c
feat: add Google Gemini as a built-in model provider
claude Mar 14, 2026
11b4e95
feat: update Gemini models to latest and add Japanese/French i18n
claude Mar 15, 2026
8c328c1
Add google-genai as optional dependency group for Gemini provider
claude Mar 15, 2026
bf0be40
Make google-genai a core dependency instead of optional
claude Mar 15, 2026
a9fd162
Bump google-genai minimum version to >=1.67.0
claude Mar 15, 2026
8c0fc65
Fix formatting and lint issues for CI checks
claude Mar 15, 2026
c919e93
Remove French and Japanese locale additions to reduce PR scope
claude Mar 15, 2026
8f7ffae
Remove leftover FR locale import from console i18n
claude Mar 15, 2026
058c1b5
Revert unrelated console formatting changes to reduce PR scope
claude Mar 15, 2026
947708d
Add unit tests for GeminiProvider
claude Mar 15, 2026
60238fb
Reorder model normalization to strip prefix before display_name fallback
claude Mar 15, 2026
8f36a25
Narrow exception handling from generic Exception to genai APIError
claude Mar 15, 2026
94825f3
Update Gemini fallback model IDs to match current API names
claude Mar 15, 2026
cd58e66
Add GeminiChatModel to ChatModelName Literal type
claude Mar 15, 2026
c64be9d
Fix Gemini provider hanging on save: add missing await and timeout
claude Mar 15, 2026
236f7a9
Address PR review feedback: add generic Exception fallback and fix docs
ekzhu Mar 15, 2026
491fa6b
Fix pylint warnings in Gemini provider tests
ekzhu Mar 15, 2026
2bc4238
Fix formatting in providers router (trailing comma, black)
ekzhu Mar 15, 2026
c198b8c
Update gemini_provider.py
ekzhu Mar 16, 2026
0979019
Merge branch 'main' into claude/add-gemini-provider-3Gy4l
xieyxclack Mar 16, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions console/src/components/MarkdownCopy/MarkdownCopy.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -77,8 +77,8 @@ export function MarkdownCopy({
localShowMarkdown && !(editable && !textareaProps.disabled)
? content
: editable
? editContent
: content;
? editContent
: content;

if (!contentToCopy) return;

Expand Down
6 changes: 3 additions & 3 deletions console/src/layouts/Sidebar.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -255,8 +255,8 @@ export default function Sidebar({ selectedKey }: SidebarProps) {
const lang = i18n.language?.startsWith("zh")
? "zh"
: i18n.language?.startsWith("ru")
? "ru"
: "en";
? "ru"
: "en";
const faqLang = lang === "zh" ? "zh" : "en";
const url = `https://copaw.agentscope.io/docs/faq.${faqLang}.md`;
fetch(url, { cache: "no-cache" })
Expand All @@ -268,7 +268,7 @@ export default function Sidebar({ selectedKey }: SidebarProps) {
setUpdateMarkdown(
match && lang !== "ru"
? match[0].trim()
: UPDATE_MD[lang] ?? UPDATE_MD.en,
: (UPDATE_MD[lang] ?? UPDATE_MD.en),
);
})
.catch(() => {
Expand Down
5 changes: 2 additions & 3 deletions console/src/pages/Control/Sessions/index.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -56,9 +56,8 @@ function SessionsPage() {
let filtered: Session[] = sessions;

if (filterUserId) {
filtered = filtered.filter(
(session: Session) =>
session.user_id?.toLowerCase().includes(filterUserId.toLowerCase()),
filtered = filtered.filter((session: Session) =>
session.user_id?.toLowerCase().includes(filterUserId.toLowerCase()),
);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -87,23 +87,23 @@ export function RemoteProviderCard({
const statusLabel = isAvailable
? t("models.providerAvailable")
: isConfigured
? t("models.providerNoModels")
: t("models.providerNotConfigured");
? t("models.providerNoModels")
: t("models.providerNotConfigured");
const statusType = isAvailable
? "enabled"
: isConfigured
? "partial"
: "disabled";
? "partial"
: "disabled";
const statusDotColor = isAvailable
? "#52c41a"
: isConfigured
? "#faad14"
: "#d9d9d9";
? "#faad14"
: "#d9d9d9";
const statusDotShadow = isAvailable
? "0 0 0 2px rgba(82, 196, 26, 0.2)"
: isConfigured
? "0 0 0 2px rgba(250, 173, 20, 0.2)"
: "none";
? "0 0 0 2px rgba(250, 173, 20, 0.2)"
: "none";

return (
<Card
Expand Down Expand Up @@ -135,8 +135,8 @@ export function RemoteProviderCard({
statusType === "enabled"
? styles.enabled
: statusType === "partial"
? styles.partial
: styles.disabled
? styles.partial
: styles.disabled
}`}
>
{statusLabel}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,10 @@ import api from "../../../../../api";
import { useTranslation } from "react-i18next";
import styles from "../../index.module.less";

interface ProviderConfigFormValues
extends Omit<ProviderConfigRequest, "generate_kwargs"> {
interface ProviderConfigFormValues extends Omit<
ProviderConfigRequest,
"generate_kwargs"
> {
generate_kwargs_text?: string;
}

Expand Down
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ dependencies = [
"aiofiles>=24.1.0",
"paho-mqtt>=2.0.0",
"matrix-nio>=0.24.0",
"google-genai>=1.67.0",
]

[tool.setuptools.dynamic]
Expand Down
9 changes: 9 additions & 0 deletions src/copaw/agents/model_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,13 @@
AnthropicChatFormatter = None
AnthropicChatModel = None

try:
from agentscope.formatter import GeminiChatFormatter
from agentscope.model import GeminiChatModel
except ImportError: # pragma: no cover - compatibility fallback
GeminiChatFormatter = None
GeminiChatModel = None

from .utils.tool_message_utils import _sanitize_tool_messages
from ..providers import ProviderManager
from ..providers.retry_chat_model import RetryChatModel
Expand Down Expand Up @@ -82,6 +89,8 @@ async def wrapper(
}
if AnthropicChatModel is not None and AnthropicChatFormatter is not None:
_CHAT_MODEL_FORMATTER_MAP[AnthropicChatModel] = AnthropicChatFormatter
if GeminiChatModel is not None and GeminiChatFormatter is not None:
_CHAT_MODEL_FORMATTER_MAP[GeminiChatModel] = GeminiChatFormatter


def _get_formatter_for_chat_model(
Expand Down
113 changes: 113 additions & 0 deletions src/copaw/providers/gemini_provider.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
# -*- coding: utf-8 -*-
"""A Google Gemini provider implementation using AgentScope's native
GeminiChatModel."""

from __future__ import annotations

from typing import Any, List

from agentscope.model import ChatModelBase
from google import genai

from copaw.providers.provider import ModelInfo, Provider


class GeminiProvider(Provider):
    """Provider implementation for the Google Gemini API.

    Connectivity checks and model discovery go through the ``google-genai``
    SDK; chat inference is delegated to AgentScope's ``GeminiChatModel``.
    """

    def _client(self, timeout: float = 5) -> Any:
        """Build a genai client that honors ``timeout`` (seconds).

        The SDK takes the HTTP timeout in *milliseconds* via
        ``HttpOptions``; without it, a bad network/key can make the
        connectivity checks below hang indefinitely.
        """
        return genai.Client(
            api_key=self.api_key,
            http_options=genai.types.HttpOptions(
                timeout=int(timeout * 1000),
            ),
        )

    @staticmethod
    def _normalize_models_payload(payload: Any) -> List[ModelInfo]:
        """Convert raw SDK model rows into deduplicated ``ModelInfo`` items.

        Rows are read via ``getattr`` (``name`` / ``display_name``) so both
        SDK model objects and simple namespaces work. Rows without a name
        are skipped; the first occurrence of each ID wins.
        """
        models: List[ModelInfo] = []
        seen: set[str] = set()
        for row in payload or []:
            model_id = str(getattr(row, "name", "") or "").strip()
            display_name = str(
                getattr(row, "display_name", "") or model_id,
            ).strip()

            if not model_id:
                continue

            # Gemini API returns model names like "models/gemini-2.5-flash";
            # strip the "models/" prefix for cleaner IDs.
            if model_id.startswith("models/"):
                model_id = model_id[len("models/") :]

            # Fall back to the (stripped) ID when there is no usable
            # display name, or when it still carries the raw prefix.
            if not display_name or display_name.startswith("models/"):
                display_name = model_id

            if model_id in seen:
                continue
            seen.add(model_id)
            models.append(ModelInfo(id=model_id, name=display_name))

        return models

    async def check_connection(self, timeout: float = 5) -> tuple[bool, str]:
        """Check if the Google Gemini provider is reachable.

        Returns ``(True, "")`` on success, otherwise ``(False, message)``.
        """
        try:
            client = self._client(timeout=timeout)
            # Listing models is the cheapest authenticated call; pulling a
            # single page entry is enough to prove key + connectivity.
            async for _ in await client.aio.models.list():
                break
            return True, ""
        except Exception:  # best-effort probe: any failure means "down"
            return (
                False,
                "Failed to connect to Google Gemini API. "
                "Check your API key.",
            )

    async def fetch_models(self, timeout: float = 5) -> List[ModelInfo]:
        """Fetch available models from the Gemini API.

        Returns an empty list on any failure so callers can fall back to
        the built-in model list.
        """
        try:
            client = self._client(timeout=timeout)
            payload = []
            async for model in await client.aio.models.list():
                payload.append(model)
            return self._normalize_models_payload(payload)
        except Exception:  # discovery is optional; degrade silently
            return []

    async def check_model_connection(
        self,
        model_id: str,
        timeout: float = 5,
    ) -> tuple[bool, str]:
        """Check if a specific Gemini model is reachable/usable."""
        target = (model_id or "").strip()
        if not target:
            return False, "Empty model ID"

        try:
            client = self._client(timeout=timeout)
            # ``generate_content_stream`` on the async client returns an
            # awaitable that resolves to an async iterator — it must be
            # awaited before iteration, otherwise this hangs/fails.
            stream = await client.aio.models.generate_content_stream(
                model=target,
                contents="ping",
            )
            # Receiving the first chunk proves the model accepts requests.
            async for _ in stream:
                break
            return True, ""
        except Exception:  # best-effort probe: any failure means unusable
            return (
                False,
                f"Model '{model_id}' is not reachable or usable",
            )

    def get_chat_model_instance(self, model_id: str) -> ChatModelBase:
        """Build an AgentScope ``GeminiChatModel`` for ``model_id``.

        Imported lazily so this module stays importable on AgentScope
        versions without Gemini support.
        """
        from agentscope.model import GeminiChatModel

        return GeminiChatModel(
            model_name=model_id,
            stream=True,
            api_key=self.api_key,
            generate_kwargs=self.generate_kwargs,
        )
25 changes: 25 additions & 0 deletions src/copaw/providers/provider_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
)
from copaw.providers.openai_provider import OpenAIProvider
from copaw.providers.anthropic_provider import AnthropicProvider
from copaw.providers.gemini_provider import GeminiProvider
from copaw.providers.ollama_provider import OllamaProvider
from copaw.constant import SECRET_DIR
from copaw.local_models import create_local_chat_model
Expand Down Expand Up @@ -92,6 +93,16 @@

ANTHROPIC_MODELS: List[ModelInfo] = []

# Built-in fallback list of Gemini models, shown before (or instead of)
# API-based model discovery. IDs must match the names returned by the
# Gemini API with the "models/" prefix stripped.
GEMINI_MODELS: List[ModelInfo] = [
    ModelInfo(id="gemini-3.1-pro", name="Gemini 3.1 Pro"),
    ModelInfo(id="gemini-3-flash", name="Gemini 3 Flash"),
    ModelInfo(id="gemini-3.1-flash-lite", name="Gemini 3.1 Flash Lite"),
    ModelInfo(id="gemini-2.5-pro", name="Gemini 2.5 Pro"),
    ModelInfo(id="gemini-2.5-flash", name="Gemini 2.5 Flash"),
    ModelInfo(id="gemini-2.5-flash-lite", name="Gemini 2.5 Flash Lite"),
    ModelInfo(id="gemini-2.0-flash", name="Gemini 2.0 Flash"),
]

PROVIDER_MODELSCOPE = OpenAIProvider(
id="modelscope",
name="ModelScope",
Expand Down Expand Up @@ -169,6 +180,17 @@
freeze_url=True,
)

# Built-in Google Gemini provider. Uses the native Gemini API endpoint
# (URL is frozen — users only supply an API key) and supports runtime
# model discovery in addition to the GEMINI_MODELS fallback list.
PROVIDER_GEMINI = GeminiProvider(
    id="gemini",
    name="Google Gemini",
    base_url="https://generativelanguage.googleapis.com",
    api_key_prefix="",  # Gemini API keys have no well-known prefix to validate
    models=GEMINI_MODELS,
    chat_model="GeminiChatModel",
    freeze_url=True,
    support_model_discovery=True,
)

PROVIDER_OLLAMA = OllamaProvider(
id="ollama",
name="Ollama",
Expand Down Expand Up @@ -244,6 +266,7 @@ def _init_builtins(self):
self._add_builtin(PROVIDER_AZURE_OPENAI)
self._add_builtin(PROVIDER_MINIMAX)
self._add_builtin(PROVIDER_ANTHROPIC)
self._add_builtin(PROVIDER_GEMINI)
self._add_builtin(PROVIDER_OLLAMA)
self._add_builtin(PROVIDER_LMSTUDIO)
self._add_builtin(PROVIDER_LLAMACPP)
Expand Down Expand Up @@ -454,6 +477,8 @@ def _provider_from_data(self, data: Dict) -> Provider:

if provider_id == "anthropic" or chat_model == "AnthropicChatModel":
return AnthropicProvider.model_validate(data)
if provider_id == "gemini" or chat_model == "GeminiChatModel":
return GeminiProvider.model_validate(data)
if provider_id == "ollama":
return OllamaProvider.model_validate(data)
if data.get("is_local", False):
Expand Down
31 changes: 29 additions & 2 deletions website/public/docs/models.en.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,13 @@ You need to configure a model before chatting with CoPaw. You can do this under

![Console models](https://img.alicdn.com/imgextra/i1/O1CN01zHAE1Z26w6jXl2xbr_!!6000000007725-2-tps-3802-1968.png)

CoPaw supports multiple LLM providers: **cloud providers** (require API Key), **local providers** (llama.cpp / MLX), **Ollama provider**, **LM Studio provider**, and you can add **custom providers**. This page explains how to configure each type.
CoPaw supports multiple LLM providers: **cloud providers** (require API Key, including Google Gemini), **local providers** (llama.cpp / MLX), **Ollama provider**, **LM Studio provider**, and you can add **custom providers**. This page explains how to configure each type.

---

## Configure cloud providers

Cloud providers (including ModelScope, DashScope, Aliyun Coding Plan, OpenAI, and Azure OpenAI) call remote models via API and require an **API Key**.
Cloud providers (including ModelScope, DashScope, Aliyun Coding Plan, OpenAI, Azure OpenAI, Google Gemini, and MiniMax) call remote models via API and require an **API Key**.

**In the console:**

Expand All @@ -35,6 +35,33 @@ Cloud providers (including ModelScope, DashScope, Aliyun Coding Plan, OpenAI, an
>
> ![cancel](https://img.alicdn.com/imgextra/i2/O1CN01A8j1IR1n8fHGnio0q_!!6000000005045-2-tps-3802-1968.png)

## Google Gemini provider

The Google Gemini provider uses Google's native Gemini API (via the `google-genai` SDK) to access Gemini models. Pre-configured models include Gemini 3.1 Pro, Gemini 3 Flash, Gemini 3.1 Flash Lite, Gemini 2.5 Pro, Gemini 2.5 Flash, Gemini 2.5 Flash Lite, and Gemini 2.0 Flash. Additional models can be auto-discovered from the API.

**Prerequisites:**

- Obtain a Gemini API key from [Google AI Studio](https://aistudio.google.com/apikey).

**In the console:**

1. Open the console and go to **Settings → Models**.
2. Find the **Google Gemini** provider card and click **Settings**. Enter your **API key** and click **Save**.
3. After saving, the card status becomes **Available**. The provider supports **model discovery** — click **Models** to auto-discover available Gemini models from the API.
4. In the **LLM Configuration** section at the top, select **Google Gemini** in the **Provider** dropdown and choose a model (e.g. `gemini-2.5-flash`), then click **Save**.

**Using the CLI:**

```bash
# Configure the API key
copaw models config-key gemini

# Set Gemini as the active LLM
copaw models set-llm
```

> **Tip:** Gemini models with thinking capabilities (e.g. Gemini 3.1 Pro, Gemini 2.5 Pro, Gemini 2.5 Flash) support extended reasoning. CoPaw automatically handles thinking blocks and thought signatures from these models.

## Local providers (llama.cpp / MLX)

Local providers run models on your machine with **no API Key**; data stays on-device.
Expand Down
Loading
Loading