
Commit 3fca414

Merge pull request #3183 from xtekky/3mora2-main

Update Qwen

2 parents f46c179 + bb02eeb

File tree: 8 files changed (+546, -396 lines)

g4f/Provider/Blackbox.py

Lines changed: 0 additions & 1 deletion
```diff
@@ -8,7 +8,6 @@
 import string
 from pathlib import Path
 from typing import Optional
-from datetime import datetime, timedelta
 
 from ..typing import AsyncResult, Messages, MediaListType
 from ..requests.raise_for_status import raise_for_status
```

g4f/Provider/DeepInfra.py

Lines changed: 2 additions & 0 deletions
```diff
@@ -77,6 +77,8 @@ class DeepInfra(OpenaiTemplate):
         "qwen-3-32b": "Qwen/Qwen3-32B",
         "qwen-3-235b": "Qwen/Qwen3-235B-A22B",
         "qwq-32b": "Qwen/QwQ-32B",
+
+        "moonshotai/Kimi-K2-Instruct": "moonshotai/Kimi-K2-Instruct-0905",
     }
 
     @classmethod
```
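The new entry pins the generic `moonshotai/Kimi-K2-Instruct` id to the dated `-0905` checkpoint. A minimal sketch of how an alias table like this resolves a requested model name (the `resolve` helper is a hypothetical stand-in; in g4f the lookup happens inside the `OpenaiTemplate`/`ProviderModelMixin` machinery):

```python
# Illustrative alias resolution; `resolve` is hypothetical, not the real g4f API.
model_aliases = {
    "qwq-32b": "Qwen/QwQ-32B",
    "moonshotai/Kimi-K2-Instruct": "moonshotai/Kimi-K2-Instruct-0905",
}

def resolve(model: str) -> str:
    # Unknown names pass through unchanged.
    return model_aliases.get(model, model)

assert resolve("moonshotai/Kimi-K2-Instruct") == "moonshotai/Kimi-K2-Instruct-0905"
assert resolve("some-other-model") == "some-other-model"
```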

g4f/Provider/PollinationsAI.py

Lines changed: 3 additions & 0 deletions
```diff
@@ -88,6 +88,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     vision_models = [default_vision_model]
     _models_loaded = False
     model_aliases = {
+        "openai-fast": "gpt-4.1-nano",
         "llama-4-scout": "llamascout",
         "deepseek-r1": "deepseek-reasoning",
         "sdxl-turbo": "turbo",
@@ -105,6 +106,8 @@ def get_alias(model: dict) -> str:
             alias = model.get("name")
             if (model.get("aliases")):
                 alias = model.get("aliases")[0]
+            elif alias in cls.model_aliases:
+                alias = cls.model_aliases[alias]
             return alias.replace("-instruct", "").replace("qwen-", "qwen").replace("qwen", "qwen-")
         if not cls._models_loaded:
             try:
```
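The added `elif` gives `get_alias` a fallback: when an API entry carries no `aliases` of its own but its `name` appears in `model_aliases`, the registered alias is used instead. A standalone sketch of the same chain (the sample payloads are made up):

```python
model_aliases = {"openai-fast": "gpt-4.1-nano"}

def get_alias(model: dict) -> str:
    alias = model.get("name")
    if model.get("aliases"):
        alias = model.get("aliases")[0]      # prefer the API-provided alias
    elif alias in model_aliases:
        alias = model_aliases[alias]         # new fallback added in this commit
    return alias.replace("-instruct", "").replace("qwen-", "qwen").replace("qwen", "qwen-")

print(get_alias({"name": "openai-fast"}))                            # -> gpt-4.1-nano
print(get_alias({"name": "x", "aliases": ["qwen3-coder-instruct"]})) # -> qwen-3-coder
```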

g4f/Provider/Qwen.py

Lines changed: 131 additions & 34 deletions
```diff
@@ -5,16 +5,50 @@
 import re
 import uuid
 from time import time
+from typing import Literal, Optional
 
 import aiohttp
 from ..errors import RateLimitError
-from ..typing import AsyncResult, Messages
-from ..providers.response import JsonConversation, Reasoning, Usage
+from ..typing import AsyncResult, Messages, MediaListType
+from ..providers.response import JsonConversation, Reasoning, Usage, ImageResponse, FinishReason
 from ..requests import sse_stream
+from ..tools.media import merge_media
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import get_last_user_message
 from .. import debug
 
+try:
+    import curl_cffi
+
+    has_curl_cffi = True
+except ImportError:
+    has_curl_cffi = False
+
+text_models = [
+    'qwen3-max-preview', 'qwen-plus-2025-09-11', 'qwen3-235b-a22b', 'qwen3-coder-plus', 'qwen3-30b-a3b',
+    'qwen3-coder-30b-a3b-instruct', 'qwen-max-latest', 'qwen-plus-2025-01-25', 'qwq-32b', 'qwen-turbo-2025-02-11',
+    'qwen2.5-omni-7b', 'qvq-72b-preview-0310', 'qwen2.5-vl-32b-instruct', 'qwen2.5-14b-instruct-1m',
+    'qwen2.5-coder-32b-instruct', 'qwen2.5-72b-instruct']
+
+image_models = [
+    'qwen3-max-preview', 'qwen-plus-2025-09-11', 'qwen3-235b-a22b', 'qwen3-coder-plus', 'qwen3-30b-a3b',
+    'qwen3-coder-30b-a3b-instruct', 'qwen-max-latest', 'qwen-plus-2025-01-25', 'qwen-turbo-2025-02-11',
+    'qwen2.5-omni-7b', 'qwen2.5-vl-32b-instruct', 'qwen2.5-14b-instruct-1m', 'qwen2.5-coder-32b-instruct',
+    'qwen2.5-72b-instruct']
+
+vision_models = [
+    'qwen3-max-preview', 'qwen-plus-2025-09-11', 'qwen3-235b-a22b', 'qwen3-coder-plus', 'qwen3-30b-a3b',
+    'qwen3-coder-30b-a3b-instruct', 'qwen-max-latest', 'qwen-plus-2025-01-25', 'qwen-turbo-2025-02-11',
+    'qwen2.5-omni-7b', 'qvq-72b-preview-0310', 'qwen2.5-vl-32b-instruct', 'qwen2.5-14b-instruct-1m',
+    'qwen2.5-coder-32b-instruct', 'qwen2.5-72b-instruct']
+
+models = [
+    'qwen3-max-preview', 'qwen-plus-2025-09-11', 'qwen3-235b-a22b', 'qwen3-coder-plus', 'qwen3-30b-a3b',
+    'qwen3-coder-30b-a3b-instruct', 'qwen-max-latest', 'qwen-plus-2025-01-25', 'qwq-32b', 'qwen-turbo-2025-02-11',
+    'qwen2.5-omni-7b', 'qvq-72b-preview-0310', 'qwen2.5-vl-32b-instruct', 'qwen2.5-14b-instruct-1m',
+    'qwen2.5-coder-32b-instruct', 'qwen2.5-72b-instruct']
+
+
 class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
     """
     Provider for Qwen's chat service (chat.qwen.ai), with configurable
```
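The guarded import makes `curl_cffi` an optional dependency: when it is missing, `has_curl_cffi` stays `False` and the hardcoded lists above remain the source of truth (see `get_models` in the next hunk). The same pattern in isolation, with a hypothetical fallback list:

```python
try:
    import curl_cffi  # optional HTTP client, used only for live model discovery
    has_curl_cffi = True
except ImportError:
    has_curl_cffi = False

FALLBACK_MODELS = ["qwen3-235b-a22b"]  # hypothetical static fallback

def list_models() -> list[str]:
    # Only touch the network when the optional client is importable.
    if has_curl_cffi:
        response = curl_cffi.get("https://chat.qwen.ai/api/models")
        if response.ok:
            return [model["id"] for model in response.json().get("data", [])]
    return FALLBACK_MODELS
```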
```diff
@@ -26,42 +60,70 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
     supports_stream = True
     supports_message_history = False
 
-    # Complete list of models, extracted from the API
-    models = [
-        "qwen3-max-preview",
-        "qwen3-235b-a22b",
-        "qwen3-coder-plus",
-        "qwen3-30b-a3b",
-        "qwen3-coder-30b-a3b-instruct",
-        "qwen-max-latest",
-        "qwen-plus-2025-01-25",
-        "qwq-32b",
-        "qwen-turbo-2025-02-11",
-        "qwen2.5-omni-7b",
-        "qvq-72b-preview-0310",
-        "qwen2.5-vl-32b-instruct",
-        "qwen2.5-14b-instruct-1m",
-        "qwen2.5-coder-32b-instruct",
-        "qwen2.5-72b-instruct",
-    ]
+    _models_loaded = True
+    image_models = image_models
+    text_models = text_models
+    vision_models = vision_models
+    models = models
     default_model = "qwen3-235b-a22b"
 
     _midtoken: str = None
     _midtoken_uses: int = 0
 
+    @classmethod
+    def get_models(cls) -> list[str]:
+        if not cls._models_loaded and has_curl_cffi:
+            response = curl_cffi.get(f"{cls.url}/api/models")
+            if response.ok:
+                models = response.json().get("data", [])
+                cls.text_models = [model["id"] for model in models if "t2t" in model["info"]["meta"]["chat_type"]]
+
+                cls.image_models = [
+                    model["id"] for model in models if
+                    "image_edit" in model["info"]["meta"]["chat_type"] or "t2i" in model["info"]["meta"]["chat_type"]
+                ]
+
+                cls.vision_models = [model["id"] for model in models if model["info"]["meta"]["capabilities"]["vision"]]
+
+                cls.models = [model["id"] for model in models]
+                cls.default_model = cls.models[0]
+                cls._models_loaded = True
+                cls.live += 1
+                debug.log(f"Loaded {len(cls.models)} models from {cls.url}")
+
+            else:
+                debug.log(f"Failed to load models from {cls.url}: {response.status_code} {response.reason}")
+        return cls.models
+
     @classmethod
     async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
+        media: MediaListType = None,
         conversation: JsonConversation = None,
         proxy: str = None,
         timeout: int = 120,
         stream: bool = True,
         enable_thinking: bool = True,
+        chat_type: Literal[
+            "t2t", "search", "artifacts", "web_dev", "deep_research", "t2i", "image_edit", "t2v"
+        ] = "t2t",
+        aspect_ratio: Optional[Literal["1:1", "4:3", "3:4", "16:9", "9:16"]] = None,
         **kwargs
     ) -> AsyncResult:
-
+        """
+        chat_type:
+            DeepResearch = "deep_research"
+            Artifacts = "artifacts"
+            WebSearch = "search"
+            ImageGeneration = "t2i"
+            ImageEdit = "image_edit"
+            VideoGeneration = "t2v"
+            Txt2Txt = "t2t"
+            WebDev = "web_dev"
+        """
+
         model_name = cls.get_model(model)
 
         headers = {
```
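Note that `_models_loaded = True` ships with the static lists active, so the live fetch in `get_models` stays dormant unless the flag is reset. The `/api/models` payload shape implied by the comprehensions is roughly the following (field names inferred from the lookups, values illustrative):

```python
# Illustrative /api/models entry; the shape is inferred from get_models'
# dictionary lookups, and the values are made up.
entry = {
    "id": "qwen3-235b-a22b",
    "info": {
        "meta": {
            "chat_type": ["t2t", "search", "t2i"],   # supported modes
            "capabilities": {"vision": True},
        }
    },
}

is_text = "t2t" in entry["info"]["meta"]["chat_type"]
is_image = any(t in entry["info"]["meta"]["chat_type"] for t in ("t2i", "image_edit"))
is_vision = entry["info"]["meta"]["capabilities"]["vision"]
```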
```diff
@@ -94,7 +156,8 @@ async def create_async_generator(
                         raise RuntimeError("Failed to extract bx-umidtoken.")
                     cls._midtoken = match.group(1)
                     cls._midtoken_uses = 1
-                    debug.log(f"[Qwen] INFO: New midtoken obtained. Use count: {cls._midtoken_uses}. Midtoken: {cls._midtoken}")
+                    debug.log(
+                        f"[Qwen] INFO: New midtoken obtained. Use count: {cls._midtoken_uses}. Midtoken: {cls._midtoken}")
                 else:
                     cls._midtoken_uses += 1
                     debug.log(f"[Qwen] INFO: Reusing midtoken. Use count: {cls._midtoken_uses}")
```
```diff
@@ -103,17 +166,16 @@ async def create_async_generator(
                 req_headers['bx-umidtoken'] = cls._midtoken
                 req_headers['bx-v'] = '2.5.31'
                 message_id = str(uuid.uuid4())
-                parent_id = None
                 if conversation is None:
                     chat_payload = {
                         "title": "New Chat",
                         "models": [model_name],
                         "chat_mode": "normal",
-                        "chat_type": "t2t",
+                        "chat_type": chat_type,
                         "timestamp": int(time() * 1000)
                     }
                     async with session.post(
-                        f'{cls.url}/api/v2/chats/new', json=chat_payload, headers=req_headers, proxy=proxy
+                            f'{cls.url}/api/v2/chats/new', json=chat_payload, headers=req_headers, proxy=proxy
                     ) as resp:
                         resp.raise_for_status()
                         data = await resp.json()
```
```diff
@@ -124,7 +186,32 @@ async def create_async_generator(
                             cookies={key: value for key, value in resp.cookies.items()},
                             parent_id=None
                         )
-
+                files = []
+                media = list(merge_media(media))
+                if media:
+                    for _file, file_name in media:
+                        file_class: Literal["default", "vision", "video", "audio", "document"] = "vision"
+                        _type: Literal["file", "image", "video", "audio"] = "image"
+                        file_type = "image/jpeg"
+                        showType: Literal["file", "image", "video", "audio"] = "image"
+
+                        if isinstance(_file, str) and _file.startswith('http'):
+                            if chat_type == "image_edit":
+                                file_class = "vision"
+                                _type = "image"
+                                file_type = "image"
+                                showType = "image"
+
+                        files.append(
+                            {
+                                "type": _type,
+                                "name": file_name,
+                                "file_type": file_type,
+                                "showType": showType,
+                                "file_class": file_class,
+                                "url": _file
+                            }
+                        )
                 msg_payload = {
                     "stream": stream,
                     "incremental_output": stream,
```
```diff
@@ -140,28 +227,30 @@ async def create_async_generator(
                             "role": "user",
                             "content": prompt,
                             "user_action": "chat",
-                            "files": [],
+                            "files": files,
                             "models": [model_name],
-                            "chat_type": "t2t",
+                            "chat_type": chat_type,
                             "feature_config": {
                                 "thinking_enabled": enable_thinking,
                                 "output_schema": "phase",
                                 "thinking_budget": 81920
                             },
                             "extra": {
                                 "meta": {
-                                    "subChatType": "t2t"
+                                    "subChatType": chat_type
                                 }
                             },
-                            "sub_chat_type": "t2t",
+                            "sub_chat_type": chat_type,
                             "parent_id": None
                         }
                     ]
                 }
+                if aspect_ratio:
+                    msg_payload["size"] = aspect_ratio
 
                 async with session.post(
-                    f'{cls.url}/api/v2/chat/completions?chat_id={conversation.chat_id}', json=msg_payload,
-                    headers=req_headers, proxy=proxy, timeout=timeout, cookies=conversation.cookies
+                        f'{cls.url}/api/v2/chat/completions?chat_id={conversation.chat_id}', json=msg_payload,
+                        headers=req_headers, proxy=proxy, timeout=timeout, cookies=conversation.cookies
                 ) as resp:
                     first_line = await resp.content.readline()
                     line_str = first_line.decode().strip()
```
```diff
@@ -182,10 +271,17 @@ async def create_async_generator(
                             delta = choices[0].get("delta", {})
                             phase = delta.get("phase")
                             content = delta.get("content")
+                            status = delta.get("status")
+                            extra = delta.get("extra", {})
                             if phase == "think" and not thinking_started:
                                 thinking_started = True
                             elif phase == "answer" and thinking_started:
                                 thinking_started = False
+                            elif phase == "image_gen" and status == "typing":
+                                yield ImageResponse(content, prompt, extra)
+                                continue
+                            elif phase == "image_gen" and status == "finished":
+                                yield FinishReason("stop")
                             if content:
                                 yield Reasoning(content) if thinking_started else content
                         except (json.JSONDecodeError, KeyError, IndexError):
```
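With the new branches a single stream can interleave plain text, `Reasoning` chunks, `ImageResponse` items, and a terminal `FinishReason`. A minimal consumer sketch that dispatches on item type (`chunks` stands for the async generator returned by `create_async_generator`):

```python
from g4f.providers.response import Reasoning, ImageResponse, FinishReason

async def consume(chunks):
    # Dispatch on the type of each item the provider yields.
    async for chunk in chunks:
        if isinstance(chunk, Reasoning):
            print("[thinking]", chunk)
        elif isinstance(chunk, ImageResponse):
            print("[image]", chunk)   # yielded while image_gen status == "typing"
        elif isinstance(chunk, FinishReason):
            break                     # "stop" once image_gen reports "finished"
        else:
            print(chunk, end="")      # plain answer text
```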
```diff
@@ -198,13 +294,14 @@ async def create_async_generator(
                 is_rate_limit = (isinstance(e, aiohttp.ClientResponseError) and e.status == 429) or \
                                 ("RateLimited" in str(e))
                 if is_rate_limit:
-                    debug.log(f"[Qwen] WARNING: Rate limit detected (attempt {attempt + 1}/5). Invalidating current midtoken.")
+                    debug.log(
+                        f"[Qwen] WARNING: Rate limit detected (attempt {attempt + 1}/5). Invalidating current midtoken.")
                     cls._midtoken = None
                     cls._midtoken_uses = 0
+                    conversation = None
                     await asyncio.sleep(2)
                     continue
                 else:
                     raise e
 
         raise RateLimitError("The Qwen provider reached the request limit after 5 attempts.")
-
```
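Taken together, the new `chat_type` and `aspect_ratio` parameters expose image generation through the same entry point. A hedged usage sketch (model choice and prompt are illustrative; whether a given model accepts `t2i` depends on the live model list):

```python
import asyncio
from g4f.Provider.Qwen import Qwen

async def main():
    async for item in Qwen.create_async_generator(
        model="qwen3-235b-a22b",
        messages=[{"role": "user", "content": "A watercolor fox"}],
        chat_type="t2i",        # request image generation
        aspect_ratio="16:9",    # forwarded as msg_payload["size"]
    ):
        print(item)

asyncio.run(main())
```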
