Skip to content

Commit 4e5c26f

Browse files
committed
Remove unused import and update sse_stream function signature for consistency
1 parent bfc7707 commit 4e5c26f

File tree

3 files changed

+25
-26
lines changed

3 files changed

+25
-26
lines changed

g4f/Provider/Blackbox.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@
88
import string
99
from pathlib import Path
1010
from typing import Optional
11-
from datetime import datetime, timedelta
1211

1312
from ..typing import AsyncResult, Messages, MediaListType
1413
from ..requests.raise_for_status import raise_for_status

g4f/Provider/Qwen.py

Lines changed: 24 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
from ..typing import AsyncResult, Messages, MediaListType
1313
from ..providers.response import JsonConversation, Reasoning, Usage, ImageResponse, FinishReason
1414
from ..requests import sse_stream
15+
from ..tools.media import merge_media
1516
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
1617
from .helper import get_last_user_message
1718
from .. import debug
@@ -60,7 +61,6 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
6061
supports_message_history = False
6162

6263
_models_loaded = True
63-
# Complete list of models, extracted from the API
6464
image_models = image_models
6565
text_models = text_models
6666
vision_models = vision_models
@@ -88,27 +88,29 @@ def get_models(cls) -> list[str]:
8888
cls.models = [model["id"] for model in models]
8989
cls.default_model = cls.models[0]
9090
cls._models_loaded = True
91+
cls.live += 1
92+
debug.log(f"Loaded {len(cls.models)} models from {cls.url}")
9193

9294
else:
9395
debug.log(f"Failed to load models from {cls.url}: {response.status_code} {response.reason}")
9496
return cls.models
9597

9698
@classmethod
9799
async def create_async_generator(
98-
cls,
99-
model: str,
100-
messages: Messages,
101-
media: MediaListType = None,
102-
conversation: JsonConversation = None,
103-
proxy: str = None,
104-
timeout: int = 120,
105-
stream: bool = True,
106-
enable_thinking: bool = True,
107-
chat_type: Literal[
108-
"t2t", "search", "artifacts", "web_dev", "deep_research", "t2i", "image_edit", "t2v"
109-
] = "t2t",
110-
image_size: Optional[Literal["1:1", "4:3", "3:4", "16:9", "9:16"]] = None,
111-
**kwargs
100+
cls,
101+
model: str,
102+
messages: Messages,
103+
media: MediaListType = None,
104+
conversation: JsonConversation = None,
105+
proxy: str = None,
106+
timeout: int = 120,
107+
stream: bool = True,
108+
enable_thinking: bool = True,
109+
chat_type: Literal[
110+
"t2t", "search", "artifacts", "web_dev", "deep_research", "t2i", "image_edit", "t2v"
111+
] = "t2t",
112+
aspect_ratio: Optional[Literal["1:1", "4:3", "3:4", "16:9", "9:16"]] = None,
113+
**kwargs
112114
) -> AsyncResult:
113115
"""
114116
chat_type:
@@ -164,7 +166,6 @@ async def create_async_generator(
164166
req_headers['bx-umidtoken'] = cls._midtoken
165167
req_headers['bx-v'] = '2.5.31'
166168
message_id = str(uuid.uuid4())
167-
parent_id = None
168169
if conversation is None:
169170
chat_payload = {
170171
"title": "New Chat",
@@ -186,8 +187,9 @@ async def create_async_generator(
186187
parent_id=None
187188
)
188189
files = []
190+
media = list(merge_media(media))
189191
if media:
190-
for index, (_file, file_name) in enumerate(media):
192+
for _file, file_name in media:
191193
file_class: Literal["default", "vision", "video", "audio", "document"] = "vision"
192194
_type: Literal["file", "image", "video", "audio"] = "image"
193195
file_type = "image/jpeg"
@@ -206,7 +208,7 @@ async def create_async_generator(
206208
"name": file_name,
207209
"file_type": file_type,
208210
"showType": showType,
209-
"file_class": file_class, # "document"
211+
"file_class": file_class,
210212
"url": _file
211213
}
212214
)
@@ -243,8 +245,8 @@ async def create_async_generator(
243245
}
244246
]
245247
}
246-
if image_size:
247-
msg_payload["size"] = image_size
248+
if aspect_ratio:
249+
msg_payload["size"] = aspect_ratio
248250

249251
async with session.post(
250252
f'{cls.url}/api/v2/chat/completions?chat_id={conversation.chat_id}', json=msg_payload,
@@ -276,11 +278,10 @@ async def create_async_generator(
276278
elif phase == "answer" and thinking_started:
277279
thinking_started = False
278280
elif phase == "image_gen" and status == "typing":
279-
yield ImageResponse([content], "", extra)
281+
yield ImageResponse(content, prompt, extra)
280282
continue
281283
elif phase == "image_gen" and status == "finished":
282-
yield FinishReason(status)
283-
284+
yield FinishReason("stop")
284285
if content:
285286
yield Reasoning(content) if thinking_started else content
286287
except (json.JSONDecodeError, KeyError, IndexError):

g4f/requests/__init__.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -113,7 +113,6 @@ def stop_browser():
113113
await callback(page)
114114
for c in await page.send(nodriver.cdp.network.get_cookies([url])):
115115
cookies[c.name] = c.value
116-
await page.close()
117116
stop_browser()
118117
return {
119118
"impersonate": "chrome",
@@ -219,7 +218,7 @@ def on_stop():
219218
BrowserConfig.stop_browser = on_stop
220219
return browser, on_stop
221220

222-
async def sse_stream(iter_lines: Iterator[bytes]) -> AsyncIterator[dict]:
221+
async def sse_stream(iter_lines: AsyncIterator[bytes]) -> AsyncIterator[dict]:
223222
if hasattr(iter_lines, "content"):
224223
iter_lines = iter_lines.content
225224
elif hasattr(iter_lines, "iter_lines"):

0 commit comments

Comments (0)