Skip to content

Commit 438e5d9

Browse files
authored
Merge pull request open-webui#16456 from open-webui/dev
0.6.22
2 parents 30d0f8b + 17cc3b7 commit 438e5d9

File tree

77 files changed

+1922
-202
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

77 files changed

+1922
-202
lines changed

CHANGELOG.md

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,22 @@ All notable changes to this project will be documented in this file.
55
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
66
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
77

8+
## [0.6.22] - 2025-08-11
9+
10+
### Added
11+
12+
- 🔗 **OpenAI API '/v1' Endpoint Compatibility**: Enhanced API compatibility by supporting requests to paths like '/v1/models', '/v1/embeddings', and '/v1/chat/completions'. This allows Open WebUI to integrate more seamlessly with tools that expect OpenAI's '/v1' API structure.
13+
- 🪄 **Toggle for Guided Response Regeneration Menu**: Introduced a new setting in 'Interface' settings, providing the ability to enable or disable the expanded guided response regeneration menu. This offers users more control over their chat workflow and interface preferences.
14+
- **General UI/UX Enhancements**: Implemented various user interface and experience improvements, including more rounded corners for cards in the Knowledge, Prompts, and Tools sections, and minor layout adjustments within the chat Navbar for improved visual consistency.
15+
- 🌐 **Localization & Internationalization Improvements**: Introduced support for the Kabyle (Taqbaylit) language and refined and expanded the Chinese translations, broadening the platform's linguistic coverage.
16+
17+
### Fixed
18+
19+
- 🐞 **OpenAI Error Message Propagation**: Resolved an issue where specific OpenAI API errors (e.g., 'Organization Not Verified') were obscured by generic 'JSONResponse' iterable errors. The system now correctly propagates detailed and actionable error messages from OpenAI to the user.
20+
- 🌲 **Pinecone Insert Issue**: Fixed a bug that prevented proper insertion of items into Pinecone vector databases.
21+
- 📦 **S3 Vector Issue**: Resolved a bug where s3vector functionality failed due to incorrect import paths.
22+
- 🏠 **Landing Page Option Setting Not Working**: Fixed an issue where the landing page option in settings was not functioning as intended.
23+
824
## [0.6.21] - 2025-08-10
925

1026
### Added

backend/open_webui/config.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -871,7 +871,7 @@ def oidc_oauth_register(client: OAuth):
871871
ENABLE_DIRECT_CONNECTIONS = PersistentConfig(
872872
"ENABLE_DIRECT_CONNECTIONS",
873873
"direct.enable",
874-
os.environ.get("ENABLE_DIRECT_CONNECTIONS", "True").lower() == "true",
874+
os.environ.get("ENABLE_DIRECT_CONNECTIONS", "False").lower() == "true",
875875
)
876876

877877
####################################

backend/open_webui/main.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1261,6 +1261,7 @@ async def inspect_websocket(request: Request, call_next):
12611261

12621262

12631263
@app.get("/api/models")
1264+
@app.get("/api/v1/models") # Experimental: Compatibility with OpenAI API
12641265
async def get_models(
12651266
request: Request, refresh: bool = False, user=Depends(get_verified_user)
12661267
):
@@ -1341,6 +1342,7 @@ async def get_base_models(request: Request, user=Depends(get_admin_user)):
13411342

13421343

13431344
@app.post("/api/embeddings")
1345+
@app.post("/api/v1/embeddings") # Experimental: Compatibility with OpenAI API
13441346
async def embeddings(
13451347
request: Request, form_data: dict, user=Depends(get_verified_user)
13461348
):
@@ -1367,6 +1369,7 @@ async def embeddings(
13671369

13681370

13691371
@app.post("/api/chat/completions")
1372+
@app.post("/api/v1/chat/completions") # Experimental: Compatibility with OpenAI API
13701373
async def chat_completion(
13711374
request: Request,
13721375
form_data: dict,

backend/open_webui/retrieval/vector/dbs/pinecone.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,8 @@
3232
PINECONE_CLOUD,
3333
)
3434
from open_webui.env import SRC_LOG_LEVELS
35+
from open_webui.retrieval.vector.utils import stringify_metadata
36+
3537

3638
NO_LIMIT = 10000 # Reasonable limit to avoid overwhelming the system
3739
BATCH_SIZE = 100 # Recommended batch size for Pinecone operations
@@ -183,7 +185,7 @@ def _create_points(
183185
point = {
184186
"id": item["id"],
185187
"values": item["vector"],
186-
"metadata": metadata,
188+
"metadata": stringify_metadata(metadata),
187189
}
188190
points.append(point)
189191
return points

backend/open_webui/retrieval/vector/dbs/s3vector.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from backend.open_webui.retrieval.vector.utils import stringify_metadata
1+
from open_webui.retrieval.vector.utils import stringify_metadata
22
from open_webui.retrieval.vector.main import (
33
VectorDBBase,
44
VectorItem,

backend/open_webui/utils/middleware.py

Lines changed: 90 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919

2020

2121
from fastapi import Request, HTTPException
22-
from starlette.responses import Response, StreamingResponse
22+
from starlette.responses import Response, StreamingResponse, JSONResponse
2323

2424

2525
from open_webui.models.chats import Chats
@@ -1254,91 +1254,111 @@ async def background_tasks_handler():
12541254
# Non-streaming response
12551255
if not isinstance(response, StreamingResponse):
12561256
if event_emitter:
1257-
if "error" in response:
1258-
error = response["error"].get("detail", response["error"])
1259-
Chats.upsert_message_to_chat_by_id_and_message_id(
1260-
metadata["chat_id"],
1261-
metadata["message_id"],
1262-
{
1263-
"error": {"content": error},
1264-
},
1265-
)
1266-
1267-
if "selected_model_id" in response:
1268-
Chats.upsert_message_to_chat_by_id_and_message_id(
1269-
metadata["chat_id"],
1270-
metadata["message_id"],
1271-
{
1272-
"selectedModelId": response["selected_model_id"],
1273-
},
1274-
)
1275-
1276-
choices = response.get("choices", [])
1277-
if choices and choices[0].get("message", {}).get("content"):
1278-
content = response["choices"][0]["message"]["content"]
1279-
1280-
if content:
1257+
if isinstance(response, dict) or isinstance(response, JSONResponse):
12811258

1282-
await event_emitter(
1283-
{
1284-
"type": "chat:completion",
1285-
"data": response,
1286-
}
1287-
)
1288-
1289-
title = Chats.get_chat_title_by_id(metadata["chat_id"])
1290-
1291-
await event_emitter(
1259+
if isinstance(response, JSONResponse) and isinstance(
1260+
response.body, bytes
1261+
):
1262+
try:
1263+
response_data = json.loads(response.body.decode("utf-8"))
1264+
except json.JSONDecodeError:
1265+
response_data = {"error": {"detail": "Invalid JSON response"}}
1266+
else:
1267+
response_data = response
1268+
1269+
if "error" in response_data:
1270+
error = response_data["error"].get("detail", response_data["error"])
1271+
Chats.upsert_message_to_chat_by_id_and_message_id(
1272+
metadata["chat_id"],
1273+
metadata["message_id"],
12921274
{
1293-
"type": "chat:completion",
1294-
"data": {
1295-
"done": True,
1296-
"content": content,
1297-
"title": title,
1298-
},
1299-
}
1275+
"error": {"content": error},
1276+
},
13001277
)
13011278

1302-
# Save message in the database
1279+
if "selected_model_id" in response_data:
13031280
Chats.upsert_message_to_chat_by_id_and_message_id(
13041281
metadata["chat_id"],
13051282
metadata["message_id"],
13061283
{
1307-
"role": "assistant",
1308-
"content": content,
1284+
"selectedModelId": response_data["selected_model_id"],
13091285
},
13101286
)
13111287

1312-
# Send a webhook notification if the user is not active
1313-
if not get_active_status_by_user_id(user.id):
1314-
webhook_url = Users.get_user_webhook_url_by_id(user.id)
1315-
if webhook_url:
1316-
post_webhook(
1317-
request.app.state.WEBUI_NAME,
1318-
webhook_url,
1319-
f"{title} - {request.app.state.config.WEBUI_URL}/c/{metadata['chat_id']}\n\n{content}",
1320-
{
1321-
"action": "chat",
1322-
"message": content,
1288+
choices = response_data.get("choices", [])
1289+
if choices and choices[0].get("message", {}).get("content"):
1290+
content = response_data["choices"][0]["message"]["content"]
1291+
1292+
if content:
1293+
await event_emitter(
1294+
{
1295+
"type": "chat:completion",
1296+
"data": response_data,
1297+
}
1298+
)
1299+
1300+
title = Chats.get_chat_title_by_id(metadata["chat_id"])
1301+
1302+
await event_emitter(
1303+
{
1304+
"type": "chat:completion",
1305+
"data": {
1306+
"done": True,
1307+
"content": content,
13231308
"title": title,
1324-
"url": f"{request.app.state.config.WEBUI_URL}/c/{metadata['chat_id']}",
13251309
},
1326-
)
1310+
}
1311+
)
13271312

1328-
await background_tasks_handler()
1313+
# Save message in the database
1314+
Chats.upsert_message_to_chat_by_id_and_message_id(
1315+
metadata["chat_id"],
1316+
metadata["message_id"],
1317+
{
1318+
"role": "assistant",
1319+
"content": content,
1320+
},
1321+
)
13291322

1330-
if events and isinstance(events, list) and isinstance(response, dict):
1331-
extra_response = {}
1332-
for event in events:
1333-
if isinstance(event, dict):
1334-
extra_response.update(event)
1335-
else:
1336-
extra_response[event] = True
1323+
# Send a webhook notification if the user is not active
1324+
if not get_active_status_by_user_id(user.id):
1325+
webhook_url = Users.get_user_webhook_url_by_id(user.id)
1326+
if webhook_url:
1327+
post_webhook(
1328+
request.app.state.WEBUI_NAME,
1329+
webhook_url,
1330+
f"{title} - {request.app.state.config.WEBUI_URL}/c/{metadata['chat_id']}\n\n{content}",
1331+
{
1332+
"action": "chat",
1333+
"message": content,
1334+
"title": title,
1335+
"url": f"{request.app.state.config.WEBUI_URL}/c/{metadata['chat_id']}",
1336+
},
1337+
)
13371338

1338-
response = {
1339-
**extra_response,
1340-
**response,
1341-
}
1339+
await background_tasks_handler()
1340+
1341+
if events and isinstance(events, list):
1342+
extra_response = {}
1343+
for event in events:
1344+
if isinstance(event, dict):
1345+
extra_response.update(event)
1346+
else:
1347+
extra_response[event] = True
1348+
1349+
response_data = {
1350+
**extra_response,
1351+
**response_data,
1352+
}
1353+
1354+
if isinstance(response, dict):
1355+
response = response_data
1356+
if isinstance(response, JSONResponse):
1357+
response = JSONResponse(
1358+
content=response_data,
1359+
headers=response.headers,
1360+
status_code=response.status_code,
1361+
)
13421362

13431363
return response
13441364
else:

package-lock.json

Lines changed: 2 additions & 2 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "open-webui",
3-
"version": "0.6.21",
3+
"version": "0.6.22",
44
"private": true,
55
"scripts": {
66
"dev": "npm run pyodide:fetch && vite dev --host",

src/lib/apis/openai/index.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -379,7 +379,7 @@ export const generateOpenAIChatCompletion = async (
379379
return res.json();
380380
})
381381
.catch((err) => {
382-
error = `${err?.detail ?? err}`;
382+
error = err?.detail ?? err;
383383
return null;
384384
});
385385

src/lib/components/chat/Chat.svelte

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1783,11 +1783,24 @@
17831783
},
17841784
`${WEBUI_BASE_URL}/api`
17851785
).catch(async (error) => {
1786-
toast.error(`${error}`);
1786+
console.log(error);
1787+
1788+
let errorMessage = error;
1789+
if (error?.error?.message) {
1790+
errorMessage = error.error.message;
1791+
} else if (error?.message) {
1792+
errorMessage = error.message;
1793+
}
17871794
1795+
if (typeof errorMessage === 'object') {
1796+
errorMessage = $i18n.t(`Uh-oh! There was an issue with the response.`);
1797+
}
1798+
1799+
toast.error(`${errorMessage}`);
17881800
responseMessage.error = {
17891801
content: error
17901802
};
1803+
17911804
responseMessage.done = true;
17921805
17931806
history.messages[responseMessageId] = responseMessage;

0 commit comments

Comments (0)