Skip to content

Commit aa7aca9

Browse files
authored
fix: upgrade pydantic to 2.10 to fix NodeWithScore serialization error (#587)
Also: fix typo in the `enable_kg_enhance_query_refine` parameter name (was `enable_kg_enchance_query_refine`)
1 parent 74d012c commit aa7aca9

File tree

8 files changed

+29
-27
lines changed

8 files changed

+29
-27
lines changed

backend/app/api/admin_routes/models.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -44,4 +44,4 @@ class ChatEngineBasedRetrieveRequest(BaseModel):
4444
top_k: Optional[int] = 5
4545
similarity_top_k: Optional[int] = None
4646
oversampling_factor: Optional[int] = 5
47-
enable_kg_enchance_query_refine: Optional[bool] = True
47+
enable_kg_enhance_query_refine: Optional[bool] = False

backend/app/api/admin_routes/retrieve.py

+10-9
Original file line numberDiff line numberDiff line change
@@ -15,15 +15,15 @@
1515

1616

1717
@router.get("/admin/retrieve/documents")
18-
async def retrieve_documents(
18+
def retrieve_documents(
1919
session: SessionDep,
2020
user: CurrentSuperuserDep,
2121
question: str,
2222
chat_engine: str = "default",
2323
top_k: Optional[int] = 5,
2424
similarity_top_k: Optional[int] = None,
2525
oversampling_factor: Optional[int] = 5,
26-
enable_kg_enchance_query_refine: Optional[bool] = True,
26+
enable_kg_enhance_query_refine: Optional[bool] = True,
2727
) -> List[Document]:
2828
try:
2929
return retrieve_service.chat_engine_retrieve_documents(
@@ -33,7 +33,7 @@ async def retrieve_documents(
3333
chat_engine_name=chat_engine,
3434
similarity_top_k=similarity_top_k,
3535
oversampling_factor=oversampling_factor,
36-
enable_kg_enchance_query_refine=enable_kg_enchance_query_refine,
36+
enable_kg_enhance_query_refine=enable_kg_enhance_query_refine,
3737
)
3838
except KBNotFound as e:
3939
raise e
@@ -43,26 +43,27 @@ async def retrieve_documents(
4343

4444

4545
@router.get("/admin/embedding_retrieve")
46-
async def embedding_retrieve(
46+
def embedding_retrieve(
4747
session: SessionDep,
4848
user: CurrentSuperuserDep,
4949
question: str,
5050
chat_engine: str = "default",
5151
top_k: Optional[int] = 5,
5252
similarity_top_k: Optional[int] = None,
5353
oversampling_factor: Optional[int] = 5,
54-
enable_kg_enchance_query_refine: Optional[bool] = True,
54+
enable_kg_enhance_query_refine=False,
5555
) -> List[NodeWithScore]:
5656
try:
57-
return retrieve_service.chat_engine_retrieve_chunks(
57+
nodes = retrieve_service.chat_engine_retrieve_chunks(
5858
session,
5959
question=question,
6060
top_k=top_k,
6161
chat_engine_name=chat_engine,
6262
similarity_top_k=similarity_top_k,
6363
oversampling_factor=oversampling_factor,
64-
enable_kg_enchance_query_refine=enable_kg_enchance_query_refine,
64+
enable_kg_enhance_query_refine=enable_kg_enhance_query_refine,
6565
)
66+
return nodes
6667
except KBNotFound as e:
6768
raise e
6869
except Exception as e:
@@ -71,7 +72,7 @@ async def embedding_retrieve(
7172

7273

7374
@router.post("/admin/embedding_retrieve")
74-
async def embedding_search(
75+
def embedding_search(
7576
session: SessionDep,
7677
user: CurrentSuperuserDep,
7778
request: ChatEngineBasedRetrieveRequest,
@@ -83,7 +84,7 @@ async def embedding_search(
8384
top_k=request.top_k,
8485
similarity_top_k=request.similarity_top_k,
8586
oversampling_factor=request.oversampling_factor,
86-
enable_kg_enchance_query_refine=request.enable_kg_enchance_query_refine,
87+
enable_kg_enhance_query_refine=request.enable_kg_enhance_query_refine,
8788
)
8889
except KBNotFound as e:
8990
raise e

backend/app/rag/retrieve.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ def chat_engine_retrieve_documents(
2727
chat_engine_name: str = "default",
2828
similarity_top_k: Optional[int] = None,
2929
oversampling_factor: Optional[int] = None,
30-
enable_kg_enchance_query_refine: bool = False,
30+
enable_kg_enhance_query_refine: bool = False,
3131
) -> List[DBDocument]:
3232
chat_engine_config = ChatEngineConfig.load_from_db(db_session, chat_engine_name)
3333
if not chat_engine_config.knowledge_base:
@@ -40,7 +40,7 @@ def chat_engine_retrieve_documents(
4040
chat_engine_name=chat_engine_name,
4141
similarity_top_k=similarity_top_k,
4242
oversampling_factor=oversampling_factor,
43-
enable_kg_enchance_query_refine=enable_kg_enchance_query_refine,
43+
enable_kg_enhance_query_refine=enable_kg_enhance_query_refine,
4444
)
4545

4646
linked_knowledge_base = chat_engine_config.knowledge_base.linked_knowledge_base
@@ -59,15 +59,15 @@ def chat_engine_retrieve_chunks(
5959
chat_engine_name: str = "default",
6060
similarity_top_k: Optional[int] = None,
6161
oversampling_factor: Optional[int] = None,
62-
enable_kg_enchance_query_refine: bool = False,
62+
enable_kg_enhance_query_refine: bool = False,
6363
) -> List[NodeWithScore]:
6464
retriever = ChatEngineBasedRetriever(
6565
db_session=db_session,
6666
engine_name=chat_engine_name,
6767
top_k=top_k,
6868
similarity_top_k=similarity_top_k,
6969
oversampling_factor=oversampling_factor,
70-
enable_kg_enchance_query_refine=enable_kg_enchance_query_refine,
70+
enable_kg_enhance_query_refine=enable_kg_enhance_query_refine,
7171
)
7272
return retriever.retrieve(question)
7373

backend/app/rag/retrievers/LegacyChatEngineRetriever.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -33,14 +33,14 @@ def __init__(
3333
top_k: int = 10,
3434
similarity_top_k: int = None,
3535
oversampling_factor: int = 5,
36-
enable_kg_enchance_query_refine: bool = False,
36+
enable_kg_enhance_query_refine: bool = False,
3737
):
3838
self.db_session = db_session
3939
self.engine_name = engine_name
4040
self.top_k = top_k
4141
self.similarity_top_k = similarity_top_k
4242
self.oversampling_factor = oversampling_factor
43-
self.enable_kg_enchance_query_refine = enable_kg_enchance_query_refine
43+
self.enable_kg_enhance_query_refine = enable_kg_enhance_query_refine
4444

4545
self.chat_engine_config = chat_engine_config or ChatEngineConfig.load_from_db(
4646
db_session, engine_name
@@ -128,7 +128,7 @@ def _refine_query(self, query: str) -> str:
128128
)
129129
graph_knowledges_context = graph_knowledges.template
130130
else:
131-
entities, relations = graph_index.retrieve_with_weight(
131+
entities, relations, _ = graph_index.retrieve_with_weight(
132132
query,
133133
[],
134134
depth=kg_config.depth,

backend/app/repositories/chunk.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ def document_exists_chunks(self, session: Session, document_id: int) -> bool:
2323
)
2424

2525
def get_documents_by_chunk_ids(
26-
self, session: Session, chunk_ids: list[int]
26+
self, session: Session, chunk_ids: list[str]
2727
) -> list[DBDocument]:
2828
stmt = select(DBDocument).where(
2929
DBDocument.id.in_(

backend/pyproject.toml

+4-3
Original file line numberDiff line numberDiff line change
@@ -12,9 +12,9 @@ dependencies = [
1212
"celery>=5.4.0",
1313
"dspy-ai>=2.4.9",
1414
"langfuse>=2.48.0",
15-
"llama-index>=0.11.10",
15+
"llama-index==0.12.10",
1616
"alembic>=1.13.1",
17-
"pydantic>=2.8.2",
17+
"pydantic==2.10.5",
1818
# Update Check: https://github.com/pydantic/pydantic/issues/8061
1919
"pydantic-settings>=2.3.3",
2020
"sentry-sdk>=2.5.1",
@@ -23,7 +23,8 @@ dependencies = [
2323
"tenacity~=8.4.0",
2424
"redis>=5.0.5",
2525
"flower>=2.0.1",
26-
"llama-index-llms-gemini>=0.1.11",
26+
# Notice: Ensure bug issue https://github.com/run-llama/llama_index/issues/17475 be fixed before upgrade to 0.4.3
27+
"llama-index-llms-gemini==0.4.2",
2728
"tidb-vector>=0.0.14",
2829
"deepdiff>=7.0.1",
2930
"python-dotenv>=1.0.1",

backend/requirements-dev.lock

+3-3
Original file line numberDiff line numberDiff line change
@@ -361,7 +361,7 @@ llama-index-llms-anthropic==0.6.3
361361
llama-index-llms-bedrock==0.3.3
362362
llama-index-llms-gemini==0.4.2
363363
llama-index-llms-ollama==0.4.2
364-
llama-index-llms-openai==0.3.12
364+
llama-index-llms-openai==0.3.13
365365
# via llama-index
366366
# via llama-index-agent-openai
367367
# via llama-index-cli
@@ -532,7 +532,7 @@ pyasn1-modules==0.4.0
532532
# via google-auth
533533
pycparser==2.22
534534
# via cffi
535-
pydantic==2.8.2
535+
pydantic==2.10.5
536536
# via anthropic
537537
# via cohere
538538
# via deepeval
@@ -551,7 +551,7 @@ pydantic==2.8.2
551551
# via pydantic-settings
552552
# via ragas
553553
# via sqlmodel
554-
pydantic-core==2.20.1
554+
pydantic-core==2.27.2
555555
# via pydantic
556556
pydantic-settings==2.6.1
557557
# via langchain-community

backend/requirements.lock

+3-3
Original file line numberDiff line numberDiff line change
@@ -354,7 +354,7 @@ llama-index-llms-anthropic==0.6.3
354354
llama-index-llms-bedrock==0.3.3
355355
llama-index-llms-gemini==0.4.2
356356
llama-index-llms-ollama==0.4.2
357-
llama-index-llms-openai==0.3.12
357+
llama-index-llms-openai==0.3.13
358358
# via llama-index
359359
# via llama-index-agent-openai
360360
# via llama-index-cli
@@ -520,7 +520,7 @@ pyasn1-modules==0.4.0
520520
# via google-auth
521521
pycparser==2.22
522522
# via cffi
523-
pydantic==2.8.2
523+
pydantic==2.10.5
524524
# via anthropic
525525
# via cohere
526526
# via deepeval
@@ -539,7 +539,7 @@ pydantic==2.8.2
539539
# via pydantic-settings
540540
# via ragas
541541
# via sqlmodel
542-
pydantic-core==2.20.1
542+
pydantic-core==2.27.2
543543
# via pydantic
544544
pydantic-settings==2.6.1
545545
# via langchain-community

0 commit comments

Comments (0)