diff --git a/.semversioner/next-release/minor-20250114014943106649.json b/.semversioner/next-release/minor-20250114014943106649.json
new file mode 100644
index 0000000000..6822ecd9f0
--- /dev/null
+++ b/.semversioner/next-release/minor-20250114014943106649.json
@@ -0,0 +1,4 @@
+{
+    "type": "minor",
+    "description": "add conversation_history to api"
+}
diff --git a/graphrag/api/query.py b/graphrag/api/query.py
index f41eec3e9b..0d34d2a05c 100644
--- a/graphrag/api/query.py
+++ b/graphrag/api/query.py
@@ -37,6 +37,9 @@
     get_global_search_engine,
     get_local_search_engine,
 )
+from graphrag.query.context_builder.conversation_history import (
+    ConversationHistory,
+)
 from graphrag.query.indexer_adapters import (
     read_indexer_communities,
     read_indexer_covariates,
@@ -68,6 +71,7 @@ async def global_search(
     dynamic_community_selection: bool,
     response_type: str,
     query: str,
+    conversation_history: ConversationHistory | None = None,
 ) -> tuple[
     str | dict[str, Any] | list[dict[str, Any]],
     str | list[pd.DataFrame] | dict[str, pd.DataFrame],
@@ -121,7 +125,7 @@ async def global_search(
         reduce_system_prompt=reduce_prompt,
         general_knowledge_inclusion_prompt=knowledge_prompt,
     )
-    result: SearchResult = await search_engine.asearch(query=query)
+    result: SearchResult = await search_engine.asearch(query=query, conversation_history=conversation_history)
     response = result.response
     context_data = _reformat_context_data(result.context_data)  # type: ignore
     return response, context_data
@@ -138,6 +142,7 @@ async def global_search_streaming(
     dynamic_community_selection: bool,
     response_type: str,
     query: str,
+    conversation_history: ConversationHistory | None = None,
 ) -> AsyncGenerator:
     """Perform a global search and return the context data and response via a generator.
 
@@ -190,7 +195,7 @@ async def global_search_streaming(
         reduce_system_prompt=reduce_prompt,
         general_knowledge_inclusion_prompt=knowledge_prompt,
     )
-    search_result = search_engine.astream_search(query=query)
+    search_result = search_engine.astream_search(query=query, conversation_history=conversation_history)
 
     # when streaming results, a context data object is returned as the first result
     # and the query response in subsequent tokens
@@ -217,6 +222,7 @@ async def local_search(
     community_level: int,
     response_type: str,
     query: str,
+    conversation_history: ConversationHistory | None = None,
 ) -> tuple[
     str | dict[str, Any] | list[dict[str, Any]],
     str | list[pd.DataFrame] | dict[str, pd.DataFrame],
@@ -268,7 +274,7 @@ async def local_search(
         system_prompt=prompt,
     )
 
-    result: SearchResult = await search_engine.asearch(query=query)
+    result: SearchResult = await search_engine.asearch(query=query, conversation_history=conversation_history)
     response = result.response
     context_data = _reformat_context_data(result.context_data)  # type: ignore
     return response, context_data
@@ -286,6 +292,7 @@ async def local_search_streaming(
     community_level: int,
     response_type: str,
     query: str,
+    conversation_history: ConversationHistory | None = None,
 ) -> AsyncGenerator:
     """Perform a local search and return the context data and response via a generator.
@@ -333,7 +340,7 @@ async def local_search_streaming(
         response_type=response_type,
         system_prompt=prompt,
     )
-    search_result = search_engine.astream_search(query=query)
+    search_result = search_engine.astream_search(query=query, conversation_history=conversation_history)
 
     # when streaming results, a context data object is returned as the first result
     # and the query response in subsequent tokens
@@ -358,6 +365,7 @@ async def drift_search(
     relationships: pd.DataFrame,
     community_level: int,
     query: str,
+    conversation_history: ConversationHistory | None = None,
 ) -> tuple[
     str | dict[str, Any] | list[dict[str, Any]],
     str | list[pd.DataFrame] | dict[str, pd.DataFrame],
@@ -410,7 +418,7 @@ async def drift_search(
         local_system_prompt=prompt,
     )
 
-    result: SearchResult = await search_engine.asearch(query=query)
+    result: SearchResult = await search_engine.asearch(query=query, conversation_history=conversation_history)
     response = result.response
     context_data = _reformat_context_data(result.context_data)  # type: ignore
 
@@ -430,6 +438,7 @@ async def basic_search(
     config: GraphRagConfig,
     text_units: pd.DataFrame,
     query: str,
+    conversation_history: ConversationHistory | None = None,
 ) -> tuple[
     str | dict[str, Any] | list[dict[str, Any]],
     str | list[pd.DataFrame] | dict[str, pd.DataFrame],
@@ -468,7 +477,7 @@ async def basic_search(
         system_prompt=prompt,
     )
 
-    result: SearchResult = await search_engine.asearch(query=query)
+    result: SearchResult = await search_engine.asearch(query=query, conversation_history=conversation_history)
     response = result.response
     context_data = _reformat_context_data(result.context_data)  # type: ignore
     return response, context_data
@@ -479,6 +488,7 @@ async def basic_search_streaming(
     config: GraphRagConfig,
     text_units: pd.DataFrame,
     query: str,
+    conversation_history: ConversationHistory | None = None,
 ) -> AsyncGenerator:
     """Perform a local search and return the context data and response via a generator.
 
@@ -513,7 +523,7 @@ async def basic_search_streaming(
         system_prompt=prompt,
     )
 
-    search_result = search_engine.astream_search(query=query)
+    search_result = search_engine.astream_search(query=query, conversation_history=conversation_history)
 
     # when streaming results, a context data object is returned as the first result
     # and the query response in subsequent tokens
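For reference, a minimal caller-side sketch of the new parameter (not part of the diff itself). The load_config call, parquet paths, and sample turns below are illustrative assumptions about the surrounding project layout, not something this change prescribes; ConversationHistory.from_list builds a history from role/content dicts.

# Illustrative usage sketch, assuming a standard GraphRAG project layout;
# the parquet paths and load_config call are assumptions, not part of this diff.
import asyncio
from pathlib import Path

import pandas as pd

from graphrag.api.query import global_search
from graphrag.config.load_config import load_config
from graphrag.query.context_builder.conversation_history import ConversationHistory


async def main() -> None:
    config = load_config(Path("."))  # assumes settings.yaml in the working dir
    entities = pd.read_parquet("output/create_final_entities.parquet")
    communities = pd.read_parquet("output/create_final_communities.parquet")
    community_reports = pd.read_parquet("output/create_final_community_reports.parquet")

    # Prior turns are supplied as role/content dicts and threaded into the
    # search context via the new conversation_history parameter.
    history = ConversationHistory.from_list([
        {"role": "user", "content": "Who are the key figures in the dataset?"},
        {"role": "assistant", "content": "The dataset centers on a small group of researchers."},
    ])

    response, context_data = await global_search(
        config=config,
        entities=entities,
        communities=communities,
        community_reports=community_reports,
        community_level=2,
        dynamic_community_selection=False,
        response_type="Multiple Paragraphs",
        query="What roles do they play?",  # follow-up resolved against history
        conversation_history=history,
    )
    print(response)


asyncio.run(main())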
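The streaming variants take the same parameter. Per the comments in the diff, the first item yielded is the context data and subsequent items are response tokens. A sketch against global_search_streaming, assuming it mirrors global_search's inputs and reusing the dataframes and history from the example above (inside the same coroutine):

    # Streaming sketch: first yield is context data, later yields are tokens.
    context_data = None
    full_response = ""
    async for stream_chunk in global_search_streaming(
        config=config,
        entities=entities,
        communities=communities,
        community_reports=community_reports,
        community_level=2,
        dynamic_community_selection=False,
        response_type="Multiple Paragraphs",
        query="What roles do they play?",
        conversation_history=history,
    ):
        if context_data is None:
            context_data = stream_chunk  # context object arrives first
        else:
            full_response += stream_chunk  # then the response, token by token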