diff --git a/.github/workflows/linting.yaml b/.github/workflows/linting.yaml index 7c12e0a278..aa05436996 100644 --- a/.github/workflows/linting.yaml +++ b/.github/workflows/linting.yaml @@ -27,4 +27,4 @@ jobs: pip install pre-commit - name: Run pre-commit - run: pre-commit run --all-files + run: pre-commit run --all-files --show-diff-on-failure diff --git a/README-zh.md b/README-zh.md index e69de29bb2..d345562f07 100644 --- a/README-zh.md +++ b/README-zh.md @@ -0,0 +1,1424 @@ +# LightRAG: Simple and Fast Retrieval-Augmented Generation + +LightRAG Diagram + +## 🎉 新闻 + +- [X] [2025.03.18]🎯📢LightRAG现已支持引文功能。 +- [X] [2025.02.05]🎯📢我们团队发布了[VideoRAG](https://github.com/HKUDS/VideoRAG),用于理解超长上下文视频。 +- [X] [2025.01.13]🎯📢我们团队发布了[MiniRAG](https://github.com/HKUDS/MiniRAG),使用小型模型简化RAG。 +- [X] [2025.01.06]🎯📢现在您可以[使用PostgreSQL进行存储](#using-postgresql-for-storage)。 +- [X] [2024.12.31]🎯📢LightRAG现在支持[通过文档ID删除](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#delete)。 +- [X] [2024.11.25]🎯📢LightRAG现在支持无缝集成[自定义知识图谱](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#insert-custom-kg),使用户能够用自己的领域专业知识增强系统。 +- [X] [2024.11.19]🎯📢LightRAG的综合指南现已在[LearnOpenCV](https://learnopencv.com/lightrag)上发布。非常感谢博客作者。 +- [X] [2024.11.11]🎯📢LightRAG现在支持[通过实体名称删除实体](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#delete)。 +- [X] [2024.11.09]🎯📢推出[LightRAG Gui](https://lightrag-gui.streamlit.app),允许您插入、查询、可视化和下载LightRAG知识。 +- [X] [2024.11.04]🎯📢现在您可以[使用Neo4J进行存储](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#using-neo4j-for-storage)。 +- [X] [2024.10.29]🎯📢LightRAG现在通过`textract`支持多种文件类型,包括PDF、DOC、PPT和CSV。 +- [X] [2024.10.20]🎯📢我们为LightRAG添加了一个新功能:图形可视化。 +- [X] [2024.10.18]🎯📢我们添加了[LightRAG介绍视频](https://youtu.be/oageL-1I0GE)的链接。感谢作者! +- [X] [2024.10.17]🎯📢我们创建了一个[Discord频道](https://discord.gg/yF2MmDJyGJ)!欢迎加入分享和讨论!🎉🎉 +- [X] [2024.10.16]🎯📢LightRAG现在支持[Ollama模型](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#quick-start)! +- [X] [2024.10.15]🎯📢LightRAG现在支持[Hugging Face模型](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#quick-start)! + +
+ + 算法流程图 + + +![LightRAG索引流程图](https://learnopencv.com/wp-content/uploads/2024/11/LightRAG-VectorDB-Json-KV-Store-Indexing-Flowchart-scaled.jpg) +*图1:LightRAG索引流程图 - 图片来源:[Source](https://learnopencv.com/lightrag/)* +![LightRAG检索和查询流程图](https://learnopencv.com/wp-content/uploads/2024/11/LightRAG-Querying-Flowchart-Dual-Level-Retrieval-Generation-Knowledge-Graphs-scaled.jpg) +*图2:LightRAG检索和查询流程图 - 图片来源:[Source](https://learnopencv.com/lightrag/)* + +
## 安装

### 安装LightRAG核心

* 从源代码安装(推荐)

```bash
cd LightRAG
pip install -e .
```

* 从PyPI安装

```bash
pip install lightrag-hku
```

### 安装LightRAG服务器

LightRAG服务器旨在提供Web UI和API支持。Web UI便于文档索引、知识图谱探索和简单的RAG查询界面。LightRAG服务器还提供兼容Ollama的接口,旨在将LightRAG模拟为Ollama聊天模型。这使得AI聊天机器人(如Open WebUI)可以轻松访问LightRAG。

* 从PyPI安装

```bash
pip install "lightrag-hku[api]"
```

* 从源代码安装

```bash
# 如有必要,创建Python虚拟环境
# 以可编辑模式安装并支持API
pip install -e ".[api]"
```

**有关LightRAG服务器的更多信息,请参阅[LightRAG服务器](./lightrag/api/README.md)。**

## 快速开始

* [视频演示](https://www.youtube.com/watch?v=g21royNJ4fw)展示如何在本地运行LightRAG。
* 所有代码都可以在`examples`中找到。
* 如果使用OpenAI模型,请在环境中设置OpenAI API密钥:`export OPENAI_API_KEY="sk-..."`。
* 下载演示文本"狄更斯的圣诞颂歌":

```bash
curl https://raw.githubusercontent.com/gusye1234/nano-graphrag/main/tests/mock_data.txt > ./book.txt
```

## 查询

使用以下Python代码片段(在脚本中)初始化LightRAG并执行查询:

```python
import os
import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, gpt_4o_complete, openai_embed
from lightrag.kg.shared_storage import initialize_pipeline_status
from lightrag.utils import setup_logger

setup_logger("lightrag", level="INFO")

async def initialize_rag():
    rag = LightRAG(
        working_dir="your/path",
        embedding_func=openai_embed,
        llm_model_func=gpt_4o_mini_complete
    )

    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag

def main():
    # 初始化RAG实例
    rag = asyncio.run(initialize_rag())
    # 插入文本
    rag.insert("Your text")

    # 执行朴素搜索
    mode="naive"
    # 执行本地搜索
    mode="local"
    # 执行全局搜索
    mode="global"
    # 执行混合搜索
    mode="hybrid"
    # 混合模式集成知识图谱和向量检索
    mode="mix"

    rag.query(
        "这个故事的主要主题是什么?",
        param=QueryParam(mode=mode)
    )

if __name__ == "__main__":
    main()
```

### 查询参数

```python
class QueryParam:
    mode: Literal["local", "global", "hybrid", "naive", "mix"] = "global"
    """指定检索模式:
    - "local":专注于上下文相关信息。
    - "global":利用全局知识。
    - "hybrid":结合本地和全局检索方法。
    - "naive":执行基本搜索,不使用高级技术。
    - "mix":集成知识图谱和向量检索。混合模式结合知识图谱和向量搜索:
      - 同时使用结构化(KG)和非结构化(向量)信息
      - 通过分析关系和上下文提供全面的答案
      - 通过HTML img标签支持图像内容
      - 允许通过top_k参数控制检索深度
    """
    only_need_context: bool = False
    """如果为True,仅返回检索到的上下文而不生成响应。"""
    response_type: str = "Multiple Paragraphs"
    """定义响应格式。示例:'Multiple Paragraphs'(多段落), 'Single Paragraph'(单段落), 'Bullet Points'(要点列表)。"""
    top_k: int = 60
    """要检索的顶部项目数量。在'local'模式下代表实体,在'global'模式下代表关系。"""
    max_token_for_text_unit: int = 4000
    """每个检索文本块允许的最大令牌数。"""
    max_token_for_global_context: int = 4000
    """全局检索中关系描述的最大令牌分配。"""
    max_token_for_local_context: int = 4000
    """本地检索中实体描述的最大令牌分配。"""
    ids: list[str] | None = None # 仅支持PG向量数据库
    """用于过滤RAG的ID列表。"""
    model_func: Callable[..., object] | None = None
    """查询使用的LLM模型函数。如果提供了此选项,它将代替LightRAG全局模型函数。
    这允许为不同的查询模式使用不同的模型。
    """
    ...
```

> top_k的默认值可以通过环境变量TOP_K更改。

### LLM and Embedding注入

LightRAG需要利用LLM和Embedding模型来完成文档索引和知识库查询工作。在初始化LightRAG时,需要把LLM和Embedding模型的操作函数注入到对象中:
+ 使用类OpenAI的API + +* LightRAG还支持类OpenAI的聊天/嵌入API: + +```python +async def llm_model_func( + prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs +) -> str: + return await openai_complete_if_cache( + "solar-mini", + prompt, + system_prompt=system_prompt, + history_messages=history_messages, + api_key=os.getenv("UPSTAGE_API_KEY"), + base_url="https://api.upstage.ai/v1/solar", + **kwargs + ) + +async def embedding_func(texts: list[str]) -> np.ndarray: + return await openai_embed( + texts, + model="solar-embedding-1-large-query", + api_key=os.getenv("UPSTAGE_API_KEY"), + base_url="https://api.upstage.ai/v1/solar" + ) + +async def initialize_rag(): + rag = LightRAG( + working_dir=WORKING_DIR, + llm_model_func=llm_model_func, + embedding_func=EmbeddingFunc( + embedding_dim=4096, + max_token_size=8192, + func=embedding_func + ) + ) + + await rag.initialize_storages() + await initialize_pipeline_status() + + return rag +``` + +
+ +
+ 使用Hugging Face模型 + +* 如果您想使用Hugging Face模型,只需要按如下方式设置LightRAG: + +参见`lightrag_hf_demo.py` + +```python +# 使用Hugging Face模型初始化LightRAG +rag = LightRAG( + working_dir=WORKING_DIR, + llm_model_func=hf_model_complete, # 使用Hugging Face模型进行文本生成 + llm_model_name='meta-llama/Llama-3.1-8B-Instruct', # Hugging Face的模型名称 + # 使用Hugging Face嵌入函数 + embedding_func=EmbeddingFunc( + embedding_dim=384, + max_token_size=5000, + func=lambda texts: hf_embed( + texts, + tokenizer=AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2"), + embed_model=AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2") + ) + ), +) +``` + +
+ +
+ 使用Ollama模型 +如果您想使用Ollama模型,您需要拉取计划使用的模型和嵌入模型,例如`nomic-embed-text`。 + +然后您只需要按如下方式设置LightRAG: + +```python +# 使用Ollama模型初始化LightRAG +rag = LightRAG( + working_dir=WORKING_DIR, + llm_model_func=ollama_model_complete, # 使用Ollama模型进行文本生成 + llm_model_name='your_model_name', # 您的模型名称 + # 使用Ollama嵌入函数 + embedding_func=EmbeddingFunc( + embedding_dim=768, + max_token_size=8192, + func=lambda texts: ollama_embed( + texts, + embed_model="nomic-embed-text" + ) + ), +) +``` + +* **增加上下文大小** + +为了使LightRAG正常工作,上下文应至少为32k令牌。默认情况下,Ollama模型的上下文大小为8k。您可以通过以下两种方式之一实现这一点: + +* **在Modelfile中增加`num_ctx`参数** + +1. 拉取模型: + +```bash +ollama pull qwen2 +``` + +2. 显示模型文件: + +```bash +ollama show --modelfile qwen2 > Modelfile +``` + +3. 编辑Modelfile,添加以下行: + +```bash +PARAMETER num_ctx 32768 +``` + +4. 创建修改后的模型: + +```bash +ollama create -f Modelfile qwen2m +``` + +* **通过Ollama API设置`num_ctx`** + +您可以使用`llm_model_kwargs`参数配置ollama: + +```python +rag = LightRAG( + working_dir=WORKING_DIR, + llm_model_func=ollama_model_complete, # 使用Ollama模型进行文本生成 + llm_model_name='your_model_name', # 您的模型名称 + llm_model_kwargs={"options": {"num_ctx": 32768}}, + # 使用Ollama嵌入函数 + embedding_func=EmbeddingFunc( + embedding_dim=768, + max_token_size=8192, + func=lambda texts: ollama_embedding( + texts, + embed_model="nomic-embed-text" + ) + ), +) +``` + +* **低RAM GPU** + +为了在低RAM GPU上运行此实验,您应该选择小型模型并调整上下文窗口(增加上下文会增加内存消耗)。例如,在6Gb RAM的改装挖矿GPU上运行这个ollama示例需要将上下文大小设置为26k,同时使用`gemma2:2b`。它能够在`book.txt`中找到197个实体和19个关系。 + +
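作为参考,下面给出一个低显存场景的最小配置示意(沿用上文示例中的导入与变量;`gemma2:2b`与26k上下文取自上文实验,仅作示例):

```python
# 低显存GPU配置示意(取值为示例,请按实际硬件调整)
rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=ollama_model_complete,
    llm_model_name="gemma2:2b",  # 小型模型(示例)
    llm_model_kwargs={"options": {"num_ctx": 26000}},  # 缩小上下文以降低显存占用(示例值)
    embedding_func=EmbeddingFunc(
        embedding_dim=768,
        max_token_size=8192,
        func=lambda texts: ollama_embed(texts, embed_model="nomic-embed-text"),
    ),
)
```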
+
+ LlamaIndex + +LightRAG支持与LlamaIndex集成 (`llm/llama_index_impl.py`): + +- 通过LlamaIndex与OpenAI和其他提供商集成 +- 详细设置和示例请参见[LlamaIndex文档](lightrag/llm/Readme.md) + +**使用示例:** + +```python +# 使用LlamaIndex直接访问OpenAI +import asyncio +from lightrag import LightRAG +from lightrag.llm.llama_index_impl import llama_index_complete_if_cache, llama_index_embed +from llama_index.embeddings.openai import OpenAIEmbedding +from llama_index.llms.openai import OpenAI +from lightrag.kg.shared_storage import initialize_pipeline_status +from lightrag.utils import setup_logger + +# 为LightRAG设置日志处理程序 +setup_logger("lightrag", level="INFO") + +async def initialize_rag(): + rag = LightRAG( + working_dir="your/path", + llm_model_func=llama_index_complete_if_cache, # LlamaIndex兼容的完成函数 + embedding_func=EmbeddingFunc( # LlamaIndex兼容的嵌入函数 + embedding_dim=1536, + max_token_size=8192, + func=lambda texts: llama_index_embed(texts, embed_model=embed_model) + ), + ) + + await rag.initialize_storages() + await initialize_pipeline_status() + + return rag + +def main(): + # 初始化RAG实例 + rag = asyncio.run(initialize_rag()) + + with open("./book.txt", "r", encoding="utf-8") as f: + rag.insert(f.read()) + + # 执行朴素搜索 + print( + rag.query("这个故事的主要主题是什么?", param=QueryParam(mode="naive")) + ) + + # 执行本地搜索 + print( + rag.query("这个故事的主要主题是什么?", param=QueryParam(mode="local")) + ) + + # 执行全局搜索 + print( + rag.query("这个故事的主要主题是什么?", param=QueryParam(mode="global")) + ) + + # 执行混合搜索 + print( + rag.query("这个故事的主要主题是什么?", param=QueryParam(mode="hybrid")) + ) + +if __name__ == "__main__": + main() +``` + +**详细文档和示例,请参见:** + +- [LlamaIndex文档](lightrag/llm/Readme.md) +- [直接OpenAI示例](examples/lightrag_llamaindex_direct_demo.py) +- [LiteLLM代理示例](examples/lightrag_llamaindex_litellm_demo.py) + +
+ +### Token统计功能 +
+ 概述和使用 + +LightRAG提供了TokenTracker工具来跟踪和管理大模型的token消耗。这个功能对于控制API成本和优化性能特别有用。 + +#### 使用方法 + +```python +from lightrag.utils import TokenTracker + +# 创建TokenTracker实例 +token_tracker = TokenTracker() + +# 方法1:使用上下文管理器(推荐) +# 适用于需要自动跟踪token使用的场景 +with token_tracker: + result1 = await llm_model_func("你的问题1") + result2 = await llm_model_func("你的问题2") + +# 方法2:手动添加token使用记录 +# 适用于需要更精细控制token统计的场景 +token_tracker.reset() + +rag.insert() + +rag.query("你的问题1", param=QueryParam(mode="naive")) +rag.query("你的问题2", param=QueryParam(mode="mix")) + +# 显示总token使用量(包含插入和查询操作) +print("Token usage:", token_tracker.get_usage()) +``` + +#### 使用建议 +- 在长会话或批量操作中使用上下文管理器,可以自动跟踪所有token消耗 +- 对于需要分段统计的场景,使用手动模式并适时调用reset() +- 定期检查token使用情况,有助于及时发现异常消耗 +- 在开发测试阶段积极使用此功能,以便优化生产环境的成本 + +#### 实际应用示例 +您可以参考以下示例来实现token统计: +- `examples/lightrag_gemini_track_token_demo.py`:使用Google Gemini模型的token统计示例 +- `examples/lightrag_siliconcloud_track_token_demo.py`:使用SiliconCloud模型的token统计示例 + +这些示例展示了如何在不同模型和场景下有效地使用TokenTracker功能。 + +
+ +### 对话历史 + +LightRAG现在通过对话历史功能支持多轮对话。以下是使用方法: + +```python +# 创建对话历史 +conversation_history = [ + {"role": "user", "content": "主角对圣诞节的态度是什么?"}, + {"role": "assistant", "content": "在故事开始时,埃比尼泽·斯克鲁奇对圣诞节持非常消极的态度..."}, + {"role": "user", "content": "他的态度是如何改变的?"} +] + +# 创建带有对话历史的查询参数 +query_param = QueryParam( + mode="mix", # 或其他模式:"local"、"global"、"hybrid" + conversation_history=conversation_history, # 添加对话历史 + history_turns=3 # 考虑最近的对话轮数 +) + +# 进行考虑对话历史的查询 +response = rag.query( + "是什么导致了他性格的这种变化?", + param=query_param +) +``` + +### 自定义提示词 + +LightRAG现在支持自定义提示,以便对系统行为进行精细控制。以下是使用方法: + +```python +# 创建查询参数 +query_param = QueryParam( + mode="hybrid", # 或其他模式:"local"、"global"、"hybrid"、"mix"和"naive" +) + +# 示例1:使用默认系统提示 +response_default = rag.query( + "可再生能源的主要好处是什么?", + param=query_param +) +print(response_default) + +# 示例2:使用自定义提示 +custom_prompt = """ +您是环境科学领域的专家助手。请提供详细且结构化的答案,并附带示例。 +---对话历史--- +{history} + +---知识库--- +{context_data} + +---响应规则--- + +- 目标格式和长度:{response_type} +""" +response_custom = rag.query( + "可再生能源的主要好处是什么?", + param=query_param, + system_prompt=custom_prompt # 传递自定义提示 +) +print(response_custom) +``` + +### 关键词提取 + +我们引入了新函数`query_with_separate_keyword_extraction`来增强关键词提取功能。该函数将关键词提取过程与用户提示分开,专注于查询以提高提取关键词的相关性。 + +* 工作原理 + +该函数将输入分为两部分: + +- `用户查询` +- `提示` + +然后仅对`用户查询`执行关键词提取。这种分离确保提取过程是集中和相关的,不受`提示`中任何额外语言的影响。它还允许`提示`纯粹用于响应格式化,保持用户原始问题的意图和清晰度。 + +* 使用示例 + +这个`示例`展示了如何为教育内容定制函数,专注于为高年级学生提供详细解释。 + +```python +rag.query_with_separate_keyword_extraction( + query="解释重力定律", + prompt="提供适合学习物理的高中生的详细解释。", + param=QueryParam(mode="hybrid") +) +``` + +### 插入自定义知识 + +```python +custom_kg = { + "chunks": [ + { + "content": "Alice和Bob正在合作进行量子计算研究。", + "source_id": "doc-1" + } + ], + "entities": [ + { + "entity_name": "Alice", + "entity_type": "person", + "description": "Alice是一位专门研究量子物理的研究员。", + "source_id": "doc-1" + }, + { + "entity_name": "Bob", + "entity_type": "person", + "description": "Bob是一位数学家。", + "source_id": "doc-1" + }, + { + "entity_name": "量子计算", + "entity_type": "technology", + "description": "量子计算利用量子力学现象进行计算。", + "source_id": "doc-1" + } + ], + "relationships": [ + { + "src_id": "Alice", + "tgt_id": "Bob", + "description": "Alice和Bob是研究伙伴。", + "keywords": "合作 研究", + "weight": 1.0, + "source_id": "doc-1" + }, + { + "src_id": "Alice", + "tgt_id": "量子计算", + "description": "Alice进行量子计算研究。", + "keywords": "研究 专业", + "weight": 1.0, + "source_id": "doc-1" + }, + { + "src_id": "Bob", + "tgt_id": "量子计算", + "description": "Bob研究量子计算。", + "keywords": "研究 应用", + "weight": 1.0, + "source_id": "doc-1" + } + ] +} + +rag.insert_custom_kg(custom_kg) +``` + +## 插入 + +
+ 基本插入 + +```python +# 基本插入 +rag.insert("文本") +``` + +
+ +
+ 批量插入 + +```python +# 基本批量插入:一次插入多个文本 +rag.insert(["文本1", "文本2",...]) + +# 带有自定义批量大小配置的批量插入 +rag = LightRAG( + working_dir=WORKING_DIR, + addon_params={ + "insert_batch_size": 4 # 每批处理4个文档 + } +) + +rag.insert(["文本1", "文本2", "文本3", ...]) # 文档将以4个为一批进行处理 +``` + +`addon_params`中的`insert_batch_size`参数控制插入过程中每批处理的文档数量。这对于以下情况很有用: + +- 管理大型文档集合的内存使用 +- 优化处理速度 +- 提供更好的进度跟踪 +- 如果未指定,默认值为10 + +
+ +
+ 带ID插入 + +如果您想为文档提供自己的ID,文档数量和ID数量必须相同。 + +```python +# 插入单个文本,并为其提供ID +rag.insert("文本1", ids=["文本1的ID"]) + +# 插入多个文本,并为它们提供ID +rag.insert(["文本1", "文本2",...], ids=["文本1的ID", "文本2的ID"]) +``` + +
+ +
  使用管道插入

`apipeline_enqueue_documents`和`apipeline_process_enqueue_documents`函数允许您将文档增量插入到图中。

这对于需要在后台处理文档的场景很有用:主线程可以继续执行,同时由一个例程来处理新入队的文档。

```python
rag = LightRAG(...)

# 将新文档加入处理队列
await rag.apipeline_enqueue_documents(input)
# 在您的循环例程中处理已入队的文档
await rag.apipeline_process_enqueue_documents(input)
```
+ +
+ 插入多文件类型支持 + +`textract`支持读取TXT、DOCX、PPTX、CSV和PDF等文件类型。 + +```python +import textract + +file_path = 'TEXT.pdf' +text_content = textract.process(file_path) + +rag.insert(text_content.decode('utf-8')) +``` + +
+ +
+ 引文功能 + +通过提供文件路径,系统确保可以将来源追溯到其原始文档。 + +```python +# 定义文档及其文件路径 +documents = ["文档内容1", "文档内容2"] +file_paths = ["path/to/doc1.txt", "path/to/doc2.txt"] + +# 插入带有文件路径的文档 +rag.insert(documents, file_paths=file_paths) +``` + +
+ +## 存储 + +
  使用Neo4J进行存储

* 对于生产级场景,您很可能想要利用企业级解决方案进行KG存储。推荐在Docker中运行Neo4J以进行无缝本地测试。
* 参见:https://hub.docker.com/_/neo4j

```python
import os

# Neo4J连接信息(也可以在shell中export同名环境变量)
os.environ["NEO4J_URI"] = "neo4j://localhost:7687"
os.environ["NEO4J_USERNAME"] = "neo4j"
os.environ["NEO4J_PASSWORD"] = "password"

# 为LightRAG设置日志记录器
setup_logger("lightrag", level="INFO")

# 当您启动项目时,请确保通过指定graph_storage="Neo4JStorage"来覆盖默认的KG:NetworkX。

# 注意:默认设置使用NetworkX
# 使用Neo4J实现初始化LightRAG。
async def initialize_rag():
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=gpt_4o_mini_complete,  # 使用gpt_4o_mini_complete LLM模型
        graph_storage="Neo4JStorage", #<-----------覆盖KG默认值
    )

    # 初始化数据库连接
    await rag.initialize_storages()
    # 初始化文档处理的管道状态
    await initialize_pipeline_status()

    return rag
```

参见test_neo4j.py获取工作示例。
+ +
  使用Faiss进行存储

- 安装所需依赖:

```bash
pip install faiss-cpu
```

如果您有GPU支持,也可以安装`faiss-gpu`。

- 这里我们使用`sentence-transformers`,但您也可以使用维度为`3072`的`OpenAIEmbedding`模型。

```python
import numpy as np
from sentence_transformers import SentenceTransformer

async def embedding_func(texts: list[str]) -> np.ndarray:
    model = SentenceTransformer('all-MiniLM-L6-v2')
    embeddings = model.encode(texts, convert_to_numpy=True)
    return embeddings

# 使用LLM模型函数和嵌入函数初始化LightRAG
rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=llm_model_func,
    embedding_func=EmbeddingFunc(
        embedding_dim=384,
        max_token_size=8192,
        func=embedding_func,
    ),
    vector_storage="FaissVectorDBStorage",
    vector_db_storage_cls_kwargs={
        "cosine_better_than_threshold": 0.3  # 您期望的阈值
    }
)
```
+ +
+ 使用PostgreSQL进行存储 + +对于生产级场景,您很可能想要利用企业级解决方案。PostgreSQL可以为您提供一站式解决方案,作为KV存储、向量数据库(pgvector)和图数据库(apache AGE)。 + +* PostgreSQL很轻量,整个二进制发行版包括所有必要的插件可以压缩到40MB:参考[Windows发布版](https://github.com/ShanGor/apache-age-windows/releases/tag/PG17%2Fv1.5.0-rc0),它在Linux/Mac上也很容易安装。 +* 如果您是初学者并想避免麻烦,推荐使用docker,请从这个镜像开始(请务必阅读概述):https://hub.docker.com/r/shangor/postgres-for-rag +* 如何开始?参考:[examples/lightrag_zhipu_postgres_demo.py](https://github.com/HKUDS/LightRAG/blob/main/examples/lightrag_zhipu_postgres_demo.py) +* 为AGE创建索引示例:(如有必要,将下面的`dickens`改为您的图名) + ```sql + load 'age'; + SET search_path = ag_catalog, "$user", public; + CREATE INDEX CONCURRENTLY entity_p_idx ON dickens."Entity" (id); + CREATE INDEX CONCURRENTLY vertex_p_idx ON dickens."_ag_label_vertex" (id); + CREATE INDEX CONCURRENTLY directed_p_idx ON dickens."DIRECTED" (id); + CREATE INDEX CONCURRENTLY directed_eid_idx ON dickens."DIRECTED" (end_id); + CREATE INDEX CONCURRENTLY directed_sid_idx ON dickens."DIRECTED" (start_id); + CREATE INDEX CONCURRENTLY directed_seid_idx ON dickens."DIRECTED" (start_id,end_id); + CREATE INDEX CONCURRENTLY edge_p_idx ON dickens."_ag_label_edge" (id); + CREATE INDEX CONCURRENTLY edge_sid_idx ON dickens."_ag_label_edge" (start_id); + CREATE INDEX CONCURRENTLY edge_eid_idx ON dickens."_ag_label_edge" (end_id); + CREATE INDEX CONCURRENTLY edge_seid_idx ON dickens."_ag_label_edge" (start_id,end_id); + create INDEX CONCURRENTLY vertex_idx_node_id ON dickens."_ag_label_vertex" (ag_catalog.agtype_access_operator(properties, '"node_id"'::agtype)); + create INDEX CONCURRENTLY entity_idx_node_id ON dickens."Entity" (ag_catalog.agtype_access_operator(properties, '"node_id"'::agtype)); + CREATE INDEX CONCURRENTLY entity_node_id_gin_idx ON dickens."Entity" using gin(properties); + ALTER TABLE dickens."DIRECTED" CLUSTER ON directed_sid_idx; + + -- 如有必要可以删除 + drop INDEX entity_p_idx; + drop INDEX vertex_p_idx; + drop INDEX directed_p_idx; + drop INDEX directed_eid_idx; + drop INDEX directed_sid_idx; + drop INDEX directed_seid_idx; + drop INDEX edge_p_idx; + drop INDEX edge_sid_idx; + drop INDEX edge_eid_idx; + drop INDEX edge_seid_idx; + drop INDEX vertex_idx_node_id; + drop INDEX entity_idx_node_id; + drop INDEX entity_node_id_gin_idx; + ``` +* Apache AGE的已知问题:发布版本存在以下问题: + > 您可能会发现节点/边的属性是空的。 + > 这是发布版本的已知问题:https://github.com/apache/age/pull/1721 + > + > 您可以从源代码编译AGE来修复它。 + > + +
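下面是一个最小的初始化示意:四种存储类型名称来自下文的参数表;连接信息通常通过环境变量提供,具体变量名请以`examples/lightrag_zhipu_postgres_demo.py`为准(此处仅为假设):

```python
import os

# 假设的连接环境变量名,实际名称请参考示例脚本
os.environ.setdefault("POSTGRES_HOST", "localhost")
os.environ.setdefault("POSTGRES_PORT", "5432")
os.environ.setdefault("POSTGRES_USER", "rag")
os.environ.setdefault("POSTGRES_PASSWORD", "rag")
os.environ.setdefault("POSTGRES_DATABASE", "rag")

rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=llm_model_func,        # 沿用前文定义的模型函数
    embedding_func=embedding_func,
    kv_storage="PGKVStorage",             # KV存储
    vector_storage="PGVectorStorage",     # 向量存储(pgvector)
    graph_storage="PGGraphStorage",       # 图存储(Apache AGE)
    doc_status_storage="PGDocStatusStorage",  # 文档状态存储
)
```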
+ +## 删除 + +```python +# 删除实体:通过实体名称删除实体 +rag.delete_by_entity("Project Gutenberg") + +# 删除文档:通过文档ID删除与文档相关的实体和关系 +rag.delete_by_doc_id("doc_id") +``` + +## 编辑实体和关系 + +LightRAG现在支持全面的知识图谱管理功能,允许您在知识图谱中创建、编辑和删除实体和关系。 + +
+ 创建实体和关系 + +```python +# 创建新实体 +entity = rag.create_entity("Google", { + "description": "Google是一家专注于互联网相关服务和产品的跨国科技公司。", + "entity_type": "company" +}) + +# 创建另一个实体 +product = rag.create_entity("Gmail", { + "description": "Gmail是由Google开发的电子邮件服务。", + "entity_type": "product" +}) + +# 创建实体之间的关系 +relation = rag.create_relation("Google", "Gmail", { + "description": "Google开发和运营Gmail。", + "keywords": "开发 运营 服务", + "weight": 2.0 +}) +``` + +
+ +
+ 编辑实体和关系 + +```python +# 编辑现有实体 +updated_entity = rag.edit_entity("Google", { + "description": "Google是Alphabet Inc.的子公司,成立于1998年。", + "entity_type": "tech_company" +}) + +# 重命名实体(所有关系都会正确迁移) +renamed_entity = rag.edit_entity("Gmail", { + "entity_name": "Google Mail", + "description": "Google Mail(前身为Gmail)是一项电子邮件服务。" +}) + +# 编辑实体之间的关系 +updated_relation = rag.edit_relation("Google", "Google Mail", { + "description": "Google创建并维护Google Mail服务。", + "keywords": "创建 维护 电子邮件服务", + "weight": 3.0 +}) +``` + +
+ +所有操作都有同步和异步版本。异步版本带有前缀"a"(例如,`acreate_entity`,`aedit_relation`)。 + +#### 实体操作 + +- **create_entity**:创建具有指定属性的新实体 +- **edit_entity**:更新现有实体的属性或重命名它 + +#### 关系操作 + +- **create_relation**:在现有实体之间创建新关系 +- **edit_relation**:更新现有关系的属性 + +这些操作在图数据库和向量数据库组件之间保持数据一致性,确保您的知识图谱保持连贯。 + +## 数据导出功能 + +### 概述 + +LightRAG允许您以各种格式导出知识图谱数据,用于分析、共享和备份目的。系统支持导出实体、关系和关系数据。 + +### 导出功能 + +#### 基本用法 + +```python +# 基本CSV导出(默认格式) +rag.export_data("knowledge_graph.csv") + +# 指定任意格式 +rag.export_data("output.xlsx", file_format="excel") +``` + +#### 支持的不同文件格式 + +```python +# 以CSV格式导出数据 +rag.export_data("graph_data.csv", file_format="csv") + +# 导出数据到Excel表格 +rag.export_data("graph_data.xlsx", file_format="excel") + +# 以markdown格式导出数据 +rag.export_data("graph_data.md", file_format="md") + +# 导出数据为文本 +rag.export_data("graph_data.txt", file_format="txt") +``` + +#### 附加选项 + +在导出中包含向量嵌入(可选): + +```python +rag.export_data("complete_data.csv", include_vector_data=True) +``` + +### 导出数据包括 + +所有导出包括: + +* 实体信息(名称、ID、元数据) +* 关系数据(实体之间的连接) +* 来自向量数据库的关系信息 + +## 实体合并 + +
+ 合并实体及其关系 + +LightRAG现在支持将多个实体合并为单个实体,自动处理所有关系: + +```python +# 基本实体合并 +rag.merge_entities( + source_entities=["人工智能", "AI", "机器智能"], + target_entity="AI技术" +) +``` + +使用自定义合并策略: + +```python +# 为不同字段定义自定义合并策略 +rag.merge_entities( + source_entities=["约翰·史密斯", "史密斯博士", "J·史密斯"], + target_entity="约翰·史密斯", + merge_strategy={ + "description": "concatenate", # 组合所有描述 + "entity_type": "keep_first", # 保留第一个实体的类型 + "source_id": "join_unique" # 组合所有唯一的源ID + } +) +``` + +使用自定义目标实体数据: + +```python +# 为合并后的实体指定确切值 +rag.merge_entities( + source_entities=["纽约", "NYC", "大苹果"], + target_entity="纽约市", + target_entity_data={ + "entity_type": "LOCATION", + "description": "纽约市是美国人口最多的城市。", + } +) +``` + +结合两种方法的高级用法: + +```python +# 使用策略和自定义数据合并公司实体 +rag.merge_entities( + source_entities=["微软公司", "Microsoft Corporation", "MSFT"], + target_entity="微软", + merge_strategy={ + "description": "concatenate", # 组合所有描述 + "source_id": "join_unique" # 组合源ID + }, + target_entity_data={ + "entity_type": "ORGANIZATION", + } +) +``` + +合并实体时: + +* 所有来自源实体的关系都会重定向到目标实体 +* 重复的关系会被智能合并 +* 防止自我关系(循环) +* 合并后删除源实体 +* 保留关系权重和属性 + +
+ +## 缓存 + +
+ 清除缓存 + +您可以使用不同模式清除LLM响应缓存: + +```python +# 清除所有缓存 +await rag.aclear_cache() + +# 清除本地模式缓存 +await rag.aclear_cache(modes=["local"]) + +# 清除提取缓存 +await rag.aclear_cache(modes=["default"]) + +# 清除多个模式 +await rag.aclear_cache(modes=["local", "global", "hybrid"]) + +# 同步版本 +rag.clear_cache(modes=["local"]) +``` + +有效的模式包括: + +- `"default"`:提取缓存 +- `"naive"`:朴素搜索缓存 +- `"local"`:本地搜索缓存 +- `"global"`:全局搜索缓存 +- `"hybrid"`:混合搜索缓存 +- `"mix"`:混合搜索缓存 + +
+ +## LightRAG初始化参数 + +
+ 参数 + +| **参数** | **类型** | **说明** | **默认值** | +|--------------|----------|-----------------|-------------| +| **working_dir** | `str` | 存储缓存的目录 | `lightrag_cache+timestamp` | +| **kv_storage** | `str` | Storage type for documents and text chunks. Supported types: `JsonKVStorage`,`PGKVStorage`,`RedisKVStorage`,`MongoKVStorage` | `JsonKVStorage` | +| **vector_storage** | `str` | Storage type for embedding vectors. Supported types: `NanoVectorDBStorage`,`PGVectorStorage`,`MilvusVectorDBStorage`,`ChromaVectorDBStorage`,`FaissVectorDBStorage`,`MongoVectorDBStorage`,`QdrantVectorDBStorage` | `NanoVectorDBStorage` | +| **graph_storage** | `str` | Storage type for graph edges and nodes. Supported types: `NetworkXStorage`,`Neo4JStorage`,`PGGraphStorage`,`AGEStorage` | `NetworkXStorage` | +| **doc_status_storage** | `str` | Storage type for documents process status. Supported types: `JsonDocStatusStorage`,`PGDocStatusStorage`,`MongoDocStatusStorage` | `JsonDocStatusStorage` | +| **chunk_token_size** | `int` | 拆分文档时每个块的最大令牌大小 | `1200` | +| **chunk_overlap_token_size** | `int` | 拆分文档时两个块之间的重叠令牌大小 | `100` | +| **tiktoken_model_name** | `str` | 用于计算令牌数的Tiktoken编码器的模型名称 | `gpt-4o-mini` | +| **entity_extract_max_gleaning** | `int` | 实体提取过程中的循环次数,附加历史消息 | `1` | +| **entity_summary_to_max_tokens** | `int` | 每个实体摘要的最大令牌大小 | `500` | +| **node_embedding_algorithm** | `str` | 节点嵌入算法(当前未使用) | `node2vec` | +| **node2vec_params** | `dict` | 节点嵌入的参数 | `{"dimensions": 1536,"num_walks": 10,"walk_length": 40,"window_size": 2,"iterations": 3,"random_seed": 3,}` | +| **embedding_func** | `EmbeddingFunc` | 从文本生成嵌入向量的函数 | `openai_embed` | +| **embedding_batch_num** | `int` | 嵌入过程的最大批量大小(每批发送多个文本) | `32` | +| **embedding_func_max_async** | `int` | 最大并发异步嵌入进程数 | `16` | +| **llm_model_func** | `callable` | LLM生成的函数 | `gpt_4o_mini_complete` | +| **llm_model_name** | `str` | 用于生成的LLM模型名称 | `meta-llama/Llama-3.2-1B-Instruct` | +| **llm_model_max_token_size** | `int` | LLM生成的最大令牌大小(影响实体关系摘要) | `32768`(默认值由环境变量MAX_TOKENS更改) | +| **llm_model_max_async** | `int` | 最大并发异步LLM进程数 | `4`(默认值由环境变量MAX_ASYNC更改) | +| **llm_model_kwargs** | `dict` | LLM生成的附加参数 | | +| **vector_db_storage_cls_kwargs** | `dict` | 向量数据库的附加参数,如设置节点和关系检索的阈值 | cosine_better_than_threshold: 0.2(默认值由环境变量COSINE_THRESHOLD更改) | +| **enable_llm_cache** | `bool` | 如果为`TRUE`,将LLM结果存储在缓存中;重复的提示返回缓存的响应 | `TRUE` | +| **enable_llm_cache_for_entity_extract** | `bool` | 如果为`TRUE`,将实体提取的LLM结果存储在缓存中;适合初学者调试应用程序 | `TRUE` | +| **addon_params** | `dict` | 附加参数,例如`{"example_number": 1, "language": "Simplified Chinese", "entity_types": ["organization", "person", "geo", "event"], "insert_batch_size": 10}`:设置示例限制、输出语言和文档处理的批量大小 | `example_number: 所有示例, language: English, insert_batch_size: 10` | +| **convert_response_to_json_func** | `callable` | 未使用 | `convert_response_to_json` | +| **embedding_cache_config** | `dict` | 问答缓存的配置。包含三个参数:`enabled`:布尔值,启用/禁用缓存查找功能。启用时,系统将在生成新答案之前检查缓存的响应。`similarity_threshold`:浮点值(0-1),相似度阈值。当新问题与缓存问题的相似度超过此阈值时,将直接返回缓存的答案而不调用LLM。`use_llm_check`:布尔值,启用/禁用LLM相似度验证。启用时,在返回缓存答案之前,将使用LLM作为二次检查来验证问题之间的相似度。 | 默认:`{"enabled": False, "similarity_threshold": 0.95, "use_llm_check": False}` | + +
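下面是一个只使用上表部分参数的最小配置示意(取值仅为示例):

```python
from lightrag import LightRAG
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed

rag = LightRAG(
    working_dir="./rag_cache",
    chunk_token_size=1200,
    chunk_overlap_token_size=100,
    llm_model_func=gpt_4o_mini_complete,
    embedding_func=openai_embed,
    embedding_cache_config={
        "enabled": True,               # 生成新答案前先查问答缓存
        "similarity_threshold": 0.95,  # 相似度超过阈值时直接返回缓存答案
        "use_llm_check": False,        # 是否用LLM二次校验相似度
    },
    addon_params={"language": "Simplified Chinese", "insert_batch_size": 10},
)
```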
+ +## 错误处理 + +
+点击查看错误处理详情 + +API包括全面的错误处理: + +- 文件未找到错误(404) +- 处理错误(500) +- 支持多种文件编码(UTF-8和GBK) + +
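下面是一个简单的客户端处理示意(服务器地址与`/query`端点均为假设,请以您的实际部署为准):

```python
import requests

resp = requests.post(
    "http://localhost:9621/query",  # 假设的服务器地址与端点
    json={"query": "这个故事的主要主题是什么?", "mode": "hybrid"},
    timeout=60,
)

if resp.status_code == 404:
    print("文件或资源未找到")
elif resp.status_code == 500:
    print("服务器处理出错:", resp.text)
else:
    resp.encoding = resp.apparent_encoding  # 兼容UTF-8/GBK等编码
    print(resp.json())
```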
+ +## LightRAG API + +LightRAG服务器旨在提供Web UI和API支持。**有关LightRAG服务器的更多信息,请参阅[LightRAG服务器](./lightrag/api/README.md)。** + +## 知识图谱可视化 + +LightRAG服务器提供全面的知识图谱可视化功能。它支持各种重力布局、节点查询、子图过滤等。**有关LightRAG服务器的更多信息,请参阅[LightRAG服务器](./lightrag/api/README.md)。** + +![iShot_2025-03-23_12.40.08](./README.assets/iShot_2025-03-23_12.40.08.png) + +## 评估 + +### 数据集 + +LightRAG使用的数据集可以从[TommyChien/UltraDomain](https://huggingface.co/datasets/TommyChien/UltraDomain)下载。 + +### 生成查询 + +LightRAG使用以下提示生成高级查询,相应的代码在`example/generate_query.py`中。 + +
+ 提示 + +```python +给定以下数据集描述: + +{description} + +请识别5个可能会使用此数据集的潜在用户。对于每个用户,列出他们会使用此数据集执行的5个任务。然后,对于每个(用户,任务)组合,生成5个需要对整个数据集有高级理解的问题。 + +按以下结构输出结果: +- 用户1:[用户描述] + - 任务1:[任务描述] + - 问题1: + - 问题2: + - 问题3: + - 问题4: + - 问题5: + - 任务2:[任务描述] + ... + - 任务5:[任务描述] +- 用户2:[用户描述] + ... +- 用户5:[用户描述] + ... +``` + +
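作为参考,下面演示如何用数据集描述填充该提示并调用LLM(`PROMPT_TEMPLATE`表示上述提示文本,模型名与客户端均为假设的示例):

```python
from openai import OpenAI

client = OpenAI()  # 假设已在环境中设置OPENAI_API_KEY
description = "..."  # 您的数据集描述

response = client.chat.completions.create(
    model="gpt-4o-mini",  # 示例模型
    messages=[{"role": "user", "content": PROMPT_TEMPLATE.format(description=description)}],
)
print(response.choices[0].message.content)
```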
+ +### 批量评估 + +为了评估两个RAG系统在高级查询上的性能,LightRAG使用以下提示,具体代码可在`example/batch_eval.py`中找到。 + +
+ 提示 + +```python +---角色--- +您是一位专家,负责根据三个标准评估同一问题的两个答案:**全面性**、**多样性**和**赋能性**。 +---目标--- +您将根据三个标准评估同一问题的两个答案:**全面性**、**多样性**和**赋能性**。 + +- **全面性**:答案提供了多少细节来涵盖问题的所有方面和细节? +- **多样性**:答案在提供关于问题的不同视角和见解方面有多丰富多样? +- **赋能性**:答案在多大程度上帮助读者理解并对主题做出明智判断? + +对于每个标准,选择更好的答案(答案1或答案2)并解释原因。然后,根据这三个类别选择总体赢家。 + +这是问题: +{query} + +这是两个答案: + +**答案1:** +{answer1} + +**答案2:** +{answer2} + +使用上述三个标准评估两个答案,并为每个标准提供详细解释。 + +以下列JSON格式输出您的评估: + +{{ + "全面性": {{ + "获胜者": "[答案1或答案2]", + "解释": "[在此提供解释]" + }}, + "赋能性": {{ + "获胜者": "[答案1或答案2]", + "解释": "[在此提供解释]" + }}, + "总体获胜者": {{ + "获胜者": "[答案1或答案2]", + "解释": "[根据三个标准总结为什么这个答案是总体获胜者]" + }} +}} +``` + +
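评估时与上一节类似:用具体的查询和两个系统的答案填充占位符,再交给LLM评审(`EVAL_TEMPLATE`表示上述提示文本,为假设的变量名):

```python
from openai import OpenAI

client = OpenAI()  # 假设已设置OPENAI_API_KEY
eval_prompt = EVAL_TEMPLATE.format(query=query, answer1=answer1, answer2=answer2)
result = client.chat.completions.create(
    model="gpt-4o-mini",  # 示例模型
    messages=[{"role": "user", "content": eval_prompt}],
)
print(result.choices[0].message.content)
```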
+ +### 总体性能表 + +| |**农业**| |**计算机科学**| |**法律**| |**混合**| | +|----------------------|---------------|------------|------|------------|---------|------------|-------|------------| +| |NaiveRAG|**LightRAG**|NaiveRAG|**LightRAG**|NaiveRAG|**LightRAG**|NaiveRAG|**LightRAG**| +|**全面性**|32.4%|**67.6%**|38.4%|**61.6%**|16.4%|**83.6%**|38.8%|**61.2%**| +|**多样性**|23.6%|**76.4%**|38.0%|**62.0%**|13.6%|**86.4%**|32.4%|**67.6%**| +|**赋能性**|32.4%|**67.6%**|38.8%|**61.2%**|16.4%|**83.6%**|42.8%|**57.2%**| +|**总体**|32.4%|**67.6%**|38.8%|**61.2%**|15.2%|**84.8%**|40.0%|**60.0%**| +| |RQ-RAG|**LightRAG**|RQ-RAG|**LightRAG**|RQ-RAG|**LightRAG**|RQ-RAG|**LightRAG**| +|**全面性**|31.6%|**68.4%**|38.8%|**61.2%**|15.2%|**84.8%**|39.2%|**60.8%**| +|**多样性**|29.2%|**70.8%**|39.2%|**60.8%**|11.6%|**88.4%**|30.8%|**69.2%**| +|**赋能性**|31.6%|**68.4%**|36.4%|**63.6%**|15.2%|**84.8%**|42.4%|**57.6%**| +|**总体**|32.4%|**67.6%**|38.0%|**62.0%**|14.4%|**85.6%**|40.0%|**60.0%**| +| |HyDE|**LightRAG**|HyDE|**LightRAG**|HyDE|**LightRAG**|HyDE|**LightRAG**| +|**全面性**|26.0%|**74.0%**|41.6%|**58.4%**|26.8%|**73.2%**|40.4%|**59.6%**| +|**多样性**|24.0%|**76.0%**|38.8%|**61.2%**|20.0%|**80.0%**|32.4%|**67.6%**| +|**赋能性**|25.2%|**74.8%**|40.8%|**59.2%**|26.0%|**74.0%**|46.0%|**54.0%**| +|**总体**|24.8%|**75.2%**|41.6%|**58.4%**|26.4%|**73.6%**|42.4%|**57.6%**| +| |GraphRAG|**LightRAG**|GraphRAG|**LightRAG**|GraphRAG|**LightRAG**|GraphRAG|**LightRAG**| +|**全面性**|45.6%|**54.4%**|48.4%|**51.6%**|48.4%|**51.6%**|**50.4%**|49.6%| +|**多样性**|22.8%|**77.2%**|40.8%|**59.2%**|26.4%|**73.6%**|36.0%|**64.0%**| +|**赋能性**|41.2%|**58.8%**|45.2%|**54.8%**|43.6%|**56.4%**|**50.8%**|49.2%| +|**总体**|45.2%|**54.8%**|48.0%|**52.0%**|47.2%|**52.8%**|**50.4%**|49.6%| + +## 复现 + +所有代码都可以在`./reproduce`目录中找到。 + +### 步骤0 提取唯一上下文 + +首先,我们需要提取数据集中的唯一上下文。 + +
  代码

```python
import glob
import json
import os

def extract_unique_contexts(input_directory, output_directory):

    os.makedirs(output_directory, exist_ok=True)

    jsonl_files = glob.glob(os.path.join(input_directory, '*.jsonl'))
    print(f"找到{len(jsonl_files)}个JSONL文件。")

    for file_path in jsonl_files:
        filename = os.path.basename(file_path)
        name, ext = os.path.splitext(filename)
        output_filename = f"{name}_unique_contexts.json"
        output_path = os.path.join(output_directory, output_filename)

        unique_contexts_dict = {}

        print(f"处理文件:{filename}")

        try:
            with open(file_path, 'r', encoding='utf-8') as infile:
                for line_number, line in enumerate(infile, start=1):
                    line = line.strip()
                    if not line:
                        continue
                    try:
                        json_obj = json.loads(line)
                        context = json_obj.get('context')
                        if context and context not in unique_contexts_dict:
                            unique_contexts_dict[context] = None
                    except json.JSONDecodeError as e:
                        print(f"文件{filename}第{line_number}行JSON解码错误:{e}")
        except FileNotFoundError:
            print(f"未找到文件:{filename}")
            continue
        except Exception as e:
            print(f"处理文件{filename}时发生错误:{e}")
            continue

        unique_contexts_list = list(unique_contexts_dict.keys())
        print(f"文件{filename}中有{len(unique_contexts_list)}个唯一的`context`条目。")

        try:
            with open(output_path, 'w', encoding='utf-8') as outfile:
                json.dump(unique_contexts_list, outfile, ensure_ascii=False, indent=4)
            print(f"唯一的`context`条目已保存到:{output_filename}")
        except Exception as e:
            print(f"保存到文件{output_filename}时发生错误:{e}")

    print("所有文件已处理完成。")
```
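调用方式示意(目录路径仅为示例):

```python
extract_unique_contexts("./datasets", "./datasets/unique_contexts")
```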
+ +### 步骤1 插入上下文 + +对于提取的上下文,我们将它们插入到LightRAG系统中。 + +
  代码

```python
import json
import time

def insert_text(rag, file_path):
    with open(file_path, mode='r') as f:
        unique_contexts = json.load(f)

    retries = 0
    max_retries = 3
    while retries < max_retries:
        try:
            rag.insert(unique_contexts)
            break
        except Exception as e:
            retries += 1
            print(f"插入失败,重试({retries}/{max_retries}),错误:{e}")
            time.sleep(10)
    if retries == max_retries:
        print("超过最大重试次数后插入失败")
```
+ +### 步骤2 生成查询 + +我们从数据集中每个上下文的前半部分和后半部分提取令牌,然后将它们组合为数据集描述以生成查询。 + +
  代码

```python
from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')

def get_summary(context, tot_tokens=2000):
    tokens = tokenizer.tokenize(context)
    half_tokens = tot_tokens // 2

    # 跳过文档开头1000个token后,取前半部分摘要
    start_tokens = tokens[1000:1000 + half_tokens]
    # 跳过文档结尾1000个token前,取后半部分摘要
    end_tokens = tokens[-(1000 + half_tokens):-1000]

    summary_tokens = start_tokens + end_tokens
    summary = tokenizer.convert_tokens_to_string(summary_tokens)

    return summary
```
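随后可以把各个上下文的摘要拼接成数据集描述,再代入前文「生成查询」一节的提示(拼接方式仅为示意):

```python
# unique_contexts为步骤0中导出的上下文列表
summaries = [get_summary(ctx) for ctx in unique_contexts]
description = "\n".join(summaries)  # 作为提示中的{description}
```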
+ +### 步骤3 查询 + +对于步骤2中生成的查询,我们将提取它们并查询LightRAG。 + +
  代码

```python
import re

def extract_queries(file_path):
    with open(file_path, 'r') as f:
        data = f.read()

    # 去除加粗标记
    data = data.replace('**', '')

    queries = re.findall(r'- Question \d+: (.+)', data)

    return queries
```
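提取后即可逐条查询LightRAG(沿用前文初始化的`rag`实例,查询模式仅为示例):

```python
queries = extract_queries("./queries.txt")  # 文件路径为示例
for q in queries:
    print(rag.query(q, param=QueryParam(mode="hybrid")))
```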
+ +## Star历史 + + + + + + Star History Chart + + + +## 贡献 + +感谢所有贡献者! + + + + + +## 🌟引用 + +```python +@article{guo2024lightrag, +title={LightRAG: Simple and Fast Retrieval-Augmented Generation}, +author={Zirui Guo and Lianghao Xia and Yanhua Yu and Tu Ao and Chao Huang}, +year={2024}, +eprint={2410.05779}, +archivePrefix={arXiv}, +primaryClass={cs.IR} +} +``` + +**感谢您对我们工作的关注!** diff --git a/README.assets/b2aaf634151b4706892693ffb43d9093.png b/README.assets/b2aaf634151b4706892693ffb43d9093.png new file mode 100644 index 0000000000..ac38781127 Binary files /dev/null and b/README.assets/b2aaf634151b4706892693ffb43d9093.png differ diff --git a/README.assets/iShot_2025-03-23_12.40.08.png b/README.assets/iShot_2025-03-23_12.40.08.png new file mode 100644 index 0000000000..c4250b1203 Binary files /dev/null and b/README.assets/iShot_2025-03-23_12.40.08.png differ diff --git a/README.md b/README.md index eb2575e731..c3a436501e 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@

🚀 LightRAG: Simple and Fast Retrieval-Augmented Generation

+
@@ -6,6 +7,7 @@ lightrag
+

@@ -28,31 +30,23 @@

-
-This repository hosts the code of LightRAG. The structure of this code is based on nano-graphrag. +LightRAG Diagram -LightRAG Diagram -
-
- - - +
+ HKUDS%2FLightRAG | Trendshift +
-
- - 🎉 News - +## 🎉 News -- [X] [2025.03.18]🎯📢LightRAG now supports citation functionality. +- [X] [2025.03.18]🎯📢LightRAG now supports citation functionality, enabling proper source attribution. - [X] [2025.02.05]🎯📢Our team has released [VideoRAG](https://github.com/HKUDS/VideoRAG) understanding extremely long-context videos. - [X] [2025.01.13]🎯📢Our team has released [MiniRAG](https://github.com/HKUDS/MiniRAG) making RAG simpler with small models. - [X] [2025.01.06]🎯📢You can now [use PostgreSQL for Storage](#using-postgresql-for-storage). - [X] [2024.12.31]🎯📢LightRAG now supports [deletion by document ID](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#delete). - [X] [2024.11.25]🎯📢LightRAG now supports seamless integration of [custom knowledge graphs](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#insert-custom-kg), empowering users to enhance the system with their own domain expertise. - [X] [2024.11.19]🎯📢A comprehensive guide to LightRAG is now available on [LearnOpenCV](https://learnopencv.com/lightrag). Many thanks to the blog author. -- [X] [2024.11.12]🎯📢LightRAG now supports [Oracle Database 23ai for all storage types (KV, vector, and graph)](https://github.com/HKUDS/LightRAG/blob/main/examples/lightrag_oracle_demo.py). - [X] [2024.11.11]🎯📢LightRAG now supports [deleting entities by their names](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#delete). - [X] [2024.11.09]🎯📢Introducing the [LightRAG Gui](https://lightrag-gui.streamlit.app), which allows you to insert, query, visualize, and download LightRAG knowledge. - [X] [2024.11.04]🎯📢You can now [use Neo4J for Storage](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#using-neo4j-for-storage). @@ -63,8 +57,6 @@ This repository hosts the code of LightRAG. The structure of this code is based - [X] [2024.10.16]🎯📢LightRAG now supports [Ollama models](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#quick-start)! - [X] [2024.10.15]🎯📢LightRAG now supports [Hugging Face models](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#quick-start)! -
-
Algorithm Flowchart @@ -77,7 +69,9 @@ This repository hosts the code of LightRAG. The structure of this code is based
-## Install
+## Installation
+
+### Install LightRAG Core

* Install from source (Recommended)
@@ -92,6 +86,26 @@ pip install -e .
pip install lightrag-hku
```

+### Install LightRAG Server
+
+The LightRAG Server is designed to provide Web UI and API support. The Web UI facilitates document indexing, knowledge graph exploration, and a simple RAG query interface. The LightRAG Server also provides an Ollama-compatible interface, aiming to emulate LightRAG as an Ollama chat model. This allows AI chat bots, such as Open WebUI, to access LightRAG easily.
+
+* Install from PyPI
+
+```bash
+pip install "lightrag-hku[api]"
+```
+
+* Installation from Source
+
+```bash
+# create a Python virtual environment if necessary
+# Install in editable mode with API support
+pip install -e ".[api]"
+```
+
+**For more information about LightRAG Server, please refer to [LightRAG Server](./lightrag/api/README.md).**
+
## Quick Start

* [Video demo](https://www.youtube.com/watch?v=g21royNJ4fw) of running LightRAG locally.
@@ -185,11 +199,20 @@ class QueryParam:
    """Maximum number of tokens allocated for entity descriptions in local retrieval."""
    ids: list[str] | None = None # ONLY SUPPORTED FOR PG VECTOR DBs
    """List of ids to filter the RAG."""
+    model_func: Callable[..., object] | None = None
+    """Optional override for the LLM model function to use for this specific query.
+    If provided, this will be used instead of the global model function.
+    This allows using different models for different query modes.
+    """
    ...
```

> The default value of top_k can be changed via the environment variable TOP_K.

+### LLM and Embedding Injection
+
+LightRAG requires an LLM and an embedding model to perform document indexing and querying. During initialization, the invocation functions of these models must be injected into the LightRAG object:
+
Using Open AI-like APIs @@ -266,8 +289,7 @@ rag = LightRAG(
  Using Ollama Models
-
-### Overview
+**Overview**

If you want to use Ollama models, you need to pull the model you plan to use and the embedding model, for example `nomic-embed-text`.
@@ -291,11 +313,11 @@ rag = LightRAG(
)
```

-### Increasing context size
+* **Increasing context size**

In order for LightRAG to work, the context should be at least 32k tokens. By default, Ollama models have a context size of 8k. You can achieve this in one of two ways:

-#### Increasing the `num_ctx` parameter in Modelfile.
+* **Increasing the `num_ctx` parameter in Modelfile**

1. Pull the model:
@@ -321,7 +343,7 @@ PARAMETER num_ctx 32768
ollama create -f Modelfile qwen2m
```

-#### Setup `num_ctx` via Ollama API.
+* **Setup `num_ctx` via Ollama API**

You can use the `llm_model_kwargs` param to configure Ollama:
@@ -343,7 +365,7 @@ rag = LightRAG(
)
```

-#### Low RAM GPUs
+* **Low RAM GPUs**

In order to run this experiment on a low-RAM GPU, you should select a small model and tune the context window (increasing the context increases memory consumption). For example, running this Ollama example on a repurposed mining GPU with 6 GB of RAM required setting the context size to 26k while using `gemma2:2b`. It was able to find 197 entities and 19 relations in `book.txt`.
LlamaIndex -LightRAG supports integration with LlamaIndex. +LightRAG supports integration with LlamaIndex (`llm/llama_index_impl.py`): -1. **LlamaIndex** (`llm/llama_index_impl.py`): - - Integrates with OpenAI and other providers through LlamaIndex - - See [LlamaIndex Documentation](lightrag/llm/Readme.md) for detailed setup and examples +- Integrates with OpenAI and other providers through LlamaIndex +- See [LlamaIndex Documentation](lightrag/llm/Readme.md) for detailed setup and examples -### Example Usage +**Example Usage** ```python # Using LlamaIndex with direct OpenAI access @@ -419,18 +440,71 @@ if __name__ == "__main__": main() ``` -#### For detailed documentation and examples, see: +**For detailed documentation and examples, see:** - [LlamaIndex Documentation](lightrag/llm/Readme.md) - [Direct OpenAI Example](examples/lightrag_llamaindex_direct_demo.py) - [LiteLLM Proxy Example](examples/lightrag_llamaindex_litellm_demo.py)
+ +### Token Usage Tracking +
- Conversation History Support + Overview and Usage + +LightRAG provides a TokenTracker tool to monitor and manage token consumption by large language models. This feature is particularly useful for controlling API costs and optimizing performance. + +#### Usage + +```python +from lightrag.utils import TokenTracker + +# Create TokenTracker instance +token_tracker = TokenTracker() + +# Method 1: Using context manager (Recommended) +# Suitable for scenarios requiring automatic token usage tracking +with token_tracker: + result1 = await llm_model_func("your question 1") + result2 = await llm_model_func("your question 2") + +# Method 2: Manually adding token usage records +# Suitable for scenarios requiring more granular control over token statistics +token_tracker.reset() + +rag.insert() + +rag.query("your question 1", param=QueryParam(mode="naive")) +rag.query("your question 2", param=QueryParam(mode="mix")) + +# Display total token usage (including insert and query operations) +print("Token usage:", token_tracker.get_usage()) +``` + +#### Usage Tips +- Use context managers for long sessions or batch operations to automatically track all token consumption +- For scenarios requiring segmented statistics, use manual mode and call reset() when appropriate +- Regular checking of token usage helps detect abnormal consumption early +- Actively use this feature during development and testing to optimize production costs + +#### Practical Examples +You can refer to these examples for implementing token tracking: +- `examples/lightrag_gemini_track_token_demo.py`: Token tracking example using Google Gemini model +- `examples/lightrag_siliconcloud_track_token_demo.py`: Token tracking example using SiliconCloud model + +These examples demonstrate how to effectively use the TokenTracker feature with different models and scenarios. + +
+ +### Conversation History Support + LightRAG now supports multi-turn dialogue through the conversation history feature. Here's how to use it: +
+ Usage Example + ```python # Create conversation history conversation_history = [ @@ -455,11 +529,13 @@ response = rag.query(
-
- Custom Prompt Support +### Custom Prompt Support LightRAG now supports custom prompts for fine-tuned control over the system's behavior. Here's how to use it: +
+ Usage Example + ```python # Create query parameters query_param = QueryParam( @@ -496,12 +572,11 @@ print(response_custom)
-
- Separate Keyword Extraction +### Separate Keyword Extraction We've introduced a new function `query_with_separate_keyword_extraction` to enhance the keyword extraction capabilities. This function separates the keyword extraction process from the user's prompt, focusing solely on the query to improve the relevance of extracted keywords. -##### How It Works? +**How It Works?** The function operates by dividing the input into two parts: @@ -510,7 +585,8 @@ The function operates by dividing the input into two parts: It then performs keyword extraction exclusively on the `user query`. This separation ensures that the extraction process is focused and relevant, unaffected by any additional language in the `prompt`. It also allows the `prompt` to serve purely for response formatting, maintaining the intent and clarity of the user's original question. -##### Usage Example +
+ Usage Example This `example` shows how to tailor the function for educational content, focusing on detailed explanations for older students. @@ -524,79 +600,18 @@ rag.query_with_separate_keyword_extraction(
-
- Insert Custom KG - -```python -custom_kg = { - "chunks": [ - { - "content": "Alice and Bob are collaborating on quantum computing research.", - "source_id": "doc-1" - } - ], - "entities": [ - { - "entity_name": "Alice", - "entity_type": "person", - "description": "Alice is a researcher specializing in quantum physics.", - "source_id": "doc-1" - }, - { - "entity_name": "Bob", - "entity_type": "person", - "description": "Bob is a mathematician.", - "source_id": "doc-1" - }, - { - "entity_name": "Quantum Computing", - "entity_type": "technology", - "description": "Quantum computing utilizes quantum mechanical phenomena for computation.", - "source_id": "doc-1" - } - ], - "relationships": [ - { - "src_id": "Alice", - "tgt_id": "Bob", - "description": "Alice and Bob are research partners.", - "keywords": "collaboration research", - "weight": 1.0, - "source_id": "doc-1" - }, - { - "src_id": "Alice", - "tgt_id": "Quantum Computing", - "description": "Alice conducts research on quantum computing.", - "keywords": "research expertise", - "weight": 1.0, - "source_id": "doc-1" - }, - { - "src_id": "Bob", - "tgt_id": "Quantum Computing", - "description": "Bob researches quantum computing.", - "keywords": "research application", - "weight": 1.0, - "source_id": "doc-1" - } - ] -} - -rag.insert_custom_kg(custom_kg) -``` - -
- ## Insert -#### Basic Insert +
+ Basic Insert ```python # Basic Insert rag.insert("Text") ``` +
+
Batch Insert @@ -608,11 +623,11 @@ rag.insert(["TEXT1", "TEXT2",...]) rag = LightRAG( working_dir=WORKING_DIR, addon_params={ - "insert_batch_size": 20 # Process 20 documents per batch + "insert_batch_size": 4 # Process 4 documents per batch } ) -rag.insert(["TEXT1", "TEXT2", "TEXT3", ...]) # Documents will be processed in batches of 20 +rag.insert(["TEXT1", "TEXT2", "TEXT3", ...]) # Documents will be processed in batches of 4 ``` The `insert_batch_size` parameter in `addon_params` controls how many documents are processed in each batch during insertion. This is useful for: @@ -646,7 +661,7 @@ The `apipeline_enqueue_documents` and `apipeline_process_enqueue_documents` func This is useful for scenarios where you want to process documents in the background while still allowing the main thread to continue executing. -And using a routine to process news documents. +And using a routine to process new documents. ```python rag = LightRAG(..) @@ -674,6 +689,70 @@ rag.insert(text_content.decode('utf-8'))
+
+ Insert Custom KG + +```python +custom_kg = { + "chunks": [ + { + "content": "Alice and Bob are collaborating on quantum computing research.", + "source_id": "doc-1" + } + ], + "entities": [ + { + "entity_name": "Alice", + "entity_type": "person", + "description": "Alice is a researcher specializing in quantum physics.", + "source_id": "doc-1" + }, + { + "entity_name": "Bob", + "entity_type": "person", + "description": "Bob is a mathematician.", + "source_id": "doc-1" + }, + { + "entity_name": "Quantum Computing", + "entity_type": "technology", + "description": "Quantum computing utilizes quantum mechanical phenomena for computation.", + "source_id": "doc-1" + } + ], + "relationships": [ + { + "src_id": "Alice", + "tgt_id": "Bob", + "description": "Alice and Bob are research partners.", + "keywords": "collaboration research", + "weight": 1.0, + "source_id": "doc-1" + }, + { + "src_id": "Alice", + "tgt_id": "Quantum Computing", + "description": "Alice conducts research on quantum computing.", + "keywords": "research expertise", + "weight": 1.0, + "source_id": "doc-1" + }, + { + "src_id": "Bob", + "tgt_id": "Quantum Computing", + "description": "Bob researches quantum computing.", + "keywords": "research application", + "weight": 1.0, + "source_id": "doc-1" + } + ] +} + +rag.insert_custom_kg(custom_kg) +``` + +
+
Citation Functionality @@ -834,7 +913,7 @@ rag.delete_by_doc_id("doc_id") LightRAG now supports comprehensive knowledge graph management capabilities, allowing you to create, edit, and delete entities and relationships within your knowledge graph.
- Create Entities and Relations + Create Entities and Relations ```python # Create new entity @@ -860,7 +939,7 @@ relation = rag.create_relation("Google", "Gmail", {
- Edit Entities and Relations + Edit Entities and Relations ```python # Edit an existing entity @@ -883,8 +962,6 @@ updated_relation = rag.edit_relation("Google", "Google Mail", { }) ``` -
- All operations are available in both synchronous and asynchronous versions. The asynchronous versions have the prefix "a" (e.g., `acreate_entity`, `aedit_relation`). #### Entity Operations @@ -899,15 +976,18 @@ All operations are available in both synchronous and asynchronous versions. The These operations maintain data consistency across both the graph database and vector database components, ensuring your knowledge graph remains coherent. +
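A minimal sketch of the asynchronous variants, assuming an already-initialized `rag` instance inside a running event loop (attribute values are illustrative):

```python
import asyncio

async def update_graph(rag):
    # Async counterparts of create_entity / edit_relation
    await rag.acreate_entity("Google", {
        "description": "Google is a multinational technology company.",
        "entity_type": "company",
    })
    await rag.aedit_relation("Google", "Google Mail", {
        "description": "Google created and maintains the Google Mail service.",
        "keywords": "creates maintains email service",
        "weight": 3.0,
    })

# asyncio.run(update_graph(rag))
```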
+ ## Data Export Functions -## Overview +### Overview LightRAG allows you to export your knowledge graph data in various formats for analysis, sharing, and backup purposes. The system supports exporting entities, relations, and relationship data. -## Export Functions +### Export Functions -### Basic Usage +
+ Basic Usage ```python # Basic CSV export (default format) @@ -917,7 +997,10 @@ rag.export_data("knowledge_graph.csv") rag.export_data("output.xlsx", file_format="excel") ``` -### Different File Formats supported +
+ +
+ Different File Formats supported ```python #Export data in CSV format @@ -932,14 +1015,19 @@ rag.export_data("graph_data.md", file_format="md") # Export data in Text rag.export_data("graph_data.txt", file_format="txt") ``` -## Additional Options +
+ +
+ Additional Options Include vector embeddings in the export (optional): ```python rag.export_data("complete_data.csv", include_vector_data=True) ``` -## Data Included in Export +
+ +### Data Included in Export All exports include: @@ -1059,33 +1147,34 @@ Valid modes are:
Parameters -| **Parameter** | **Type** | **Explanation** | **Default** | -| -------------------------------------------------- | ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | -| **working\_dir** | `str` | Directory where the cache will be stored | `lightrag_cache+timestamp` | -| **kv\_storage** | `str` | Storage type for documents and text chunks. Supported types:`JsonKVStorage`, `OracleKVStorage` | `JsonKVStorage` | -| **vector\_storage** | `str` | Storage type for embedding vectors. Supported types:`NanoVectorDBStorage`, `OracleVectorDBStorage` | `NanoVectorDBStorage` | -| **graph\_storage** | `str` | Storage type for graph edges and nodes. Supported types:`NetworkXStorage`, `Neo4JStorage`, `OracleGraphStorage` | `NetworkXStorage` | -| **chunk\_token\_size** | `int` | Maximum token size per chunk when splitting documents | `1200` | -| **chunk\_overlap\_token\_size** | `int` | Overlap token size between two chunks when splitting documents | `100` | -| **tiktoken\_model\_name** | `str` | Model name for the Tiktoken encoder used to calculate token numbers | `gpt-4o-mini` | -| **entity\_extract\_max\_gleaning** | `int` | Number of loops in the entity extraction process, appending history messages | `1` | -| **entity\_summary\_to\_max\_tokens** | `int` | Maximum token size for each entity summary | `500` | -| **node\_embedding\_algorithm** | `str` | Algorithm for node embedding (currently not used) | `node2vec` | -| **node2vec\_params** | `dict` | Parameters for node embedding | `{"dimensions": 1536,"num_walks": 10,"walk_length": 40,"window_size": 2,"iterations": 3,"random_seed": 3,}` | -| **embedding\_func** | `EmbeddingFunc` | Function to generate embedding vectors from text | `openai_embed` | -| **embedding\_batch\_num** | `int` | Maximum batch size for embedding processes (multiple texts sent per batch) | `32` | -| **embedding\_func\_max\_async** | `int` | Maximum number of concurrent asynchronous embedding processes | `16` | -| **llm\_model\_func** | `callable` | Function for LLM generation | `gpt_4o_mini_complete` | -| **llm\_model\_name** | `str` | LLM model name for generation | `meta-llama/Llama-3.2-1B-Instruct` | -| **llm\_model\_max\_token\_size** | `int` | Maximum token size for LLM generation (affects entity relation summaries) | `32768`(default value changed by env var MAX_TOKENS) | -| **llm\_model\_max\_async** | `int` | Maximum number of concurrent asynchronous LLM processes | `4`(default value changed by env var MAX_ASYNC) | -| **llm\_model\_kwargs** | `dict` | Additional parameters for LLM generation | | -| **vector\_db\_storage\_cls\_kwargs** | `dict` | Additional parameters for vector database, like setting the threshold for nodes and relations retrieval. 
| cosine_better_than_threshold: 0.2(default value changed by env var COSINE_THRESHOLD) | -| **enable\_llm\_cache** | `bool` | If `TRUE`, stores LLM results in cache; repeated prompts return cached responses | `TRUE` | -| **enable\_llm\_cache\_for\_entity\_extract** | `bool` | If `TRUE`, stores LLM results in cache for entity extraction; Good for beginners to debug your application | `TRUE` | -| **addon\_params** | `dict` | Additional parameters, e.g.,`{"example_number": 1, "language": "Simplified Chinese", "entity_types": ["organization", "person", "geo", "event"], "insert_batch_size": 10}`: sets example limit, output language, and batch size for document processing | `example_number: all examples, language: English, insert_batch_size: 10` | -| **convert\_response\_to\_json\_func** | `callable` | Not used | `convert_response_to_json` | -| **embedding\_cache\_config** | `dict` | Configuration for question-answer caching. Contains three parameters:`
`- `enabled`: Boolean value to enable/disable cache lookup functionality. When enabled, the system will check cached responses before generating new answers.`
`- `similarity_threshold`: Float value (0-1), similarity threshold. When a new question's similarity with a cached question exceeds this threshold, the cached answer will be returned directly without calling the LLM.`
- `use_llm_check`: Boolean value to enable/disable LLM similarity verification. When enabled, LLM will be used as a secondary check to verify the similarity between questions before returning cached answers. | Default:`{"enabled": False, "similarity_threshold": 0.95, "use_llm_check": False}` |
+| **Parameter** | **Type** | **Explanation** | **Default** |
+|--------------|----------|-----------------|-------------|
+| **working_dir** | `str` | Directory where the cache will be stored | `lightrag_cache+timestamp` |
+| **kv_storage** | `str` | Storage type for documents and text chunks. Supported types: `JsonKVStorage`,`PGKVStorage`,`RedisKVStorage`,`MongoKVStorage` | `JsonKVStorage` |
+| **vector_storage** | `str` | Storage type for embedding vectors. Supported types: `NanoVectorDBStorage`,`PGVectorStorage`,`MilvusVectorDBStorage`,`ChromaVectorDBStorage`,`FaissVectorDBStorage`,`MongoVectorDBStorage`,`QdrantVectorDBStorage` | `NanoVectorDBStorage` |
+| **graph_storage** | `str` | Storage type for graph edges and nodes. Supported types: `NetworkXStorage`,`Neo4JStorage`,`PGGraphStorage`,`AGEStorage` | `NetworkXStorage` |
+| **doc_status_storage** | `str` | Storage type for document processing status. Supported types: `JsonDocStatusStorage`,`PGDocStatusStorage`,`MongoDocStatusStorage` | `JsonDocStatusStorage` |
+| **chunk_token_size** | `int` | Maximum token size per chunk when splitting documents | `1200` |
+| **chunk_overlap_token_size** | `int` | Overlap token size between two chunks when splitting documents | `100` |
+| **tiktoken_model_name** | `str` | Model name for the Tiktoken encoder used to calculate token numbers | `gpt-4o-mini` |
+| **entity_extract_max_gleaning** | `int` | Number of loops in the entity extraction process, appending history messages | `1` |
+| **entity_summary_to_max_tokens** | `int` | Maximum token size for each entity summary | `500` |
+| **node_embedding_algorithm** | `str` | Algorithm for node embedding (currently not used) | `node2vec` |
+| **node2vec_params** | `dict` | Parameters for node embedding | `{"dimensions": 1536,"num_walks": 10,"walk_length": 40,"window_size": 2,"iterations": 3,"random_seed": 3,}` |
+| **embedding_func** | `EmbeddingFunc` | Function to generate embedding vectors from text | `openai_embed` |
+| **embedding_batch_num** | `int` | Maximum batch size for embedding processes (multiple texts sent per batch) | `32` |
+| **embedding_func_max_async** | `int` | Maximum number of concurrent asynchronous embedding processes | `16` |
+| **llm_model_func** | `callable` | Function for LLM generation | `gpt_4o_mini_complete` |
+| **llm_model_name** | `str` | LLM model name for generation | `meta-llama/Llama-3.2-1B-Instruct` |
+| **llm_model_max_token_size** | `int` | Maximum token size for LLM generation (affects entity relation summaries) | `32768` (can be changed via env var MAX_TOKENS) |
+| **llm_model_max_async** | `int` | Maximum number of concurrent asynchronous LLM processes | `4` (can be changed via env var MAX_ASYNC) |
+| **llm_model_kwargs** | `dict` | Additional parameters for LLM generation | |
+| **vector_db_storage_cls_kwargs** | `dict` | Additional parameters for the vector database, such as setting the threshold for node and relation retrieval | `cosine_better_than_threshold: 0.2` (can be changed via env var COSINE_THRESHOLD) |
+| **enable_llm_cache** | `bool` | If `TRUE`, stores LLM results in cache; repeated prompts return cached responses | `TRUE` |
+| **enable_llm_cache_for_entity_extract** | `bool` | If `TRUE`, stores LLM results in cache for entity extraction; useful for debugging your application during development | `TRUE` |
+| **addon_params** | `dict` | Additional parameters, e.g., `{"example_number": 1, "language": "Simplified Chinese", "entity_types": ["organization", "person", "geo", "event"], "insert_batch_size": 10}`: sets example limit, output language, and batch size for document processing | `example_number: all examples, language: English, insert_batch_size: 10` |
+| **convert_response_to_json_func** | `callable` | Not used | `convert_response_to_json` |
+| **embedding_cache_config** | `dict` | Configuration for question-answer caching. Contains three parameters: `enabled`: Boolean value to enable/disable cache lookup functionality. When enabled, the system will check cached responses before generating new answers. `similarity_threshold`: Float value (0-1) specifying the similarity threshold. When a new question's similarity with a cached question exceeds this threshold, the cached answer will be returned directly without calling the LLM. `use_llm_check`: Boolean value to enable/disable LLM similarity verification. When enabled, the LLM is used as a secondary check to verify the similarity between questions before returning cached answers. | Default: `{"enabled": False, "similarity_threshold": 0.95, "use_llm_check": False}` |
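For readers who prefer code to the table above, here is a minimal sketch of passing several of these parameters to the `LightRAG` constructor. The concrete values (working directory, cache thresholds, batch sizes) are illustrative assumptions, not recommendations; anything omitted keeps the default from the table:

```python
import asyncio
from lightrag import LightRAG
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
from lightrag.kg.shared_storage import initialize_pipeline_status

async def initialize_rag():
    rag = LightRAG(
        working_dir="./rag_storage",  # illustrative path
        # Storage backends (these are the defaults, written out for clarity)
        kv_storage="JsonKVStorage",
        vector_storage="NanoVectorDBStorage",
        graph_storage="NetworkXStorage",
        doc_status_storage="JsonDocStatusStorage",
        # Chunking
        chunk_token_size=1200,
        chunk_overlap_token_size=100,
        # Model functions (requires OPENAI_API_KEY in the environment)
        embedding_func=openai_embed,
        llm_model_func=gpt_4o_mini_complete,
        # Caching
        enable_llm_cache=True,
        enable_llm_cache_for_entity_extract=True,
        embedding_cache_config={
            "enabled": True,              # illustrative: default is False
            "similarity_threshold": 0.95,
            "use_llm_check": False,
        },
        # Retrieval threshold for the vector storage (see table above)
        vector_db_storage_cls_kwargs={"cosine_better_than_threshold": 0.2},
        # Prompting add-ons
        addon_params={"language": "English", "insert_batch_size": 10},
    )
    await rag.initialize_storages()
    await initialize_pipeline_status()
    return rag

rag = asyncio.run(initialize_rag())
```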
@@ -1102,174 +1191,15 @@ The API includes comprehensive error handling:
-## API - -LightRag can be installed with API support to serve a Fast api interface to perform data upload and indexing/Rag operations/Rescan of the input folder etc.. +## LightRAG API -[LightRag API](lightrag/api/README.md) +The LightRAG Server is designed to provide Web UI and API support. **For more information about LightRAG Server, please refer to [LightRAG Server](./lightrag/api/README.md).** ## Graph Visualization -
- Graph visualization with html - -* The following code can be found in `examples/graph_visual_with_html.py` - -```python -import networkx as nx -from pyvis.network import Network - -# Load the GraphML file -G = nx.read_graphml('./dickens/graph_chunk_entity_relation.graphml') - -# Create a Pyvis network -net = Network(notebook=True) - -# Convert NetworkX graph to Pyvis network -net.from_nx(G) - -# Save and display the network -net.show('knowledge_graph.html') -``` - -
+The LightRAG Server offers a comprehensive knowledge graph visualization feature. It supports various gravity layouts, node queries, subgraph filtering, and more. **For more information about LightRAG Server, please refer to [LightRAG Server](./lightrag/api/README.md).** -
- Graph visualization with Neo4 - -* The following code can be found in `examples/graph_visual_with_neo4j.py` - -```python -import os -import json -from lightrag.utils import xml_to_json -from neo4j import GraphDatabase - -# Constants -WORKING_DIR = "./dickens" -BATCH_SIZE_NODES = 500 -BATCH_SIZE_EDGES = 100 - -# Neo4j connection credentials -NEO4J_URI = "bolt://localhost:7687" -NEO4J_USERNAME = "neo4j" -NEO4J_PASSWORD = "your_password" - -def convert_xml_to_json(xml_path, output_path): - """Converts XML file to JSON and saves the output.""" - if not os.path.exists(xml_path): - print(f"Error: File not found - {xml_path}") - return None - - json_data = xml_to_json(xml_path) - if json_data: - with open(output_path, 'w', encoding='utf-8') as f: - json.dump(json_data, f, ensure_ascii=False, indent=2) - print(f"JSON file created: {output_path}") - return json_data - else: - print("Failed to create JSON data") - return None - -def process_in_batches(tx, query, data, batch_size): - """Process data in batches and execute the given query.""" - for i in range(0, len(data), batch_size): - batch = data[i:i + batch_size] - tx.run(query, {"nodes": batch} if "nodes" in query else {"edges": batch}) - -def main(): - # Paths - xml_file = os.path.join(WORKING_DIR, 'graph_chunk_entity_relation.graphml') - json_file = os.path.join(WORKING_DIR, 'graph_data.json') - - # Convert XML to JSON - json_data = convert_xml_to_json(xml_file, json_file) - if json_data is None: - return - - # Load nodes and edges - nodes = json_data.get('nodes', []) - edges = json_data.get('edges', []) - - # Neo4j queries - create_nodes_query = """ - UNWIND $nodes AS node - MERGE (e:Entity {id: node.id}) - SET e.entity_type = node.entity_type, - e.description = node.description, - e.source_id = node.source_id, - e.displayName = node.id - REMOVE e:Entity - WITH e, node - CALL apoc.create.addLabels(e, [node.entity_type]) YIELD node AS labeledNode - RETURN count(*) - """ - - create_edges_query = """ - UNWIND $edges AS edge - MATCH (source {id: edge.source}) - MATCH (target {id: edge.target}) - WITH source, target, edge, - CASE - WHEN edge.keywords CONTAINS 'lead' THEN 'lead' - WHEN edge.keywords CONTAINS 'participate' THEN 'participate' - WHEN edge.keywords CONTAINS 'uses' THEN 'uses' - WHEN edge.keywords CONTAINS 'located' THEN 'located' - WHEN edge.keywords CONTAINS 'occurs' THEN 'occurs' - ELSE REPLACE(SPLIT(edge.keywords, ',')[0], '\"', '') - END AS relType - CALL apoc.create.relationship(source, relType, { - weight: edge.weight, - description: edge.description, - keywords: edge.keywords, - source_id: edge.source_id - }, target) YIELD rel - RETURN count(*) - """ - - set_displayname_and_labels_query = """ - MATCH (n) - SET n.displayName = n.id - WITH n - CALL apoc.create.setLabels(n, [n.entity_type]) YIELD node - RETURN count(*) - """ - - # Create a Neo4j driver - driver = GraphDatabase.driver(NEO4J_URI, auth=(NEO4J_USERNAME, NEO4J_PASSWORD)) - - try: - # Execute queries in batches - with driver.session() as session: - # Insert nodes in batches - session.execute_write(process_in_batches, create_nodes_query, nodes, BATCH_SIZE_NODES) - - # Insert edges in batches - session.execute_write(process_in_batches, create_edges_query, edges, BATCH_SIZE_EDGES) - - # Set displayName and labels - session.run(set_displayname_and_labels_query) - - except Exception as e: - print(f"Error occurred: {e}") - - finally: - driver.close() - -if __name__ == "__main__": - main() -``` - -
- -
- Graphml 3d visualizer - -LightRag can be installed with Tools support to add extra tools like the graphml 3d visualizer. - -[LightRag Visualizer](lightrag/tools/lightrag_visualizer/README.md) - -
+![iShot_2025-03-23_12.40.08](./README.assets/iShot_2025-03-23_12.40.08.png) ## Evaluation @@ -1364,28 +1294,28 @@ Output your evaluation in the following JSON format: ### Overall Performance Table -| | **Agriculture** | | **CS** | | **Legal** | | **Mix** | | -| --------------------------- | --------------------- | ------------------ | ------------ | ------------------ | --------------- | ------------------ | --------------- | ------------------ | -| | NaiveRAG | **LightRAG** | NaiveRAG | **LightRAG** | NaiveRAG | **LightRAG** | NaiveRAG | **LightRAG** | -| **Comprehensiveness** | 32.4% | **67.6%** | 38.4% | **61.6%** | 16.4% | **83.6%** | 38.8% | **61.2%** | -| **Diversity** | 23.6% | **76.4%** | 38.0% | **62.0%** | 13.6% | **86.4%** | 32.4% | **67.6%** | -| **Empowerment** | 32.4% | **67.6%** | 38.8% | **61.2%** | 16.4% | **83.6%** | 42.8% | **57.2%** | -| **Overall** | 32.4% | **67.6%** | 38.8% | **61.2%** | 15.2% | **84.8%** | 40.0% | **60.0%** | -| | RQ-RAG | **LightRAG** | RQ-RAG | **LightRAG** | RQ-RAG | **LightRAG** | RQ-RAG | **LightRAG** | -| **Comprehensiveness** | 31.6% | **68.4%** | 38.8% | **61.2%** | 15.2% | **84.8%** | 39.2% | **60.8%** | -| **Diversity** | 29.2% | **70.8%** | 39.2% | **60.8%** | 11.6% | **88.4%** | 30.8% | **69.2%** | -| **Empowerment** | 31.6% | **68.4%** | 36.4% | **63.6%** | 15.2% | **84.8%** | 42.4% | **57.6%** | -| **Overall** | 32.4% | **67.6%** | 38.0% | **62.0%** | 14.4% | **85.6%** | 40.0% | **60.0%** | -| | HyDE | **LightRAG** | HyDE | **LightRAG** | HyDE | **LightRAG** | HyDE | **LightRAG** | -| **Comprehensiveness** | 26.0% | **74.0%** | 41.6% | **58.4%** | 26.8% | **73.2%** | 40.4% | **59.6%** | -| **Diversity** | 24.0% | **76.0%** | 38.8% | **61.2%** | 20.0% | **80.0%** | 32.4% | **67.6%** | -| **Empowerment** | 25.2% | **74.8%** | 40.8% | **59.2%** | 26.0% | **74.0%** | 46.0% | **54.0%** | -| **Overall** | 24.8% | **75.2%** | 41.6% | **58.4%** | 26.4% | **73.6%** | 42.4% | **57.6%** | -| | GraphRAG | **LightRAG** | GraphRAG | **LightRAG** | GraphRAG | **LightRAG** | GraphRAG | **LightRAG** | -| **Comprehensiveness** | 45.6% | **54.4%** | 48.4% | **51.6%** | 48.4% | **51.6%** | **50.4%** | 49.6% | -| **Diversity** | 22.8% | **77.2%** | 40.8% | **59.2%** | 26.4% | **73.6%** | 36.0% | **64.0%** | -| **Empowerment** | 41.2% | **58.8%** | 45.2% | **54.8%** | 43.6% | **56.4%** | **50.8%** | 49.2% | -| **Overall** | 45.2% | **54.8%** | 48.0% | **52.0%** | 47.2% | **52.8%** | **50.4%** | 49.6% | +| |**Agriculture**| |**CS**| |**Legal**| |**Mix**| | +|----------------------|---------------|------------|------|------------|---------|------------|-------|------------| +| |NaiveRAG|**LightRAG**|NaiveRAG|**LightRAG**|NaiveRAG|**LightRAG**|NaiveRAG|**LightRAG**| +|**Comprehensiveness**|32.4%|**67.6%**|38.4%|**61.6%**|16.4%|**83.6%**|38.8%|**61.2%**| +|**Diversity**|23.6%|**76.4%**|38.0%|**62.0%**|13.6%|**86.4%**|32.4%|**67.6%**| +|**Empowerment**|32.4%|**67.6%**|38.8%|**61.2%**|16.4%|**83.6%**|42.8%|**57.2%**| +|**Overall**|32.4%|**67.6%**|38.8%|**61.2%**|15.2%|**84.8%**|40.0%|**60.0%**| +| |RQ-RAG|**LightRAG**|RQ-RAG|**LightRAG**|RQ-RAG|**LightRAG**|RQ-RAG|**LightRAG**| +|**Comprehensiveness**|31.6%|**68.4%**|38.8%|**61.2%**|15.2%|**84.8%**|39.2%|**60.8%**| +|**Diversity**|29.2%|**70.8%**|39.2%|**60.8%**|11.6%|**88.4%**|30.8%|**69.2%**| +|**Empowerment**|31.6%|**68.4%**|36.4%|**63.6%**|15.2%|**84.8%**|42.4%|**57.6%**| +|**Overall**|32.4%|**67.6%**|38.0%|**62.0%**|14.4%|**85.6%**|40.0%|**60.0%**| +| 
|HyDE|**LightRAG**|HyDE|**LightRAG**|HyDE|**LightRAG**|HyDE|**LightRAG**| +|**Comprehensiveness**|26.0%|**74.0%**|41.6%|**58.4%**|26.8%|**73.2%**|40.4%|**59.6%**| +|**Diversity**|24.0%|**76.0%**|38.8%|**61.2%**|20.0%|**80.0%**|32.4%|**67.6%**| +|**Empowerment**|25.2%|**74.8%**|40.8%|**59.2%**|26.0%|**74.0%**|46.0%|**54.0%**| +|**Overall**|24.8%|**75.2%**|41.6%|**58.4%**|26.4%|**73.6%**|42.4%|**57.6%**| +| |GraphRAG|**LightRAG**|GraphRAG|**LightRAG**|GraphRAG|**LightRAG**|GraphRAG|**LightRAG**| +|**Comprehensiveness**|45.6%|**54.4%**|48.4%|**51.6%**|48.4%|**51.6%**|**50.4%**|49.6%| +|**Diversity**|22.8%|**77.2%**|40.8%|**59.2%**|26.4%|**73.6%**|36.0%|**64.0%**| +|**Empowerment**|41.2%|**58.8%**|45.2%|**54.8%**|43.6%|**56.4%**|**50.8%**|49.2%| +|**Overall**|45.2%|**54.8%**|48.0%|**52.0%**|47.2%|**52.8%**|**50.4%**|49.6%| ## Reproduce diff --git a/config.ini.example b/config.ini.example index 3041611e31..5ff7cfbbdd 100644 --- a/config.ini.example +++ b/config.ini.example @@ -13,23 +13,6 @@ uri=redis://localhost:6379/1 [qdrant] uri = http://localhost:16333 -[oracle] -dsn = localhost:1521/XEPDB1 -user = your_username -password = your_password -config_dir = /path/to/oracle/config -wallet_location = /path/to/wallet # 可选 -wallet_password = your_wallet_password # 可选 -workspace = default # 可选,默认为default - -[tidb] -host = localhost -port = 4000 -user = your_username -password = your_password -database = your_database -workspace = default # 可选,默认为default - [postgres] host = localhost port = 5432 diff --git a/env.example b/env.example index 86b4481f2a..cb6b8e2866 100644 --- a/env.example +++ b/env.example @@ -3,37 +3,33 @@ ### Server Configuration # HOST=0.0.0.0 # PORT=9621 -# WORKERS=1 -# NAMESPACE_PREFIX=lightrag # separating data from difference Lightrag instances -# MAX_GRAPH_NODES=1000 # Max nodes return from grap retrieval +# WORKERS=2 # CORS_ORIGINS=http://localhost:3000,http://localhost:8080 +WEBUI_TITLE='Graph RAG Engine' +WEBUI_DESCRIPTION="Simple and Fast Graph Based RAG System" ### Optional SSL Configuration # SSL=true # SSL_CERTFILE=/path/to/cert.pem # SSL_KEYFILE=/path/to/key.pem -### Security (empty for no api-key is needed) -# LIGHTRAG_API_KEY=your-secure-api-key-here - -### Directory Configuration +### Directory Configuration (defaults to current working directory) # WORKING_DIR= # INPUT_DIR= ### Ollama Emulating Model Tag # OLLAMA_EMULATING_MODEL_TAG=latest +### Max nodes return from grap retrieval +# MAX_GRAPH_NODES=1000 + ### Logging level # LOG_LEVEL=INFO # VERBOSE=False -# LOG_DIR=/path/to/log/directory # Log file directory path, defaults to current working directory -# LOG_MAX_BYTES=10485760 # Log file max size in bytes, defaults to 10MB -# LOG_BACKUP_COUNT=5 # Number of backup files to keep, defaults to 5 - -### Max async calls for LLM -# MAX_ASYNC=4 -### Optional Timeout for LLM -# TIMEOUT=150 # Time out in seconds, None for infinite timeout +# LOG_MAX_BYTES=10485760 +# LOG_BACKUP_COUNT=5 +### Logfile location (defaults to current working directory) +# LOG_DIR=/path/to/log/directory ### Settings for RAG query # HISTORY_TURNS=3 @@ -44,21 +40,42 @@ # MAX_TOKEN_ENTITY_DESC=4000 ### Settings for document indexing +SUMMARY_LANGUAGE=English # CHUNK_SIZE=1200 # CHUNK_OVERLAP_SIZE=100 -# MAX_TOKENS=32768 # Max tokens send to LLM for summarization -# MAX_TOKEN_SUMMARY=500 # Max tokens for entity or relations summary -# SUMMARY_LANGUAGE=English + +### Number of parallel processing documents in one patch +# MAX_PARALLEL_INSERT=2 + +### Max tokens for entity/relations description after 
merge
+# MAX_TOKEN_SUMMARY=500
+### Number of entities/edges to trigger LLM re-summary on merge (at least 3 is recommended)
+# FORCE_LLM_SUMMARY_ON_MERGE=6
+
+### Number of chunks sent to embedding in a single request
+# EMBEDDING_BATCH_NUM=32
+### Max concurrent requests for embedding
+# EMBEDDING_FUNC_MAX_ASYNC=16
# MAX_EMBED_TOKENS=8192
-# ENABLE_LLM_CACHE_FOR_EXTRACT=true # Enable LLM cache for entity extraction
-# MAX_PARALLEL_INSERT=2 # Maximum number of parallel processing documents in pipeline
-### LLM Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
+### LLM Configuration
+### Timeout in seconds for LLM, None for infinite timeout
+TIMEOUT=150
+### Some models like o1-mini require temperature to be set to 1
+TEMPERATURE=0.5
+### Max concurrent requests to the LLM
+MAX_ASYNC=4
+### Max tokens sent to the LLM (less than the context size of the model)
+MAX_TOKENS=32768
+ENABLE_LLM_CACHE=true
+ENABLE_LLM_CACHE_FOR_EXTRACT=true
+
+### Ollama example (For local services installed with docker, you can use host.docker.internal as host)
LLM_BINDING=ollama
LLM_MODEL=mistral-nemo:latest
LLM_BINDING_API_KEY=your_api_key
-### Ollama example
LLM_BINDING_HOST=http://localhost:11434
+
### OpenAI-like example
# LLM_BINDING=openai
# LLM_MODEL=gpt-4o
@@ -99,22 +116,14 @@ LIGHTRAG_VECTOR_STORAGE=NanoVectorDBStorage
LIGHTRAG_GRAPH_STORAGE=NetworkXStorage
LIGHTRAG_DOC_STATUS_STORAGE=JsonDocStatusStorage
-### Oracle Database Configuration
-ORACLE_DSN=localhost:1521/XEPDB1
-ORACLE_USER=your_username
-ORACLE_PASSWORD='your_password'
-ORACLE_CONFIG_DIR=/path/to/oracle/config
-#ORACLE_WALLET_LOCATION=/path/to/wallet # optional
-#ORACLE_WALLET_PASSWORD='your_password' # optional
-#ORACLE_WORKSPACE=default # separating all data from difference Lightrag instances(deprecated, use NAMESPACE_PREFIX in future)
-
-### TiDB Configuration
-TIDB_HOST=localhost
-TIDB_PORT=4000
-TIDB_USER=your_username
-TIDB_PASSWORD='your_password'
-TIDB_DATABASE=your_database
-#TIDB_WORKSPACE=default # separating all data from difference Lightrag instances(deprecated, use NAMESPACE_PREFIX in future)
+### TiDB Configuration (Deprecated)
+# TIDB_HOST=localhost
+# TIDB_PORT=4000
+# TIDB_USER=your_username
+# TIDB_PASSWORD='your_password'
+# TIDB_DATABASE=your_database
+### Separates data from different LightRAG instances (deprecated)
+# TIDB_WORKSPACE=default

### PostgreSQL Configuration
POSTGRES_HOST=localhost
@@ -122,7 +131,8 @@ POSTGRES_PORT=5432
POSTGRES_USER=your_username
POSTGRES_PASSWORD='your_password'
POSTGRES_DATABASE=your_database
-#POSTGRES_WORKSPACE=default # separating all data from difference Lightrag instances(deprecated, use NAMESPACE_PREFIX in future)
+### Separates data from different LightRAG instances (deprecated)
+# POSTGRES_WORKSPACE=default

### Independent AGE Configuration (not for AGE embedded in PostgreSQL)
AGE_POSTGRES_DB=
AGE_POSTGRES_USER=
AGE_POSTGRES_PASSWORD=
AGE_POSTGRES_HOST=
# AGE_POSTGRES_PORT=8529

# AGE Graph Name (applies to PostgreSQL and independent AGE)
-# AGE_GRAPH_NAME=lightrag # deprecated, use NAME_SPACE_PREFIX instead
+### AGE_GRAPH_NAME is deprecated
+# AGE_GRAPH_NAME=lightrag

### Neo4j Configuration
NEO4J_URI=neo4j+s://xxxxxxxx.databases.neo4j.io
@@ -140,9 +151,17 @@ NEO4J_USERNAME=neo4j
NEO4J_PASSWORD='your_password'

### MongoDB Configuration
-MONGODB_URI=mongodb://root:root@localhost:27017/
-MONGODB_DATABASE=LightRAG
-MONGODB_GRAPH=false # deprecated (keep for backward compatibility)
+MONGO_URI=mongodb://root:root@localhost:27017/
+MONGO_DATABASE=LightRAG
+### 
separating all data from difference Lightrag instances(deprecating) +# MONGODB_GRAPH=false + +### Milvus Configuration +MILVUS_URI=http://localhost:19530 +MILVUS_DB_NAME=lightrag +# MILVUS_USER=root +# MILVUS_PASSWORD=your_password +# MILVUS_TOKEN=your_token ### Qdrant QDRANT_URL=http://localhost:16333 @@ -151,9 +170,13 @@ QDRANT_URL=http://localhost:16333 ### Redis REDIS_URI=redis://localhost:6379 -### For JWTt Auth -AUTH_USERNAME=admin # login name -AUTH_PASSWORD=admin123 # password -TOKEN_SECRET=your-key-for-LightRAG-API-Server # JWT key -TOKEN_EXPIRE_HOURS=4 # expire duration -WHITELIST_PATHS=/login,/health # white list +### For JWT Auth +# AUTH_ACCOUNTS='admin:admin123,user1:pass456' +# TOKEN_SECRET=Your-Key-For-LightRAG-API-Server +# TOKEN_EXPIRE_HOURS=48 +# GUEST_TOKEN_EXPIRE_HOURS=24 +# JWT_ALGORITHM=HS256 + +### API-Key to access LightRAG Server API +# LIGHTRAG_API_KEY=your-secure-api-key-here +# WHITELIST_PATHS=/health,/api/* diff --git a/examples/lightrag_api_ollama_demo.py b/examples/lightrag_api_ollama_demo.py deleted file mode 100644 index dad2a2e01d..0000000000 --- a/examples/lightrag_api_ollama_demo.py +++ /dev/null @@ -1,188 +0,0 @@ -from fastapi import FastAPI, HTTPException, File, UploadFile -from contextlib import asynccontextmanager -from pydantic import BaseModel -import os -from lightrag import LightRAG, QueryParam -from lightrag.llm.ollama import ollama_embed, ollama_model_complete -from lightrag.utils import EmbeddingFunc -from typing import Optional -import asyncio -import nest_asyncio -import aiofiles -from lightrag.kg.shared_storage import initialize_pipeline_status - -# Apply nest_asyncio to solve event loop issues -nest_asyncio.apply() - -DEFAULT_RAG_DIR = "index_default" - -DEFAULT_INPUT_FILE = "book.txt" -INPUT_FILE = os.environ.get("INPUT_FILE", f"{DEFAULT_INPUT_FILE}") -print(f"INPUT_FILE: {INPUT_FILE}") - -# Configure working directory -WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}") -print(f"WORKING_DIR: {WORKING_DIR}") - - -if not os.path.exists(WORKING_DIR): - os.mkdir(WORKING_DIR) - - -async def init(): - rag = LightRAG( - working_dir=WORKING_DIR, - llm_model_func=ollama_model_complete, - llm_model_name="gemma2:9b", - llm_model_max_async=4, - llm_model_max_token_size=8192, - llm_model_kwargs={ - "host": "http://localhost:11434", - "options": {"num_ctx": 8192}, - }, - embedding_func=EmbeddingFunc( - embedding_dim=768, - max_token_size=8192, - func=lambda texts: ollama_embed( - texts, embed_model="nomic-embed-text", host="http://localhost:11434" - ), - ), - ) - - # Add initialization code - await rag.initialize_storages() - await initialize_pipeline_status() - - return rag - - -@asynccontextmanager -async def lifespan(app: FastAPI): - global rag - rag = await init() - print("done!") - yield - - -app = FastAPI( - title="LightRAG API", description="API for RAG operations", lifespan=lifespan -) - - -# Data models -class QueryRequest(BaseModel): - query: str - mode: str = "hybrid" - only_need_context: bool = False - - -class InsertRequest(BaseModel): - text: str - - -class Response(BaseModel): - status: str - data: Optional[str] = None - message: Optional[str] = None - - -# API routes -@app.post("/query", response_model=Response) -async def query_endpoint(request: QueryRequest): - try: - loop = asyncio.get_event_loop() - result = await loop.run_in_executor( - None, - lambda: rag.query( - request.query, - param=QueryParam( - mode=request.mode, only_need_context=request.only_need_context - ), - ), - ) - return Response(status="success", 
data=result) - except Exception as e: - raise HTTPException(status_code=500, detail=str(e)) - - -# insert by text -@app.post("/insert", response_model=Response) -async def insert_endpoint(request: InsertRequest): - try: - loop = asyncio.get_event_loop() - await loop.run_in_executor(None, lambda: rag.insert(request.text)) - return Response(status="success", message="Text inserted successfully") - except Exception as e: - raise HTTPException(status_code=500, detail=str(e)) - - -# insert by file in payload -@app.post("/insert_file", response_model=Response) -async def insert_file(file: UploadFile = File(...)): - try: - file_content = await file.read() - # Read file content - try: - content = file_content.decode("utf-8") - except UnicodeDecodeError: - # If UTF-8 decoding fails, try other encodings - content = file_content.decode("gbk") - # Insert file content - loop = asyncio.get_event_loop() - await loop.run_in_executor(None, lambda: rag.insert(content)) - - return Response( - status="success", - message=f"File content from {file.filename} inserted successfully", - ) - except Exception as e: - raise HTTPException(status_code=500, detail=str(e)) - - -# insert by local default file -@app.post("/insert_default_file", response_model=Response) -@app.get("/insert_default_file", response_model=Response) -async def insert_default_file(): - try: - # Read file content from book.txt - async with aiofiles.open(INPUT_FILE, "r", encoding="utf-8") as file: - content = await file.read() - print(f"read input file {INPUT_FILE} successfully") - # Insert file content - loop = asyncio.get_event_loop() - await loop.run_in_executor(None, lambda: rag.insert(content)) - - return Response( - status="success", - message=f"File content from {INPUT_FILE} inserted successfully", - ) - except Exception as e: - raise HTTPException(status_code=500, detail=str(e)) - - -@app.get("/health") -async def health_check(): - return {"status": "healthy"} - - -if __name__ == "__main__": - import uvicorn - - uvicorn.run(app, host="0.0.0.0", port=8020) - -# Usage example -# To run the server, use the following command in your terminal: -# python lightrag_api_openai_compatible_demo.py - -# Example requests: -# 1. Query: -# curl -X POST "http://127.0.0.1:8020/query" -H "Content-Type: application/json" -d '{"query": "your query here", "mode": "hybrid"}' - -# 2. Insert text: -# curl -X POST "http://127.0.0.1:8020/insert" -H "Content-Type: application/json" -d '{"text": "your text here"}' - -# 3. Insert file: -# curl -X POST "http://127.0.0.1:8020/insert_file" -H "Content-Type: multipart/form-data" -F "file=@path/to/your/file.txt" - -# 4. 
Health check: -# curl -X GET "http://127.0.0.1:8020/health" diff --git a/examples/lightrag_api_openai_compatible_demo.py b/examples/lightrag_api_openai_compatible_demo.py deleted file mode 100644 index 312be87269..0000000000 --- a/examples/lightrag_api_openai_compatible_demo.py +++ /dev/null @@ -1,204 +0,0 @@ -from fastapi import FastAPI, HTTPException, File, UploadFile -from contextlib import asynccontextmanager -from pydantic import BaseModel -import os -from lightrag import LightRAG, QueryParam -from lightrag.llm.openai import openai_complete_if_cache, openai_embed -from lightrag.utils import EmbeddingFunc -import numpy as np -from typing import Optional -import asyncio -import nest_asyncio -from lightrag.kg.shared_storage import initialize_pipeline_status - -# Apply nest_asyncio to solve event loop issues -nest_asyncio.apply() - -DEFAULT_RAG_DIR = "index_default" -app = FastAPI(title="LightRAG API", description="API for RAG operations") - -# Configure working directory -WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}") -print(f"WORKING_DIR: {WORKING_DIR}") -LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4o-mini") -print(f"LLM_MODEL: {LLM_MODEL}") -EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-large") -print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}") -EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192)) -print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}") -BASE_URL = os.environ.get("BASE_URL", "https://api.openai.com/v1") -print(f"BASE_URL: {BASE_URL}") -API_KEY = os.environ.get("API_KEY", "xxxxxxxx") -print(f"API_KEY: {API_KEY}") - -if not os.path.exists(WORKING_DIR): - os.mkdir(WORKING_DIR) - - -# LLM model function - - -async def llm_model_func( - prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs -) -> str: - return await openai_complete_if_cache( - model=LLM_MODEL, - prompt=prompt, - system_prompt=system_prompt, - history_messages=history_messages, - base_url=BASE_URL, - api_key=API_KEY, - **kwargs, - ) - - -# Embedding function - - -async def embedding_func(texts: list[str]) -> np.ndarray: - return await openai_embed( - texts=texts, - model=EMBEDDING_MODEL, - base_url=BASE_URL, - api_key=API_KEY, - ) - - -async def get_embedding_dim(): - test_text = ["This is a test sentence."] - embedding = await embedding_func(test_text) - embedding_dim = embedding.shape[1] - print(f"{embedding_dim=}") - return embedding_dim - - -# Initialize RAG instance -async def init(): - embedding_dimension = await get_embedding_dim() - - rag = LightRAG( - working_dir=WORKING_DIR, - llm_model_func=llm_model_func, - embedding_func=EmbeddingFunc( - embedding_dim=embedding_dimension, - max_token_size=EMBEDDING_MAX_TOKEN_SIZE, - func=embedding_func, - ), - ) - - await rag.initialize_storages() - await initialize_pipeline_status() - - return rag - - -@asynccontextmanager -async def lifespan(app: FastAPI): - global rag - rag = await init() - print("done!") - yield - - -app = FastAPI( - title="LightRAG API", description="API for RAG operations", lifespan=lifespan -) - -# Data models - - -class QueryRequest(BaseModel): - query: str - mode: str = "hybrid" - only_need_context: bool = False - - -class InsertRequest(BaseModel): - text: str - - -class Response(BaseModel): - status: str - data: Optional[str] = None - message: Optional[str] = None - - -# API routes - - -@app.post("/query", response_model=Response) -async def query_endpoint(request: QueryRequest): - try: - loop = asyncio.get_event_loop() - result = await 
loop.run_in_executor( - None, - lambda: rag.query( - request.query, - param=QueryParam( - mode=request.mode, only_need_context=request.only_need_context - ), - ), - ) - return Response(status="success", data=result) - except Exception as e: - raise HTTPException(status_code=500, detail=str(e)) - - -@app.post("/insert", response_model=Response) -async def insert_endpoint(request: InsertRequest): - try: - loop = asyncio.get_event_loop() - await loop.run_in_executor(None, lambda: rag.insert(request.text)) - return Response(status="success", message="Text inserted successfully") - except Exception as e: - raise HTTPException(status_code=500, detail=str(e)) - - -@app.post("/insert_file", response_model=Response) -async def insert_file(file: UploadFile = File(...)): - try: - file_content = await file.read() - # Read file content - try: - content = file_content.decode("utf-8") - except UnicodeDecodeError: - # If UTF-8 decoding fails, try other encodings - content = file_content.decode("gbk") - # Insert file content - loop = asyncio.get_event_loop() - await loop.run_in_executor(None, lambda: rag.insert(content)) - - return Response( - status="success", - message=f"File content from {file.filename} inserted successfully", - ) - except Exception as e: - raise HTTPException(status_code=500, detail=str(e)) - - -@app.get("/health") -async def health_check(): - return {"status": "healthy"} - - -if __name__ == "__main__": - import uvicorn - - uvicorn.run(app, host="0.0.0.0", port=8020) - -# Usage example -# To run the server, use the following command in your terminal: -# python lightrag_api_openai_compatible_demo.py - -# Example requests: -# 1. Query: -# curl -X POST "http://127.0.0.1:8020/query" -H "Content-Type: application/json" -d '{"query": "your query here", "mode": "hybrid"}' - -# 2. Insert text: -# curl -X POST "http://127.0.0.1:8020/insert" -H "Content-Type: application/json" -d '{"text": "your text here"}' - -# 3. Insert file: -# curl -X POST "http://127.0.0.1:8020/insert_file" -H "Content-Type: multipart/form-data" -F "file=@path/to/your/file.txt" - -# 4. 
Health check: -# curl -X GET "http://127.0.0.1:8020/health" diff --git a/examples/lightrag_api_oracle_demo.py b/examples/lightrag_api_oracle_demo.py deleted file mode 100644 index 3a82f47922..0000000000 --- a/examples/lightrag_api_oracle_demo.py +++ /dev/null @@ -1,267 +0,0 @@ -from fastapi import FastAPI, HTTPException, File, UploadFile -from fastapi import Query -from contextlib import asynccontextmanager -from pydantic import BaseModel -from typing import Optional, Any - -import sys -import os - - -from pathlib import Path - -import asyncio -import nest_asyncio -from lightrag import LightRAG, QueryParam -from lightrag.llm.openai import openai_complete_if_cache, openai_embed -from lightrag.utils import EmbeddingFunc -import numpy as np -from lightrag.kg.shared_storage import initialize_pipeline_status - - -print(os.getcwd()) -script_directory = Path(__file__).resolve().parent.parent -sys.path.append(os.path.abspath(script_directory)) - - -# Apply nest_asyncio to solve event loop issues -nest_asyncio.apply() - -DEFAULT_RAG_DIR = "index_default" - - -# We use OpenAI compatible API to call LLM on Oracle Cloud -# More docs here https://github.com/jin38324/OCI_GenAI_access_gateway -BASE_URL = "http://xxx.xxx.xxx.xxx:8088/v1/" -APIKEY = "ocigenerativeai" - -# Configure working directory -WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}") -print(f"WORKING_DIR: {WORKING_DIR}") -LLM_MODEL = os.environ.get("LLM_MODEL", "cohere.command-r-plus-08-2024") -print(f"LLM_MODEL: {LLM_MODEL}") -EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "cohere.embed-multilingual-v3.0") -print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}") -EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 512)) -print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}") - -if not os.path.exists(WORKING_DIR): - os.mkdir(WORKING_DIR) - -os.environ["ORACLE_USER"] = "" -os.environ["ORACLE_PASSWORD"] = "" -os.environ["ORACLE_DSN"] = "" -os.environ["ORACLE_CONFIG_DIR"] = "path_to_config_dir" -os.environ["ORACLE_WALLET_LOCATION"] = "path_to_wallet_location" -os.environ["ORACLE_WALLET_PASSWORD"] = "wallet_password" -os.environ["ORACLE_WORKSPACE"] = "company" - - -async def llm_model_func( - prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs -) -> str: - return await openai_complete_if_cache( - LLM_MODEL, - prompt, - system_prompt=system_prompt, - history_messages=history_messages, - api_key=APIKEY, - base_url=BASE_URL, - **kwargs, - ) - - -async def embedding_func(texts: list[str]) -> np.ndarray: - return await openai_embed( - texts, - model=EMBEDDING_MODEL, - api_key=APIKEY, - base_url=BASE_URL, - ) - - -async def get_embedding_dim(): - test_text = ["This is a test sentence."] - embedding = await embedding_func(test_text) - embedding_dim = embedding.shape[1] - return embedding_dim - - -async def init(): - # Detect embedding dimension - embedding_dimension = await get_embedding_dim() - print(f"Detected embedding dimension: {embedding_dimension}") - # Create Oracle DB connection - # The `config` parameter is the connection configuration of Oracle DB - # More docs here https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html - # We storage data in unified tables, so we need to set a `workspace` parameter to specify which docs we want to store and query - # Below is an example of how to connect to Oracle Autonomous Database on Oracle Cloud - - # Initialize LightRAG - # We use Oracle DB as the KV/vector/graph storage - rag = LightRAG( - 
enable_llm_cache=False, - working_dir=WORKING_DIR, - chunk_token_size=512, - llm_model_func=llm_model_func, - embedding_func=EmbeddingFunc( - embedding_dim=embedding_dimension, - max_token_size=512, - func=embedding_func, - ), - graph_storage="OracleGraphStorage", - kv_storage="OracleKVStorage", - vector_storage="OracleVectorDBStorage", - ) - - await rag.initialize_storages() - await initialize_pipeline_status() - - return rag - - -# Extract and Insert into LightRAG storage -# with open("./dickens/book.txt", "r", encoding="utf-8") as f: -# await rag.ainsert(f.read()) - -# # Perform search in different modes -# modes = ["naive", "local", "global", "hybrid"] -# for mode in modes: -# print("="*20, mode, "="*20) -# print(await rag.aquery("这篇文档是关于什么内容的?", param=QueryParam(mode=mode))) -# print("-"*100, "\n") - -# Data models - - -class QueryRequest(BaseModel): - query: str - mode: str = "hybrid" - only_need_context: bool = False - only_need_prompt: bool = False - - -class DataRequest(BaseModel): - limit: int = 100 - - -class InsertRequest(BaseModel): - text: str - - -class Response(BaseModel): - status: str - data: Optional[Any] = None - message: Optional[str] = None - - -# API routes - -rag = None - - -@asynccontextmanager -async def lifespan(app: FastAPI): - global rag - rag = await init() - print("done!") - yield - - -app = FastAPI( - title="LightRAG API", description="API for RAG operations", lifespan=lifespan -) - - -@app.post("/query", response_model=Response) -async def query_endpoint(request: QueryRequest): - # try: - # loop = asyncio.get_event_loop() - if request.mode == "naive": - top_k = 3 - else: - top_k = 60 - result = await rag.aquery( - request.query, - param=QueryParam( - mode=request.mode, - only_need_context=request.only_need_context, - only_need_prompt=request.only_need_prompt, - top_k=top_k, - ), - ) - return Response(status="success", data=result) - # except Exception as e: - # raise HTTPException(status_code=500, detail=str(e)) - - -@app.get("/data", response_model=Response) -async def query_all_nodes(type: str = Query("nodes"), limit: int = Query(100)): - if type == "nodes": - result = await rag.chunk_entity_relation_graph.get_all_nodes(limit=limit) - elif type == "edges": - result = await rag.chunk_entity_relation_graph.get_all_edges(limit=limit) - elif type == "statistics": - result = await rag.chunk_entity_relation_graph.get_statistics() - return Response(status="success", data=result) - - -@app.post("/insert", response_model=Response) -async def insert_endpoint(request: InsertRequest): - try: - loop = asyncio.get_event_loop() - await loop.run_in_executor(None, lambda: rag.insert(request.text)) - return Response(status="success", message="Text inserted successfully") - except Exception as e: - raise HTTPException(status_code=500, detail=str(e)) - - -@app.post("/insert_file", response_model=Response) -async def insert_file(file: UploadFile = File(...)): - try: - file_content = await file.read() - # Read file content - try: - content = file_content.decode("utf-8") - except UnicodeDecodeError: - # If UTF-8 decoding fails, try other encodings - content = file_content.decode("gbk") - # Insert file content - loop = asyncio.get_event_loop() - await loop.run_in_executor(None, lambda: rag.insert(content)) - - return Response( - status="success", - message=f"File content from {file.filename} inserted successfully", - ) - except Exception as e: - raise HTTPException(status_code=500, detail=str(e)) - - -@app.get("/health") -async def health_check(): - return {"status": "healthy"} 
- - -if __name__ == "__main__": - import uvicorn - - uvicorn.run(app, host="127.0.0.1", port=8020) - -# Usage example -# To run the server, use the following command in your terminal: -# python lightrag_api_openai_compatible_demo.py - -# Example requests: -# 1. Query: -# curl -X POST "http://127.0.0.1:8020/query" -H "Content-Type: application/json" -d '{"query": "your query here", "mode": "hybrid"}' - -# 2. Insert text: -# curl -X POST "http://127.0.0.1:8020/insert" -H "Content-Type: application/json" -d '{"text": "your text here"}' - -# 3. Insert file: -# curl -X POST "http://127.0.0.1:8020/insert_file" -H "Content-Type: multipart/form-data" -F "file=@path/to/your/file.txt" - - -# 4. Health check: -# curl -X GET "http://127.0.0.1:8020/health" diff --git a/examples/lightrag_gemini_track_token_demo.py b/examples/lightrag_gemini_track_token_demo.py new file mode 100644 index 0000000000..a72fc717a1 --- /dev/null +++ b/examples/lightrag_gemini_track_token_demo.py @@ -0,0 +1,151 @@ +# pip install -q -U google-genai to use gemini as a client + +import os +import asyncio +import numpy as np +import nest_asyncio +from google import genai +from google.genai import types +from dotenv import load_dotenv +from lightrag.utils import EmbeddingFunc +from lightrag import LightRAG, QueryParam +from lightrag.kg.shared_storage import initialize_pipeline_status +from lightrag.llm.siliconcloud import siliconcloud_embedding +from lightrag.utils import setup_logger +from lightrag.utils import TokenTracker + +setup_logger("lightrag", level="DEBUG") + +# Apply nest_asyncio to solve event loop issues +nest_asyncio.apply() + +load_dotenv() +gemini_api_key = os.getenv("GEMINI_API_KEY") +siliconflow_api_key = os.getenv("SILICONFLOW_API_KEY") + +WORKING_DIR = "./dickens" + +if not os.path.exists(WORKING_DIR): + os.mkdir(WORKING_DIR) + +token_tracker = TokenTracker() + + +async def llm_model_func( + prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs +) -> str: + # 1. Initialize the GenAI Client with your Gemini API Key + client = genai.Client(api_key=gemini_api_key) + + # 2. Combine prompts: system prompt, history, and user prompt + if history_messages is None: + history_messages = [] + + combined_prompt = "" + if system_prompt: + combined_prompt += f"{system_prompt}\n" + + for msg in history_messages: + # Each msg is expected to be a dict: {"role": "...", "content": "..."} + combined_prompt += f"{msg['role']}: {msg['content']}\n" + + # Finally, add the new user prompt + combined_prompt += f"user: {prompt}" + + # 3. Call the Gemini model + response = client.models.generate_content( + model="gemini-2.0-flash", + contents=[combined_prompt], + config=types.GenerateContentConfig( + max_output_tokens=5000, temperature=0, top_k=10 + ), + ) + + # 4. Get token counts with null safety + usage = getattr(response, "usage_metadata", None) + prompt_tokens = getattr(usage, "prompt_token_count", 0) or 0 + completion_tokens = getattr(usage, "candidates_token_count", 0) or 0 + total_tokens = getattr(usage, "total_token_count", 0) or ( + prompt_tokens + completion_tokens + ) + + token_counts = { + "prompt_tokens": prompt_tokens, + "completion_tokens": completion_tokens, + "total_tokens": total_tokens, + } + + token_tracker.add_usage(token_counts) + + # 5. 
Return the response text + return response.text + + +async def embedding_func(texts: list[str]) -> np.ndarray: + return await siliconcloud_embedding( + texts, + model="BAAI/bge-m3", + api_key=siliconflow_api_key, + max_token_size=512, + ) + + +async def initialize_rag(): + rag = LightRAG( + working_dir=WORKING_DIR, + entity_extract_max_gleaning=1, + enable_llm_cache=True, + enable_llm_cache_for_entity_extract=True, + embedding_cache_config={"enabled": True, "similarity_threshold": 0.90}, + llm_model_func=llm_model_func, + embedding_func=EmbeddingFunc( + embedding_dim=1024, + max_token_size=8192, + func=embedding_func, + ), + ) + + await rag.initialize_storages() + await initialize_pipeline_status() + + return rag + + +def main(): + # Initialize RAG instance + rag = asyncio.run(initialize_rag()) + + with open("./book.txt", "r", encoding="utf-8") as f: + rag.insert(f.read()) + + # Context Manager Method + with token_tracker: + print( + rag.query( + "What are the top themes in this story?", param=QueryParam(mode="naive") + ) + ) + + print( + rag.query( + "What are the top themes in this story?", param=QueryParam(mode="local") + ) + ) + + print( + rag.query( + "What are the top themes in this story?", + param=QueryParam(mode="global"), + ) + ) + + print( + rag.query( + "What are the top themes in this story?", + param=QueryParam(mode="hybrid"), + ) + ) + + +if __name__ == "__main__": + main() diff --git a/examples/lightrag_multi_model_all_modes_demo.py b/examples/lightrag_multi_model_all_modes_demo.py new file mode 100644 index 0000000000..16e18782ba --- /dev/null +++ b/examples/lightrag_multi_model_all_modes_demo.py @@ -0,0 +1,88 @@ +import os +import asyncio +from lightrag import LightRAG, QueryParam +from lightrag.llm.openai import gpt_4o_mini_complete, gpt_4o_complete, openai_embed +from lightrag.kg.shared_storage import initialize_pipeline_status + +WORKING_DIR = "./lightrag_demo" + +if not os.path.exists(WORKING_DIR): + os.mkdir(WORKING_DIR) + + +async def initialize_rag(): + rag = LightRAG( + working_dir=WORKING_DIR, + embedding_func=openai_embed, + llm_model_func=gpt_4o_mini_complete, # Default model for queries + ) + + await rag.initialize_storages() + await initialize_pipeline_status() + + return rag + + +def main(): + # Initialize RAG instance + rag = asyncio.run(initialize_rag()) + + # Load the data + with open("./book.txt", "r", encoding="utf-8") as f: + rag.insert(f.read()) + + # Query with naive mode (default model) + print("--- NAIVE mode ---") + print( + rag.query( + "What are the main themes in this story?", param=QueryParam(mode="naive") + ) + ) + + # Query with local mode (default model) + print("\n--- LOCAL mode ---") + print( + rag.query( + "What are the main themes in this story?", param=QueryParam(mode="local") + ) + ) + + # Query with global mode (default model) + print("\n--- GLOBAL mode ---") + print( + rag.query( + "What are the main themes in this story?", param=QueryParam(mode="global") + ) + ) + + # Query with hybrid mode (default model) + print("\n--- HYBRID mode ---") + print( + rag.query( + "What are the main themes in this story?", param=QueryParam(mode="hybrid") + ) + ) + + # Query with mix mode (default model) + print("\n--- MIX mode ---") + print( + rag.query( + "What are the main themes in this story?", param=QueryParam(mode="mix") + ) + ) + + # Query with a custom model (gpt-4o) for a more complex question + print("\n--- Using custom model for complex analysis ---") + print( + rag.query( + "How does the character development reflect Victorian-era 
attitudes?", + param=QueryParam( + mode="global", + model_func=gpt_4o_complete, # Override default model with more capable one + ), + ) + ) + + +if __name__ == "__main__": + main() diff --git a/examples/lightrag_ollama_gremlin_demo.py b/examples/lightrag_ollama_gremlin_demo.py index 893b5606c1..7ae6281086 100644 --- a/examples/lightrag_ollama_gremlin_demo.py +++ b/examples/lightrag_ollama_gremlin_demo.py @@ -1,3 +1,7 @@ +############################################## +# Gremlin storage implementation is deprecated +############################################## + import asyncio import inspect import os diff --git a/examples/lightrag_oracle_demo.py b/examples/lightrag_oracle_demo.py deleted file mode 100644 index 6663f6a134..0000000000 --- a/examples/lightrag_oracle_demo.py +++ /dev/null @@ -1,141 +0,0 @@ -import sys -import os -from pathlib import Path -import asyncio -from lightrag import LightRAG, QueryParam -from lightrag.llm.openai import openai_complete_if_cache, openai_embed -from lightrag.utils import EmbeddingFunc -import numpy as np -from lightrag.kg.shared_storage import initialize_pipeline_status - -print(os.getcwd()) -script_directory = Path(__file__).resolve().parent.parent -sys.path.append(os.path.abspath(script_directory)) - -WORKING_DIR = "./dickens" - -# We use OpenAI compatible API to call LLM on Oracle Cloud -# More docs here https://github.com/jin38324/OCI_GenAI_access_gateway -BASE_URL = "http://xxx.xxx.xxx.xxx:8088/v1/" -APIKEY = "ocigenerativeai" -CHATMODEL = "cohere.command-r-plus" -EMBEDMODEL = "cohere.embed-multilingual-v3.0" -CHUNK_TOKEN_SIZE = 1024 -MAX_TOKENS = 4000 - -if not os.path.exists(WORKING_DIR): - os.mkdir(WORKING_DIR) - -os.environ["ORACLE_USER"] = "username" -os.environ["ORACLE_PASSWORD"] = "xxxxxxxxx" -os.environ["ORACLE_DSN"] = "xxxxxxx_medium" -os.environ["ORACLE_CONFIG_DIR"] = "path_to_config_dir" -os.environ["ORACLE_WALLET_LOCATION"] = "path_to_wallet_location" -os.environ["ORACLE_WALLET_PASSWORD"] = "wallet_password" -os.environ["ORACLE_WORKSPACE"] = "company" - - -async def llm_model_func( - prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs -) -> str: - return await openai_complete_if_cache( - CHATMODEL, - prompt, - system_prompt=system_prompt, - history_messages=history_messages, - api_key=APIKEY, - base_url=BASE_URL, - **kwargs, - ) - - -async def embedding_func(texts: list[str]) -> np.ndarray: - return await openai_embed( - texts, - model=EMBEDMODEL, - api_key=APIKEY, - base_url=BASE_URL, - ) - - -async def get_embedding_dim(): - test_text = ["This is a test sentence."] - embedding = await embedding_func(test_text) - embedding_dim = embedding.shape[1] - return embedding_dim - - -async def initialize_rag(): - # Detect embedding dimension - embedding_dimension = await get_embedding_dim() - print(f"Detected embedding dimension: {embedding_dimension}") - - # Initialize LightRAG - # We use Oracle DB as the KV/vector/graph storage - # You can add `addon_params={"example_number": 1, "language": "Simplfied Chinese"}` to control the prompt - rag = LightRAG( - # log_level="DEBUG", - working_dir=WORKING_DIR, - entity_extract_max_gleaning=1, - enable_llm_cache=True, - enable_llm_cache_for_entity_extract=True, - embedding_cache_config=None, # {"enabled": True,"similarity_threshold": 0.90}, - chunk_token_size=CHUNK_TOKEN_SIZE, - llm_model_max_token_size=MAX_TOKENS, - llm_model_func=llm_model_func, - embedding_func=EmbeddingFunc( - embedding_dim=embedding_dimension, - max_token_size=500, - func=embedding_func, - ), - 
graph_storage="OracleGraphStorage", - kv_storage="OracleKVStorage", - vector_storage="OracleVectorDBStorage", - addon_params={ - "example_number": 1, - "language": "Simplfied Chinese", - "entity_types": ["organization", "person", "geo", "event"], - "insert_batch_size": 2, - }, - ) - await rag.initialize_storages() - await initialize_pipeline_status() - - return rag - - -async def main(): - try: - # Initialize RAG instance - rag = await initialize_rag() - - # Extract and Insert into LightRAG storage - with open(WORKING_DIR + "/docs.txt", "r", encoding="utf-8") as f: - all_text = f.read() - texts = [x for x in all_text.split("\n") if x] - - # New mode use pipeline - await rag.apipeline_enqueue_documents(texts) - await rag.apipeline_process_enqueue_documents() - - # Old method use ainsert - # await rag.ainsert(texts) - - # Perform search in different modes - modes = ["naive", "local", "global", "hybrid"] - for mode in modes: - print("=" * 20, mode, "=" * 20) - print( - await rag.aquery( - "What are the top themes in this story?", - param=QueryParam(mode=mode), - ) - ) - print("-" * 100, "\n") - - except Exception as e: - print(f"An error occurred: {e}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/examples/lightrag_siliconcloud_track_token_demo.py b/examples/lightrag_siliconcloud_track_token_demo.py new file mode 100644 index 0000000000..d82a30bcb2 --- /dev/null +++ b/examples/lightrag_siliconcloud_track_token_demo.py @@ -0,0 +1,110 @@ +import os +import asyncio +from lightrag import LightRAG, QueryParam +from lightrag.llm.openai import openai_complete_if_cache +from lightrag.llm.siliconcloud import siliconcloud_embedding +from lightrag.utils import EmbeddingFunc +from lightrag.utils import TokenTracker +import numpy as np +from lightrag.kg.shared_storage import initialize_pipeline_status +from dotenv import load_dotenv + +load_dotenv() + +token_tracker = TokenTracker() +WORKING_DIR = "./dickens" + +if not os.path.exists(WORKING_DIR): + os.mkdir(WORKING_DIR) + + +async def llm_model_func( + prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs +) -> str: + return await openai_complete_if_cache( + "Qwen/Qwen2.5-7B-Instruct", + prompt, + system_prompt=system_prompt, + history_messages=history_messages, + api_key=os.getenv("SILICONFLOW_API_KEY"), + base_url="https://api.siliconflow.cn/v1/", + token_tracker=token_tracker, + **kwargs, + ) + + +async def embedding_func(texts: list[str]) -> np.ndarray: + return await siliconcloud_embedding( + texts, + model="BAAI/bge-m3", + api_key=os.getenv("SILICONFLOW_API_KEY"), + max_token_size=512, + ) + + +# function test +async def test_funcs(): + # Context Manager Method + with token_tracker: + result = await llm_model_func("How are you?") + print("llm_model_func: ", result) + + +asyncio.run(test_funcs()) + + +async def initialize_rag(): + rag = LightRAG( + working_dir=WORKING_DIR, + llm_model_func=llm_model_func, + embedding_func=EmbeddingFunc( + embedding_dim=1024, max_token_size=512, func=embedding_func + ), + ) + + await rag.initialize_storages() + await initialize_pipeline_status() + + return rag + + +def main(): + # Initialize RAG instance + rag = asyncio.run(initialize_rag()) + + # Reset tracker before processing queries + token_tracker.reset() + + with open("./book.txt", "r", encoding="utf-8") as f: + rag.insert(f.read()) + + print( + rag.query( + "What are the top themes in this story?", param=QueryParam(mode="naive") + ) + ) + + print( + rag.query( + "What are the top themes in this story?", 
param=QueryParam(mode="local") + ) + ) + + print( + rag.query( + "What are the top themes in this story?", param=QueryParam(mode="global") + ) + ) + + print( + rag.query( + "What are the top themes in this story?", param=QueryParam(mode="hybrid") + ) + ) + + # Display final token usage after main query + print("Token usage:", token_tracker.get_usage()) + + +if __name__ == "__main__": + main() diff --git a/examples/lightrag_tidb_demo.py b/examples/lightrag_tidb_demo.py index 5269556080..50eac2ca24 100644 --- a/examples/lightrag_tidb_demo.py +++ b/examples/lightrag_tidb_demo.py @@ -1,3 +1,7 @@ +########################################### +# TiDB storage implementation is deprecated +########################################### + import asyncio import os diff --git a/lightrag/__init__.py b/lightrag/__init__.py index 89475dca3e..2664f6e413 100644 --- a/lightrag/__init__.py +++ b/lightrag/__init__.py @@ -1,5 +1,5 @@ from .lightrag import LightRAG as LightRAG, QueryParam as QueryParam -__version__ = "1.2.6" +__version__ = "1.3.2" __author__ = "Zirui Guo" __url__ = "https://github.com/HKUDS/LightRAG" diff --git a/lightrag/api/README-zh.md b/lightrag/api/README-zh.md new file mode 100644 index 0000000000..f6db7c88c1 --- /dev/null +++ b/lightrag/api/README-zh.md @@ -0,0 +1,613 @@ +# LightRAG 服务器和 Web 界面 + +LightRAG 服务器旨在提供 Web 界面和 API 支持。Web 界面便于文档索引、知识图谱探索和简单的 RAG 查询界面。LightRAG 服务器还提供了与 Ollama 兼容的接口,旨在将 LightRAG 模拟为 Ollama 聊天模型。这使得 AI 聊天机器人(如 Open WebUI)可以轻松访问 LightRAG。 + +![image-20250323122538997](./README.assets/image-20250323122538997.png) + +![image-20250323122754387](./README.assets/image-20250323122754387.png) + +![image-20250323123011220](./README.assets/image-20250323123011220.png) + +## 入门指南 + +### 安装 + +* 从 PyPI 安装 + +```bash +pip install "lightrag-hku[api]" +``` + +* 从源代码安装 + +```bash +# 克隆仓库 +git clone https://github.com/HKUDS/lightrag.git + +# 切换到仓库目录 +cd lightrag + +# 如有必要,创建 Python 虚拟环境 +# 以可编辑模式安装并支持 API +pip install -e ".[api]" +``` + +### 启动 LightRAG 服务器前的准备 + +LightRAG 需要同时集成 LLM(大型语言模型)和嵌入模型以有效执行文档索引和查询操作。在首次部署 LightRAG 服务器之前,必须配置 LLM 和嵌入模型的设置。LightRAG 支持绑定到各种 LLM/嵌入后端: + +* ollama +* lollms +* openai 或 openai 兼容 +* azure_openai + +建议使用环境变量来配置 LightRAG 服务器。项目根目录中有一个名为 `env.example` 的示例环境变量文件。请将此文件复制到启动目录并重命名为 `.env`。之后,您可以在 `.env` 文件中修改与 LLM 和嵌入模型相关的参数。需要注意的是,LightRAG 服务器每次启动时都会将 `.env` 中的环境变量加载到系统环境变量中。由于 LightRAG 服务器会优先使用系统环境变量中的设置,如果您在通过命令行启动 LightRAG 服务器后修改了 `.env` 文件,则需要执行 `source .env` 使新设置生效。 + +以下是 LLM 和嵌入模型的一些常见设置示例: + +* OpenAI LLM + Ollama 嵌入 + +``` +LLM_BINDING=openai +LLM_MODEL=gpt-4o +LLM_BINDING_HOST=https://api.openai.com/v1 +LLM_BINDING_API_KEY=your_api_key +### 发送给 LLM 的最大 token 数(小于模型上下文大小) +MAX_TOKENS=32768 + +EMBEDDING_BINDING=ollama +EMBEDDING_BINDING_HOST=http://localhost:11434 +EMBEDDING_MODEL=bge-m3:latest +EMBEDDING_DIM=1024 +# EMBEDDING_BINDING_API_KEY=your_api_key +``` + +* Ollama LLM + Ollama 嵌入 + +``` +LLM_BINDING=ollama +LLM_MODEL=mistral-nemo:latest +LLM_BINDING_HOST=http://localhost:11434 +# LLM_BINDING_API_KEY=your_api_key +### 发送给 LLM 的最大 token 数(基于您的 Ollama 服务器容量) +MAX_TOKENS=8192 + +EMBEDDING_BINDING=ollama +EMBEDDING_BINDING_HOST=http://localhost:11434 +EMBEDDING_MODEL=bge-m3:latest +EMBEDDING_DIM=1024 +# EMBEDDING_BINDING_API_KEY=your_api_key +``` + +### 启动 LightRAG 服务器 + +LightRAG 服务器支持两种运行模式: +* 简单高效的 Uvicorn 模式 + +``` +lightrag-server +``` +* 多进程 Gunicorn + Uvicorn 模式(生产模式,不支持 Windows 环境) + +``` +lightrag-gunicorn --workers 4 +``` +`.env` 文件必须放在启动目录中。启动时,LightRAG 服务器将创建一个文档目录(默认为 `./inputs`)和一个数据目录(默认为 `./rag_storage`)。这允许您从不同目录启动多个 
LightRAG 服务器实例，每个实例配置为监听不同的网络端口。
+
+以下是一些常用的启动参数：
+
+- `--host`：服务器监听地址（默认：0.0.0.0）
+- `--port`：服务器监听端口（默认：9621）
+- `--timeout`：LLM 请求超时时间（默认：150 秒）
+- `--log-level`：日志级别（默认：INFO）
+- `--input-dir`：指定要扫描文档的目录（默认：./inputs）
+
+> **要求将.env文件置于启动目录中是经过特意设计的**。这样做的目的是支持用户同时启动多个LightRAG实例，并为不同实例配置不同的.env文件。
+
+> **修改.env文件后，您需要重新打开终端以使新设置生效**。这是因为每次启动时，LightRAG Server会将.env文件中的环境变量加载至系统环境变量，且系统环境变量的设置具有更高优先级。
+
+### 启动时自动扫描
+
+当使用 `--auto-scan-at-startup` 参数启动任何服务器时，系统将自动：
+
+1. 扫描输入目录中的新文件
+2. 为尚未在数据库中的新文档建立索引
+3. 使所有内容立即可用于 RAG 查询
+
+> `--input-dir` 参数指定要扫描的输入目录。您可以从 Web UI 触发输入目录扫描。
+
+### Gunicorn + Uvicorn 的多工作进程
+
+LightRAG 服务器可以在 `Gunicorn + Uvicorn` 预加载模式下运行。Gunicorn 的多工作进程（多进程）功能可以防止文档索引任务阻塞 RAG 查询。使用 CPU 密集型文档提取工具（如 docling）在纯 Uvicorn 模式下可能会导致整个系统被阻塞。
+
+虽然 LightRAG 服务器使用一个工作进程来处理文档索引流程，但通过 Uvicorn 的异步任务支持，可以并行处理多个文件。文档索引速度的瓶颈主要在于 LLM。如果您的 LLM 支持高并发，您可以通过增加 LLM 的并发级别来加速文档索引。以下是几个与并发处理相关的环境变量及其默认值：
+
+```
+### 工作进程数，数字不大于 (2 x 核心数) + 1
+WORKERS=2
+### 一批中并行处理的文件数
+MAX_PARALLEL_INSERT=2
+### LLM 的最大并发请求数
+MAX_ASYNC=4
+```
+
+### 将 LightRAG 安装为 Linux 服务
+
+从示例文件 `lightrag.service.example` 创建您的服务文件 `lightrag.service`。修改服务文件中的 WorkingDirectory 和 ExecStart：
+
+```text
+Description=LightRAG Ollama Service
+WorkingDirectory=
+ExecStart=/lightrag/api/lightrag-api
+```
+
+修改您的服务启动脚本：`lightrag-api`。根据需要更改 python 虚拟环境激活命令：
+
+```shell
+#!/bin/bash
+
+# 您的 python 虚拟环境激活命令
+source /home/netman/lightrag-xyj/venv/bin/activate
+# 启动 lightrag api 服务器
+lightrag-server
+```
+
+安装 LightRAG 服务。如果您的系统是 Ubuntu，以下命令将生效：
+
+```shell
+sudo cp lightrag.service /etc/systemd/system/
+sudo systemctl daemon-reload
+sudo systemctl start lightrag.service
+sudo systemctl status lightrag.service
+sudo systemctl enable lightrag.service
+```
+
+## Ollama 模拟
+
+我们为 LightRAG 提供了 Ollama 兼容接口，旨在将 LightRAG 模拟为 Ollama 聊天模型。这使得支持 Ollama 的 AI 聊天前端（如 Open WebUI）可以轻松访问 LightRAG。
+
+### 将 Open WebUI 连接到 LightRAG
+
+启动 lightrag-server 后，您可以在 Open WebUI 管理面板中添加 Ollama 类型的连接。然后，一个名为 `lightrag:latest` 的模型将出现在 Open WebUI 的模型管理界面中。用户随后可以通过聊天界面向 LightRAG 发送查询。对于这种用例，最好将 LightRAG 安装为服务。
+
+Open WebUI 使用 LLM 来执行会话标题和会话关键词生成任务。因此，Ollama 聊天补全 API 会检测并将 OpenWebUI 会话相关请求直接转发给底层 LLM。Open WebUI 的截图：
+
+![image-20250323194750379](./README.assets/image-20250323194750379.png)
+
+### 在聊天中选择查询模式
+
+如果您从 LightRAG 的 Ollama 接口发送消息（查询），默认查询模式是 `hybrid`。您可以通过发送带有查询前缀的消息来选择查询模式。
+
+查询字符串中的查询前缀可以决定使用哪种 LightRAG 查询模式来生成响应。支持的前缀包括：
+
+```
+/local
+/global
+/hybrid
+/naive
+/mix
+
+/bypass
+/context
+/localcontext
+/globalcontext
+/hybridcontext
+/naivecontext
+/mixcontext
+```
+
+例如，聊天消息 "/mix 唐僧有几个徒弟" 将触发 LightRAG 的混合模式查询。没有查询前缀的聊天消息默认会触发混合模式查询。
+
+"/bypass" 不是 LightRAG 查询模式，它会告诉 API 服务器将查询连同聊天历史直接传递给底层 LLM。因此用户可以使用 LLM 基于聊天历史回答问题。如果您使用 Open WebUI 作为前端，您可以直接切换到普通 LLM 模型，而不是使用 /bypass 前缀。
+
+"/context" 也不是 LightRAG 查询模式，它会告诉 LightRAG 只返回为 LLM 准备的上下文信息。您可以检查上下文是否符合您的需求，或者自行处理上下文。
+
+## API 密钥和认证
+
+默认情况下，LightRAG 服务器可以在没有任何认证的情况下访问。我们可以使用 API 密钥或账户凭证配置服务器以确保其安全。
+
+* API 密钥
+
+```
+LIGHTRAG_API_KEY=your-secure-api-key-here
+WHITELIST_PATHS=/health,/api/*
+```
+
+> 健康检查和 Ollama 模拟端点默认不进行 API 密钥检查。
+
+* 账户凭证（Web 界面需要登录后才能访问）
+
+LightRAG API 服务器使用基于 HS256 算法的 JWT 认证。要启用安全访问控制，需要以下环境变量：
+
+```bash
+# JWT 认证
+AUTH_ACCOUNTS='admin:admin123,user1:pass456'
+TOKEN_SECRET='your-key'
+TOKEN_EXPIRE_HOURS=4
+```
+
+> 目前仅支持配置一个管理员账户和密码。尚未开发和实现完整的账户系统。
+
+如果未配置账户凭证，Web 界面将以访客身份访问系统。因此，即使仅配置了 API 密钥，所有 API 仍然可以通过访客账户访问，这仍然不安全。因此，要保护 API，需要同时配置这两种认证方法。
+
+## Azure OpenAI 后端配置
+
+可以使用以下 Azure CLI 命令创建 Azure OpenAI API（您需要先从 [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) 安装 Azure CLI）：
+
+```bash
+# 根据需要更改资源组名称、位置和 OpenAI 资源名称
+RESOURCE_GROUP_NAME=LightRAG
+LOCATION=swedencentral
+RESOURCE_NAME=LightRAG-OpenAI
+
+az login
+az group create --name $RESOURCE_GROUP_NAME --location $LOCATION
+az cognitiveservices account create --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --kind OpenAI --sku S0 --location swedencentral
+az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name gpt-4o --model-name gpt-4o --model-version "2024-08-06" --sku-capacity 100 --sku-name "Standard"
+az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name text-embedding-3-large --model-name text-embedding-3-large --model-version "1" --sku-capacity 80 --sku-name "Standard"
+az cognitiveservices account show --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --query "properties.endpoint"
+az cognitiveservices account keys list --name $RESOURCE_NAME -g $RESOURCE_GROUP_NAME
+```
+
+最后一个命令的输出将提供 OpenAI API 的端点和密钥。您可以使用这些值在 `.env` 文件中设置环境变量。
+
+```
+# .env 中的 Azure OpenAI 配置
+LLM_BINDING=azure_openai
+LLM_BINDING_HOST=your-azure-endpoint
+LLM_MODEL=your-model-deployment-name
+LLM_BINDING_API_KEY=your-azure-api-key
+### API Version 可选，默认为最新版本
+AZURE_OPENAI_API_VERSION=2024-08-01-preview
+
+### 如果使用 Azure OpenAI 进行嵌入
+EMBEDDING_BINDING=azure_openai
+EMBEDDING_MODEL=your-embedding-deployment-name
+```
+
+## LightRAG 服务器详细配置
+
+API 服务器可以通过三种方式配置（优先级从高到低）：
+
+* 命令行参数
+* 环境变量或 .env 文件
+* Config.ini（仅用于存储配置）
+
+大多数配置都有默认设置，详细信息请查看示例文件：`.env.example`。数据存储配置也可以通过 config.ini 设置。为方便起见，提供了示例文件 `config.ini.example`。
+
+### 支持的 LLM 和嵌入后端
+
+LightRAG 支持绑定到各种 LLM/嵌入后端：
+
+* ollama
+* lollms
+* openai 和 openai 兼容
+* azure_openai
+
+使用环境变量 `LLM_BINDING` 或 CLI 参数 `--llm-binding` 选择 LLM 后端类型。使用环境变量 `EMBEDDING_BINDING` 或 CLI 参数 `--embedding-binding` 选择嵌入后端类型。
+
+### 实体提取配置
+
+* ENABLE_LLM_CACHE_FOR_EXTRACT：为实体提取启用 LLM 缓存（默认：true）
+
+在测试环境中将 `ENABLE_LLM_CACHE_FOR_EXTRACT` 设置为 true 以减少 LLM 调用成本是很常见的做法。
+
+### 支持的存储类型
+
+LightRAG 使用 4 种类型的存储用于不同目的：
+
+* KV_STORAGE：llm 响应缓存、文本块、文档信息
+* VECTOR_STORAGE：实体向量、关系向量、块向量
+* GRAPH_STORAGE：实体关系图
+* DOC_STATUS_STORAGE：文档索引状态
+
+每种存储类型都有几种实现：
+
+* KV_STORAGE 支持的实现名称
+
+```
+JsonKVStorage    JsonFile（默认）
+PGKVStorage      Postgres
+RedisKVStorage   Redis
+MongoKVStorage   MongoDB
+```
+
+* GRAPH_STORAGE 支持的实现名称
+
+```
+NetworkXStorage  NetworkX（默认）
+Neo4JStorage     Neo4J
+PGGraphStorage   Postgres
+AGEStorage       AGE
+```
+
+* VECTOR_STORAGE 支持的实现名称
+
+```
+NanoVectorDBStorage    NanoVector（默认）
+PGVectorStorage        Postgres
+MilvusVectorDBStorage  Milvus
+ChromaVectorDBStorage  Chroma
+FaissVectorDBStorage   Faiss
+QdrantVectorDBStorage  Qdrant
+MongoVectorDBStorage   MongoDB
+```
+
+* DOC_STATUS_STORAGE 支持的实现名称
+
+```
+JsonDocStatusStorage    JsonFile（默认）
+PGDocStatusStorage      Postgres
+MongoDocStatusStorage   MongoDB
+```
+
+### 如何选择存储实现
+
+您可以通过环境变量选择存储实现。在首次启动 API 服务器之前，您可以将以下环境变量设置为特定的存储实现名称：
+
+```
+LIGHTRAG_KV_STORAGE=PGKVStorage
+LIGHTRAG_VECTOR_STORAGE=PGVectorStorage
+LIGHTRAG_GRAPH_STORAGE=PGGraphStorage
+LIGHTRAG_DOC_STATUS_STORAGE=PGDocStatusStorage
+```
+
+在向 LightRAG 添加文档后，您不能更改存储实现选择。目前尚不支持从一个存储实现迁移到另一个存储实现。更多信息请阅读示例 env 文件或 config.ini 文件。
+
+### LightRAG API 服务器命令行选项
+
+| 参数 | 默认值 | 描述 |
+|-----------|---------|-------------|
+| --host | 0.0.0.0 | 服务器主机 |
+| --port | 9621 | 服务器端口 |
+| --working-dir | ./rag_storage | RAG 存储的工作目录 |
+| --input-dir | ./inputs | 包含输入文档的目录 |
+| --max-async | 4 | 最大异步操作数 |
+| --max-tokens | 32768 | 最大 token 大小 |
+| --timeout | 150 | 超时时间（秒）。None 表示无限超时（不推荐） |
+| --log-level | INFO | 日志级别（DEBUG、INFO、WARNING、ERROR、CRITICAL） |
+| --verbose | - | 详细调试输出（True、False） |
+| --key | None | 用于认证的 API 密钥。保护 lightrag 服务器免受未授权访问 |
+| --ssl | False | 启用 HTTPS |
+| --ssl-certfile | None | SSL 证书文件路径（如果启用 --ssl 则必需） |
+| --ssl-keyfile | None | SSL 私钥文件路径（如果启用 --ssl 则必需） |
+| --top-k | 50 | 要检索的 top-k 项目数；在"local"模式下对应实体，在"global"模式下对应关系。 |
+| --cosine-threshold | 0.4 | 节点和关系检索的余弦阈值，与 top-k 一起控制节点和关系的检索。 |
+| --llm-binding | ollama | LLM 绑定类型（lollms、ollama、openai、openai-ollama、azure_openai） |
+| --embedding-binding | ollama | 嵌入绑定类型（lollms、ollama、openai、azure_openai） |
+| --auto-scan-at-startup | - | 扫描输入目录中的新文件并开始索引 |
+
+### .env 文件示例
+
+```bash
+### Server Configuration
+# HOST=0.0.0.0
+PORT=9621
+WORKERS=2
+
+### Settings for document indexing
+ENABLE_LLM_CACHE_FOR_EXTRACT=true
+SUMMARY_LANGUAGE=Chinese
+MAX_PARALLEL_INSERT=2
+
+### LLM Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
+TIMEOUT=200
+TEMPERATURE=0.0
+MAX_ASYNC=4
+MAX_TOKENS=32768
+
+LLM_BINDING=openai
+LLM_MODEL=gpt-4o-mini
+LLM_BINDING_HOST=https://api.openai.com/v1
+LLM_BINDING_API_KEY=your-api-key
+
+### Embedding Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
+EMBEDDING_MODEL=bge-m3:latest
+EMBEDDING_DIM=1024
+EMBEDDING_BINDING=ollama
+EMBEDDING_BINDING_HOST=http://localhost:11434
+
+### For JWT Auth
+# AUTH_ACCOUNTS='admin:admin123,user1:pass456'
+# TOKEN_SECRET=your-key-for-LightRAG-API-Server-xxx
+# TOKEN_EXPIRE_HOURS=48
+
+# LIGHTRAG_API_KEY=your-secure-api-key-here-123
+# WHITELIST_PATHS=/api/*
+# WHITELIST_PATHS=/health,/api/*
+```
+
+### 示例用法
+
+#### 使用 ollama 默认本地服务器作为 llm 和嵌入后端运行 LightRAG 服务器
+
+Ollama 是 llm 和嵌入的默认后端，因此默认情况下您可以不带参数运行 lightrag-server，将使用默认值。确保已安装 ollama 并且正在运行，且默认模型已安装在 ollama 上。
+
+```bash
+# 使用 ollama 运行 lightrag，llm 使用 mistral-nemo:latest，嵌入使用 bge-m3:latest
+lightrag-server
+
+# 使用认证密钥
+lightrag-server --key my-key
+```
+
+#### 使用 lollms 默认本地服务器作为 llm 和嵌入后端运行 LightRAG 服务器
+
+```bash
+# 使用 lollms 运行 lightrag，llm 使用 mistral-nemo:latest，嵌入使用 bge-m3:latest
+# 在 .env 或 config.ini 中配置 LLM_BINDING=lollms 和 EMBEDDING_BINDING=lollms
+lightrag-server
+
+# 使用认证密钥
+lightrag-server --key my-key
+```
+
+#### 使用 openai 服务器作为 llm 和嵌入后端运行 LightRAG 服务器
+
+```bash
+# 使用 openai 运行 lightrag，llm 使用 gpt-4o-mini，嵌入使用 text-embedding-3-small
+# 在 .env 或 config.ini 中配置:
+# LLM_BINDING=openai
+# LLM_MODEL=gpt-4o-mini
+# EMBEDDING_BINDING=openai
+# EMBEDDING_MODEL=text-embedding-3-small
+lightrag-server
+
+# 使用认证密钥
+lightrag-server --key my-key
+```
+
+#### 使用 azure openai 服务器作为 llm 和嵌入后端运行 LightRAG 服务器
+
+```bash
+# 使用 azure_openai 运行 lightrag
+# 在 .env 或 config.ini 中配置:
+# LLM_BINDING=azure_openai
+# LLM_MODEL=your-model
+# EMBEDDING_BINDING=azure_openai
+# EMBEDDING_MODEL=your-embedding-model
+lightrag-server
+
+# 使用认证密钥
+lightrag-server --key my-key
+```
+
+**重要说明：**
+- 对于 LoLLMs：确保指定的模型已安装在您的 LoLLMs 实例中
+- 对于 Ollama：确保指定的模型已安装在您的 Ollama 实例中
+- 对于 OpenAI：确保您已设置 OPENAI_API_KEY 环境变量
+- 对于 Azure OpenAI：按照先决条件部分所述构建和配置您的服务器
+
+要获取任何服务器的帮助，使用 --help 标志：
+```bash
+lightrag-server --help
+```
+
+注意：如果您不需要 API 功能，可以使用以下命令安装不带 API 支持的基本包：
+```bash
+pip install lightrag-hku
+```
+
+## API 端点
+
+所有服务器（LoLLMs、Ollama、OpenAI 和 Azure OpenAI）都为 RAG 功能提供相同的 REST API 端点。当 API 服务器运行时，访问：
+
+- Swagger UI：http://localhost:9621/docs
+- ReDoc：http://localhost:9621/redoc
+
+您可以使用提供的 curl 命令或通过 Swagger UI 界面测试 API 端点。确保：
+
+1. 启动适当的后端服务（LoLLMs、Ollama 或 OpenAI）
+2. 启动 RAG 服务器
+3. 使用文档管理端点上传一些文档
+4. 使用查询端点查询系统
+5. 如果在输入目录中放入新文件，触发文档扫描
+
+### 查询端点
+
+#### POST /query
+使用不同搜索模式查询 RAG 系统。
+
+```bash
+curl -X POST "http://localhost:9621/query" \
+    -H "Content-Type: application/json" \
+    -d '{"query": "您的问题", "mode": "hybrid"}'
+```
+
+#### POST /query/stream
+从 RAG 系统流式获取响应。
+
+```bash
+curl -X POST "http://localhost:9621/query/stream" \
+    -H "Content-Type: application/json" \
+    -d '{"query": "您的问题", "mode": "hybrid"}'
+```
+
+### 文档管理端点
+
+#### POST /documents/text
+直接将文本插入 RAG 系统。
+
+```bash
+curl -X POST "http://localhost:9621/documents/text" \
+    -H "Content-Type: application/json" \
+    -d '{"text": "您的文本内容", "description": "可选描述"}'
+```
+
+#### POST /documents/file
+向 RAG 系统上传单个文件。
+
+```bash
+curl -X POST "http://localhost:9621/documents/file" \
+    -F "file=@/path/to/your/document.txt" \
+    -F "description=可选描述"
+```
+
+#### POST /documents/batch
+一次上传多个文件。
+
+```bash
+curl -X POST "http://localhost:9621/documents/batch" \
+    -F "files=@/path/to/doc1.txt" \
+    -F "files=@/path/to/doc2.txt"
+```
+
+#### POST /documents/scan
+
+触发输入目录中新文件的文档扫描。
+
+```bash
+curl -X POST "http://localhost:9621/documents/scan" --max-time 1800
+```
+
+> 根据所有新文件的预计索引时间调整 max-time。
+
+#### DELETE /documents
+
+从 RAG 系统中清除所有文档。
+
+```bash
+curl -X DELETE "http://localhost:9621/documents"
+```
+
+### Ollama 模拟端点
+
+#### GET /api/version
+
+获取 Ollama 版本信息。
+
+```bash
+curl http://localhost:9621/api/version
+```
+
+#### GET /api/tags
+
+获取 Ollama 可用模型。
+
+```bash
+curl http://localhost:9621/api/tags
+```
+
+#### POST /api/chat
+
+处理聊天补全请求。通过根据查询前缀选择查询模式将用户查询路由到 LightRAG。检测并将 OpenWebUI 会话相关请求（用于元数据生成任务）直接转发给底层 LLM。
+
+```shell
+curl -N -X POST http://localhost:9621/api/chat -H "Content-Type: application/json" -d \
+  '{"model":"lightrag:latest","messages":[{"role":"user","content":"猪八戒是谁"}],"stream":true}'
+```
+
+> 有关 Ollama API 的更多信息，请访问：[Ollama API 文档](https://github.com/ollama/ollama/blob/main/docs/api.md)
+
+#### POST /api/generate
+
+处理生成补全请求。为了兼容性目的，该请求不由 LightRAG 处理，而是由底层 LLM 模型处理。
+
+### 实用工具端点
+
+#### GET /health
+检查服务器健康状况和配置。
+
+```bash
+curl "http://localhost:9621/health"
+```
diff --git a/lightrag/api/README.assets/image-20250323122538997.png b/lightrag/api/README.assets/image-20250323122538997.png
new file mode 100644
index 0000000000..38743f7ea0
Binary files /dev/null and b/lightrag/api/README.assets/image-20250323122538997.png differ
diff --git a/lightrag/api/README.assets/image-20250323122754387.png b/lightrag/api/README.assets/image-20250323122754387.png
new file mode 100644
index 0000000000..4c91829ca0
Binary files /dev/null and b/lightrag/api/README.assets/image-20250323122754387.png differ
diff --git a/lightrag/api/README.assets/image-20250323123011220.png b/lightrag/api/README.assets/image-20250323123011220.png
new file mode 100644
index 0000000000..c05ec62fa7
Binary files /dev/null and b/lightrag/api/README.assets/image-20250323123011220.png differ
diff --git a/lightrag/api/README.assets/image-20250323194750379.png b/lightrag/api/README.assets/image-20250323194750379.png
new file mode 100644
index 0000000000..649a456eaa
Binary files /dev/null and b/lightrag/api/README.assets/image-20250323194750379.png differ
diff --git a/lightrag/api/README.md b/lightrag/api/README.md
index 7d91ae8190..1d05ba784a 100644
--- a/lightrag/api/README.md
+++ b/lightrag/api/README.md
@@ -1,14 +1,24 @@
-## Install 
LightRAG as an API Server +# LightRAG Server and WebUI -LightRAG provides optional API support through FastAPI servers that add RAG capabilities to existing LLM services. You can install LightRAG API Server in two ways: +The LightRAG Server is designed to provide a Web UI and API support. The Web UI facilitates document indexing, knowledge graph exploration, and a simple RAG query interface. LightRAG Server also provides an Ollama-compatible interface, aiming to emulate LightRAG as an Ollama chat model. This allows AI chat bots, such as Open WebUI, to access LightRAG easily. -### Installation from PyPI +![image-20250323122538997](./README.assets/image-20250323122538997.png) + +![image-20250323122754387](./README.assets/image-20250323122754387.png) + +![image-20250323123011220](./README.assets/image-20250323123011220.png) + +## Getting Started + +### Installation + +* Install from PyPI ```bash pip install "lightrag-hku[api]" ``` -### Installation from Source (Development) +* Installation from Source ```bash # Clone the repository @@ -17,153 +27,115 @@ git clone https://github.com/HKUDS/lightrag.git # Change to the repository directory cd lightrag -# create a Python virtual enviroment if neccesary +# create a Python virtual environment if necessary # Install in editable mode with API support pip install -e ".[api]" ``` -### Starting API Server with Default Settings +### Before Starting LightRAG Server -After installing LightRAG with API support, you can start LightRAG by this command: `lightrag-server` +LightRAG necessitates the integration of both an LLM (Large Language Model) and an Embedding Model to effectively execute document indexing and querying operations. Prior to the initial deployment of the LightRAG server, it is essential to configure the settings for both the LLM and the Embedding Model. LightRAG supports binding to various LLM/Embedding backends: -LightRAG requires both LLM and Embedding Model to work together to complete document indexing and querying tasks. LightRAG supports binding to various LLM/Embedding backends: +* ollama +* lollms +* openai or openai compatible +* azure_openai -* ollama -* lollms -* openai & openai compatible -* azure_openai +It is recommended to use environment variables to configure the LightRAG Server. There is an example environment variable file named `env.example` in the root directory of the project. Please copy this file to the startup directory and rename it to `.env`. After that, you can modify the parameters related to the LLM and Embedding models in the `.env` file. It is important to note that the LightRAG Server will load the environment variables from `.env` into the system environment variables each time it starts. Since the LightRAG Server will prioritize the settings in the system environment variables, if you modify the `.env` file after starting the LightRAG Server via the command line, you need to execute `source .env` to make the new settings take effect. -Before running any of the servers, ensure you have the corresponding backend service running for both llm and embedding. -The LightRAG API Server provides default parameters for LLM and Embedding, allowing users to easily start the service through command line. 
These default configurations are: +Here are some examples of common settings for LLM and Embedding models: -* Default endpoint of LLM/Embeding backend(LLM_BINDING_HOST or EMBEDDING_BINDING_HOST) +* OpenAI LLM + Ollama Embedding: ``` -# for lollms backend -LLM_BINDING_HOST=http://localhost:11434 -EMBEDDING_BINDING_HOST=http://localhost:11434 - -# for lollms backend -LLM_BINDING_HOST=http://localhost:9600 -EMBEDDING_BINDING_HOST=http://localhost:9600 - -# for openai, openai compatible or azure openai backend +LLM_BINDING=openai +LLM_MODEL=gpt-4o LLM_BINDING_HOST=https://api.openai.com/v1 -EMBEDDING_BINDING_HOST=http://localhost:9600 +LLM_BINDING_API_KEY=your_api_key +### Max tokens sent to LLM (less than model context size) +MAX_TOKENS=32768 + +EMBEDDING_BINDING=ollama +EMBEDDING_BINDING_HOST=http://localhost:11434 +EMBEDDING_MODEL=bge-m3:latest +EMBEDDING_DIM=1024 +# EMBEDDING_BINDING_API_KEY=your_api_key ``` -* Default model config +* Ollama LLM + Ollama Embedding: ``` +LLM_BINDING=ollama LLM_MODEL=mistral-nemo:latest +LLM_BINDING_HOST=http://localhost:11434 +# LLM_BINDING_API_KEY=your_api_key +### Max tokens sent to LLM (based on your Ollama Server capacity) +MAX_TOKENS=8192 +EMBEDDING_BINDING=ollama +EMBEDDING_BINDING_HOST=http://localhost:11434 EMBEDDING_MODEL=bge-m3:latest EMBEDDING_DIM=1024 -MAX_EMBED_TOKENS=8192 -``` - -* API keys for LLM/Embedding backend - -When connecting to backend require API KEY, corresponding environment variables must be provided: - -``` -LLM_BINDING_API_KEY=your_api_key -EMBEDDING_BINDING_API_KEY=your_api_key +# EMBEDDING_BINDING_API_KEY=your_api_key ``` -* Use command line arguments to choose LLM/Embeding backend +### Starting LightRAG Server -Use `--llm-binding` to select LLM backend type, and use `--embedding-binding` to select the embedding backend type. All the supported backend types are: +The LightRAG Server supports two operational modes: +* The simple and efficient Uvicorn mode: ``` -openai: LLM default type -ollama: Embedding defult type -lollms: -azure_openai: -openai-ollama: select openai for LLM and ollama for embedding(only valid for --llm-binding) +lightrag-server ``` - -The LightRAG API Server allows you to mix different bindings for llm/embeddings. For example, you have the possibility to use ollama for the embedding and openai for the llm.With the above default parameters, you can start API Server with simple CLI arguments like these: +* The multiprocess Gunicorn + Uvicorn mode (production mode, not supported on Windows environments): ``` -# start with openai llm and ollama embedding -LLM_BINDING_API_KEY=your_api_key Light_server -LLM_BINDING_API_KEY=your_api_key Light_server --llm-binding openai-ollama - -# start with openai llm and openai embedding -LLM_BINDING_API_KEY=your_api_key Light_server --llm-binding openai --embedding-binding openai - -# start with ollama llm and ollama embedding (no apikey is needed) -light-server --llm-binding ollama --embedding-binding ollama +lightrag-gunicorn --workers 4 ``` +The `.env` file **must be placed in the startup directory**. -### Starting API Server with Gunicorn (Production) +Upon launching, the LightRAG Server will create a documents directory (default is `./inputs`) and a data directory (default is `./rag_storage`). This allows you to initiate multiple instances of LightRAG Server from different directories, with each instance configured to listen on a distinct network port. 
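+
+For instance, here is a minimal sketch of running two independent instances from separate directories (the directory names and ports are illustrative assumptions; each directory holds its own `.env`):
+
+```bash
+# instance-a/.env sets PORT=9621, instance-b/.env sets PORT=9622
+(cd instance-a && lightrag-server) &
+(cd instance-b && lightrag-server) &
+```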
-For production deployments, it's recommended to use Gunicorn as the WSGI server to handle concurrent requests efficiently. LightRAG provides a dedicated Gunicorn startup script that handles shared data initialization, process management, and other critical functionalities.
+Here are some commonly used startup parameters:

-```bash
-# Start with lightrag-gunicorn command
-lightrag-gunicorn --workers 4
+- `--host`: Server listening address (default: 0.0.0.0)
+- `--port`: Server listening port (default: 9621)
+- `--timeout`: LLM request timeout (default: 150 seconds)
+- `--log-level`: Logging level (default: INFO)
+- `--input-dir`: Specifying the directory to scan for documents (default: ./inputs)

-# Alternatively, you can use the module directly
-python -m lightrag.api.run_with_gunicorn --workers 4
-```
+> The requirement for the .env file to be in the startup directory is intentionally designed this way. The purpose is to support users in launching multiple LightRAG instances simultaneously, allowing different .env files for different instances.

-The `--workers` parameter is crucial for performance:
+> **After changing the .env file, you need to open a new terminal to make the new settings take effect.** This is because the LightRAG Server will load the environment variables from .env into the system environment variables each time it starts, and LightRAG Server will prioritize the settings in the system environment variables.

-- Determines how many worker processes Gunicorn will spawn to handle requests
-- Each worker can handle concurrent requests using asyncio
-- Recommended value is (2 x number_of_cores) + 1
-- For example, on a 4-core machine, use 9 workers: (2 x 4) + 1 = 9
-- Consider your server's memory when setting this value, as each worker consumes memory
+### Auto scan on startup

-Other important startup parameters:
+When starting any of the servers with the `--auto-scan-at-startup` parameter, the system will automatically:

-- `--host`: Server listening address (default: 0.0.0.0)
-- `--port`: Server listening port (default: 9621)
-- `--timeout`: Request handling timeout (default: 150 seconds)
-- `--log-level`: Logging level (default: INFO)
-- `--ssl`: Enable HTTPS
-- `--ssl-certfile`: Path to SSL certificate file
-- `--ssl-keyfile`: Path to SSL private key file
+1. Scan for new files in the input directory
+2. Index new documents that aren't already in the database
+3. Make all content immediately available for RAG queries

-The command line parameters and enviroment variable run_with_gunicorn.py is exactly the same as `light-server`.
+> The `--input-dir` parameter specifies the input directory to scan. You can trigger the input directory scan from the Web UI.

-### For Azure OpenAI Backend
+### Multiple workers for Gunicorn + Uvicorn

-Azure OpenAI API can be created using the following commands in Azure CLI (you need to install Azure CLI first from [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)):
-```bash
-# Change the resource group name, location and OpenAI resource name as needed
-RESOURCE_GROUP_NAME=LightRAG
-LOCATION=swedencentral
-RESOURCE_NAME=LightRAG-OpenAI
+The LightRAG Server can operate in the `Gunicorn + Uvicorn` preload mode. Gunicorn's multiple worker (multiprocess) capability prevents document indexing tasks from blocking RAG queries. Using CPU-intensive document extraction tools, such as docling, can lead to the entire system being blocked in pure Uvicorn mode. 
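+
+As a rough sketch, a production start on a 4-core machine might look like this (the worker count here is an assumption; size it to your cores and memory, keeping it no greater than (2 x cores) + 1):
+
+```bash
+# (2 x 4 cores) + 1 = 9 workers; setting WORKERS in .env works as well
+lightrag-gunicorn --workers 9
+```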
-az login -az group create --name $RESOURCE_GROUP_NAME --location $LOCATION -az cognitiveservices account create --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --kind OpenAI --sku S0 --location swedencentral -az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name gpt-4o --model-name gpt-4o --model-version "2024-08-06" --sku-capacity 100 --sku-name "Standard" -az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name text-embedding-3-large --model-name text-embedding-3-large --model-version "1" --sku-capacity 80 --sku-name "Standard" -az cognitiveservices account show --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --query "properties.endpoint" -az cognitiveservices account keys list --name $RESOURCE_NAME -g $RESOURCE_GROUP_NAME +Though LightRAG Server uses one worker to process the document indexing pipeline, with the async task support of Uvicorn, multiple files can be processed in parallel. The bottleneck of document indexing speed mainly lies with the LLM. If your LLM supports high concurrency, you can accelerate document indexing by increasing the concurrency level of the LLM. Below are several environment variables related to concurrent processing, along with their default values: ``` -The output of the last command will give you the endpoint and the key for the OpenAI API. You can use these values to set the environment variables in the `.env` file. - -``` -# Azure OpenAI Configuration in .env -LLM_BINDING=azure_openai -LLM_BINDING_HOST=your-azure-endpoint -LLM_MODEL=your-model-deployment-name -LLM_BINDING_API_KEY=your-azure-api-key -AZURE_OPENAI_API_VERSION=2024-08-01-preview # optional, defaults to latest version -EMBEDDING_BINDING=azure_openai # if using Azure OpenAI for embeddings -EMBEDDING_MODEL=your-embedding-deployment-name - +### Number of worker processes, not greater than (2 x number_of_cores) + 1 +WORKERS=2 +### Number of parallel files to process in one batch +MAX_PARALLEL_INSERT=2 +### Max concurrent requests to the LLM +MAX_ASYNC=4 ``` -### Install Lightrag as a Linux Service +### Install LightRAG as a Linux Service -Create a your service file `lightrag.sevice` from the sample file : `lightrag.sevice.example`. Modified the WorkingDirectoryand EexecStart in the service file: +Create your service file `lightrag.service` from the sample file: `lightrag.service.example`. Modify the `WorkingDirectory` and `ExecStart` in the service file: ```text Description=LightRAG Ollama Service @@ -171,7 +143,7 @@ WorkingDirectory= ExecStart=/lightrag/api/lightrag-api ``` -Modify your service startup script: `lightrag-api`. Change you python virtual environment activation command as needed: +Modify your service startup script: `lightrag-api`. Change your Python virtual environment activation command as needed: ```shell #!/bin/bash @@ -192,102 +164,188 @@ sudo systemctl status lightrag.service sudo systemctl enable lightrag.service ``` -### Automatic Document Indexing +## Ollama Emulation -When starting any of the servers with the `--auto-scan-at-startup` parameter, the system will automatically: +We provide Ollama-compatible interfaces for LightRAG, aiming to emulate LightRAG as an Ollama chat model. This allows AI chat frontends supporting Ollama, such as Open WebUI, to access LightRAG easily. 
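+
+For a quick smoke test before wiring up a chat frontend, you can talk to the emulated model directly (the port and model name below assume the defaults):
+
+```bash
+# The emulated model should be listed by the tags endpoint
+curl http://localhost:9621/api/tags
+# One streaming chat request routed through LightRAG
+curl -N -X POST http://localhost:9621/api/chat \
+  -H "Content-Type: application/json" \
+  -d '{"model": "lightrag:latest", "messages": [{"role": "user", "content": "What is LightRAG?"}], "stream": true}'
+```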
+ +### Connect Open WebUI to LightRAG + +After starting the lightrag-server, you can add an Ollama-type connection in the Open WebUI admin panel. And then a model named `lightrag:latest` will appear in Open WebUI's model management interface. Users can then send queries to LightRAG through the chat interface. You should install LightRAG as a service for this use case. + +Open WebUI uses an LLM to do the session title and session keyword generation task. So the Ollama chat completion API detects and forwards OpenWebUI session-related requests directly to the underlying LLM. Screenshot from Open WebUI: + +![image-20250323194750379](./README.assets/image-20250323194750379.png) + +### Choose Query mode in chat + +The default query mode is `hybrid` if you send a message (query) from the Ollama interface of LightRAG. You can select query mode by sending a message with a query prefix. + +A query prefix in the query string can determine which LightRAG query mode is used to generate the response for the query. The supported prefixes include: + +``` +/local +/global +/hybrid +/naive +/mix + +/bypass +/context +/localcontext +/globalcontext +/hybridcontext +/naivecontext +/mixcontext +``` + +For example, the chat message `/mix What's LightRAG?` will trigger a mix mode query for LightRAG. A chat message without a query prefix will trigger a hybrid mode query by default. + +`/bypass` is not a LightRAG query mode; it will tell the API Server to pass the query directly to the underlying LLM, including the chat history. So the user can use the LLM to answer questions based on the chat history. If you are using Open WebUI as a front end, you can just switch the model to a normal LLM instead of using the `/bypass` prefix. + +`/context` is also not a LightRAG query mode; it will tell LightRAG to return only the context information prepared for the LLM. You can check the context if it's what you want, or process the context by yourself. + +## API Key and Authentication + +By default, the LightRAG Server can be accessed without any authentication. We can configure the server with an API Key or account credentials to secure it. + +* API Key: + +``` +LIGHTRAG_API_KEY=your-secure-api-key-here +WHITELIST_PATHS=/health,/api/* +``` + +> Health check and Ollama emulation endpoints are excluded from API Key check by default. + +* Account credentials (the Web UI requires login before access can be granted): + +LightRAG API Server implements JWT-based authentication using the HS256 algorithm. To enable secure access control, the following environment variables are required: + +```bash +# For jwt auth +AUTH_ACCOUNTS='admin:admin123,user1:pass456' +TOKEN_SECRET='your-key' +TOKEN_EXPIRE_HOURS=4 +``` + +> Currently, only the configuration of an administrator account and password is supported. A comprehensive account system is yet to be developed and implemented. + +If Account credentials are not configured, the Web UI will access the system as a Guest. Therefore, even if only an API Key is configured, all APIs can still be accessed through the Guest account, which remains insecure. Hence, to safeguard the API, it is necessary to configure both authentication methods simultaneously. 
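+
+As an example, with both methods configured, a client would first log in and then send both credentials on each request (a sketch; the `/login` form fields, the `X-API-Key` header name, and the `access_token` response field are assumptions based on the settings above):
+
+```bash
+# Obtain a JWT using an account from AUTH_ACCOUNTS
+TOKEN=$(curl -s -X POST http://localhost:9621/login \
+  -d "username=admin&password=admin123" | jq -r .access_token)
+
+# Call a protected endpoint with both the API Key and the JWT
+curl -X POST http://localhost:9621/query \
+  -H "X-API-Key: your-secure-api-key-here" \
+  -H "Authorization: Bearer $TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"query": "What is LightRAG?", "mode": "hybrid"}'
+```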
+ +## For Azure OpenAI Backend + +Azure OpenAI API can be created using the following commands in Azure CLI (you need to install Azure CLI first from [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)): + +```bash +# Change the resource group name, location, and OpenAI resource name as needed +RESOURCE_GROUP_NAME=LightRAG +LOCATION=swedencentral +RESOURCE_NAME=LightRAG-OpenAI -1. Scan for new files in the input directory -2. Indexing new documents that aren't already in the database -3. Make all content immediately available for RAG queries +az login +az group create --name $RESOURCE_GROUP_NAME --location $LOCATION +az cognitiveservices account create --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --kind OpenAI --sku S0 --location swedencentral +az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name gpt-4o --model-name gpt-4o --model-version "2024-08-06" --sku-capacity 100 --sku-name "Standard" +az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name text-embedding-3-large --model-name text-embedding-3-large --model-version "1" --sku-capacity 80 --sku-name "Standard" +az cognitiveservices account show --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --query "properties.endpoint" +az cognitiveservices account keys list --name $RESOURCE_NAME -g $RESOURCE_GROUP_NAME -> The `--input-dir` parameter specify the input directory to scan for. +``` -## API Server Configuration +The output of the last command will give you the endpoint and the key for the OpenAI API. You can use these values to set the environment variables in the `.env` file. -API Server can be config in three way (highest priority first): +``` +# Azure OpenAI Configuration in .env: +LLM_BINDING=azure_openai +LLM_BINDING_HOST=your-azure-endpoint +LLM_MODEL=your-model-deployment-name +LLM_BINDING_API_KEY=your-azure-api-key +### API version is optional, defaults to latest version +AZURE_OPENAI_API_VERSION=2024-08-01-preview + +### If using Azure OpenAI for embeddings +EMBEDDING_BINDING=azure_openai +EMBEDDING_MODEL=your-embedding-deployment-name +``` -* Command line arguments -* Enviroment variables or .env file -* Config.ini (Only for storage configuration) +## LightRAG Server Configuration in Detail -Most of the configurations come with a default settings, check out details in sample file: `.env.example`. Datastorage configuration can be also set by config.ini. A sample file `config.ini.example` is provided for your convenience. +The API Server can be configured in three ways (highest priority first): + +* Command line arguments +* Environment variables or .env file +* Config.ini (Only for storage configuration) + +Most of the configurations come with default settings; check out the details in the sample file: `.env.example`. Data storage configuration can also be set by config.ini. A sample file `config.ini.example` is provided for your convenience. ### LLM and Embedding Backend Supported LightRAG supports binding to various LLM/Embedding backends: -* ollama -* lollms -* openai & openai compatible -* azure_openai +* ollama +* lollms +* openai & openai compatible +* azure_openai -Use environment variables `LLM_BINDING` or CLI argument `--llm-binding` to select LLM backend type. 
Use environment variables `EMBEDDING_BINDING` or CLI argument `--embedding-binding` to select LLM backend type. +Use environment variables `LLM_BINDING` or CLI argument `--llm-binding` to select the LLM backend type. Use environment variables `EMBEDDING_BINDING` or CLI argument `--embedding-binding` to select the Embedding backend type. ### Entity Extraction Configuration -* ENABLE_LLM_CACHE_FOR_EXTRACT: Enable LLM cache for entity extraction (default: true) +* ENABLE_LLM_CACHE_FOR_EXTRACT: Enable LLM cache for entity extraction (default: true) -It's very common to set `ENABLE_LLM_CACHE_FOR_EXTRACT` to true for test environment to reduce the cost of LLM calls. +It's very common to set `ENABLE_LLM_CACHE_FOR_EXTRACT` to true for a test environment to reduce the cost of LLM calls. ### Storage Types Supported -LightRAG uses 4 types of storage for difference purposes: +LightRAG uses 4 types of storage for different purposes: -* KV_STORAGE:llm response cache, text chunks, document information -* VECTOR_STORAGE:entities vectors, relation vectors, chunks vectors -* GRAPH_STORAGE:entity relation graph -* DOC_STATUS_STORAGE:documents indexing status +* KV_STORAGE: llm response cache, text chunks, document information +* VECTOR_STORAGE: entities vectors, relation vectors, chunks vectors +* GRAPH_STORAGE: entity relation graph +* DOC_STATUS_STORAGE: document indexing status -Each storage type have servals implementations: +Each storage type has several implementations: -* KV_STORAGE supported implement-name +* KV_STORAGE supported implementations: ``` -JsonKVStorage JsonFile(default) -MongoKVStorage MogonDB -RedisKVStorage Redis -TiDBKVStorage TiDB +JsonKVStorage JsonFile (default) PGKVStorage Postgres -OracleKVStorage Oracle +RedisKVStorage Redis +MongoKVStorage MongoDB ``` -* GRAPH_STORAGE supported implement-name +* GRAPH_STORAGE supported implementations: ``` -NetworkXStorage NetworkX(defualt) +NetworkXStorage NetworkX (default) Neo4JStorage Neo4J -MongoGraphStorage MongoDB -TiDBGraphStorage TiDB -AGEStorage AGE -GremlinStorage Gremlin PGGraphStorage Postgres -OracleGraphStorage Postgres +AGEStorage AGE ``` -* VECTOR_STORAGE supported implement-name +* VECTOR_STORAGE supported implementations: ``` -NanoVectorDBStorage NanoVector(default) -MilvusVectorDBStorge Milvus -ChromaVectorDBStorage Chroma -TiDBVectorDBStorage TiDB +NanoVectorDBStorage NanoVector (default) PGVectorStorage Postgres +MilvusVectorDBStorage Milvus +ChromaVectorDBStorage Chroma FaissVectorDBStorage Faiss QdrantVectorDBStorage Qdrant -OracleVectorDBStorage Oracle MongoVectorDBStorage MongoDB ``` -* DOC_STATUS_STORAGE:supported implement-name +* DOC_STATUS_STORAGE: supported implementations: ``` -JsonDocStatusStorage JsonFile(default) +JsonDocStatusStorage JsonFile (default) PGDocStatusStorage Postgres MongoDocStatusStorage MongoDB ``` -### How Select Storage Implementation +### How to Select Storage Implementation -You can select storage implementation by environment variables. Your can set the following environmental variables to a specific storage implement-name before the your first start of the API Server: +You can select storage implementation by environment variables. 
You can set the following environment variables to a specific storage implementation name before the first start of the API Server: ``` LIGHTRAG_KV_STORAGE=PGKVStorage @@ -296,131 +354,89 @@ LIGHTRAG_GRAPH_STORAGE=PGGraphStorage LIGHTRAG_DOC_STATUS_STORAGE=PGDocStatusStorage ``` -You can not change storage implementation selection after you add documents to LightRAG. Data migration from one storage implementation to anthor is not supported yet. For further information please read the sample env file or config.ini file. - -### LightRag API Server Comand Line Options - -| Parameter | Default | Description | -|-----------|---------|-------------| -| --host | 0.0.0.0 | Server host | -| --port | 9621 | Server port | -| --working-dir | ./rag_storage | Working directory for RAG storage | -| --input-dir | ./inputs | Directory containing input documents | -| --max-async | 4 | Maximum async operations | -| --max-tokens | 32768 | Maximum token size | -| --timeout | 150 | Timeout in seconds. None for infinite timeout(not recommended) | -| --log-level | INFO | Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) | -| --verbose | - | Verbose debug output (True, Flase) | -| --key | None | API key for authentication. Protects lightrag server against unauthorized access | -| --ssl | False | Enable HTTPS | -| --ssl-certfile | None | Path to SSL certificate file (required if --ssl is enabled) | -| --ssl-keyfile | None | Path to SSL private key file (required if --ssl is enabled) | -| --top-k | 50 | Number of top-k items to retrieve; corresponds to entities in "local" mode and relationships in "global" mode. | -| --cosine-threshold | 0.4 | The cossine threshold for nodes and relations retrieval, works with top-k to control the retrieval of nodes and relations. | -| --llm-binding | ollama | LLM binding type (lollms, ollama, openai, openai-ollama, azure_openai) | -| --embedding-binding | ollama | Embedding binding type (lollms, ollama, openai, azure_openai) | -| auto-scan-at-startup | - | Scan input directory for new files and start indexing | - -### Example Usage - -#### Running a Lightrag server with ollama default local server as llm and embedding backends - -Ollama is the default backend for both llm and embedding, so by default you can run lightrag-server with no parameters and the default ones will be used. Make sure ollama is installed and is running and default models are already installed on ollama. - -```bash -# Run lightrag with ollama, mistral-nemo:latest for llm, and bge-m3:latest for embedding -lightrag-server - -# Using an authentication key -lightrag-server --key my-key -``` - -#### Running a Lightrag server with lollms default local server as llm and embedding backends - -```bash -# Run lightrag with lollms, mistral-nemo:latest for llm, and bge-m3:latest for embedding -# Configure LLM_BINDING=lollms and EMBEDDING_BINDING=lollms in .env or config.ini -lightrag-server - -# Using an authentication key -lightrag-server --key my-key -``` - -#### Running a Lightrag server with openai server as llm and embedding backends +You cannot change storage implementation selection after adding documents to LightRAG. Data migration from one storage implementation to another is not supported yet. For further information, please read the sample env file or config.ini file. 
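+
+For example, a minimal sketch that runs all four storage types on Postgres from the very first start (the Postgres connection settings themselves are configured separately and are not shown here):
+
+```bash
+# Select the Postgres implementations before any documents are indexed
+export LIGHTRAG_KV_STORAGE=PGKVStorage
+export LIGHTRAG_VECTOR_STORAGE=PGVectorStorage
+export LIGHTRAG_GRAPH_STORAGE=PGGraphStorage
+export LIGHTRAG_DOC_STATUS_STORAGE=PGDocStatusStorage
+lightrag-server
+```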
+ +### LightRAG API Server Command Line Options + +| Parameter | Default | Description | +| --------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| --host | 0.0.0.0 | Server host | +| --port | 9621 | Server port | +| --working-dir | ./rag_storage | Working directory for RAG storage | +| --input-dir | ./inputs | Directory containing input documents | +| --max-async | 4 | Maximum number of async operations | +| --max-tokens | 32768 | Maximum token size | +| --timeout | 150 | Timeout in seconds. None for infinite timeout (not recommended) | +| --log-level | INFO | Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) | +| --verbose | - | Verbose debug output (True, False) | +| --key | None | API key for authentication. Protects the LightRAG server against unauthorized access | +| --ssl | False | Enable HTTPS | +| --ssl-certfile | None | Path to SSL certificate file (required if --ssl is enabled) | +| --ssl-keyfile | None | Path to SSL private key file (required if --ssl is enabled) | +| --top-k | 50 | Number of top-k items to retrieve; corresponds to entities in "local" mode and relationships in "global" mode. | +| --cosine-threshold | 0.4 | The cosine threshold for nodes and relation retrieval, works with top-k to control the retrieval of nodes and relations. | +| --llm-binding | ollama | LLM binding type (lollms, ollama, openai, openai-ollama, azure_openai) | +| --embedding-binding | ollama | Embedding binding type (lollms, ollama, openai, azure_openai) | +| --auto-scan-at-startup| - | Scan input directory for new files and start indexing | + +### .env Examples ```bash -# Run lightrag with openai, GPT-4o-mini for llm, and text-embedding-3-small for embedding -# Configure in .env or config.ini: -# LLM_BINDING=openai -# LLM_MODEL=GPT-4o-mini -# EMBEDDING_BINDING=openai -# EMBEDDING_MODEL=text-embedding-3-small -lightrag-server - -# Using an authentication key -lightrag-server --key my-key -``` - -#### Running a Lightrag server with azure openai server as llm and embedding backends - -```bash -# Run lightrag with azure_openai -# Configure in .env or config.ini: -# LLM_BINDING=azure_openai -# LLM_MODEL=your-model -# EMBEDDING_BINDING=azure_openai -# EMBEDDING_MODEL=your-embedding-model -lightrag-server +### Server Configuration +# HOST=0.0.0.0 +PORT=9621 +WORKERS=2 + +### Settings for document indexing +ENABLE_LLM_CACHE_FOR_EXTRACT=true +SUMMARY_LANGUAGE=Chinese +MAX_PARALLEL_INSERT=2 + +### LLM Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal) +TIMEOUT=200 +TEMPERATURE=0.0 +MAX_ASYNC=4 +MAX_TOKENS=32768 + +LLM_BINDING=openai +LLM_MODEL=gpt-4o-mini +LLM_BINDING_HOST=https://api.openai.com/v1 +LLM_BINDING_API_KEY=your-api-key -# Using an authentication key -lightrag-server --key my-key -``` +### Embedding Configuration (Use valid host. 
For local services installed with docker, you can use host.docker.internal) +EMBEDDING_MODEL=bge-m3:latest +EMBEDDING_DIM=1024 +EMBEDDING_BINDING=ollama +EMBEDDING_BINDING_HOST=http://localhost:11434 -**Important Notes:** -- For LoLLMs: Make sure the specified models are installed in your LoLLMs instance -- For Ollama: Make sure the specified models are installed in your Ollama instance -- For OpenAI: Ensure you have set up your OPENAI_API_KEY environment variable -- For Azure OpenAI: Build and configure your server as stated in the Prequisites section +### For JWT Auth +# AUTH_ACCOUNTS='admin:admin123,user1:pass456' +# TOKEN_SECRET=your-key-for-LightRAG-API-Server-xxx +# TOKEN_EXPIRE_HOURS=48 -For help on any server, use the --help flag: -```bash -lightrag-server --help -``` +# LIGHTRAG_API_KEY=your-secure-api-key-here-123 +# WHITELIST_PATHS=/api/* +# WHITELIST_PATHS=/health,/api/* -Note: If you don't need the API functionality, you can install the base package without API support using: -```bash -pip install lightrag-hku ``` -## Authentication Endpoints - -### JWT Authentication Mechanism -LightRAG API Server implements JWT-based authentication using HS256 algorithm. To enable secure access control, the following environment variables are required: -```bash -# For jwt auth -AUTH_USERNAME=admin # login name -AUTH_PASSWORD=admin123 # password -TOKEN_SECRET=your-key # JWT key -TOKEN_EXPIRE_HOURS=4 # expire duration -WHITELIST_PATHS=/api1,/api2 # white list. /login,/health,/docs,/redoc,/openapi.json are whitelisted by default. -``` ## API Endpoints -All servers (LoLLMs, Ollama, OpenAI and Azure OpenAI) provide the same REST API endpoints for RAG functionality. When API Server is running, visit: +All servers (LoLLMs, Ollama, OpenAI and Azure OpenAI) provide the same REST API endpoints for RAG functionality. When the API Server is running, visit: -- Swagger UI: http://localhost:9621/docs -- ReDoc: http://localhost:9621/redoc +- Swagger UI: http://localhost:9621/docs +- ReDoc: http://localhost:9621/redoc You can test the API endpoints using the provided curl commands or through the Swagger UI interface. Make sure to: -1. Start the appropriate backend service (LoLLMs, Ollama, or OpenAI) -2. Start the RAG server -3. Upload some documents using the document management endpoints -4. Query the system using the query endpoints -5. Trigger document scan if new files is put into inputs directory +1. Start the appropriate backend service (LoLLMs, Ollama, or OpenAI) +2. Start the RAG server +3. Upload some documents using the document management endpoints +4. Query the system using the query endpoints +5. Trigger document scan if new files are put into the inputs directory -### Query Endpoints +### Query Endpoints: #### POST /query Query the RAG system with options for different search modes. @@ -428,7 +444,7 @@ Query the RAG system with options for different search modes. ```bash curl -X POST "http://localhost:9621/query" \ -H "Content-Type: application/json" \ - -d '{"query": "Your question here", "mode": "hybrid", ""}' + -d '{"query": "Your question here", "mode": "hybrid"}' ``` #### POST /query/stream @@ -440,7 +456,7 @@ curl -X POST "http://localhost:9621/query/stream" \ -d '{"query": "Your question here", "mode": "hybrid"}' ``` -### Document Management Endpoints +### Document Management Endpoints: #### POST /documents/text Insert text directly into the RAG system. 
@@ -471,13 +487,13 @@ curl -X POST "http://localhost:9621/documents/batch" \ #### POST /documents/scan -Trigger document scan for new files in the Input directory. +Trigger document scan for new files in the input directory. ```bash curl -X POST "http://localhost:9621/documents/scan" --max-time 1800 ``` -> Ajust max-time according to the estimated index time for all new files. +> Adjust max-time according to the estimated indexing time for all new files. #### DELETE /documents @@ -487,7 +503,7 @@ Clear all documents from the RAG system. curl -X DELETE "http://localhost:9621/documents" ``` -### Ollama Emulation Endpoints +### Ollama Emulation Endpoints: #### GET /api/version @@ -499,7 +515,7 @@ curl http://localhost:9621/api/version #### GET /api/tags -Get Ollama available models. +Get available Ollama models. ```bash curl http://localhost:9621/api/tags @@ -507,20 +523,20 @@ curl http://localhost:9621/api/tags #### POST /api/chat -Handle chat completion requests. Routes user queries through LightRAG by selecting query mode based on query prefix. Detects and forwards OpenWebUI session-related requests (for meta data generation task) directly to underlying LLM. +Handle chat completion requests. Routes user queries through LightRAG by selecting query mode based on query prefix. Detects and forwards OpenWebUI session-related requests (for metadata generation task) directly to the underlying LLM. ```shell curl -N -X POST http://localhost:9621/api/chat -H "Content-Type: application/json" -d \ '{"model":"lightrag:latest","messages":[{"role":"user","content":"猪八戒是谁"}],"stream":true}' ``` -> For more information about Ollama API pls. visit : [Ollama API documentation](https://github.com/ollama/ollama/blob/main/docs/api.md) +> For more information about Ollama API, please visit: [Ollama API documentation](https://github.com/ollama/ollama/blob/main/docs/api.md) #### POST /api/generate -Handle generate completion requests. For compatibility purpose, the request is not processed by LightRAG, and will be handled by underlying LLM model. +Handle generate completion requests. For compatibility purposes, the request is not processed by LightRAG, and will be handled by the underlying LLM model. -### Utility Endpoints +### Utility Endpoints: #### GET /health Check server health and configuration. @@ -528,65 +544,3 @@ Check server health and configuration. ```bash curl "http://localhost:9621/health" ``` - -## Ollama Emulation - -We provide an Ollama-compatible interfaces for LightRAG, aiming to emulate LightRAG as an Ollama chat model. This allows AI chat frontends supporting Ollama, such as Open WebUI, to access LightRAG easily. - -### Connect Open WebUI to LightRAG - -After starting the lightrag-server, you can add an Ollama-type connection in the Open WebUI admin pannel. And then a model named lightrag:latest will appear in Open WebUI's model management interface. Users can then send queries to LightRAG through the chat interface. You'd better install LightRAG as service for this use case. - -Open WebUI's use LLM to do the session title and session keyword generation task. So the Ollama chat chat completion API detects and forwards OpenWebUI session-related requests directly to underlying LLM. - -### Choose Query mode in chat - -A query prefix in the query string can determines which LightRAG query mode is used to generate the respond for the query. 
The supported prefixes include: - -``` -/local -/global -/hybrid -/naive -/mix -/bypass -``` - -For example, chat message "/mix 唐僧有几个徒弟" will trigger a mix mode query for LighRAG. A chat message without query prefix will trigger a hybrid mode query by default。 - -"/bypass" is not a LightRAG query mode, it will tell API Server to pass the query directly to the underlying LLM with chat history. So user can use LLM to answer question base on the chat history. If you are using Open WebUI as front end, you can just switch the model to a normal LLM instead of using /bypass prefix. - -## Dify External Knowledge API Adapter - -LightRAG supports integrating with Dify as an external knowledge base through the Dify External Knowledge API protocol. - -### Configuration - -Add the following settings to your .env file: - -```ini -# Enable Dify adapter -ENABLE_DIFY_ADAPTER=True - -# Comma-separated list of allowed API keys -DIFY_API_KEYS=your_key1,your_key2 -``` - -### Endpoints - -#### POST /dify/retrieval - -Example: -```bash -curl -X POST "http://localhost:9621/dify/retrieval" \ --H "Authorization: Bearer your_key" \ --H "Content-Type: application/json" \ --d '{ - "knowledge_id": "your_kb_id", - "query": "your query text", - "retrieval_setting": { - "top_k": 5, - "score_threshold": 0.5 - } -}' -``` diff --git a/lightrag/api/__init__.py b/lightrag/api/__init__.py index 0ff9f555ae..6ba40a2eb9 100644 --- a/lightrag/api/__init__.py +++ b/lightrag/api/__init__.py @@ -1,2 +1 @@ -# API routes package -__api_version__ = "1.0.5" +__api_version__ = "0150" diff --git a/lightrag/api/auth.py b/lightrag/api/auth.py index 78a1da1aff..0b61095d82 100644 --- a/lightrag/api/auth.py +++ b/lightrag/api/auth.py @@ -1,9 +1,17 @@ -import os from datetime import datetime, timedelta + import jwt +from dotenv import load_dotenv from fastapi import HTTPException, status from pydantic import BaseModel +from .config import global_args + +# use the .env that is inside the current folder +# allows to use different .env file for each lightrag instance +# the OS environment variables take precedence over the .env file +load_dotenv(dotenv_path=".env", override=False) + class TokenPayload(BaseModel): sub: str # Username @@ -14,12 +22,16 @@ class TokenPayload(BaseModel): class AuthHandler: def __init__(self): - self.secret = os.getenv("TOKEN_SECRET", "4f85ds4f56dsf46") - self.algorithm = "HS256" - self.expire_hours = int(os.getenv("TOKEN_EXPIRE_HOURS", 4)) - self.guest_expire_hours = int( - os.getenv("GUEST_TOKEN_EXPIRE_HOURS", 2) - ) # Guest token default expiration time + self.secret = global_args.token_secret + self.algorithm = global_args.jwt_algorithm + self.expire_hours = global_args.token_expire_hours + self.guest_expire_hours = global_args.guest_token_expire_hours + self.accounts = {} + auth_accounts = global_args.auth_accounts + if auth_accounts: + for account in auth_accounts.split(","): + username, password = account.split(":", 1) + self.accounts[username] = password def create_token( self, diff --git a/lightrag/api/config.py b/lightrag/api/config.py new file mode 100644 index 0000000000..268b41cb1b --- /dev/null +++ b/lightrag/api/config.py @@ -0,0 +1,336 @@ +""" +Configs for the LightRAG API. 
+""" + +import os +import argparse +import logging +from dotenv import load_dotenv + +# use the .env that is inside the current folder +# allows to use different .env file for each lightrag instance +# the OS environment variables take precedence over the .env file +load_dotenv(dotenv_path=".env", override=False) + + +class OllamaServerInfos: + # Constants for emulated Ollama model information + LIGHTRAG_NAME = "lightrag" + LIGHTRAG_TAG = os.getenv("OLLAMA_EMULATING_MODEL_TAG", "latest") + LIGHTRAG_MODEL = f"{LIGHTRAG_NAME}:{LIGHTRAG_TAG}" + LIGHTRAG_SIZE = 7365960935 # it's a dummy value + LIGHTRAG_CREATED_AT = "2024-01-15T00:00:00Z" + LIGHTRAG_DIGEST = "sha256:lightrag" + + +ollama_server_infos = OllamaServerInfos() + + +class DefaultRAGStorageConfig: + KV_STORAGE = "JsonKVStorage" + VECTOR_STORAGE = "NanoVectorDBStorage" + GRAPH_STORAGE = "NetworkXStorage" + DOC_STATUS_STORAGE = "JsonDocStatusStorage" + + +def get_default_host(binding_type: str) -> str: + default_hosts = { + "ollama": os.getenv("LLM_BINDING_HOST", "http://localhost:11434"), + "lollms": os.getenv("LLM_BINDING_HOST", "http://localhost:9600"), + "azure_openai": os.getenv("AZURE_OPENAI_ENDPOINT", "https://api.openai.com/v1"), + "openai": os.getenv("LLM_BINDING_HOST", "https://api.openai.com/v1"), + } + return default_hosts.get( + binding_type, os.getenv("LLM_BINDING_HOST", "http://localhost:11434") + ) # fallback to ollama if unknown + + +def get_env_value(env_key: str, default: any, value_type: type = str) -> any: + """ + Get value from environment variable with type conversion + + Args: + env_key (str): Environment variable key + default (any): Default value if env variable is not set + value_type (type): Type to convert the value to + + Returns: + any: Converted value from environment or default + """ + value = os.getenv(env_key) + if value is None: + return default + + if value_type is bool: + return value.lower() in ("true", "1", "yes", "t", "on") + try: + return value_type(value) + except ValueError: + return default + + +def parse_args() -> argparse.Namespace: + """ + Parse command line arguments with environment variable fallback + + Args: + is_uvicorn_mode: Whether running under uvicorn mode + + Returns: + argparse.Namespace: Parsed arguments + """ + + parser = argparse.ArgumentParser( + description="LightRAG FastAPI Server with separate working and input directories" + ) + + # Server configuration + parser.add_argument( + "--host", + default=get_env_value("HOST", "0.0.0.0"), + help="Server host (default: from env or 0.0.0.0)", + ) + parser.add_argument( + "--port", + type=int, + default=get_env_value("PORT", 9621, int), + help="Server port (default: from env or 9621)", + ) + + # Directory configuration + parser.add_argument( + "--working-dir", + default=get_env_value("WORKING_DIR", "./rag_storage"), + help="Working directory for RAG storage (default: from env or ./rag_storage)", + ) + parser.add_argument( + "--input-dir", + default=get_env_value("INPUT_DIR", "./inputs"), + help="Directory containing input documents (default: from env or ./inputs)", + ) + + def timeout_type(value): + if value is None: + return 150 + if value is None or value == "None": + return None + return int(value) + + parser.add_argument( + "--timeout", + default=get_env_value("TIMEOUT", None, timeout_type), + type=timeout_type, + help="Timeout in seconds (useful when using slow AI). 
Use None for infinite timeout", + ) + + # RAG configuration + parser.add_argument( + "--max-async", + type=int, + default=get_env_value("MAX_ASYNC", 4, int), + help="Maximum async operations (default: from env or 4)", + ) + parser.add_argument( + "--max-tokens", + type=int, + default=get_env_value("MAX_TOKENS", 32768, int), + help="Maximum token size (default: from env or 32768)", + ) + + # Logging configuration + parser.add_argument( + "--log-level", + default=get_env_value("LOG_LEVEL", "INFO"), + choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], + help="Logging level (default: from env or INFO)", + ) + parser.add_argument( + "--verbose", + action="store_true", + default=get_env_value("VERBOSE", False, bool), + help="Enable verbose debug output(only valid for DEBUG log-level)", + ) + + parser.add_argument( + "--key", + type=str, + default=get_env_value("LIGHTRAG_API_KEY", None), + help="API key for authentication. This protects lightrag server against unauthorized access", + ) + + # Optional https parameters + parser.add_argument( + "--ssl", + action="store_true", + default=get_env_value("SSL", False, bool), + help="Enable HTTPS (default: from env or False)", + ) + parser.add_argument( + "--ssl-certfile", + default=get_env_value("SSL_CERTFILE", None), + help="Path to SSL certificate file (required if --ssl is enabled)", + ) + parser.add_argument( + "--ssl-keyfile", + default=get_env_value("SSL_KEYFILE", None), + help="Path to SSL private key file (required if --ssl is enabled)", + ) + + parser.add_argument( + "--history-turns", + type=int, + default=get_env_value("HISTORY_TURNS", 3, int), + help="Number of conversation history turns to include (default: from env or 3)", + ) + + # Search parameters + parser.add_argument( + "--top-k", + type=int, + default=get_env_value("TOP_K", 60, int), + help="Number of most similar results to return (default: from env or 60)", + ) + parser.add_argument( + "--cosine-threshold", + type=float, + default=get_env_value("COSINE_THRESHOLD", 0.2, float), + help="Cosine similarity threshold (default: from env or 0.4)", + ) + + # Ollama model name + parser.add_argument( + "--simulated-model-name", + type=str, + default=get_env_value( + "SIMULATED_MODEL_NAME", ollama_server_infos.LIGHTRAG_MODEL + ), + help="Number of conversation history turns to include (default: from env or 3)", + ) + + # Namespace + parser.add_argument( + "--namespace-prefix", + type=str, + default=get_env_value("NAMESPACE_PREFIX", ""), + help="Prefix of the namespace", + ) + + parser.add_argument( + "--auto-scan-at-startup", + action="store_true", + default=False, + help="Enable automatic scanning when the program starts", + ) + + # Server workers configuration + parser.add_argument( + "--workers", + type=int, + default=get_env_value("WORKERS", 1, int), + help="Number of worker processes (default: from env or 1)", + ) + + # LLM and embedding bindings + parser.add_argument( + "--llm-binding", + type=str, + default=get_env_value("LLM_BINDING", "ollama"), + choices=["lollms", "ollama", "openai", "openai-ollama", "azure_openai"], + help="LLM binding type (default: from env or ollama)", + ) + parser.add_argument( + "--embedding-binding", + type=str, + default=get_env_value("EMBEDDING_BINDING", "ollama"), + choices=["lollms", "ollama", "openai", "azure_openai"], + help="Embedding binding type (default: from env or ollama)", + ) + + args = parser.parse_args() + + # convert relative path to absolute path + args.working_dir = os.path.abspath(args.working_dir) + args.input_dir = 
os.path.abspath(args.input_dir) + + # Inject storage configuration from environment variables + args.kv_storage = get_env_value( + "LIGHTRAG_KV_STORAGE", DefaultRAGStorageConfig.KV_STORAGE + ) + args.doc_status_storage = get_env_value( + "LIGHTRAG_DOC_STATUS_STORAGE", DefaultRAGStorageConfig.DOC_STATUS_STORAGE + ) + args.graph_storage = get_env_value( + "LIGHTRAG_GRAPH_STORAGE", DefaultRAGStorageConfig.GRAPH_STORAGE + ) + args.vector_storage = get_env_value( + "LIGHTRAG_VECTOR_STORAGE", DefaultRAGStorageConfig.VECTOR_STORAGE + ) + + # Get MAX_PARALLEL_INSERT from environment + args.max_parallel_insert = get_env_value("MAX_PARALLEL_INSERT", 2, int) + + # Handle openai-ollama special case + if args.llm_binding == "openai-ollama": + args.llm_binding = "openai" + args.embedding_binding = "ollama" + + args.llm_binding_host = get_env_value( + "LLM_BINDING_HOST", get_default_host(args.llm_binding) + ) + args.embedding_binding_host = get_env_value( + "EMBEDDING_BINDING_HOST", get_default_host(args.embedding_binding) + ) + args.llm_binding_api_key = get_env_value("LLM_BINDING_API_KEY", None) + args.embedding_binding_api_key = get_env_value("EMBEDDING_BINDING_API_KEY", "") + + # Inject model configuration + args.llm_model = get_env_value("LLM_MODEL", "mistral-nemo:latest") + args.embedding_model = get_env_value("EMBEDDING_MODEL", "bge-m3:latest") + args.embedding_dim = get_env_value("EMBEDDING_DIM", 1024, int) + args.max_embed_tokens = get_env_value("MAX_EMBED_TOKENS", 8192, int) + + # Inject chunk configuration + args.chunk_size = get_env_value("CHUNK_SIZE", 1200, int) + args.chunk_overlap_size = get_env_value("CHUNK_OVERLAP_SIZE", 100, int) + + # Inject LLM cache configuration + args.enable_llm_cache_for_extract = get_env_value( + "ENABLE_LLM_CACHE_FOR_EXTRACT", True, bool + ) + args.enable_llm_cache = get_env_value("ENABLE_LLM_CACHE", True, bool) + + # Inject LLM temperature configuration + args.temperature = get_env_value("TEMPERATURE", 0.5, float) + + # Select Document loading tool (DOCLING, DEFAULT) + args.document_loading_engine = get_env_value("DOCUMENT_LOADING_ENGINE", "DEFAULT") + + # Add environment variables that were previously read directly + args.cors_origins = get_env_value("CORS_ORIGINS", "*") + args.summary_language = get_env_value("SUMMARY_LANGUAGE", "en") + args.whitelist_paths = get_env_value("WHITELIST_PATHS", "/health,/api/*") + + # For JWT Auth + args.auth_accounts = get_env_value("AUTH_ACCOUNTS", "") + args.token_secret = get_env_value("TOKEN_SECRET", "lightrag-jwt-default-secret") + args.token_expire_hours = get_env_value("TOKEN_EXPIRE_HOURS", 48, int) + args.guest_token_expire_hours = get_env_value("GUEST_TOKEN_EXPIRE_HOURS", 24, int) + args.jwt_algorithm = get_env_value("JWT_ALGORITHM", "HS256") + + ollama_server_infos.LIGHTRAG_MODEL = args.simulated_model_name + + return args + + +def update_uvicorn_mode_config(): + # If in uvicorn mode and workers > 1, force it to 1 and log warning + if global_args.workers > 1: + original_workers = global_args.workers + global_args.workers = 1 + # Log warning directly here + logging.warning( + f"In uvicorn mode, workers parameter was set to {original_workers}. 
Forcing workers=1" + ) + + +global_args = parse_args() diff --git a/lightrag/api/gunicorn_config.py b/lightrag/api/gunicorn_config.py index 23e4680784..0aef108ed7 100644 --- a/lightrag/api/gunicorn_config.py +++ b/lightrag/api/gunicorn_config.py @@ -29,7 +29,9 @@ worker_class = "uvicorn.workers.UvicornWorker" # Other Gunicorn configurations -timeout = int(os.getenv("TIMEOUT", 150)) # Default 150s to match run_with_gunicorn.py +timeout = int( + os.getenv("TIMEOUT", 150 * 2) +) # Default 150s *2 to match run_with_gunicorn.py keepalive = int(os.getenv("KEEPALIVE", 5)) # Default 5s # Logging configuration diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py index 41e15e97aa..b24368e75a 100644 --- a/lightrag/api/lightrag_server.py +++ b/lightrag/api/lightrag_server.py @@ -18,14 +18,19 @@ from contextlib import asynccontextmanager from dotenv import load_dotenv from lightrag.api.utils_api import ( - get_api_key_dependency, - parse_args, - get_default_host, + get_combined_auth_dependency, display_splash_screen, + check_env_file, ) -from lightrag import LightRAG -from lightrag.types import GPTKeywordExtractionFormat +from .config import ( + global_args, + update_uvicorn_mode_config, + get_default_host, +) +import sys +from lightrag import LightRAG, __version__ as core_version from lightrag.api import __api_version__ +from lightrag.types import GPTKeywordExtractionFormat from lightrag.utils import EmbeddingFunc from lightrag.api.routers.document_routes import ( DocumentManager, @@ -45,20 +50,26 @@ get_namespace_data, get_pipeline_status_lock, initialize_pipeline_status, - get_all_update_flags_status, ) from fastapi.security import OAuth2PasswordRequestForm -from .auth import auth_handler +from lightrag.api.auth import auth_handler + +# use the .env that is inside the current folder +# allows to use different .env file for each lightrag instance +# the OS environment variables take precedence over the .env file +load_dotenv(dotenv_path=".env", override=False) -# Load environment variables -# Updated to use the .env that is inside the current folder -# This update allows the user to put a different.env file for each lightrag folder -load_dotenv(".env", override=True) + +webui_title = os.getenv("WEBUI_TITLE") +webui_description = os.getenv("WEBUI_DESCRIPTION") # Initialize config parser config = configparser.ConfigParser() config.read("config.ini") +# Global authentication configuration +auth_configured = bool(auth_handler.accounts) + def create_app(args): # Setup logging @@ -140,25 +151,33 @@ async def lifespan(app: FastAPI): await rag.finalize_storages() # Initialize FastAPI - app = FastAPI( - title="LightRAG API", - description="API for querying text using LightRAG with separate storage and input directories" + app_kwargs = { + "title": "LightRAG Server API", + "description": "Providing API for LightRAG core, Web UI and Ollama Model Emulation" + "(With authentication)" if api_key else "", - version=__api_version__, - openapi_url="/openapi.json", # Explicitly set OpenAPI schema URL - docs_url="/docs", # Explicitly set docs URL - redoc_url="/redoc", # Explicitly set redoc URL - openapi_tags=[{"name": "api"}], - lifespan=lifespan, - ) + "version": __api_version__, + "openapi_url": "/openapi.json", # Explicitly set OpenAPI schema URL + "docs_url": "/docs", # Explicitly set docs URL + "redoc_url": "/redoc", # Explicitly set redoc URL + "lifespan": lifespan, + } + + # Configure Swagger UI parameters + # Enable persistAuthorization and tryItOutEnabled for better user experience + 
app_kwargs["swagger_ui_parameters"] = { + "persistAuthorization": True, + "tryItOutEnabled": True, + } + + app = FastAPI(**app_kwargs) def get_cors_origins(): - """Get allowed origins from environment variable + """Get allowed origins from global_args Returns a list of allowed origins, defaults to ["*"] if not set """ - origins_str = os.getenv("CORS_ORIGINS", "*") + origins_str = global_args.cors_origins if origins_str == "*": return ["*"] return [origin.strip() for origin in origins_str.split(",")] @@ -172,8 +191,8 @@ def get_cors_origins(): allow_headers=["*"], ) - # Create the optional API key dependency - optional_api_key = get_api_key_dependency(api_key) + # Create combined auth dependency for all endpoints + combined_auth = get_combined_auth_dependency(api_key) # Create working directory if it doesn't exist Path(args.working_dir).mkdir(parents=True, exist_ok=True) @@ -204,6 +223,7 @@ async def openai_alike_model_complete( kwargs["response_format"] = GPTKeywordExtractionFormat if history_messages is None: history_messages = [] + kwargs["temperature"] = args.temperature return await openai_complete_if_cache( args.llm_model, prompt, @@ -226,6 +246,7 @@ async def azure_openai_model_complete( kwargs["response_format"] = GPTKeywordExtractionFormat if history_messages is None: history_messages = [] + kwargs["temperature"] = args.temperature return await azure_openai_complete_if_cache( args.llm_model, prompt, @@ -299,13 +320,16 @@ async def azure_openai_model_complete( "cosine_better_than_threshold": args.cosine_threshold }, enable_llm_cache_for_entity_extract=args.enable_llm_cache_for_extract, + enable_llm_cache=args.enable_llm_cache, embedding_cache_config={ "enabled": True, "similarity_threshold": 0.95, "use_llm_check": False, }, - namespace_prefix=args.namespace_prefix, + # namespace_prefix=args.namespace_prefix, auto_manage_storages_states=False, + max_parallel_insert=args.max_parallel_insert, + addon_params={"language": args.summary_language}, ) else: # azure_openai rag = LightRAG( @@ -328,13 +352,16 @@ async def azure_openai_model_complete( "cosine_better_than_threshold": args.cosine_threshold }, enable_llm_cache_for_entity_extract=args.enable_llm_cache_for_extract, + enable_llm_cache=args.enable_llm_cache, embedding_cache_config={ "enabled": True, "similarity_threshold": 0.95, "use_llm_check": False, }, - namespace_prefix=args.namespace_prefix, + # namespace_prefix=args.namespace_prefix, auto_manage_storages_states=False, + max_parallel_insert=args.max_parallel_insert, + addon_params={"language": args.summary_language}, ) # Add routes @@ -342,14 +369,14 @@ async def azure_openai_model_complete( app.include_router(create_query_routes(rag, api_key, args.top_k)) app.include_router(create_graph_routes(rag, api_key)) app.include_router(create_kg_routes(rag, api_key)) - + # Add Dify External Knowledge API routes if enabled if os.getenv("ENABLE_DIFY_ADAPTER", "False").lower() == "true": ASCIIColors.info("Enabling Dify External Knowledge API adapter") app.include_router(create_dify_routes(rag, api_key)) # Add Ollama API routes - ollama_api = OllamaAPI(rag, top_k=args.top_k) + ollama_api = OllamaAPI(rag, top_k=args.top_k, api_key=api_key) app.include_router(ollama_api.router, prefix="/api") @app.get("/") @@ -357,13 +384,11 @@ async def redirect_to_webui(): """Redirect root path to /webui""" return RedirectResponse(url="/webui") - @app.get("/auth-status", dependencies=[Depends(optional_api_key)]) + @app.get("/auth-status") async def get_auth_status(): """Get authentication status and guest 
token if auth is not configured""" - username = os.getenv("AUTH_USERNAME") - password = os.getenv("AUTH_PASSWORD") - if not (username and password): + if not auth_handler.accounts: # Authentication not configured, return guest token guest_token = auth_handler.create_token( username="guest", role="guest", metadata={"auth_mode": "disabled"} @@ -374,16 +399,24 @@ async def get_auth_status(): "token_type": "bearer", "auth_mode": "disabled", "message": "Authentication is disabled. Using guest access.", + "core_version": core_version, + "api_version": __api_version__, + "webui_title": webui_title, + "webui_description": webui_description, } - return {"auth_configured": True, "auth_mode": "enabled"} + return { + "auth_configured": True, + "auth_mode": "enabled", + "core_version": core_version, + "api_version": __api_version__, + "webui_title": webui_title, + "webui_description": webui_description, + } - @app.post("/login", dependencies=[Depends(optional_api_key)]) + @app.post("/login") async def login(form_data: OAuth2PasswordRequestForm = Depends()): - username = os.getenv("AUTH_USERNAME") - password = os.getenv("AUTH_PASSWORD") - - if not (username and password): + if not auth_handler.accounts: # Authentication not configured, return guest token guest_token = auth_handler.create_token( username="guest", role="guest", metadata={"auth_mode": "disabled"} @@ -393,9 +426,13 @@ async def login(form_data: OAuth2PasswordRequestForm = Depends()): "token_type": "bearer", "auth_mode": "disabled", "message": "Authentication is disabled. Using guest access.", + "core_version": core_version, + "api_version": __api_version__, + "webui_title": webui_title, + "webui_description": webui_description, } - - if form_data.username != username or form_data.password != password: + username = form_data.username + if auth_handler.accounts.get(username) != form_data.password: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Incorrect credentials" ) @@ -408,36 +445,54 @@ async def login(form_data: OAuth2PasswordRequestForm = Depends()): "access_token": user_token, "token_type": "bearer", "auth_mode": "enabled", + "core_version": core_version, + "api_version": __api_version__, + "webui_title": webui_title, + "webui_description": webui_description, } - @app.get("/health", dependencies=[Depends(optional_api_key)]) + @app.get("/health", dependencies=[Depends(combined_auth)]) async def get_status(): """Get current system status""" - # Get update flags status for all namespaces - update_status = await get_all_update_flags_status() + try: + pipeline_status = await get_namespace_data("pipeline_status") - return { - "status": "healthy", - "working_directory": str(args.working_dir), - "input_directory": str(args.input_dir), - "configuration": { - # LLM configuration binding/host address (if applicable)/model (if applicable) - "llm_binding": args.llm_binding, - "llm_binding_host": args.llm_binding_host, - "llm_model": args.llm_model, - # embedding model configuration binding/host address (if applicable)/model (if applicable) - "embedding_binding": args.embedding_binding, - "embedding_binding_host": args.embedding_binding_host, - "embedding_model": args.embedding_model, - "max_tokens": args.max_tokens, - "kv_storage": args.kv_storage, - "doc_status_storage": args.doc_status_storage, - "graph_storage": args.graph_storage, - "vector_storage": args.vector_storage, - "enable_llm_cache_for_extract": args.enable_llm_cache_for_extract, - }, - "update_status": update_status, - } + if not auth_configured: + auth_mode = 
"disabled" + else: + auth_mode = "enabled" + + return { + "status": "healthy", + "working_directory": str(args.working_dir), + "input_directory": str(args.input_dir), + "configuration": { + # LLM configuration binding/host address (if applicable)/model (if applicable) + "llm_binding": args.llm_binding, + "llm_binding_host": args.llm_binding_host, + "llm_model": args.llm_model, + # embedding model configuration binding/host address (if applicable)/model (if applicable) + "embedding_binding": args.embedding_binding, + "embedding_binding_host": args.embedding_binding_host, + "embedding_model": args.embedding_model, + "max_tokens": args.max_tokens, + "kv_storage": args.kv_storage, + "doc_status_storage": args.doc_status_storage, + "graph_storage": args.graph_storage, + "vector_storage": args.vector_storage, + "enable_llm_cache_for_extract": args.enable_llm_cache_for_extract, + "enable_llm_cache": args.enable_llm_cache, + }, + "auth_mode": auth_mode, + "pipeline_busy": pipeline_status.get("busy", False), + "core_version": core_version, + "api_version": __api_version__, + "webui_title": webui_title, + "webui_description": webui_description, + } + except Exception as e: + logger.error(f"Error getting health status: {str(e)}") + raise HTTPException(status_code=500, detail=str(e)) # Custom StaticFiles class to prevent caching of HTML files class NoCacheStaticFiles(StaticFiles): @@ -466,7 +521,7 @@ async def get_response(self, path: str, scope): def get_application(args=None): """Factory function for creating the FastAPI application""" if args is None: - args = parse_args() + args = global_args return create_app(args) @@ -574,6 +629,10 @@ def main(): print("Running under Gunicorn - worker management handled by Gunicorn") return + # Check .env file + if not check_env_file(): + sys.exit(1) + # Check and install dependencies check_and_install_dependencies() @@ -583,30 +642,31 @@ def main(): # Configure logging before parsing args configure_logging() - - args = parse_args(is_uvicorn_mode=True) - display_splash_screen(args) + update_uvicorn_mode_config() + display_splash_screen(global_args) # Create application instance directly instead of using factory function - app = create_app(args) + app = create_app(global_args) # Start Uvicorn in single process mode uvicorn_config = { "app": app, # Pass application instance directly instead of string path - "host": args.host, - "port": args.port, + "host": global_args.host, + "port": global_args.port, "log_config": None, # Disable default config } - if args.ssl: + if global_args.ssl: uvicorn_config.update( { - "ssl_certfile": args.ssl_certfile, - "ssl_keyfile": args.ssl_keyfile, + "ssl_certfile": global_args.ssl_certfile, + "ssl_keyfile": global_args.ssl_keyfile, } ) - print(f"Starting Uvicorn server in single-process mode on {args.host}:{args.port}") + print( + f"Starting Uvicorn server in single-process mode on {global_args.host}:{global_args.port}" + ) uvicorn.run(**uvicorn_config) diff --git a/lightrag/api/routers/dify_routes.py b/lightrag/api/routers/dify_routes.py index eee25d85ed..109f80d586 100644 --- a/lightrag/api/routers/dify_routes.py +++ b/lightrag/api/routers/dify_routes.py @@ -236,7 +236,7 @@ async def retrieval(request: RetrievalRequest): return DifyResponse(records=[]) records = [] - + # 处理knowledge graph结果 if kg_context := result.get("kg_context"): logger.debug(f"Found kg_context") diff --git a/lightrag/api/routers/document_routes.py b/lightrag/api/routers/document_routes.py index d3e176528f..ba86f7bdad 100644 --- 
a/lightrag/api/routers/document_routes.py +++ b/lightrag/api/routers/document_routes.py @@ -11,29 +11,55 @@ import csv from datetime import datetime from pathlib import Path -from typing import Dict, List, Optional, Any +from typing import Dict, List, Optional, Any, Literal from fastapi import APIRouter, BackgroundTasks, Depends, File, HTTPException, UploadFile from pydantic import BaseModel, Field, field_validator from lightrag import LightRAG from lightrag.base import DocProcessingStatus, DocStatus -from lightrag.api.utils_api import ( - get_api_key_dependency, - global_args, - get_auth_dependency, -) +from lightrag.api.utils_api import get_combined_auth_dependency +from ..config import global_args router = APIRouter( prefix="/documents", tags=["documents"], - dependencies=[Depends(get_auth_dependency())], ) # Temporary file prefix temp_prefix = "__tmp__" +class ScanResponse(BaseModel): + """Response model for document scanning operation + + Attributes: + status: Status of the scanning operation + message: Optional message with additional details + """ + + status: Literal["scanning_started"] = Field( + description="Status of the scanning operation" + ) + message: Optional[str] = Field( + default=None, description="Additional details about the scanning operation" + ) + + class Config: + json_schema_extra = { + "example": { + "status": "scanning_started", + "message": "Scanning process has been initiated in the background", + } + } + + class InsertTextRequest(BaseModel): + """Request model for inserting a single text document + + Attributes: + text: The text content to be inserted into the RAG system + """ + text: str = Field( min_length=1, description="The text to insert", @@ -52,8 +78,21 @@ class InsertTextRequest(BaseModel): def strip_after(cls, text: str) -> str: return text.strip() + class Config: + json_schema_extra = { + "example": { + "text": "This is a sample text to be inserted into the RAG system." + } + } + class InsertTextsRequest(BaseModel): + """Request model for inserting multiple text documents + + Attributes: + texts: List of text contents to be inserted into the RAG system + """ + texts: list[str] = Field( min_length=1, description="The texts to insert", @@ -72,11 +111,116 @@ class InsertTextsRequest(BaseModel): def strip_after(cls, texts: list[str]) -> list[str]: return [text.strip() for text in texts] + class Config: + json_schema_extra = { + "example": { + "texts": [ + "This is the first text to be inserted.", + "This is the second text to be inserted.", + ] + } + } + class InsertResponse(BaseModel): - status: str = Field(description="Status of the operation") + """Response model for document insertion operations + + Attributes: + status: Status of the operation (success, duplicated, partial_success, failure) + message: Detailed message describing the operation result + """ + + status: Literal["success", "duplicated", "partial_success", "failure"] = Field( + description="Status of the operation" + ) message: str = Field(description="Message describing the operation result") + class Config: + json_schema_extra = { + "example": { + "status": "success", + "message": "File 'document.pdf' uploaded successfully. 
Processing will continue in background.", + } + } + + +class ClearDocumentsResponse(BaseModel): + """Response model for document clearing operation + + Attributes: + status: Status of the clear operation + message: Detailed message describing the operation result + """ + + status: Literal["success", "partial_success", "busy", "fail"] = Field( + description="Status of the clear operation" + ) + message: str = Field(description="Message describing the operation result") + + class Config: + json_schema_extra = { + "example": { + "status": "success", + "message": "All documents cleared successfully. Deleted 15 files.", + } + } + + +class ClearCacheRequest(BaseModel): + """Request model for clearing cache + + Attributes: + modes: Optional list of cache modes to clear + """ + + modes: Optional[ + List[Literal["default", "naive", "local", "global", "hybrid", "mix"]] + ] = Field( + default=None, + description="Modes of cache to clear. If None, clears all cache.", + ) + + class Config: + json_schema_extra = {"example": {"modes": ["default", "naive"]}} + + +class ClearCacheResponse(BaseModel): + """Response model for cache clearing operation + + Attributes: + status: Status of the clear operation + message: Detailed message describing the operation result + """ + + status: Literal["success", "fail"] = Field( + description="Status of the clear operation" + ) + message: str = Field(description="Message describing the operation result") + + class Config: + json_schema_extra = { + "example": { + "status": "success", + "message": "Successfully cleared cache for modes: ['default', 'naive']", + } + } + + +"""Response model for document status + +Attributes: + id: Document identifier + content_summary: Summary of document content + content_length: Length of document content + status: Current processing status + created_at: Creation timestamp (ISO format string) + updated_at: Last update timestamp (ISO format string) + chunks_count: Number of chunks (optional) + error: Error message if any (optional) + metadata: Additional metadata (optional) + file_path: Path to the document file +""" + class DocStatusResponse(BaseModel): @staticmethod @@ -87,33 +231,82 @@ def format_datetime(dt: Any) -> Optional[str]: return dt return dt.isoformat() - """Response model for document status + id: str = Field(description="Document identifier") + content_summary: str = Field(description="Summary of document content") + content_length: int = Field(description="Length of document content in characters") + status: DocStatus = Field(description="Current processing status") + created_at: str = Field(description="Creation timestamp (ISO format string)") + updated_at: str = Field(description="Last update timestamp (ISO format string)") + chunks_count: Optional[int] = Field( + default=None, description="Number of chunks the document was split into" + ) + error: Optional[str] = Field( + default=None, description="Error message if processing failed" + ) + metadata: Optional[dict[str, Any]] = Field( + default=None, description="Additional metadata about the document" + ) + file_path: str = Field(description="Path to the document file") + + class Config: + json_schema_extra = { + "example": { + "id": "doc_123456", + "content_summary": "Research paper on machine learning", + "content_length": 15240, + "status": "PROCESSED", + "created_at": "2025-03-31T12:34:56", + "updated_at": "2025-03-31T12:35:30", + "chunks_count": 12, + "error": None, + "metadata": {"author": "John Doe", "year": 2025}, + "file_path": "research_paper.pdf", + } + } + + 
+class DocsStatusesResponse(BaseModel): + """Response model for document statuses Attributes: - id: Document identifier - content_summary: Summary of document content - content_length: Length of document content - status: Current processing status - created_at: Creation timestamp (ISO format string) - updated_at: Last update timestamp (ISO format string) - chunks_count: Number of chunks (optional) - error: Error message if any (optional) - metadata: Additional metadata (optional) + statuses: Dictionary mapping document status to lists of document status responses """ - id: str - content_summary: str - content_length: int - status: DocStatus - created_at: str - updated_at: str - chunks_count: Optional[int] = None - error: Optional[str] = None - metadata: Optional[dict[str, Any]] = None - + statuses: Dict[DocStatus, List[DocStatusResponse]] = Field( + default_factory=dict, + description="Dictionary mapping document status to lists of document status responses", + ) -class DocsStatusesResponse(BaseModel): - statuses: Dict[DocStatus, List[DocStatusResponse]] = {} + class Config: + json_schema_extra = { + "example": { + "statuses": { + "PENDING": [ + { + "id": "doc_123", + "content_summary": "Pending document", + "content_length": 5000, + "status": "PENDING", + "created_at": "2025-03-31T10:00:00", + "updated_at": "2025-03-31T10:00:00", + "file_path": "pending_doc.pdf", + } + ], + "PROCESSED": [ + { + "id": "doc_456", + "content_summary": "Processed document", + "content_length": 8000, + "status": "PROCESSED", + "created_at": "2025-03-31T09:00:00", + "updated_at": "2025-03-31T09:05:00", + "chunks_count": 8, + "file_path": "processed_doc.pdf", + } + ], + } + } + } class PipelineStatusResponse(BaseModel): @@ -130,6 +323,7 @@ class PipelineStatusResponse(BaseModel): request_pending: Flag for pending request for processing latest_message: Latest message from pipeline processing history_messages: List of history messages + update_status: Status of update flags for all namespaces """ autoscanned: bool = False @@ -142,6 +336,7 @@ class PipelineStatusResponse(BaseModel): request_pending: bool = False latest_message: str = "" history_messages: Optional[List[str]] = None + update_status: Optional[dict] = None class Config: extra = "allow" # Allow additional fields from the pipeline status @@ -307,7 +502,9 @@ async def pipeline_enqueue_file(rag: LightRAG, file_path: Path) -> bool: formatted_row = [] for header, value in zip(headers, row): if value.strip(): # 只包含非空值 - formatted_row.append(f"{header.strip()}: {value.strip()}") + formatted_row.append( + f"{header.strip()}: {value.strip()}" + ) if formatted_row: # 只添加非空行 processed_lines.append(" | ".join(formatted_row)) row_count += 1 @@ -316,10 +513,10 @@ async def pipeline_enqueue_file(rag: LightRAG, file_path: Path) -> bool: content = "\n\n".join(processed_lines) except UnicodeDecodeError: return False - except Exception as e: + except Exception: return False case ".pdf": - if global_args["main_args"].document_loading_engine == "DOCLING": + if global_args.document_loading_engine == "DOCLING": if not pm.is_installed("docling"): # type: ignore pm.install("docling") from docling.document_converter import DocumentConverter # type: ignore @@ -338,7 +535,7 @@ async def pipeline_enqueue_file(rag: LightRAG, file_path: Path) -> bool: for page in reader.pages: content += page.extract_text() + "\n" case ".docx": - if global_args["main_args"].document_loading_engine == "DOCLING": + if global_args.document_loading_engine == "DOCLING": if not pm.is_installed("docling"): 
# type: ignore pm.install("docling") from docling.document_converter import DocumentConverter # type: ignore @@ -348,7 +545,10 @@ async def pipeline_enqueue_file(rag: LightRAG, file_path: Path) -> bool: content = result.document.export_to_markdown() else: if not pm.is_installed("python-docx"): # type: ignore - pm.install("docx") + try: + pm.install("python-docx") + except Exception: + pm.install("docx") from docx import Document # type: ignore from io import BytesIO @@ -358,7 +558,7 @@ async def pipeline_enqueue_file(rag: LightRAG, file_path: Path) -> bool: [paragraph.text for paragraph in doc.paragraphs] ) case ".pptx": - if global_args["main_args"].document_loading_engine == "DOCLING": + if global_args.document_loading_engine == "DOCLING": if not pm.is_installed("docling"): # type: ignore pm.install("docling") from docling.document_converter import DocumentConverter # type: ignore @@ -379,7 +579,7 @@ async def pipeline_enqueue_file(rag: LightRAG, file_path: Path) -> bool: if hasattr(shape, "text"): content += shape.text + "\n" case ".xlsx": - if global_args["main_args"].document_loading_engine == "DOCLING": + if global_args.document_loading_engine == "DOCLING": if not pm.is_installed("docling"): # type: ignore pm.install("docling") from docling.document_converter import DocumentConverter # type: ignore @@ -414,7 +614,7 @@ async def pipeline_enqueue_file(rag: LightRAG, file_path: Path) -> bool: # Insert into the RAG queue if content: - await rag.apipeline_enqueue_documents(content) + await rag.apipeline_enqueue_documents(content, file_paths=file_path.name) logger.info(f"Successfully fetched and enqueued file: {file_path.name}") return True else: @@ -449,7 +649,7 @@ async def pipeline_index_file(rag: LightRAG, file_path: Path): async def pipeline_index_files(rag: LightRAG, file_paths: List[Path]): - """Index multiple files concurrently + """Index multiple files sequentially to avoid high CPU load Args: rag: LightRAG instance @@ -460,12 +660,12 @@ async def pipeline_index_files(rag: LightRAG, file_paths: List[Path]): try: enqueued = False - if len(file_paths) == 1: - enqueued = await pipeline_enqueue_file(rag, file_paths[0]) - else: - tasks = [pipeline_enqueue_file(rag, path) for path in file_paths] - enqueued = any(await asyncio.gather(*tasks)) + # Process files sequentially + for file_path in file_paths: + if await pipeline_enqueue_file(rag, file_path): + enqueued = True + # Process the queue only if at least one file was successfully enqueued if enqueued: await rag.apipeline_process_enqueue_documents() except Exception as e: @@ -474,9 +674,7 @@ async def pipeline_index_files(rag: LightRAG, file_paths: List[Path]): async def pipeline_index_texts( - rag: LightRAG, - texts: List[str], - split_by_character: str | None = None + rag: LightRAG, texts: List[str], split_by_character: str | None = None ): """Index a list of texts @@ -491,6 +689,7 @@ async def pipeline_index_texts( await rag.apipeline_process_enqueue_documents(split_by_character=split_by_character) +# TODO: deprecate after /insert_file is removed async def save_temp_file(input_dir: Path, file: UploadFile = File(...)) -> Path: """Save the uploaded file to a temporary location @@ -521,22 +720,45 @@ async def run_scanning_process(rag: LightRAG, doc_manager: DocumentManager): total_files = len(new_files) logger.info(f"Found {total_files} new files to index.") - for idx, file_path in enumerate(new_files): - try: - await pipeline_index_file(rag, file_path) - except Exception as e: - logger.error(f"Error indexing file {file_path}: 
{str(e)}") + if not new_files: + return + + # Get MAX_PARALLEL_INSERT from global_args + max_parallel = global_args.max_parallel_insert + # Calculate batch size as 2 * MAX_PARALLEL_INSERT + batch_size = 2 * max_parallel + + # Process files in batches + for i in range(0, total_files, batch_size): + batch_files = new_files[i : i + batch_size] + batch_num = i // batch_size + 1 + total_batches = (total_files + batch_size - 1) // batch_size + + logger.info( + f"Processing batch {batch_num}/{total_batches} with {len(batch_files)} files" + ) + await pipeline_index_files(rag, batch_files) + + # Log progress + processed = min(i + batch_size, total_files) + logger.info( + f"Processed {processed}/{total_files} files ({processed/total_files*100:.1f}%)" + ) except Exception as e: logger.error(f"Error during scanning process: {str(e)}") + logger.error(traceback.format_exc()) def create_document_routes( rag: LightRAG, doc_manager: DocumentManager, api_key: Optional[str] = None ): - optional_api_key = get_api_key_dependency(api_key) + # Create combined auth dependency for document routes + combined_auth = get_combined_auth_dependency(api_key) - @router.post("/scan", dependencies=[Depends(optional_api_key)]) + @router.post( + "/scan", response_model=ScanResponse, dependencies=[Depends(combined_auth)] + ) async def scan_for_new_documents(background_tasks: BackgroundTasks): """ Trigger the scanning process for new documents. @@ -546,13 +768,18 @@ async def scan_for_new_documents(background_tasks: BackgroundTasks): that fact. Returns: - dict: A dictionary containing the scanning status + ScanResponse: A response object containing the scanning status """ # Start the scanning process in the background background_tasks.add_task(run_scanning_process, rag, doc_manager) - return {"status": "scanning_started"} + return ScanResponse( + status="scanning_started", + message="Scanning process has been initiated in the background", + ) - @router.post("/upload", dependencies=[Depends(optional_api_key)]) + @router.post( + "/upload", response_model=InsertResponse, dependencies=[Depends(combined_auth)] + ) async def upload_to_input_dir( background_tasks: BackgroundTasks, file: UploadFile = File(...) ): @@ -569,6 +796,7 @@ async def upload_to_input_dir( Returns: InsertResponse: A response object containing the upload status and a message. + status can be "success", "duplicated", or error is thrown. Raises: HTTPException: If the file type is not supported (400) or other errors occur (500). 
@@ -581,6 +809,13 @@ async def upload_to_input_dir( ) file_path = doc_manager.input_dir / file.filename + # Check if file already exists + if file_path.exists(): + return InsertResponse( + status="duplicated", + message=f"File '{file.filename}' already exists in the input directory.", + ) + with open(file_path, "wb") as buffer: shutil.copyfileobj(file.file, buffer) @@ -597,7 +832,7 @@ async def upload_to_input_dir( raise HTTPException(status_code=500, detail=str(e)) @router.post( - "/text", response_model=InsertResponse, dependencies=[Depends(optional_api_key)] + "/text", response_model=InsertResponse, dependencies=[Depends(combined_auth)] ) async def insert_text( request: InsertTextRequest, background_tasks: BackgroundTasks @@ -621,8 +856,9 @@ async def insert_text( try: background_tasks.add_task( pipeline_index_texts, - rag, [request.text], - split_by_character=request.split_by_character + rag, + [request.text], + split_by_character=request.split_by_character, ) return InsertResponse( status="success", @@ -636,7 +872,7 @@ async def insert_text( @router.post( "/texts", response_model=InsertResponse, - dependencies=[Depends(optional_api_key)], + dependencies=[Depends(combined_auth)], ) async def insert_texts( request: InsertTextsRequest, background_tasks: BackgroundTasks @@ -659,9 +895,10 @@ async def insert_texts( """ try: background_tasks.add_task( - pipeline_index_texts, - rag, request.texts, - split_by_character=request.split_by_character + pipeline_index_texts, + rag, + request.texts, + split_by_character=request.split_by_character, ) return InsertResponse( status="success", @@ -672,8 +909,9 @@ async def insert_texts( logger.error(traceback.format_exc()) raise HTTPException(status_code=500, detail=str(e)) + # TODO: deprecated, use /upload instead @router.post( - "/file", response_model=InsertResponse, dependencies=[Depends(optional_api_key)] + "/file", response_model=InsertResponse, dependencies=[Depends(combined_auth)] ) async def insert_file( background_tasks: BackgroundTasks, file: UploadFile = File(...) @@ -715,10 +953,11 @@ async def insert_file( logger.error(traceback.format_exc()) raise HTTPException(status_code=500, detail=str(e)) + # TODO: deprecated, use /upload instead @router.post( "/file_batch", response_model=InsertResponse, - dependencies=[Depends(optional_api_key)], + dependencies=[Depends(combined_auth)], ) async def insert_batch( background_tasks: BackgroundTasks, files: List[UploadFile] = File(...) @@ -779,36 +1018,190 @@ async def insert_batch( raise HTTPException(status_code=500, detail=str(e)) @router.delete( - "", response_model=InsertResponse, dependencies=[Depends(optional_api_key)] + "", response_model=ClearDocumentsResponse, dependencies=[Depends(combined_auth)] ) async def clear_documents(): """ Clear all documents from the RAG system. - This endpoint deletes all text chunks, entities vector database, and relationships - vector database, effectively clearing all documents from the RAG system. + This endpoint deletes all documents, entities, relationships, and files from the system. + It uses the storage drop methods to properly clean up all data and removes all files + from the input directory. Returns: - InsertResponse: A response object containing the status and message. + ClearDocumentsResponse: A response object containing the status and message. + - status="success": All documents and files were successfully cleared. + - status="partial_success": Document clear job exit with some errors. 
+            - status="busy": Operation could not be completed because the pipeline is busy.
+            - status="fail": All storage drop operations failed; see message for details.
+            - message: Detailed information about the operation results, including counts
+              of deleted files and any errors encountered.

         Raises:
-            HTTPException: If an error occurs during the clearing process (500).
+            HTTPException: Raised when a serious error occurs during the clearing process,
+                          with status code 500 and error details in the detail field.
         """
-        try:
-            rag.text_chunks = []
-            rag.entities_vdb = None
-            rag.relationships_vdb = None
-            return InsertResponse(
-                status="success", message="All documents cleared successfully"
+        from lightrag.kg.shared_storage import (
+            get_namespace_data,
+            get_pipeline_status_lock,
+        )
+
+        # Get pipeline status and lock
+        pipeline_status = await get_namespace_data("pipeline_status")
+        pipeline_status_lock = get_pipeline_status_lock()
+
+        # Check and set status with lock
+        async with pipeline_status_lock:
+            if pipeline_status.get("busy", False):
+                return ClearDocumentsResponse(
+                    status="busy",
+                    message="Cannot clear documents while pipeline is busy",
+                )
+            # Set busy to true
+            pipeline_status.update(
+                {
+                    "busy": True,
+                    "job_name": "Clearing Documents",
+                    "job_start": datetime.now().isoformat(),
+                    "docs": 0,
+                    "batchs": 0,
+                    "cur_batch": 0,
+                    "request_pending": False,  # Clear any previous request
+                    "latest_message": "Starting document clearing process",
+                }
             )
+            # Clear history_messages in place so the shared list object stays intact
+            del pipeline_status["history_messages"][:]
+            pipeline_status["history_messages"].append(
+                "Starting document clearing process"
+            )
+
+        try:
+            # Use drop method to clear all data
+            drop_tasks = []
+            storages = [
+                rag.text_chunks,
+                rag.full_docs,
+                rag.entities_vdb,
+                rag.relationships_vdb,
+                rag.chunks_vdb,
+                rag.chunk_entity_relation_graph,
+                rag.doc_status,
+            ]
+
+            # Log storage drop start
+            if "history_messages" in pipeline_status:
+                pipeline_status["history_messages"].append(
+                    "Starting to drop storage components"
+                )
+
+            for storage in storages:
+                if storage is not None:
+                    drop_tasks.append(storage.drop())
+
+            # Wait for all drop tasks to complete
+            drop_results = await asyncio.gather(*drop_tasks, return_exceptions=True)
+
+            # Check for errors and log results
+            errors = []
+            storage_success_count = 0
+            storage_error_count = 0
+
+            for i, result in enumerate(drop_results):
+                storage_name = storages[i].__class__.__name__
+                if isinstance(result, Exception):
+                    error_msg = f"Error dropping {storage_name}: {str(result)}"
+                    errors.append(error_msg)
+                    logger.error(error_msg)
+                    storage_error_count += 1
+                else:
+                    logger.info(f"Successfully dropped {storage_name}")
+                    storage_success_count += 1
+
+            # Log storage drop results
+            if "history_messages" in pipeline_status:
+                if storage_error_count > 0:
+                    pipeline_status["history_messages"].append(
+                        f"Dropped {storage_success_count} storage components with {storage_error_count} errors"
+                    )
+                else:
+                    pipeline_status["history_messages"].append(
+                        f"Successfully dropped all {storage_success_count} storage components"
+                    )
+
+            # If all storage operations failed, return error status and don't proceed with file deletion
+            if storage_success_count == 0 and storage_error_count > 0:
+                error_message = "All storage drop operations failed. Aborting document clearing process."
+ logger.error(error_message) + if "history_messages" in pipeline_status: + pipeline_status["history_messages"].append(error_message) + return ClearDocumentsResponse(status="fail", message=error_message) + + # Log file deletion start + if "history_messages" in pipeline_status: + pipeline_status["history_messages"].append( + "Starting to delete files in input directory" + ) + + # Delete all files in input_dir + deleted_files_count = 0 + file_errors_count = 0 + + for file_path in doc_manager.input_dir.glob("**/*"): + if file_path.is_file(): + try: + file_path.unlink() + deleted_files_count += 1 + except Exception as e: + logger.error(f"Error deleting file {file_path}: {str(e)}") + file_errors_count += 1 + + # Log file deletion results + if "history_messages" in pipeline_status: + if file_errors_count > 0: + pipeline_status["history_messages"].append( + f"Deleted {deleted_files_count} files with {file_errors_count} errors" + ) + errors.append(f"Failed to delete {file_errors_count} files") + else: + pipeline_status["history_messages"].append( + f"Successfully deleted {deleted_files_count} files" + ) + + # Prepare final result message + final_message = "" + if errors: + final_message = f"Cleared documents with some errors. Deleted {deleted_files_count} files." + status = "partial_success" + else: + final_message = f"All documents cleared successfully. Deleted {deleted_files_count} files." + status = "success" + + # Log final result + if "history_messages" in pipeline_status: + pipeline_status["history_messages"].append(final_message) + + # Return response based on results + return ClearDocumentsResponse(status=status, message=final_message) except Exception as e: - logger.error(f"Error DELETE /documents: {str(e)}") + error_msg = f"Error clearing documents: {str(e)}" + logger.error(error_msg) logger.error(traceback.format_exc()) + if "history_messages" in pipeline_status: + pipeline_status["history_messages"].append(error_msg) raise HTTPException(status_code=500, detail=str(e)) + finally: + # Reset busy status after completion + async with pipeline_status_lock: + pipeline_status["busy"] = False + completion_msg = "Document clearing process completed" + pipeline_status["latest_message"] = completion_msg + if "history_messages" in pipeline_status: + pipeline_status["history_messages"].append(completion_msg) @router.get( "/pipeline_status", - dependencies=[Depends(optional_api_key)], + dependencies=[Depends(combined_auth)], response_model=PipelineStatusResponse, ) async def get_pipeline_status() -> PipelineStatusResponse: @@ -835,13 +1228,34 @@ async def get_pipeline_status() -> PipelineStatusResponse: HTTPException: If an error occurs while retrieving pipeline status (500) """ try: - from lightrag.kg.shared_storage import get_namespace_data + from lightrag.kg.shared_storage import ( + get_namespace_data, + get_all_update_flags_status, + ) pipeline_status = await get_namespace_data("pipeline_status") + # Get update flags status for all namespaces + update_status = await get_all_update_flags_status() + + # Convert MutableBoolean objects to regular boolean values + processed_update_status = {} + for namespace, flags in update_status.items(): + processed_flags = [] + for flag in flags: + # Handle both multiprocess and single process cases + if hasattr(flag, "value"): + processed_flags.append(bool(flag.value)) + else: + processed_flags.append(bool(flag)) + processed_update_status[namespace] = processed_flags + # Convert to regular dict if it's a Manager.dict status_dict = dict(pipeline_status) + # Add 
processed update_status to the status dictionary + status_dict["update_status"] = processed_update_status + # Convert history_messages to a regular list if it's a Manager.list if "history_messages" in status_dict: status_dict["history_messages"] = list(status_dict["history_messages"]) @@ -856,7 +1270,9 @@ async def get_pipeline_status() -> PipelineStatusResponse: logger.error(traceback.format_exc()) raise HTTPException(status_code=500, detail=str(e)) - @router.get("", dependencies=[Depends(optional_api_key)]) + @router.get( + "", response_model=DocsStatusesResponse, dependencies=[Depends(combined_auth)] + ) async def documents() -> DocsStatusesResponse: """ Get the status of all documents in the system. @@ -905,6 +1321,7 @@ async def documents() -> DocsStatusesResponse: chunks_count=doc_status.chunks_count, error=doc_status.error, metadata=doc_status.metadata, + file_path=doc_status.file_path, ) ) return response @@ -913,35 +1330,57 @@ async def documents() -> DocsStatusesResponse: logger.error(traceback.format_exc()) raise HTTPException(status_code=500, detail=str(e)) - @router.get("/{doc_id}", dependencies=[Depends(optional_api_key)]) - async def get_document_content( - doc_id: str, - ) -> dict: - """获取指定文档的内容 - + @router.post( + "/clear_cache", + response_model=ClearCacheResponse, + dependencies=[Depends(combined_auth)], + ) + async def clear_cache(request: ClearCacheRequest): + """ + Clear cache data from the LLM response cache storage. + + This endpoint allows clearing specific modes of cache or all cache if no modes are specified. + Valid modes include: "default", "naive", "local", "global", "hybrid", "mix". + - "default" represents extraction cache. + - Other modes correspond to different query modes. + Args: - doc_id: 要获取的文档ID - rag: LightRAG实例 - + request (ClearCacheRequest): The request body containing optional modes to clear. + Returns: - dict: 包含文档内容和元数据的字典 - + ClearCacheResponse: A response object containing the status and message. + Raises: - HTTPException: 如果文档未找到返回404 + HTTPException: If an error occurs during cache clearing (400 for invalid modes, 500 for other errors). """ try: - doc = await rag.full_docs.get_by_id(doc_id) - if not doc: + # Validate modes if provided + valid_modes = ["default", "naive", "local", "global", "hybrid", "mix"] + if request.modes and not all(mode in valid_modes for mode in request.modes): + invalid_modes = [ + mode for mode in request.modes if mode not in valid_modes + ] raise HTTPException( - status_code=404, - detail="Document not found" + status_code=400, + detail=f"Invalid mode(s): {invalid_modes}. 
Valid modes are: {valid_modes}", ) - return { - "content": doc["content"], - "metadata": doc.get("metadata", {}) - } + + # Call the aclear_cache method + await rag.aclear_cache(request.modes) + + # Prepare success message + if request.modes: + message = f"Successfully cleared cache for modes: {request.modes}" + else: + message = "Successfully cleared all cache" + + return ClearCacheResponse(status="success", message=message) + except HTTPException: + # Re-raise HTTP exceptions + raise except Exception as e: - logging.error(f"Error GET /documents/{doc_id}: {str(e)}") + logger.error(f"Error clearing cache: {str(e)}") + logger.error(traceback.format_exc()) raise HTTPException(status_code=500, detail=str(e)) - + return router diff --git a/lightrag/api/routers/graph_routes.py b/lightrag/api/routers/graph_routes.py index 9580218528..f02779df3a 100644 --- a/lightrag/api/routers/graph_routes.py +++ b/lightrag/api/routers/graph_routes.py @@ -2,18 +2,33 @@ This module contains all graph-related routes for the LightRAG API. """ -from typing import Optional -from fastapi import APIRouter, Depends +from typing import Optional, Dict, Any +import traceback +from fastapi import APIRouter, Depends, Query, HTTPException +from pydantic import BaseModel -from ..utils_api import get_api_key_dependency, get_auth_dependency +from lightrag.utils import logger +from ..utils_api import get_combined_auth_dependency -router = APIRouter(tags=["graph"], dependencies=[Depends(get_auth_dependency())]) +router = APIRouter(tags=["graph"]) + + +class EntityUpdateRequest(BaseModel): + entity_name: str + updated_data: Dict[str, Any] + allow_rename: bool = False + + +class RelationUpdateRequest(BaseModel): + source_id: str + target_id: str + updated_data: Dict[str, Any] def create_graph_routes(rag, api_key: Optional[str] = None): - optional_api_key = get_api_key_dependency(api_key) + combined_auth = get_combined_auth_dependency(api_key) - @router.get("/graph/label/list", dependencies=[Depends(optional_api_key)]) + @router.get("/graph/label/list", dependencies=[Depends(combined_auth)]) async def get_graph_labels(): """ Get all graph labels @@ -21,36 +36,138 @@ async def get_graph_labels(): Returns: List[str]: List of graph labels """ - return await rag.get_graph_labels() + try: + return await rag.get_graph_labels() + except Exception as e: + logger.error(f"Error getting graph labels: {str(e)}") + logger.error(traceback.format_exc()) + raise HTTPException( + status_code=500, detail=f"Error getting graph labels: {str(e)}" + ) - @router.get("/graphs", dependencies=[Depends(optional_api_key)]) + @router.get("/graphs", dependencies=[Depends(combined_auth)]) async def get_knowledge_graph( - label: str, max_depth: int = 3, min_degree: int = 0, inclusive: bool = False + label: str = Query(..., description="Label to get knowledge graph for"), + max_depth: int = Query(3, description="Maximum depth of graph", ge=1), + max_nodes: int = Query(1000, description="Maximum nodes to return", ge=1), ): """ Retrieve a connected subgraph of nodes where the label includes the specified label. - Maximum number of nodes is constrained by the environment variable `MAX_GRAPH_NODES` (default: 1000). When reducing the number of nodes, the prioritization criteria are as follows: - 1. min_degree does not affect nodes directly connected to the matching nodes - 2. Label matching nodes take precedence - 3. Followed by nodes directly connected to the matching nodes - 4. 
Finally, the degree of the nodes
+        Maximum number of nodes is limited to env MAX_GRAPH_NODES (default: 1000)
+            1. Hops (path length) to the starting node take precedence
+            2. Followed by the degree of the nodes

         Args:
-            label (str): Label to get knowledge graph for
-            max_depth (int, optional): Maximum depth of graph. Defaults to 3.
-            inclusive_search (bool, optional): If True, search for nodes that include the label. Defaults to False.
-            min_degree (int, optional): Minimum degree of nodes. Defaults to 0.
+            label (str): Label of the starting node
+            max_depth (int, optional): Maximum depth of the subgraph. Defaults to 3.
+            max_nodes (int, optional): Maximum number of nodes to return. Defaults to 1000.

         Returns:
             Dict[str, List[str]]: Knowledge graph for label
         """
-        return await rag.get_knowledge_graph(
-            node_label=label,
-            max_depth=max_depth,
-            inclusive=inclusive,
-            min_degree=min_degree,
-        )
+        try:
+            return await rag.get_knowledge_graph(
+                node_label=label,
+                max_depth=max_depth,
+                max_nodes=max_nodes,
+            )
+        except Exception as e:
+            logger.error(f"Error getting knowledge graph for label '{label}': {str(e)}")
+            logger.error(traceback.format_exc())
+            raise HTTPException(
+                status_code=500, detail=f"Error getting knowledge graph: {str(e)}"
+            )
+
+    @router.get("/graph/entity/exists", dependencies=[Depends(combined_auth)])
+    async def check_entity_exists(
+        name: str = Query(..., description="Entity name to check"),
+    ):
+        """
+        Check if an entity with the given name exists in the knowledge graph
+
+        Args:
+            name (str): Name of the entity to check
+
+        Returns:
+            Dict[str, bool]: Dictionary with 'exists' key indicating if entity exists
+        """
+        try:
+            exists = await rag.chunk_entity_relation_graph.has_node(name)
+            return {"exists": exists}
+        except Exception as e:
+            logger.error(f"Error checking entity existence for '{name}': {str(e)}")
+            logger.error(traceback.format_exc())
+            raise HTTPException(
+                status_code=500, detail=f"Error checking entity existence: {str(e)}"
+            )
+
+    @router.post("/graph/entity/edit", dependencies=[Depends(combined_auth)])
+    async def update_entity(request: EntityUpdateRequest):
+        """
+        Update an entity's properties in the knowledge graph
+
+        Args:
+            request (EntityUpdateRequest): Request containing entity name, updated data, and rename flag
+
+        Returns:
+            Dict: Updated entity information
+        """
+        try:
+            result = await rag.aedit_entity(
+                entity_name=request.entity_name,
+                updated_data=request.updated_data,
+                allow_rename=request.allow_rename,
+            )
+            return {
+                "status": "success",
+                "message": "Entity updated successfully",
+                "data": result,
+            }
+        except ValueError as ve:
+            logger.error(
+                f"Validation error updating entity '{request.entity_name}': {str(ve)}"
+            )
+            raise HTTPException(status_code=400, detail=str(ve))
+        except Exception as e:
+            logger.error(f"Error updating entity '{request.entity_name}': {str(e)}")
+            logger.error(traceback.format_exc())
+            raise HTTPException(
+                status_code=500, detail=f"Error updating entity: {str(e)}"
+            )
+
+    @router.post("/graph/relation/edit", dependencies=[Depends(combined_auth)])
+    async def update_relation(request: RelationUpdateRequest):
+        """Update a relation's properties in the knowledge graph
+
+        Args:
+            request (RelationUpdateRequest): Request containing source ID, target ID and updated data
+
+        Returns:
+            Dict: Updated relation information
+        """
+        try:
+            result = await rag.aedit_relation(
+                source_entity=request.source_id,
+                target_entity=request.target_id,
+                updated_data=request.updated_data,
+            )
+            return {
+                "status": "success",
+                "message": "Relation updated successfully",
"data": result, + } + except ValueError as ve: + logger.error( + f"Validation error updating relation between '{request.source_id}' and '{request.target_id}': {str(ve)}" + ) + raise HTTPException(status_code=400, detail=str(ve)) + except Exception as e: + logger.error( + f"Error updating relation between '{request.source_id}' and '{request.target_id}': {str(e)}" + ) + logger.error(traceback.format_exc()) + raise HTTPException( + status_code=500, detail=f"Error updating relation: {str(e)}" + ) return router diff --git a/lightrag/api/routers/kg_routes.py b/lightrag/api/routers/kg_routes.py index abf0fcb32b..4d5007c867 100644 --- a/lightrag/api/routers/kg_routes.py +++ b/lightrag/api/routers/kg_routes.py @@ -2,18 +2,22 @@ Knowledge Graph API router implementation """ -from fastapi import APIRouter, Body, HTTPException, Request +from fastapi import APIRouter, Body, Depends, HTTPException, Request from typing import Any, Dict, Optional, List import logging from lightrag import LightRAG # 修改导入方式 +from lightrag.api.utils_api import get_combined_auth_dependency logger = logging.getLogger(__name__) def create_kg_routes(rag: LightRAG, api_key: Optional[str] = None) -> APIRouter: router = APIRouter(prefix="/kg", tags=["Knowledge Graph"]) + + # 创建认证依赖项 + combined_auth = get_combined_auth_dependency(api_key) - @router.post("/insert") + @router.post("/insert", dependencies=[Depends(combined_auth)]) async def insert_custom_kg( kg_data: Dict[str, Any] = Body(...), ): @@ -72,7 +76,7 @@ async def insert_custom_kg( logger.error(f"插入自定义知识图谱数据时出错: {str(e)}") raise HTTPException(status_code=500, detail=str(e)) - @router.post("/relations") + @router.post("/relations", dependencies=[Depends(combined_auth)]) async def insert_custom_relations( relations_data: List[Dict[str, Any]] = Body(...), ): @@ -119,7 +123,7 @@ async def insert_custom_relations( logger.error(f"添加自定义关系时出错: {str(e)}") raise HTTPException(status_code=500, detail=str(e)) - @router.get("/entity/{entity_name}") + @router.get("/entity/{entity_name}", dependencies=[Depends(combined_auth)]) async def get_entity_info( entity_name: str, include_vector_data: bool = False, @@ -144,7 +148,7 @@ async def get_entity_info( logger.error(f"获取实体信息时出错: {str(e)}") raise HTTPException(status_code=500, detail=str(e)) - @router.get("/{node_label}") + @router.get("/{node_label}", dependencies=[Depends(combined_auth)]) async def get_knowledge_graph( node_label: str, max_depth: int = 2, @@ -164,7 +168,7 @@ async def get_knowledge_graph( logger.error(f"获取知识图谱时出错: {str(e)}") raise HTTPException(status_code=500, detail=str(e)) - @router.delete("/entity/{entity_name}") + @router.delete("/entity/{entity_name}", dependencies=[Depends(combined_auth)]) async def delete_entity( entity_name: str, ): @@ -182,7 +186,7 @@ async def delete_entity( logger.error(f"删除实体时出错: {str(e)}") raise HTTPException(status_code=500, detail=str(e)) - @router.put("/entity/{entity_name}") + @router.put("/entity/{entity_name}", dependencies=[Depends(combined_auth)]) async def update_entity( entity_name: str, entity_data: Dict[str, Any] = Body(...), @@ -220,7 +224,7 @@ async def update_entity( logger.error(f"更新实体信息时出错: {e}") raise HTTPException(status_code=500, detail=str(e)) - @router.put("/relation/{src_entity}/{tgt_entity}") + @router.put("/relation/{src_entity}/{tgt_entity}", dependencies=[Depends(combined_auth)]) async def update_relation( src_entity: str, tgt_entity: str, @@ -246,10 +250,10 @@ async def update_relation( from urllib.parse import unquote decoded_src_entity = unquote(src_entity) 
decoded_tgt_entity = unquote(tgt_entity) - + # 处理数据类型,确保数据类型正确 processed_data = relation_data.copy() - + # 处理weight字段,确保是数值类型 if "weight" in processed_data: try: @@ -259,7 +263,7 @@ async def update_relation( status_code=400, detail="weight 字段必须是有效的数值" ) - + # 处理字符串字段,清理可能存在的多余引号 string_fields = ["description", "keywords", "source_id"] for field in string_fields: @@ -268,20 +272,20 @@ async def update_relation( if (processed_data[field].startswith('"') and processed_data[field].endswith('"')) or \ (processed_data[field].startswith("'") and processed_data[field].endswith("'")): processed_data[field] = processed_data[field][1:-1] - + logger.info(f"Processing update relation request for: {decoded_src_entity} -> {decoded_tgt_entity}") logger.info(f"Cleaned data: {processed_data}") result = await rag.aupdate_relation(decoded_src_entity, decoded_tgt_entity, processed_data) - + if result["status"] == "failed": raise HTTPException(status_code=404, detail=result["message"]) - + return result - + except HTTPException: raise except Exception as e: logger.error(f"更新关系信息时出错: {str(e)}") raise HTTPException(status_code=500, detail=str(e)) - + return router diff --git a/lightrag/api/routers/ollama_api.py b/lightrag/api/routers/ollama_api.py index 37d7354e5f..088cd02c9d 100644 --- a/lightrag/api/routers/ollama_api.py +++ b/lightrag/api/routers/ollama_api.py @@ -11,7 +11,8 @@ from ascii_colors import trace_exception from lightrag import LightRAG, QueryParam from lightrag.utils import encode_string_by_tiktoken -from lightrag.api.utils_api import ollama_server_infos +from lightrag.api.utils_api import ollama_server_infos, get_combined_auth_dependency +from fastapi import Depends # query mode according to query prefix (bypass is not LightRAG quer mode) @@ -22,6 +23,7 @@ class SearchMode(str, Enum): hybrid = "hybrid" mix = "mix" bypass = "bypass" + context = "context" class OllamaMessage(BaseModel): @@ -99,43 +101,59 @@ def estimate_tokens(text: str) -> int: return len(tokens) -def parse_query_mode(query: str) -> tuple[str, SearchMode]: +def parse_query_mode(query: str) -> tuple[str, SearchMode, bool]: """Parse query prefix to determine search mode - Returns tuple of (cleaned_query, search_mode) + Returns tuple of (cleaned_query, search_mode, only_need_context) """ mode_map = { - "/local ": SearchMode.local, - "/global ": SearchMode.global_, # global_ is used because 'global' is a Python keyword - "/naive ": SearchMode.naive, - "/hybrid ": SearchMode.hybrid, - "/mix ": SearchMode.mix, - "/bypass ": SearchMode.bypass, + "/local ": (SearchMode.local, False), + "/global ": ( + SearchMode.global_, + False, + ), # global_ is used because 'global' is a Python keyword + "/naive ": (SearchMode.naive, False), + "/hybrid ": (SearchMode.hybrid, False), + "/mix ": (SearchMode.mix, False), + "/bypass ": (SearchMode.bypass, False), + "/context": ( + SearchMode.hybrid, + True, + ), + "/localcontext": (SearchMode.local, True), + "/globalcontext": (SearchMode.global_, True), + "/hybridcontext": (SearchMode.hybrid, True), + "/naivecontext": (SearchMode.naive, True), + "/mixcontext": (SearchMode.mix, True), } - for prefix, mode in mode_map.items(): + for prefix, (mode, only_need_context) in mode_map.items(): if query.startswith(prefix): # After removing prefix an leading spaces cleaned_query = query[len(prefix) :].lstrip() - return cleaned_query, mode + return cleaned_query, mode, only_need_context - return query, SearchMode.hybrid + return query, SearchMode.hybrid, False class OllamaAPI: - def __init__(self, rag: LightRAG, 
top_k: int = 60): + def __init__(self, rag: LightRAG, top_k: int = 60, api_key: Optional[str] = None): self.rag = rag self.ollama_server_infos = ollama_server_infos self.top_k = top_k + self.api_key = api_key self.router = APIRouter(tags=["ollama"]) self.setup_routes() def setup_routes(self): - @self.router.get("/version") + # Create combined auth dependency for Ollama API routes + combined_auth = get_combined_auth_dependency(self.api_key) + + @self.router.get("/version", dependencies=[Depends(combined_auth)]) async def get_version(): """Get Ollama version information""" return OllamaVersionResponse(version="0.5.4") - @self.router.get("/tags") + @self.router.get("/tags", dependencies=[Depends(combined_auth)]) async def get_tags(): """Return available models acting as an Ollama server""" return OllamaTagResponse( @@ -158,7 +176,7 @@ async def get_tags(): ] ) - @self.router.post("/generate") + @self.router.post("/generate", dependencies=[Depends(combined_auth)]) async def generate(raw_request: Request, request: OllamaGenerateRequest): """Handle generate completion requests acting as an Ollama model For compatibility purposes, the request is not processed by LightRAG, @@ -290,7 +308,7 @@ async def stream_generator(): "Cache-Control": "no-cache", "Connection": "keep-alive", "Content-Type": "application/x-ndjson", - "X-Accel-Buffering": "no", # 确保在Nginx代理时正确处理流式响应 + "X-Accel-Buffering": "no", # Ensure proper handling of streaming responses in Nginx proxy }, ) else: @@ -324,7 +342,7 @@ async def stream_generator(): trace_exception(e) raise HTTPException(status_code=500, detail=str(e)) - @self.router.post("/chat") + @self.router.post("/chat", dependencies=[Depends(combined_auth)]) async def chat(raw_request: Request, request: OllamaChatRequest): """Process chat completion requests acting as an Ollama model Routes user queries through LightRAG by selecting query mode based on prefix indicators.
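The reworked `parse_query_mode` above now returns an `only_need_context` flag alongside the cleaned query and `SearchMode`, so a single prefix such as `/localcontext` selects both the retrieval mode and context-only output. A minimal sketch of the expected behavior, assuming `parse_query_mode` and `SearchMode` are importable from the patched `lightrag.api.routers.ollama_api`:

```python
# Illustrative only: exercises parse_query_mode as defined in this diff.
from lightrag.api.routers.ollama_api import SearchMode, parse_query_mode

# A "/localcontext" prefix selects local retrieval and context-only output.
query, mode, only_ctx = parse_query_mode("/localcontext What is LightRAG?")
assert (query, mode, only_ctx) == ("What is LightRAG?", SearchMode.local, True)

# An unprefixed query falls back to hybrid mode with a generated answer.
query, mode, only_ctx = parse_query_mode("What is LightRAG?")
assert (query, mode, only_ctx) == ("What is LightRAG?", SearchMode.hybrid, False)
```

Note that the bare `/context` prefix maps to `SearchMode.hybrid` with `only_need_context=True`, mirroring the default mode.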
@@ -344,7 +362,7 @@ async def chat(raw_request: Request, request: OllamaChatRequest): ] # Check for query prefix - cleaned_query, mode = parse_query_mode(query) + cleaned_query, mode, only_need_context = parse_query_mode(query) start_time = time.time_ns() prompt_tokens = estimate_tokens(cleaned_query) @@ -352,7 +370,7 @@ async def chat(raw_request: Request, request: OllamaChatRequest): param_dict = { "mode": mode, "stream": request.stream, - "only_need_context": False, + "only_need_context": only_need_context, "conversation_history": conversation_history, "top_k": self.top_k, } diff --git a/lightrag/api/routers/query_routes.py b/lightrag/api/routers/query_routes.py index 7a5bd8c320..816034877f 100644 --- a/lightrag/api/routers/query_routes.py +++ b/lightrag/api/routers/query_routes.py @@ -8,12 +8,12 @@ from fastapi import APIRouter, Depends, HTTPException from lightrag.base import QueryParam -from ..utils_api import get_api_key_dependency, get_auth_dependency +from ..utils_api import get_combined_auth_dependency from pydantic import BaseModel, Field, field_validator from ascii_colors import trace_exception -router = APIRouter(tags=["query"], dependencies=[Depends(get_auth_dependency())]) +router = APIRouter(tags=["query"]) class QueryRequest(BaseModel): @@ -22,7 +22,7 @@ class QueryRequest(BaseModel): description="The query text", ) - mode: Literal["local", "global", "hybrid", "naive", "mix"] = Field( + mode: Literal["local", "global", "hybrid", "naive", "mix", "bypass"] = Field( default="hybrid", description="Query mode", ) @@ -139,10 +139,10 @@ class QueryResponse(BaseModel): def create_query_routes(rag, api_key: Optional[str] = None, top_k: int = 60): - optional_api_key = get_api_key_dependency(api_key) + combined_auth = get_combined_auth_dependency(api_key) @router.post( - "/query", response_model=QueryResponse, dependencies=[Depends(optional_api_key)] + "/query", response_model=QueryResponse, dependencies=[Depends(combined_auth)] ) async def query_text(request: QueryRequest): """ @@ -176,7 +176,7 @@ async def query_text(request: QueryRequest): trace_exception(e) raise HTTPException(status_code=500, detail=str(e)) - @router.post("/query/stream", dependencies=[Depends(optional_api_key)]) + @router.post("/query/stream", dependencies=[Depends(combined_auth)]) async def query_text_stream(request: QueryRequest): """ This endpoint performs a retrieval-augmented generation (RAG) query and streams the response. 
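With `get_combined_auth_dependency` now guarding `/query` and `/query/stream`, a client authenticates with either a JWT `Authorization` header or the `X-API-Key` header. A hypothetical client call using the `QueryRequest` fields shown above (host, port, and key are placeholders; `requests` is just one possible HTTP client):

```python
import requests

# Placeholder deployment values; adjust host, port, and key to your setup.
url = "http://localhost:9621/query"  # 9621 is the default port in this diff
headers = {"X-API-Key": "your-secret-key"}  # or {"Authorization": "Bearer <token>"}

# "mode" accepts the Literal values from QueryRequest, now including "bypass".
payload = {"query": "What are the top themes in the story?", "mode": "hybrid"}

resp = requests.post(url, json=payload, headers=headers)
resp.raise_for_status()
print(resp.json())  # QueryResponse body
```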
diff --git a/lightrag/api/run_with_gunicorn.py b/lightrag/api/run_with_gunicorn.py index cf9b3b9154..5b41af8ae3 100644 --- a/lightrag/api/run_with_gunicorn.py +++ b/lightrag/api/run_with_gunicorn.py @@ -7,13 +7,9 @@ import sys import signal import pipmaster as pm -from lightrag.api.utils_api import parse_args, display_splash_screen +from lightrag.api.utils_api import display_splash_screen, check_env_file from lightrag.kg.shared_storage import initialize_share_data, finalize_share_data -from dotenv import load_dotenv - -# Updated to use the .env that is inside the current folder -# This update allows the user to put a different.env file for each lightrag folder -load_dotenv(".env") +from .config import global_args def check_and_install_dependencies(): @@ -47,6 +43,10 @@ def signal_handler(sig, frame): def main(): + # Check .env file + if not check_env_file(): + sys.exit(1) + # Check and install dependencies check_and_install_dependencies() @@ -54,20 +54,17 @@ def main(): signal.signal(signal.SIGINT, signal_handler) # Ctrl+C signal.signal(signal.SIGTERM, signal_handler) # kill command - # Parse all arguments using parse_args - args = parse_args(is_uvicorn_mode=False) - # Display startup information - display_splash_screen(args) + display_splash_screen(global_args) print("🚀 Starting LightRAG with Gunicorn") - print(f"🔄 Worker management: Gunicorn (workers={args.workers})") + print(f"🔄 Worker management: Gunicorn (workers={global_args.workers})") print("🔍 Preloading app: Enabled") print("📝 Note: Using Gunicorn's preload feature for shared data initialization") print("\n\n" + "=" * 80) print("MAIN PROCESS INITIALIZATION") print(f"Process ID: {os.getpid()}") - print(f"Workers setting: {args.workers}") + print(f"Workers setting: {global_args.workers}") print("=" * 80 + "\n") # Import Gunicorn's StandaloneApplication @@ -123,31 +120,43 @@ def load_config(self): # Set configuration variables in gunicorn_config, prioritizing command line arguments gunicorn_config.workers = ( - args.workers if args.workers else int(os.getenv("WORKERS", 1)) + global_args.workers + if global_args.workers + else int(os.getenv("WORKERS", 1)) ) # Bind configuration prioritizes command line arguments - host = args.host if args.host != "0.0.0.0" else os.getenv("HOST", "0.0.0.0") - port = args.port if args.port != 9621 else int(os.getenv("PORT", 9621)) + host = ( + global_args.host + if global_args.host != "0.0.0.0" + else os.getenv("HOST", "0.0.0.0") + ) + port = ( + global_args.port + if global_args.port != 9621 + else int(os.getenv("PORT", 9621)) + ) gunicorn_config.bind = f"{host}:{port}" # Log level configuration prioritizes command line arguments gunicorn_config.loglevel = ( - args.log_level.lower() - if args.log_level + global_args.log_level.lower() + if global_args.log_level else os.getenv("LOG_LEVEL", "info") ) # Timeout configuration prioritizes command line arguments gunicorn_config.timeout = ( - args.timeout if args.timeout else int(os.getenv("TIMEOUT", 150)) + global_args.timeout * 2 + if global_args.timeout is not None + else int(os.getenv("TIMEOUT", 150 * 2)) ) # Keepalive configuration gunicorn_config.keepalive = int(os.getenv("KEEPALIVE", 5)) # SSL configuration prioritizes command line arguments - if args.ssl or os.getenv("SSL", "").lower() in ( + if global_args.ssl or os.getenv("SSL", "").lower() in ( "true", "1", "yes", @@ -155,12 +164,14 @@ def load_config(self): "on", ): gunicorn_config.certfile = ( - args.ssl_certfile - if args.ssl_certfile + global_args.ssl_certfile + if global_args.ssl_certfile else 
os.getenv("SSL_CERTFILE") ) gunicorn_config.keyfile = ( - args.ssl_keyfile if args.ssl_keyfile else os.getenv("SSL_KEYFILE") + global_args.ssl_keyfile + if global_args.ssl_keyfile + else os.getenv("SSL_KEYFILE") ) # Set configuration options from the module @@ -185,13 +196,13 @@ def load(self): # Import the application from lightrag.api.lightrag_server import get_application - return get_application(args) + return get_application(global_args) # Create the application app = GunicornApp("") # Force workers to be an integer and greater than 1 for multi-process mode - workers_count = int(args.workers) + workers_count = int(global_args.workers) if workers_count > 1: # Set a flag to indicate we're in the main process os.environ["LIGHTRAG_MAIN_PROCESS"] = "1" diff --git a/lightrag/api/utils_api.py b/lightrag/api/utils_api.py index 4372ca3b88..a1dade88a4 100644 --- a/lightrag/api/utils_api.py +++ b/lightrag/api/utils_api.py @@ -4,403 +4,163 @@ import os import argparse -from typing import Optional +from typing import Optional, List, Tuple import sys -import logging from ascii_colors import ASCIIColors -from lightrag.api import __api_version__ -from fastapi import HTTPException, Security, Depends, Request, status -from dotenv import load_dotenv +from lightrag.api import __api_version__ as api_version +from lightrag import __version__ as core_version +from fastapi import HTTPException, Security, Request, status from fastapi.security import APIKeyHeader, OAuth2PasswordBearer from starlette.status import HTTP_403_FORBIDDEN from .auth import auth_handler +from .config import ollama_server_infos, global_args -# Load environment variables -load_dotenv(override=True) - -global_args = {"main_args": None} +def check_env_file(): + """ + Check if .env file exists and handle user confirmation if needed. + Returns True if should continue, False if should exit. + """ + if not os.path.exists(".env"): + warning_msg = "Warning: Startup directory must contain .env file for multi-instance support." + ASCIIColors.yellow(warning_msg) + + # Check if running in interactive terminal + if sys.stdin.isatty(): + response = input("Do you want to continue? (yes/no): ") + if response.lower() != "yes": + ASCIIColors.red("Server startup cancelled") + return False + return True + + +# Get whitelist paths from global_args, only once during initialization +whitelist_paths = global_args.whitelist_paths.split(",") + +# Pre-compile path matching patterns +whitelist_patterns: List[Tuple[str, bool]] = [] +for path in whitelist_paths: + path = path.strip() + if path: + # If path ends with /*, match all paths with that prefix + if path.endswith("/*"): + prefix = path[:-2] + whitelist_patterns.append((prefix, True)) # (prefix, is_prefix_match) + else: + whitelist_patterns.append((path, False)) # (exact_path, is_prefix_match) + +# Global authentication configuration +auth_configured = bool(auth_handler.accounts) + + +def get_combined_auth_dependency(api_key: Optional[str] = None): + """ + Create a combined authentication dependency that implements authentication logic + based on API key, OAuth2 token, and whitelist paths. 
-class OllamaServerInfos: - # Constants for emulated Ollama model information - LIGHTRAG_NAME = "lightrag" - LIGHTRAG_TAG = os.getenv("OLLAMA_EMULATING_MODEL_TAG", "latest") - LIGHTRAG_MODEL = f"{LIGHTRAG_NAME}:{LIGHTRAG_TAG}" - LIGHTRAG_SIZE = 7365960935 # it's a dummy value - LIGHTRAG_CREATED_AT = "2024-01-15T00:00:00Z" - LIGHTRAG_DIGEST = "sha256:lightrag" + Args: + api_key (Optional[str]): API key for validation + Returns: + Callable: A dependency function that implements the authentication logic + """ + # Use global whitelist_patterns and auth_configured variables + # whitelist_patterns and auth_configured are already initialized at module level -ollama_server_infos = OllamaServerInfos() + # Only calculate api_key_configured as it depends on the function parameter + api_key_configured = bool(api_key) + # Create security dependencies with proper descriptions for Swagger UI + oauth2_scheme = OAuth2PasswordBearer( + tokenUrl="login", auto_error=False, description="OAuth2 Password Authentication" + ) -def get_auth_dependency(): - # Set default whitelist paths - whitelist = os.getenv("WHITELIST_PATHS", "/login,/health").split(",") + # If API key is configured, create an API key header security + api_key_header = None + if api_key_configured: + api_key_header = APIKeyHeader( + name="X-API-Key", auto_error=False, description="API Key Authentication" + ) - async def dependency( + async def combined_dependency( request: Request, - token: str = Depends(OAuth2PasswordBearer(tokenUrl="login", auto_error=False)), + token: str = Security(oauth2_scheme), + api_key_header_value: Optional[str] = None + if api_key_header is None + else Security(api_key_header), ): - # Check if authentication is configured - auth_configured = bool( - os.getenv("AUTH_USERNAME") and os.getenv("AUTH_PASSWORD") - ) - - # If authentication is not configured, skip all validation - if not auth_configured: - return - - # For configured auth, allow whitelist paths without token - if request.url.path in whitelist: - return - - # Require token for all other paths when auth is configured - if not token: - raise HTTPException( - status_code=status.HTTP_401_UNAUTHORIZED, detail="Token required" - ) - - try: - token_info = auth_handler.validate_token(token) - # Reject guest tokens when authentication is configured - if token_info.get("role") == "guest": + # 1. Check if path is in whitelist + path = request.url.path + for pattern, is_prefix in whitelist_patterns: + if (is_prefix and path.startswith(pattern)) or ( + not is_prefix and path == pattern + ): + return # Whitelist path, allow access + + # 2. Validate the token first if one was provided (ensures a 401 error when the token is invalid) + if token: + try: + token_info = auth_handler.validate_token(token) + # Accept guest token if no auth is configured + if not auth_configured and token_info.get("role") == "guest": + return + # Accept non-guest token if auth is configured + if auth_configured and token_info.get("role") != "guest": + return + + # Token validation failed, immediately return 401 error raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, - detail="Authentication required. Guest access not allowed when authentication is configured.", + detail="Invalid token. Please log in again.", ) - except Exception: - raise HTTPException( - status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid token" - ) - - return - - return dependency - - -def get_api_key_dependency(api_key: Optional[str]): - """ - Create an API key dependency for route protection.
- - Args: - api_key (Optional[str]): The API key to validate against. - If None, no authentication is required. - - Returns: - Callable: A dependency function that validates the API key. - """ - if not api_key: - # If no API key is configured, return a dummy dependency that always succeeds - async def no_auth(): - return None + except HTTPException as e: + # If already a 401 error, re-raise it + if e.status_code == status.HTTP_401_UNAUTHORIZED: + raise + # For other exceptions, continue processing + + # 3. Accept all requests if no API protection is configured + if not auth_configured and not api_key_configured: + return - return no_auth + # 4. Validate API key if provided and API-Key authentication is configured + if ( + api_key_configured + and api_key_header_value + and api_key_header_value == api_key + ): + return # API key validation successful - # If API key is configured, use proper authentication - api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False) + #### Authentication failed #### - async def api_key_auth( - api_key_header_value: Optional[str] = Security(api_key_header), - ): - if not api_key_header_value: + # If password authentication is configured but no token was provided, return a 401 error + if auth_configured and not token: raise HTTPException( - status_code=HTTP_403_FORBIDDEN, detail="API Key required" + status_code=status.HTTP_401_UNAUTHORIZED, + detail="No credentials provided. Please log in.", ) - if api_key_header_value != api_key: + + # If an API key was provided but failed validation + if api_key_header_value: raise HTTPException( - status_code=HTTP_403_FORBIDDEN, detail="Invalid API Key" + status_code=HTTP_403_FORBIDDEN, + detail="Invalid API Key", ) - return api_key_header_value - - return api_key_auth - -class DefaultRAGStorageConfig: - KV_STORAGE = "JsonKVStorage" - VECTOR_STORAGE = "NanoVectorDBStorage" - GRAPH_STORAGE = "NetworkXStorage" - DOC_STATUS_STORAGE = "JsonDocStatusStorage" - - -def get_default_host(binding_type: str) -> str: - default_hosts = { - "ollama": os.getenv("LLM_BINDING_HOST", "http://localhost:11434"), - "lollms": os.getenv("LLM_BINDING_HOST", "http://localhost:9600"), - "azure_openai": os.getenv("AZURE_OPENAI_ENDPOINT", "https://api.openai.com/v1"), - "openai": os.getenv("LLM_BINDING_HOST", "https://api.openai.com/v1"), - } - return default_hosts.get( - binding_type, os.getenv("LLM_BINDING_HOST", "http://localhost:11434") - ) # fallback to ollama if unknown - - -def get_env_value(env_key: str, default: any, value_type: type = str) -> any: - """ - Get value from environment variable with type conversion - - Args: - env_key (str): Environment variable key - default (any): Default value if env variable is not set - value_type (type): Type to convert the value to - - Returns: - any: Converted value from environment or default - """ - value = os.getenv(env_key) - if value is None: - return default - - if value_type is bool: - return value.lower() in ("true", "1", "yes", "t", "on") - try: - return value_type(value) - except ValueError: - return default - - -def parse_args(is_uvicorn_mode: bool = False) -> argparse.Namespace: - """ - Parse command line arguments with environment variable fallback - - Args: - is_uvicorn_mode: Whether running under uvicorn mode - - Returns: - argparse.Namespace: Parsed arguments - """ - - parser = argparse.ArgumentParser( - description="LightRAG FastAPI Server with separate working and input directories" - ) - - # Server configuration - parser.add_argument( - "--host", - default=get_env_value("HOST",
"0.0.0.0"), - help="Server host (default: from env or 0.0.0.0)", - ) - parser.add_argument( - "--port", - type=int, - default=get_env_value("PORT", 9621, int), - help="Server port (default: from env or 9621)", - ) - - # Directory configuration - parser.add_argument( - "--working-dir", - default=get_env_value("WORKING_DIR", "./rag_storage"), - help="Working directory for RAG storage (default: from env or ./rag_storage)", - ) - parser.add_argument( - "--input-dir", - default=get_env_value("INPUT_DIR", "./inputs"), - help="Directory containing input documents (default: from env or ./inputs)", - ) - - def timeout_type(value): - if value is None: - return 150 - if value is None or value == "None": - return None - return int(value) - - parser.add_argument( - "--timeout", - default=get_env_value("TIMEOUT", None, timeout_type), - type=timeout_type, - help="Timeout in seconds (useful when using slow AI). Use None for infinite timeout", - ) - - # RAG configuration - parser.add_argument( - "--max-async", - type=int, - default=get_env_value("MAX_ASYNC", 24, int), - help="Maximum async operations (default: from env or 24)", - ) - parser.add_argument( - "--max-tokens", - type=int, - default=get_env_value("MAX_TOKENS", 32768, int), - help="Maximum token size (default: from env or 32768)", - ) - - # Logging configuration - parser.add_argument( - "--log-level", - default=get_env_value("LOG_LEVEL", "INFO"), - choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], - help="Logging level (default: from env or INFO)", - ) - parser.add_argument( - "--verbose", - action="store_true", - default=get_env_value("VERBOSE", False, bool), - help="Enable verbose debug output(only valid for DEBUG log-level)", - ) - - parser.add_argument( - "--key", - type=str, - default=get_env_value("LIGHTRAG_API_KEY", None), - help="API key for authentication. 
This protects lightrag server against unauthorized access", - ) - - # Optional https parameters - parser.add_argument( - "--ssl", - action="store_true", - default=get_env_value("SSL", False, bool), - help="Enable HTTPS (default: from env or False)", - ) - parser.add_argument( - "--ssl-certfile", - default=get_env_value("SSL_CERTFILE", None), - help="Path to SSL certificate file (required if --ssl is enabled)", - ) - parser.add_argument( - "--ssl-keyfile", - default=get_env_value("SSL_KEYFILE", None), - help="Path to SSL private key file (required if --ssl is enabled)", - ) - - parser.add_argument( - "--history-turns", - type=int, - default=get_env_value("HISTORY_TURNS", 3, int), - help="Number of conversation history turns to include (default: from env or 3)", - ) - - # Search parameters - parser.add_argument( - "--top-k", - type=int, - default=get_env_value("TOP_K", 60, int), - help="Number of most similar results to return (default: from env or 60)", - ) - parser.add_argument( - "--cosine-threshold", - type=float, - default=get_env_value("COSINE_THRESHOLD", 0.2, float), - help="Cosine similarity threshold (default: from env or 0.4)", - ) - - # Ollama model name - parser.add_argument( - "--simulated-model-name", - type=str, - default=get_env_value( - "SIMULATED_MODEL_NAME", ollama_server_infos.LIGHTRAG_MODEL - ), - help="Number of conversation history turns to include (default: from env or 3)", - ) - - # Namespace - parser.add_argument( - "--namespace-prefix", - type=str, - default=get_env_value("NAMESPACE_PREFIX", ""), - help="Prefix of the namespace", - ) - - parser.add_argument( - "--auto-scan-at-startup", - action="store_true", - default=False, - help="Enable automatic scanning when the program starts", - ) - - # Server workers configuration - parser.add_argument( - "--workers", - type=int, - default=get_env_value("WORKERS", 1, int), - help="Number of worker processes (default: from env or 1)", - ) - - # LLM and embedding bindings - parser.add_argument( - "--llm-binding", - type=str, - default=get_env_value("LLM_BINDING", "ollama"), - choices=["lollms", "ollama", "openai", "openai-ollama", "azure_openai"], - help="LLM binding type (default: from env or ollama)", - ) - parser.add_argument( - "--embedding-binding", - type=str, - default=get_env_value("EMBEDDING_BINDING", "ollama"), - choices=["lollms", "ollama", "openai", "azure_openai"], - help="Embedding binding type (default: from env or ollama)", - ) - - args = parser.parse_args() + # if api_key_configured but not provided + if api_key_configured and not api_key_header_value: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, + detail="API Key required", + ) - # If in uvicorn mode and workers > 1, force it to 1 and log warning - if is_uvicorn_mode and args.workers > 1: - original_workers = args.workers - args.workers = 1 - # Log warning directly here - logging.warning( - f"In uvicorn mode, workers parameter was set to {original_workers}. 
Forcing workers=1" + # Otherwise: refuse access and return 403 error + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, + detail="API Key required or login authentication required.", ) - # convert relative path to absolute path - args.working_dir = os.path.abspath(args.working_dir) - args.input_dir = os.path.abspath(args.input_dir) - - # Inject storage configuration from environment variables - args.kv_storage = get_env_value( - "LIGHTRAG_KV_STORAGE", DefaultRAGStorageConfig.KV_STORAGE - ) - args.doc_status_storage = get_env_value( - "LIGHTRAG_DOC_STATUS_STORAGE", DefaultRAGStorageConfig.DOC_STATUS_STORAGE - ) - args.graph_storage = get_env_value( - "LIGHTRAG_GRAPH_STORAGE", DefaultRAGStorageConfig.GRAPH_STORAGE - ) - args.vector_storage = get_env_value( - "LIGHTRAG_VECTOR_STORAGE", DefaultRAGStorageConfig.VECTOR_STORAGE - ) - - # Handle openai-ollama special case - if args.llm_binding == "openai-ollama": - args.llm_binding = "openai" - args.embedding_binding = "ollama" - - args.llm_binding_host = get_env_value( - "LLM_BINDING_HOST", get_default_host(args.llm_binding) - ) - args.embedding_binding_host = get_env_value( - "EMBEDDING_BINDING_HOST", get_default_host(args.embedding_binding) - ) - args.llm_binding_api_key = get_env_value("LLM_BINDING_API_KEY", None) - args.embedding_binding_api_key = get_env_value("EMBEDDING_BINDING_API_KEY", "") - - # Inject model configuration - args.llm_model = get_env_value("LLM_MODEL", "mistral-nemo:latest") - args.embedding_model = get_env_value("EMBEDDING_MODEL", "bge-m3:latest") - args.embedding_dim = get_env_value("EMBEDDING_DIM", 1024, int) - args.max_embed_tokens = get_env_value("MAX_EMBED_TOKENS", 8192, int) - - # Inject chunk configuration - args.chunk_size = get_env_value("CHUNK_SIZE", 1200, int) - args.chunk_overlap_size = get_env_value("CHUNK_OVERLAP_SIZE", 100, int) - - # Inject LLM cache configuration - args.enable_llm_cache_for_extract = get_env_value( - "ENABLE_LLM_CACHE_FOR_EXTRACT", True, bool - ) - - # Select Document loading tool (DOCLING, DEFAULT) - args.document_loading_engine = get_env_value("DOCUMENT_LOADING_ENGINE", "DEFAULT") - - ollama_server_infos.LIGHTRAG_MODEL = args.simulated_model_name - - global_args["main_args"] = args - return args + return combined_dependency def display_splash_screen(args: argparse.Namespace) -> None: @@ -413,7 +173,7 @@ def display_splash_screen(args: argparse.Namespace) -> None: # Banner ASCIIColors.cyan(f""" ╔══════════════════════════════════════════════════════════════╗ - ║ 🚀 LightRAG Server v{__api_version__} ║ + ║ 🚀 LightRAG Server v{core_version}/{api_version} ║ ║ Fast, Lightweight RAG Server Implementation ║ ╚══════════════════════════════════════════════════════════════╝ """) @@ -427,7 +187,7 @@ def display_splash_screen(args: argparse.Namespace) -> None: ASCIIColors.white(" ├─ Workers: ", end="") ASCIIColors.yellow(f"{args.workers}") ASCIIColors.white(" ├─ CORS Origins: ", end="") - ASCIIColors.yellow(f"{os.getenv('CORS_ORIGINS', '*')}") + ASCIIColors.yellow(f"{args.cors_origins}") ASCIIColors.white(" ├─ SSL Enabled: ", end="") ASCIIColors.yellow(f"{args.ssl}") if args.ssl: @@ -441,10 +201,12 @@ def display_splash_screen(args: argparse.Namespace) -> None: ASCIIColors.yellow(f"{args.log_level}") ASCIIColors.white(" ├─ Verbose Debug: ", end="") ASCIIColors.yellow(f"{args.verbose}") - ASCIIColors.white(" ├─ Timeout: ", end="") - ASCIIColors.yellow(f"{args.timeout if args.timeout else 'None (infinite)'}") - ASCIIColors.white(" └─ API Key: ", end="") + ASCIIColors.white(" ├─ History Turns: ", 
end="") + ASCIIColors.yellow(f"{args.history_turns}") + ASCIIColors.white(" ├─ API Key: ", end="") ASCIIColors.yellow("Set" if args.key else "Not Set") + ASCIIColors.white(" └─ JWT Auth: ", end="") + ASCIIColors.yellow("Enabled" if args.auth_accounts else "Disabled") # Directory Configuration ASCIIColors.magenta("\n📂 Directory Configuration:") @@ -459,8 +221,20 @@ def display_splash_screen(args: argparse.Namespace) -> None: ASCIIColors.yellow(f"{args.llm_binding}") ASCIIColors.white(" ├─ Host: ", end="") ASCIIColors.yellow(f"{args.llm_binding_host}") - ASCIIColors.white(" └─ Model: ", end="") + ASCIIColors.white(" ├─ Model: ", end="") ASCIIColors.yellow(f"{args.llm_model}") + ASCIIColors.white(" ├─ Temperature: ", end="") + ASCIIColors.yellow(f"{args.temperature}") + ASCIIColors.white(" ├─ Max Async for LLM: ", end="") + ASCIIColors.yellow(f"{args.max_async}") + ASCIIColors.white(" ├─ Max Tokens: ", end="") + ASCIIColors.yellow(f"{args.max_tokens}") + ASCIIColors.white(" ├─ Timeout: ", end="") + ASCIIColors.yellow(f"{args.timeout if args.timeout else 'None (infinite)'}") + ASCIIColors.white(" ├─ LLM Cache Enabled: ", end="") + ASCIIColors.yellow(f"{args.enable_llm_cache}") + ASCIIColors.white(" └─ LLM Cache for Extraction Enabled: ", end="") + ASCIIColors.yellow(f"{args.enable_llm_cache_for_extract}") # Embedding Configuration ASCIIColors.magenta("\n📊 Embedding Configuration:") @@ -475,24 +249,24 @@ def display_splash_screen(args: argparse.Namespace) -> None: # RAG Configuration ASCIIColors.magenta("\n⚙️ RAG Configuration:") - ASCIIColors.white(" ├─ Max Async Operations: ", end="") - ASCIIColors.yellow(f"{args.max_async}") - ASCIIColors.white(" ├─ Max Tokens: ", end="") - ASCIIColors.yellow(f"{args.max_tokens}") + ASCIIColors.white(" ├─ Summary Language: ", end="") + ASCIIColors.yellow(f"{args.summary_language}") + ASCIIColors.white(" ├─ Max Parallel Insert: ", end="") + ASCIIColors.yellow(f"{args.max_parallel_insert}") ASCIIColors.white(" ├─ Max Embed Tokens: ", end="") ASCIIColors.yellow(f"{args.max_embed_tokens}") ASCIIColors.white(" ├─ Chunk Size: ", end="") ASCIIColors.yellow(f"{args.chunk_size}") ASCIIColors.white(" ├─ Chunk Overlap Size: ", end="") ASCIIColors.yellow(f"{args.chunk_overlap_size}") - ASCIIColors.white(" ├─ History Turns: ", end="") - ASCIIColors.yellow(f"{args.history_turns}") ASCIIColors.white(" ├─ Cosine Threshold: ", end="") ASCIIColors.yellow(f"{args.cosine_threshold}") ASCIIColors.white(" ├─ Top-K: ", end="") ASCIIColors.yellow(f"{args.top_k}") - ASCIIColors.white(" └─ LLM Cache for Extraction Enabled: ", end="") - ASCIIColors.yellow(f"{args.enable_llm_cache_for_extract}") + ASCIIColors.white(" ├─ Max Token Summary: ", end="") + ASCIIColors.yellow(f"{int(os.getenv('MAX_TOKEN_SUMMARY', 500))}") + ASCIIColors.white(" └─ Force LLM Summary on Merge: ", end="") + ASCIIColors.yellow(f"{int(os.getenv('FORCE_LLM_SUMMARY_ON_MERGE', 6))}") # System Configuration ASCIIColors.magenta("\n💾 Storage Configuration:") @@ -512,19 +286,17 @@ def display_splash_screen(args: argparse.Namespace) -> None: protocol = "https" if args.ssl else "http" if args.host == "0.0.0.0": ASCIIColors.magenta("\n🌐 Server Access Information:") - ASCIIColors.white(" ├─ Local Access: ", end="") + ASCIIColors.white(" ├─ WebUI (local): ", end="") ASCIIColors.yellow(f"{protocol}://localhost:{args.port}") ASCIIColors.white(" ├─ Remote Access: ", end="") ASCIIColors.yellow(f"{protocol}://:{args.port}") ASCIIColors.white(" ├─ API Documentation (local): ", end="") 
ASCIIColors.yellow(f"{protocol}://localhost:{args.port}/docs") - ASCIIColors.white(" ├─ Alternative Documentation (local): ", end="") + ASCIIColors.white(" └─ Alternative Documentation (local): ", end="") ASCIIColors.yellow(f"{protocol}://localhost:{args.port}/redoc") - ASCIIColors.white(" └─ WebUI (local): ", end="") - ASCIIColors.yellow(f"{protocol}://localhost:{args.port}/webui") - ASCIIColors.yellow("\n📝 Note:") - ASCIIColors.white(""" Since the server is running on 0.0.0.0: + ASCIIColors.magenta("\n📝 Note:") + ASCIIColors.cyan(""" Since the server is running on 0.0.0.0: - Use 'localhost' or '127.0.0.1' for local access - Use your machine's IP address for remote access - To find your IP address: @@ -534,42 +306,24 @@ def display_splash_screen(args: argparse.Namespace) -> None: else: base_url = f"{protocol}://{args.host}:{args.port}" ASCIIColors.magenta("\n🌐 Server Access Information:") - ASCIIColors.white(" ├─ Base URL: ", end="") + ASCIIColors.white(" ├─ WebUI (local): ", end="") ASCIIColors.yellow(f"{base_url}") ASCIIColors.white(" ├─ API Documentation: ", end="") ASCIIColors.yellow(f"{base_url}/docs") ASCIIColors.white(" └─ Alternative Documentation: ", end="") ASCIIColors.yellow(f"{base_url}/redoc") - # Usage Examples - ASCIIColors.magenta("\n📚 Quick Start Guide:") - ASCIIColors.cyan(""" - 1. Access the Swagger UI: - Open your browser and navigate to the API documentation URL above - - 2. API Authentication:""") - if args.key: - ASCIIColors.cyan(""" Add the following header to your requests: - X-API-Key: - """) - else: - ASCIIColors.cyan(" No authentication required\n") - - ASCIIColors.cyan(""" 3. Basic Operations: - - POST /upload_document: Upload new documents to RAG - - POST /query: Query your document collection - - 4. Monitor the server: - - Check server logs for detailed operation information - - Use healthcheck endpoint: GET /health - """) - # Security Notice if args.key: ASCIIColors.yellow("\n⚠️ Security Notice:") ASCIIColors.white(""" API Key authentication is enabled. Make sure to include the X-API-Key header in all your requests. """) + if args.auth_accounts: + ASCIIColors.yellow("\n⚠️ Security Notice:") + ASCIIColors.white(""" JWT authentication is enabled. + Make sure to login before making the request, and include the 'Authorization' in the header. + """) # Ensure splash output flush to system log sys.stdout.flush() diff --git a/lightrag/api/webui/assets/index-BJDb04H1.css b/lightrag/api/webui/assets/index-BJDb04H1.css new file mode 100644 index 0000000000..b0d1550600 --- /dev/null +++ b/lightrag/api/webui/assets/index-BJDb04H1.css @@ -0,0 +1 @@ +/*! 
tailwindcss v4.0.8 | MIT License | https://tailwindcss.com */@layer theme{:root,:host{--font-sans:ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-mono:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--color-red-100:oklch(.936 .032 17.717);--color-red-400:oklch(.704 .191 22.216);--color-red-500:oklch(.637 .237 25.331);--color-red-600:oklch(.577 .245 27.325);--color-red-900:oklch(.396 .141 25.723);--color-red-950:oklch(.258 .092 26.042);--color-amber-100:oklch(.962 .059 95.617);--color-amber-200:oklch(.924 .12 95.746);--color-amber-700:oklch(.555 .163 48.998);--color-amber-800:oklch(.473 .137 46.201);--color-amber-900:oklch(.414 .112 45.904);--color-yellow-100:oklch(.973 .071 103.193);--color-yellow-400:oklch(.852 .199 91.936);--color-yellow-600:oklch(.681 .162 75.834);--color-yellow-900:oklch(.421 .095 57.708);--color-green-100:oklch(.962 .044 156.743);--color-green-400:oklch(.792 .209 151.711);--color-green-500:oklch(.723 .219 149.579);--color-green-600:oklch(.627 .194 149.214);--color-green-900:oklch(.393 .095 152.535);--color-emerald-50:oklch(.979 .021 166.113);--color-emerald-400:oklch(.765 .177 163.223);--color-emerald-700:oklch(.508 .118 165.612);--color-teal-100:oklch(.953 .051 180.801);--color-blue-100:oklch(.932 .032 255.585);--color-blue-400:oklch(.707 .165 254.624);--color-blue-600:oklch(.546 .245 262.881);--color-blue-700:oklch(.488 .243 264.376);--color-blue-900:oklch(.379 .146 265.522);--color-violet-700:oklch(.491 .27 292.581);--color-gray-100:oklch(.967 .003 264.542);--color-gray-200:oklch(.928 .006 264.531);--color-gray-300:oklch(.872 .01 258.338);--color-gray-400:oklch(.707 .022 261.325);--color-gray-500:oklch(.551 .027 264.364);--color-gray-600:oklch(.446 .03 256.802);--color-gray-700:oklch(.373 .034 259.733);--color-gray-800:oklch(.278 .033 256.848);--color-gray-900:oklch(.21 .034 264.665);--color-zinc-50:oklch(.985 0 0);--color-zinc-100:oklch(.967 .001 286.375);--color-zinc-200:oklch(.92 .004 286.32);--color-zinc-300:oklch(.871 .006 286.286);--color-zinc-600:oklch(.442 .017 285.786);--color-zinc-700:oklch(.37 .013 285.805);--color-zinc-800:oklch(.274 .006 286.033);--color-black:#000;--color-white:#fff;--spacing:.25rem;--container-xs:20rem;--container-md:28rem;--container-lg:32rem;--container-xl:36rem;--text-xs:.75rem;--text-xs--line-height:calc(1/.75);--text-sm:.875rem;--text-sm--line-height:calc(1.25/.875);--text-base:1rem;--text-base--line-height: 1.5 ;--text-lg:1.125rem;--text-lg--line-height:calc(1.75/1.125);--text-3xl:1.875rem;--text-3xl--line-height: 1.2 ;--font-weight-medium:500;--font-weight-semibold:600;--font-weight-bold:700;--tracking-tight:-.025em;--tracking-wide:.025em;--tracking-widest:.1em;--leading-relaxed:1.625;--radius-xs:.125rem;--animate-spin:spin 1s linear infinite;--animate-pulse:pulse 2s cubic-bezier(.4,0,.6,1)infinite;--blur-sm:8px;--blur-lg:16px;--default-transition-duration:.15s;--default-transition-timing-function:cubic-bezier(.4,0,.2,1);--default-font-family:var(--font-sans);--default-font-feature-settings:var(--font-sans--font-feature-settings);--default-font-variation-settings:var(--font-sans--font-variation-settings);--default-mono-font-family:var(--font-mono);--default-mono-font-feature-settings:var(--font-mono--font-feature-settings);--default-mono-font-variation-settings:var(--font-mono--font-variation-settings)}}@layer base{*,:after,:before,::backdrop{box-sizing:border-box;border:0 
solid;margin:0;padding:0}::file-selector-button{box-sizing:border-box;border:0 solid;margin:0;padding:0}html,:host{-webkit-text-size-adjust:100%;-moz-tab-size:4;tab-size:4;line-height:1.5;font-family:var(--default-font-family,ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji");font-feature-settings:var(--default-font-feature-settings,normal);font-variation-settings:var(--default-font-variation-settings,normal);-webkit-tap-highlight-color:transparent}body{line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;-webkit-text-decoration:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:var(--default-mono-font-family,ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace);font-feature-settings:var(--default-mono-font-feature-settings,normal);font-variation-settings:var(--default-mono-font-variation-settings,normal);font-size:1em}small{font-size:80%}sub,sup{vertical-align:baseline;font-size:75%;line-height:0;position:relative}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}:-moz-focusring{outline:auto}progress{vertical-align:baseline}summary{display:list-item}ol,ul,menu{list-style:none}img,svg,video,canvas,audio,iframe,embed,object{vertical-align:middle;display:block}img,video{max-width:100%;height:auto}button,input,select,optgroup,textarea{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}::file-selector-button{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}:where(select:is([multiple],[size])) optgroup{font-weight:bolder}:where(select:is([multiple],[size])) optgroup option{padding-inline-start:20px}::file-selector-button{margin-inline-end:4px}::placeholder{opacity:1;color:color-mix(in oklab,currentColor 50%,transparent)}textarea{resize:vertical}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-date-and-time-value{min-height:1lh;text-align:inherit}::-webkit-datetime-edit{display:inline-flex}::-webkit-datetime-edit-fields-wrapper{padding:0}::-webkit-datetime-edit{padding-block:0}::-webkit-datetime-edit-year-field{padding-block:0}::-webkit-datetime-edit-month-field{padding-block:0}::-webkit-datetime-edit-day-field{padding-block:0}::-webkit-datetime-edit-hour-field{padding-block:0}::-webkit-datetime-edit-minute-field{padding-block:0}::-webkit-datetime-edit-second-field{padding-block:0}::-webkit-datetime-edit-millisecond-field{padding-block:0}::-webkit-datetime-edit-meridiem-field{padding-block:0}:-moz-ui-invalid{box-shadow:none}button,input:where([type=button],[type=reset],[type=submit]){-webkit-appearance:button;-moz-appearance:button;appearance:button}::file-selector-button{-webkit-appearance:button;-moz-appearance:button;appearance:button}::-webkit-inner-spin-button{height:auto}::-webkit-outer-spin-button{height:auto}[hidden]:where(:not([hidden=until-found])){display:none!important}*{border-color:var(--border);outline-color:color-mix(in oklab,var(--ring)50%,transparent)}body{background-color:var(--background);color:var(--foreground)}*{scrollbar-color:initial;scrollbar-width:initial}}@layer components;@layer 
utilities{.pointer-events-none{pointer-events:none}.invisible{visibility:hidden}.visible{visibility:visible}.sr-only{clip:rect(0,0,0,0);white-space:nowrap;border-width:0;width:1px;height:1px;margin:-1px;padding:0;position:absolute;overflow:hidden}.absolute{position:absolute}.fixed{position:fixed}.relative{position:relative}.sticky{position:sticky}.inset-0{inset:calc(var(--spacing)*0)}.inset-\[-1px\]{top:-1px;right:-1px;bottom:-1px;left:-1px}.top-0{top:calc(var(--spacing)*0)}.top-1\/2{top:50%}.top-2{top:calc(var(--spacing)*2)}.top-4{top:calc(var(--spacing)*4)}.top-\[50\%\]{top:50%}.right-0{right:calc(var(--spacing)*0)}.right-2{right:calc(var(--spacing)*2)}.right-4{right:calc(var(--spacing)*4)}.bottom-0{bottom:calc(var(--spacing)*0)}.bottom-2{bottom:calc(var(--spacing)*2)}.bottom-4{bottom:calc(var(--spacing)*4)}.bottom-10{bottom:calc(var(--spacing)*10)}.\!left-1\/2{left:50%!important}.\!left-\[25\%\]{left:25%!important}.\!left-\[75\%\]{left:75%!important}.left-0{left:calc(var(--spacing)*0)}.left-2{left:calc(var(--spacing)*2)}.left-\[50\%\]{left:50%}.left-\[calc\(1rem\+2\.5rem\)\]{left:3.5rem}.z-10{z-index:10}.z-50{z-index:50}.z-60{z-index:60}.\!container{width:100%!important}@media (width>=40rem){.\!container{max-width:40rem!important}}@media (width>=48rem){.\!container{max-width:48rem!important}}@media (width>=64rem){.\!container{max-width:64rem!important}}@media (width>=80rem){.\!container{max-width:80rem!important}}@media (width>=96rem){.\!container{max-width:96rem!important}}.container{width:100%}@media (width>=40rem){.container{max-width:40rem}}@media (width>=48rem){.container{max-width:48rem}}@media (width>=64rem){.container{max-width:64rem}}@media (width>=80rem){.container{max-width:80rem}}@media (width>=96rem){.container{max-width:96rem}}.\!m-0{margin:calc(var(--spacing)*0)!important}.m-0{margin:calc(var(--spacing)*0)}.\!mx-4{margin-inline:calc(var(--spacing)*4)!important}.-mx-1{margin-inline:calc(var(--spacing)*-1)}.mx-1{margin-inline:calc(var(--spacing)*1)}.mx-4{margin-inline:calc(var(--spacing)*4)}.my-1{margin-block:calc(var(--spacing)*1)}.mt-2{margin-top:calc(var(--spacing)*2)}.mt-4{margin-top:calc(var(--spacing)*4)}.mr-1{margin-right:calc(var(--spacing)*1)}.mr-2{margin-right:calc(var(--spacing)*2)}.mr-4{margin-right:calc(var(--spacing)*4)}.mr-8{margin-right:calc(var(--spacing)*8)}.mb-1{margin-bottom:calc(var(--spacing)*1)}.mb-2{margin-bottom:calc(var(--spacing)*2)}.mb-4{margin-bottom:calc(var(--spacing)*4)}.ml-1{margin-left:calc(var(--spacing)*1)}.ml-2{margin-left:calc(var(--spacing)*2)}.ml-auto{margin-left:auto}.line-clamp-1{-webkit-line-clamp:1;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.\!inline{display:inline!important}.block{display:block}.flex{display:flex}.grid{display:grid}.hidden{display:none}.inline{display:inline}.inline-flex{display:inline-flex}.table{display:table}.aspect-square{aspect-ratio:1}.\!size-full{width:100%!important;height:100%!important}.size-4{width:calc(var(--spacing)*4);height:calc(var(--spacing)*4)}.size-6{width:calc(var(--spacing)*6);height:calc(var(--spacing)*6)}.size-7{width:calc(var(--spacing)*7);height:calc(var(--spacing)*7)}.size-8{width:calc(var(--spacing)*8);height:calc(var(--spacing)*8)}.size-10{width:calc(var(--spacing)*10);height:calc(var(--spacing)*10)}.size-full{width:100%;height:100%}.h-1\/2{height:50%}.h-2{height:calc(var(--spacing)*2)}.h-2\.5{height:calc(var(--spacing)*2.5)}.h-3{height:calc(var(--spacing)*3)}.h-3\.5{height:calc(var(--spacing)*3.5)}.h-4{height:calc(var(--spacing)*4)}.h-5{height:calc(var(--spacing)*5
)}.h-6{height:calc(var(--spacing)*6)}.h-7{height:calc(var(--spacing)*7)}.h-8{height:calc(var(--spacing)*8)}.h-9{height:calc(var(--spacing)*9)}.h-10{height:calc(var(--spacing)*10)}.h-11{height:calc(var(--spacing)*11)}.h-12{height:calc(var(--spacing)*12)}.h-24{height:calc(var(--spacing)*24)}.h-52{height:calc(var(--spacing)*52)}.h-\[1px\]{height:1px}.h-\[var\(--radix-select-trigger-height\)\]{height:var(--radix-select-trigger-height)}.h-fit{height:fit-content}.h-full{height:100%}.h-px{height:1px}.h-screen{height:100vh}.max-h-8{max-height:calc(var(--spacing)*8)}.max-h-48{max-height:calc(var(--spacing)*48)}.max-h-80{max-height:calc(var(--spacing)*80)}.max-h-96{max-height:calc(var(--spacing)*96)}.max-h-\[40vh\]{max-height:40vh}.max-h-\[50vh\]{max-height:50vh}.max-h-\[60vh\]{max-height:60vh}.max-h-\[300px\]{max-height:300px}.max-h-full{max-height:100%}.min-h-0{min-height:calc(var(--spacing)*0)}.min-h-\[7\.5em\]{min-height:7.5em}.min-h-\[10em\]{min-height:10em}.w-2{width:calc(var(--spacing)*2)}.w-2\.5{width:calc(var(--spacing)*2.5)}.w-3{width:calc(var(--spacing)*3)}.w-3\.5{width:calc(var(--spacing)*3.5)}.w-4{width:calc(var(--spacing)*4)}.w-5{width:calc(var(--spacing)*5)}.w-6{width:calc(var(--spacing)*6)}.w-7{width:calc(var(--spacing)*7)}.w-8{width:calc(var(--spacing)*8)}.w-9{width:calc(var(--spacing)*9)}.w-12{width:calc(var(--spacing)*12)}.w-16{width:calc(var(--spacing)*16)}.w-24{width:calc(var(--spacing)*24)}.w-56{width:calc(var(--spacing)*56)}.w-\[1px\]{width:1px}.w-\[200px\]{width:200px}.w-auto{width:auto}.w-full{width:100%}.w-screen{width:100vw}.max-w-80{max-width:calc(var(--spacing)*80)}.max-w-\[80\%\]{max-width:80%}.max-w-\[200px\]{max-width:200px}.max-w-\[250px\]{max-width:250px}.max-w-\[480px\]{max-width:480px}.max-w-lg{max-width:var(--container-lg)}.max-w-none{max-width:none}.max-w-xs{max-width:var(--container-xs)}.min-w-0{min-width:calc(var(--spacing)*0)}.min-w-45{min-width:calc(var(--spacing)*45)}.min-w-\[8rem\]{min-width:8rem}.min-w-\[180px\]{min-width:180px}.min-w-\[200px\]{min-width:200px}.min-w-\[300px\]{min-width:300px}.min-w-\[var\(--radix-select-trigger-width\)\]{min-width:var(--radix-select-trigger-width)}.min-w-auto{min-width:auto}.flex-1{flex:1}.flex-none{flex:none}.flex-shrink-0,.shrink-0{flex-shrink:0}.grow{flex-grow:1}.caption-bottom{caption-side:bottom}.\!-translate-x-1\/2{--tw-translate-x: -50% ;translate:var(--tw-translate-x)var(--tw-translate-y)!important}.\!translate-x-\[-50\%\]{--tw-translate-x:-50%;translate:var(--tw-translate-x)var(--tw-translate-y)!important}.translate-x-\[-50\%\]{--tw-translate-x:-50%;translate:var(--tw-translate-x)var(--tw-translate-y)}.-translate-y-1\/2{--tw-translate-y: -50% ;translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-y-\[-50\%\]{--tw-translate-y:-50%;translate:var(--tw-translate-x)var(--tw-translate-y)}.scale-125{--tw-scale-x:125%;--tw-scale-y:125%;--tw-scale-z:125%;scale:var(--tw-scale-x)var(--tw-scale-y)}.transform{transform:var(--tw-rotate-x)var(--tw-rotate-y)var(--tw-rotate-z)var(--tw-skew-x)var(--tw-skew-y)}.animate-pulse{animation:var(--animate-pulse)}.animate-spin{animation:var(--animate-spin)}.cursor-default{cursor:default}.cursor-not-allowed{cursor:not-allowed}.cursor-pointer{cursor:pointer}.touch-none{touch-action:none}.resize{resize:both}.resize-y{resize:vertical}.\[appearance\:textfield\]{-webkit-appearance:textfield;-moz-appearance:textfield;appearance:textfield}.grid-cols-\[120px_1fr\]{grid-template-columns:120px 
1fr}.flex-col{flex-direction:column}.flex-col-reverse{flex-direction:column-reverse}.flex-row{flex-direction:row}.place-items-center{place-items:center}.items-center{align-items:center}.items-start{align-items:flex-start}.justify-between{justify-content:space-between}.justify-center{justify-content:center}.justify-end{justify-content:flex-end}.justify-start{justify-content:flex-start}.gap-1{gap:calc(var(--spacing)*1)}.gap-1\.5{gap:calc(var(--spacing)*1.5)}.gap-2{gap:calc(var(--spacing)*2)}.gap-2\.5{gap:calc(var(--spacing)*2.5)}.gap-3{gap:calc(var(--spacing)*3)}.gap-4{gap:calc(var(--spacing)*4)}.gap-6{gap:calc(var(--spacing)*6)}.gap-px{gap:1px}:where(.space-y-1>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-1\.5>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1.5)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1.5)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-2>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*2)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*2)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-4>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*4)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*4)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-6>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*6)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*6)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-x-2>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*2)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*2)*calc(1 - var(--tw-space-x-reverse)))}.self-center{align-self:center}.truncate{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.\!overflow-hidden{overflow:hidden!important}.overflow-auto{overflow:auto}.overflow-hidden{overflow:hidden}.overflow-visible{overflow:visible}.overflow-x-hidden{overflow-x:hidden}.overflow-y-auto{overflow-y:auto}.\!rounded-none{border-radius:0!important}.rounded{border-radius:.25rem}.rounded-\[inherit\]{border-radius:inherit}.rounded-full{border-radius:3.40282e38px}.rounded-lg{border-radius:var(--radius)}.rounded-md{border-radius:calc(var(--radius) - 2px)}.rounded-sm{border-radius:calc(var(--radius) - 4px)}.rounded-xl{border-radius:calc(var(--radius) + 4px)}.rounded-xs{border-radius:var(--radius-xs)}.rounded-l-none{border-top-left-radius:0;border-bottom-left-radius:0}.rounded-tr-none{border-top-right-radius:0}.rounded-br-none{border-bottom-right-radius:0}.border,.border-1{border-style:var(--tw-border-style);border-width:1px}.border-2{border-style:var(--tw-border-style);border-width:2px}.border-4{border-style:var(--tw-border-style);border-width:4px}.border-t{border-top-style:var(--tw-border-style);border-top-width:1px}.border-b{border-bottom-style:var(--tw-border-style);border-bottom-width:1px}.border-l{border-left-style:var(--tw-border-style);border-left-width:1px}.border-dashed{--tw-border-style:dashed;border-style:dashed}.\!border-input{border-color:var(--input)!important}.border-blue-400{border-color:var(--color-blue-400)}.border-border\/40{border-color:color-mix(in oklab,var(--border)40%,transparent)}.border-destructive\/50{border-color:color-mix(in 
oklab,var(--destructive)50%,transparent)}.border-gray-200{border-color:var(--color-gray-200)}.border-gray-400{border-color:var(--color-gray-400)}.border-green-400{border-color:var(--color-green-400)}.border-input{border-color:var(--input)}.border-muted-foreground\/25{border-color:color-mix(in oklab,var(--muted-foreground)25%,transparent)}.border-muted-foreground\/50{border-color:color-mix(in oklab,var(--muted-foreground)50%,transparent)}.border-primary{border-color:var(--primary)}.border-red-400{border-color:var(--color-red-400)}.border-transparent{border-color:#0000}.border-yellow-400{border-color:var(--color-yellow-400)}.border-t-transparent{border-top-color:#0000}.border-l-transparent{border-left-color:#0000}.\!bg-background{background-color:var(--background)!important}.\!bg-emerald-400{background-color:var(--color-emerald-400)!important}.bg-amber-100{background-color:var(--color-amber-100)}.bg-background{background-color:var(--background)}.bg-background\/60{background-color:color-mix(in oklab,var(--background)60%,transparent)}.bg-background\/80{background-color:color-mix(in oklab,var(--background)80%,transparent)}.bg-background\/95{background-color:color-mix(in oklab,var(--background)95%,transparent)}.bg-black\/10{background-color:color-mix(in oklab,var(--color-black)10%,transparent)}.bg-black\/30{background-color:color-mix(in oklab,var(--color-black)30%,transparent)}.bg-black\/50{background-color:color-mix(in oklab,var(--color-black)50%,transparent)}.bg-blue-100{background-color:var(--color-blue-100)}.bg-border{background-color:var(--border)}.bg-card{background-color:var(--card)}.bg-card\/95{background-color:color-mix(in oklab,var(--card)95%,transparent)}.bg-destructive{background-color:var(--destructive)}.bg-foreground\/10{background-color:color-mix(in oklab,var(--foreground)10%,transparent)}.bg-gray-100{background-color:var(--color-gray-100)}.bg-gray-300{background-color:var(--color-gray-300)}.bg-green-100{background-color:var(--color-green-100)}.bg-green-500{background-color:var(--color-green-500)}.bg-muted{background-color:var(--muted)}.bg-muted\/50{background-color:color-mix(in oklab,var(--muted)50%,transparent)}.bg-popover{background-color:var(--popover)}.bg-primary{background-color:var(--primary)}.bg-primary-foreground\/60{background-color:color-mix(in oklab,var(--primary-foreground)60%,transparent)}.bg-primary\/5{background-color:color-mix(in oklab,var(--primary)5%,transparent)}.bg-red-100{background-color:var(--color-red-100)}.bg-red-400{background-color:var(--color-red-400)}.bg-red-500{background-color:var(--color-red-500)}.bg-secondary{background-color:var(--secondary)}.bg-transparent{background-color:#0000}.bg-white\/30{background-color:color-mix(in oklab,var(--color-white)30%,transparent)}.bg-yellow-100{background-color:var(--color-yellow-100)}.bg-zinc-200{background-color:var(--color-zinc-200)}.bg-zinc-800{background-color:var(--color-zinc-800)}.bg-gradient-to-br{--tw-gradient-position:to bottom right in 
oklab;background-image:linear-gradient(var(--tw-gradient-stops))}.from-emerald-50{--tw-gradient-from:var(--color-emerald-50);--tw-gradient-stops:var(--tw-gradient-via-stops,var(--tw-gradient-position),var(--tw-gradient-from)var(--tw-gradient-from-position),var(--tw-gradient-to)var(--tw-gradient-to-position))}.to-teal-100{--tw-gradient-to:var(--color-teal-100);--tw-gradient-stops:var(--tw-gradient-via-stops,var(--tw-gradient-position),var(--tw-gradient-from)var(--tw-gradient-from-position),var(--tw-gradient-to)var(--tw-gradient-to-position))}.object-cover{object-fit:cover}.\!p-0{padding:calc(var(--spacing)*0)!important}.p-0{padding:calc(var(--spacing)*0)}.p-1{padding:calc(var(--spacing)*1)}.p-2{padding:calc(var(--spacing)*2)}.p-3{padding:calc(var(--spacing)*3)}.p-4{padding:calc(var(--spacing)*4)}.p-6{padding:calc(var(--spacing)*6)}.p-16{padding:calc(var(--spacing)*16)}.p-\[1px\]{padding:1px}.px-1{padding-inline:calc(var(--spacing)*1)}.px-2{padding-inline:calc(var(--spacing)*2)}.px-2\.5{padding-inline:calc(var(--spacing)*2.5)}.px-3{padding-inline:calc(var(--spacing)*3)}.px-4{padding-inline:calc(var(--spacing)*4)}.px-5{padding-inline:calc(var(--spacing)*5)}.px-6{padding-inline:calc(var(--spacing)*6)}.px-8{padding-inline:calc(var(--spacing)*8)}.py-0\.5{padding-block:calc(var(--spacing)*.5)}.py-1{padding-block:calc(var(--spacing)*1)}.py-1\.5{padding-block:calc(var(--spacing)*1.5)}.py-2{padding-block:calc(var(--spacing)*2)}.py-2\.5{padding-block:calc(var(--spacing)*2.5)}.py-3{padding-block:calc(var(--spacing)*3)}.py-4{padding-block:calc(var(--spacing)*4)}.py-6{padding-block:calc(var(--spacing)*6)}.pt-0{padding-top:calc(var(--spacing)*0)}.pt-1{padding-top:calc(var(--spacing)*1)}.pt-2{padding-top:calc(var(--spacing)*2)}.pt-4{padding-top:calc(var(--spacing)*4)}.pt-6{padding-top:calc(var(--spacing)*6)}.pr-1{padding-right:calc(var(--spacing)*1)}.pr-2{padding-right:calc(var(--spacing)*2)}.pb-1{padding-bottom:calc(var(--spacing)*1)}.pb-2{padding-bottom:calc(var(--spacing)*2)}.pb-8{padding-bottom:calc(var(--spacing)*8)}.pb-12{padding-bottom:calc(var(--spacing)*12)}.pl-1{padding-left:calc(var(--spacing)*1)}.pl-8{padding-left:calc(var(--spacing)*8)}.text-center{text-align:center}.text-left{text-align:left}.align-middle{vertical-align:middle}.font-mono{font-family:var(--font-mono)}.text-3xl{font-size:var(--text-3xl);line-height:var(--tw-leading,var(--text-3xl--line-height))}.text-base{font-size:var(--text-base);line-height:var(--tw-leading,var(--text-base--line-height))}.text-lg{font-size:var(--text-lg);line-height:var(--tw-leading,var(--text-lg--line-height))}.text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.text-xs{font-size:var(--text-xs);line-height:var(--tw-leading,var(--text-xs--line-height))}.leading-none{--tw-leading:1;line-height:1}.font-bold{--tw-font-weight:var(--font-weight-bold);font-weight:var(--font-weight-bold)}.font-medium{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.font-semibold{--tw-font-weight:var(--font-weight-semibold);font-weight:var(--font-weight-semibold)}.tracking-tight{--tw-tracking:var(--tracking-tight);letter-spacing:var(--tracking-tight)}.tracking-wide{--tw-tracking:var(--tracking-wide);letter-spacing:var(--tracking-wide)}.tracking-widest{--tw-tracking:var(--tracking-widest);letter-spacing:var(--tracking-widest)}.break-words{overflow-wrap:break-word}.text-ellipsis{text-overflow:ellipsis}.whitespace-nowrap{white-space:nowrap}.whitespace-pre-wrap{white-space:pre-wrap}.\!text-zinc-50{color:var(--color
-zinc-50)!important}.text-amber-700{color:var(--color-amber-700)}.text-amber-800{color:var(--color-amber-800)}.text-blue-600{color:var(--color-blue-600)}.text-blue-700{color:var(--color-blue-700)}.text-card-foreground{color:var(--card-foreground)}.text-current{color:currentColor}.text-destructive{color:var(--destructive)}.text-destructive-foreground{color:var(--destructive-foreground)}.text-emerald-400{color:var(--color-emerald-400)}.text-emerald-700{color:var(--color-emerald-700)}.text-foreground{color:var(--foreground)}.text-foreground\/80{color:color-mix(in oklab,var(--foreground)80%,transparent)}.text-gray-400{color:var(--color-gray-400)}.text-gray-500{color:var(--color-gray-500)}.text-gray-700{color:var(--color-gray-700)}.text-gray-900{color:var(--color-gray-900)}.text-green-600{color:var(--color-green-600)}.text-muted-foreground{color:var(--muted-foreground)}.text-muted-foreground\/70{color:color-mix(in oklab,var(--muted-foreground)70%,transparent)}.text-popover-foreground{color:var(--popover-foreground)}.text-primary{color:var(--primary)}.text-primary-foreground{color:var(--primary-foreground)}.text-primary\/60{color:color-mix(in oklab,var(--primary)60%,transparent)}.text-red-400{color:var(--color-red-400)}.text-red-500{color:var(--color-red-500)}.text-red-600{color:var(--color-red-600)}.text-secondary-foreground{color:var(--secondary-foreground)}.text-violet-700{color:var(--color-violet-700)}.text-yellow-600{color:var(--color-yellow-600)}.text-zinc-100{color:var(--color-zinc-100)}.text-zinc-800{color:var(--color-zinc-800)}.lowercase{text-transform:lowercase}.underline-offset-4{text-underline-offset:4px}.opacity-0{opacity:0}.opacity-20{opacity:.2}.opacity-50{opacity:.5}.opacity-60{opacity:.6}.opacity-70{opacity:.7}.opacity-80{opacity:.8}.opacity-100{opacity:1}.shadow{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-\[0_0_8px_rgba\(0\,0\,0\,0\.2\)\]{--tw-shadow:0 0 8px var(--tw-shadow-color,#0003);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-\[0_0_12px_rgba\(34\,197\,94\,0\.4\)\]{--tw-shadow:0 0 12px var(--tw-shadow-color,#22c55e66);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-\[0_0_12px_rgba\(239\,68\,68\,0\.4\)\]{--tw-shadow:0 0 12px var(--tw-shadow-color,#ef444466);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-\[inset_0_-1px_0_rgba\(0\,0\,0\,0\.1\)\]{--tw-shadow:inset 0 -1px 0 var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-lg{--tw-shadow:0 10px 15px -3px var(--tw-shadow-color,#0000001a),0 4px 6px -4px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-md{--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-sm{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px 
var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring-offset-background{--tw-ring-offset-color:var(--background)}.outline{outline-style:var(--tw-outline-style);outline-width:1px}.blur{--tw-blur:blur(8px);filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.filter{filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.backdrop-blur{--tw-backdrop-blur:blur(8px);-webkit-backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,);backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,)}.backdrop-blur-lg{--tw-backdrop-blur:blur(var(--blur-lg));-webkit-backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,);backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,)}.backdrop-blur-sm{--tw-backdrop-blur:blur(var(--blur-sm));-webkit-backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,);backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,)}.transition{transition-property:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to,opacity,box-shadow,transform,translate,scale,rotate,filter,-webkit-backdrop-filter,backdrop-filter;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-all{transition-property:all;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-colors{transition-property:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-opacity{transition-property:opacity;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-d
uration:var(--tw-duration,var(--default-transition-duration))}.duration-200{--tw-duration:.2s;transition-duration:.2s}.duration-300{--tw-duration:.3s;transition-duration:.3s}.duration-2000{--tw-duration:2s;transition-duration:2s}.animate-in{--tw-enter-opacity:initial;--tw-enter-scale:initial;--tw-enter-rotate:initial;--tw-enter-translate-x:initial;--tw-enter-translate-y:initial;animation-name:enter;animation-duration:.15s}.outline-none{--tw-outline-style:none;outline-style:none}.select-none{-webkit-user-select:none;user-select:none}.duration-200{animation-duration:.2s}.duration-300{animation-duration:.3s}.duration-2000{animation-duration:2s}.fade-in-0{--tw-enter-opacity:0}.running{animation-play-state:running}.zoom-in-95{--tw-enter-scale:.95}@media (hover:hover){.group-hover\:visible:is(:where(.group):hover *){visibility:visible}}.peer-disabled\:cursor-not-allowed:is(:where(.peer):disabled~*){cursor:not-allowed}.peer-disabled\:opacity-70:is(:where(.peer):disabled~*){opacity:.7}.file\:border-0::file-selector-button{border-style:var(--tw-border-style);border-width:0}.file\:bg-transparent::file-selector-button{background-color:#0000}.file\:text-sm::file-selector-button{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.file\:font-medium::file-selector-button{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.file\:text-foreground::file-selector-button{color:var(--foreground)}.placeholder\:text-muted-foreground::placeholder{color:var(--muted-foreground)}@media (hover:hover){.hover\:w-fit:hover{width:fit-content}.hover\:bg-accent:hover{background-color:var(--accent)}.hover\:bg-background\/60:hover{background-color:color-mix(in oklab,var(--background)60%,transparent)}.hover\:bg-destructive\/80:hover{background-color:color-mix(in oklab,var(--destructive)80%,transparent)}.hover\:bg-destructive\/90:hover{background-color:color-mix(in oklab,var(--destructive)90%,transparent)}.hover\:bg-gray-100:hover{background-color:var(--color-gray-100)}.hover\:bg-gray-200:hover{background-color:var(--color-gray-200)}.hover\:bg-muted:hover{background-color:var(--muted)}.hover\:bg-muted\/25:hover{background-color:color-mix(in oklab,var(--muted)25%,transparent)}.hover\:bg-muted\/50:hover{background-color:color-mix(in oklab,var(--muted)50%,transparent)}.hover\:bg-primary\/5:hover{background-color:color-mix(in oklab,var(--primary)5%,transparent)}.hover\:bg-primary\/20:hover{background-color:color-mix(in oklab,var(--primary)20%,transparent)}.hover\:bg-primary\/80:hover{background-color:color-mix(in oklab,var(--primary)80%,transparent)}.hover\:bg-primary\/90:hover{background-color:color-mix(in oklab,var(--primary)90%,transparent)}.hover\:bg-secondary\/80:hover{background-color:color-mix(in oklab,var(--secondary)80%,transparent)}.hover\:bg-zinc-300:hover{background-color:var(--color-zinc-300)}.hover\:text-accent-foreground:hover{color:var(--accent-foreground)}.hover\:text-foreground:hover{color:var(--foreground)}.hover\:text-gray-700:hover{color:var(--color-gray-700)}.hover\:underline:hover{text-decoration-line:underline}.hover\:opacity-100:hover{opacity:1}}.focus\:bg-accent:focus{background-color:var(--accent)}.focus\:text-accent-foreground:focus{color:var(--accent-foreground)}.focus\:ring-0:focus{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(0px + 
var(--tw-ring-offset-width))var(--tw-ring-color,currentColor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus\:ring-2:focus{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentColor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus\:ring-ring:focus{--tw-ring-color:var(--ring)}.focus\:ring-offset-0:focus{--tw-ring-offset-width:0px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus\:ring-offset-2:focus{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus\:outline-0:focus{outline-style:var(--tw-outline-style);outline-width:0}.focus\:outline-none:focus{--tw-outline-style:none;outline-style:none}.focus-visible\:relative:focus-visible{position:relative}.focus-visible\:ring-1:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(1px + var(--tw-ring-offset-width))var(--tw-ring-color,currentColor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-2:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentColor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-ring:focus-visible{--tw-ring-color:var(--ring)}.focus-visible\:ring-offset-2:focus-visible{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 
var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus-visible\:outline-none:focus-visible{--tw-outline-style:none;outline-style:none}.active\:right-0:active{right:calc(var(--spacing)*0)}.disabled\:pointer-events-none:disabled{pointer-events:none}.disabled\:cursor-not-allowed:disabled{cursor:not-allowed}.disabled\:opacity-50:disabled{opacity:.5}.data-\[disabled\]\:pointer-events-none[data-disabled]{pointer-events:none}.data-\[disabled\]\:opacity-50[data-disabled]{opacity:.5}.data-\[disabled\=true\]\:pointer-events-none[data-disabled=true]{pointer-events:none}.data-\[disabled\=true\]\:opacity-50[data-disabled=true]{opacity:.5}.data-\[selected\=\'true\'\]\:bg-accent[data-selected=true]{background-color:var(--accent)}.data-\[selected\=true\]\:text-accent-foreground[data-selected=true]{color:var(--accent-foreground)}.data-\[side\=bottom\]\:translate-y-1[data-side=bottom]{--tw-translate-y:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=bottom\]\:slide-in-from-top-2[data-side=bottom]{--tw-enter-translate-y:-.5rem}.data-\[side\=left\]\:-translate-x-1[data-side=left]{--tw-translate-x:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=left\]\:slide-in-from-right-2[data-side=left]{--tw-enter-translate-x:.5rem}.data-\[side\=right\]\:translate-x-1[data-side=right]{--tw-translate-x:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=right\]\:slide-in-from-left-2[data-side=right]{--tw-enter-translate-x:-.5rem}.data-\[side\=top\]\:-translate-y-1[data-side=top]{--tw-translate-y:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=top\]\:slide-in-from-bottom-2[data-side=top]{--tw-enter-translate-y:.5rem}.data-\[state\=active\]\:visible[data-state=active]{visibility:visible}.data-\[state\=active\]\:bg-background[data-state=active]{background-color:var(--background)}.data-\[state\=active\]\:text-foreground[data-state=active]{color:var(--foreground)}.data-\[state\=active\]\:shadow-sm[data-state=active]{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px 
var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.data-\[state\=checked\]\:bg-muted[data-state=checked]{background-color:var(--muted)}.data-\[state\=checked\]\:text-muted-foreground[data-state=checked]{color:var(--muted-foreground)}.data-\[state\=closed\]\:animate-out[data-state=closed]{--tw-exit-opacity:initial;--tw-exit-scale:initial;--tw-exit-rotate:initial;--tw-exit-translate-x:initial;--tw-exit-translate-y:initial;animation-name:exit;animation-duration:.15s}.data-\[state\=closed\]\:fade-out-0[data-state=closed]{--tw-exit-opacity:0}.data-\[state\=closed\]\:slide-out-to-top-\[48\%\][data-state=closed]{--tw-exit-translate-y:-48%}.data-\[state\=closed\]\:zoom-out-95[data-state=closed]{--tw-exit-scale:.95}.data-\[state\=inactive\]\:invisible[data-state=inactive]{visibility:hidden}.data-\[state\=open\]\:bg-accent[data-state=open]{background-color:var(--accent)}.data-\[state\=open\]\:text-muted-foreground[data-state=open]{color:var(--muted-foreground)}.data-\[state\=open\]\:animate-in[data-state=open]{--tw-enter-opacity:initial;--tw-enter-scale:initial;--tw-enter-rotate:initial;--tw-enter-translate-x:initial;--tw-enter-translate-y:initial;animation-name:enter;animation-duration:.15s}.data-\[state\=open\]\:fade-in-0[data-state=open]{--tw-enter-opacity:0}.data-\[state\=open\]\:slide-in-from-top-\[48\%\][data-state=open]{--tw-enter-translate-y:-48%}.data-\[state\=open\]\:zoom-in-95[data-state=open]{--tw-enter-scale:.95}.data-\[state\=selected\]\:bg-muted[data-state=selected]{background-color:var(--muted)}@supports ((-webkit-backdrop-filter:var(--tw)) or (backdrop-filter:var(--tw))){.supports-\[backdrop-filter\]\:bg-background\/60{background-color:color-mix(in oklab,var(--background)60%,transparent)}.supports-\[backdrop-filter\]\:bg-card\/75{background-color:color-mix(in oklab,var(--card)75%,transparent)}}@media (width>=40rem){.sm\:mt-0{margin-top:calc(var(--spacing)*0)}.sm\:max-w-\[500px\]{max-width:500px}.sm\:max-w-\[600px\]{max-width:600px}.sm\:max-w-md{max-width:var(--container-md)}.sm\:max-w-xl{max-width:var(--container-xl)}.sm\:flex-row{flex-direction:row}.sm\:justify-end{justify-content:flex-end}:where(.sm\:space-x-2>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*2)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*2)*calc(1 - var(--tw-space-x-reverse)))}.sm\:rounded-lg{border-radius:var(--radius)}.sm\:px-5{padding-inline:calc(var(--spacing)*5)}.sm\:text-left{text-align:left}}@media (width>=48rem){.md\:inline-block{display:inline-block}.md\:text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}}.dark\:border-blue-600:is(.dark *){border-color:var(--color-blue-600)}.dark\:border-destructive:is(.dark *){border-color:var(--destructive)}.dark\:border-gray-500:is(.dark *){border-color:var(--color-gray-500)}.dark\:border-gray-600:is(.dark *){border-color:var(--color-gray-600)}.dark\:border-gray-700:is(.dark *){border-color:var(--color-gray-700)}.dark\:border-green-600:is(.dark *){border-color:var(--color-green-600)}.dark\:border-red-600:is(.dark *){border-color:var(--color-red-600)}.dark\:border-yellow-600:is(.dark *){border-color:var(--color-yellow-600)}.dark\:bg-amber-900:is(.dark *){background-color:var(--color-amber-900)}.dark\:bg-blue-900\/30:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-900)30%,transparent)}.dark\:bg-gray-100\/20:is(.dark 
*){background-color:color-mix(in oklab,var(--color-gray-100)20%,transparent)}.dark\:bg-gray-800\/30:is(.dark *){background-color:color-mix(in oklab,var(--color-gray-800)30%,transparent)}.dark\:bg-gray-900:is(.dark *){background-color:var(--color-gray-900)}.dark\:bg-green-900\/30:is(.dark *){background-color:color-mix(in oklab,var(--color-green-900)30%,transparent)}.dark\:bg-red-900\/30:is(.dark *){background-color:color-mix(in oklab,var(--color-red-900)30%,transparent)}.dark\:bg-red-950:is(.dark *){background-color:var(--color-red-950)}.dark\:bg-yellow-900\/30:is(.dark *){background-color:color-mix(in oklab,var(--color-yellow-900)30%,transparent)}.dark\:bg-zinc-700:is(.dark *){background-color:var(--color-zinc-700)}.dark\:from-gray-900:is(.dark *){--tw-gradient-from:var(--color-gray-900);--tw-gradient-stops:var(--tw-gradient-via-stops,var(--tw-gradient-position),var(--tw-gradient-from)var(--tw-gradient-from-position),var(--tw-gradient-to)var(--tw-gradient-to-position))}.dark\:to-gray-800:is(.dark *){--tw-gradient-to:var(--color-gray-800);--tw-gradient-stops:var(--tw-gradient-via-stops,var(--tw-gradient-position),var(--tw-gradient-from)var(--tw-gradient-from-position),var(--tw-gradient-to)var(--tw-gradient-to-position))}.dark\:text-amber-200:is(.dark *){color:var(--color-amber-200)}.dark\:text-gray-300:is(.dark *){color:var(--color-gray-300)}.dark\:text-gray-400:is(.dark *){color:var(--color-gray-400)}.dark\:text-red-400:is(.dark *){color:var(--color-red-400)}.dark\:text-zinc-200:is(.dark *){color:var(--color-zinc-200)}@media (hover:hover){.dark\:hover\:bg-gray-700:is(.dark *):hover{background-color:var(--color-gray-700)}.dark\:hover\:bg-gray-800:is(.dark *):hover{background-color:var(--color-gray-800)}.dark\:hover\:bg-zinc-600:is(.dark *):hover{background-color:var(--color-zinc-600)}}.\[\&_\[cmdk-group-heading\]\]\:px-2 [cmdk-group-heading]{padding-inline:calc(var(--spacing)*2)}.\[\&_\[cmdk-group-heading\]\]\:py-1\.5 [cmdk-group-heading]{padding-block:calc(var(--spacing)*1.5)}.\[\&_\[cmdk-group-heading\]\]\:text-xs [cmdk-group-heading]{font-size:var(--text-xs);line-height:var(--tw-leading,var(--text-xs--line-height))}.\[\&_\[cmdk-group-heading\]\]\:font-medium [cmdk-group-heading]{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.\[\&_\[cmdk-group-heading\]\]\:text-muted-foreground [cmdk-group-heading]{color:var(--muted-foreground)}.\[\&_\[cmdk-group\]\]\:px-2 [cmdk-group]{padding-inline:calc(var(--spacing)*2)}.\[\&_\[cmdk-group\]\:not\(\[hidden\]\)_\~\[cmdk-group\]\]\:pt-0 [cmdk-group]:not([hidden])~[cmdk-group]{padding-top:calc(var(--spacing)*0)}.\[\&_\[cmdk-input-wrapper\]_svg\]\:h-5 [cmdk-input-wrapper] svg{height:calc(var(--spacing)*5)}.\[\&_\[cmdk-input-wrapper\]_svg\]\:w-5 [cmdk-input-wrapper] svg{width:calc(var(--spacing)*5)}.\[\&_\[cmdk-input\]\]\:h-12 [cmdk-input]{height:calc(var(--spacing)*12)}.\[\&_\[cmdk-item\]\]\:px-2 [cmdk-item]{padding-inline:calc(var(--spacing)*2)}.\[\&_\[cmdk-item\]\]\:py-3 [cmdk-item]{padding-block:calc(var(--spacing)*3)}.\[\&_\[cmdk-item\]_svg\]\:h-5 [cmdk-item] svg{height:calc(var(--spacing)*5)}.\[\&_\[cmdk-item\]_svg\]\:w-5 [cmdk-item] svg{width:calc(var(--spacing)*5)}.\[\&_p\]\:leading-relaxed p{--tw-leading:var(--leading-relaxed);line-height:var(--leading-relaxed)}.\[\&_svg\]\:pointer-events-none svg{pointer-events:none}.\[\&_svg\]\:size-4 svg{width:calc(var(--spacing)*4);height:calc(var(--spacing)*4)}.\[\&_svg\]\:shrink-0 svg{flex-shrink:0}.\[\&_tr\]\:border-b 
tr{border-bottom-style:var(--tw-border-style);border-bottom-width:1px}.\[\&_tr\:last-child\]\:border-0 tr:last-child{border-style:var(--tw-border-style);border-width:0}.\[\&\:\:-webkit-inner-spin-button\]\:appearance-none::-webkit-inner-spin-button{-webkit-appearance:none;-moz-appearance:none;appearance:none}.\[\&\:\:-webkit-inner-spin-button\]\:opacity-50::-webkit-inner-spin-button{opacity:.5}.\[\&\:\:-webkit-outer-spin-button\]\:appearance-none::-webkit-outer-spin-button{-webkit-appearance:none;-moz-appearance:none;appearance:none}.\[\&\:\:-webkit-outer-spin-button\]\:opacity-50::-webkit-outer-spin-button{opacity:.5}.\[\&\:has\(\[role\=checkbox\]\)\]\:pr-0:has([role=checkbox]){padding-right:calc(var(--spacing)*0)}.\[\&\>\[role\=checkbox\]\]\:translate-y-\[2px\]>[role=checkbox]{--tw-translate-y:2px;translate:var(--tw-translate-x)var(--tw-translate-y)}.\[\&\>span\]\:line-clamp-1>span{-webkit-line-clamp:1;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.\[\&\>svg\]\:absolute>svg{position:absolute}.\[\&\>svg\]\:top-4>svg{top:calc(var(--spacing)*4)}.\[\&\>svg\]\:left-4>svg{left:calc(var(--spacing)*4)}.\[\&\>svg\]\:text-destructive>svg{color:var(--destructive)}.\[\&\>svg\]\:text-foreground>svg{color:var(--foreground)}.\[\&\>svg\+div\]\:translate-y-\[-3px\]>svg+div{--tw-translate-y:-3px;translate:var(--tw-translate-x)var(--tw-translate-y)}.\[\&\>svg\~\*\]\:pl-7>svg~*{padding-left:calc(var(--spacing)*7)}.\[\&\>tr\]\:last\:border-b-0>tr:last-child{border-bottom-style:var(--tw-border-style);border-bottom-width:0}}:root{--background:#fff;--foreground:#09090b;--card:#fff;--card-foreground:#09090b;--popover:#fff;--popover-foreground:#09090b;--primary:#18181b;--primary-foreground:#fafafa;--secondary:#f4f4f5;--secondary-foreground:#18181b;--muted:#f4f4f5;--muted-foreground:#71717a;--accent:#f4f4f5;--accent-foreground:#18181b;--destructive:#ef4444;--destructive-foreground:#fafafa;--border:#e4e4e7;--input:#e4e4e7;--ring:#09090b;--chart-1:#e76e50;--chart-2:#2a9d90;--chart-3:#274754;--chart-4:#e8c468;--chart-5:#f4a462;--radius:.6rem;--sidebar-background:#fafafa;--sidebar-foreground:#3f3f46;--sidebar-primary:#18181b;--sidebar-primary-foreground:#fafafa;--sidebar-accent:#f4f4f5;--sidebar-accent-foreground:#18181b;--sidebar-border:#e5e7eb;--sidebar-ring:#3b82f6}.dark{--background:#09090b;--foreground:#fafafa;--card:#09090b;--card-foreground:#fafafa;--popover:#09090b;--popover-foreground:#fafafa;--primary:#fafafa;--primary-foreground:#18181b;--secondary:#27272a;--secondary-foreground:#fafafa;--muted:#27272a;--muted-foreground:#a1a1aa;--accent:#27272a;--accent-foreground:#fafafa;--destructive:#7f1d1d;--destructive-foreground:#fafafa;--border:#27272a;--input:#27272a;--ring:#d4d4d8;--chart-1:#2662d9;--chart-2:#2eb88a;--chart-3:#e88c30;--chart-4:#af57db;--chart-5:#e23670;--sidebar-background:#18181b;--sidebar-foreground:#f4f4f5;--sidebar-primary:#1d4ed8;--sidebar-primary-foreground:#fff;--sidebar-accent:#27272a;--sidebar-accent-foreground:#f4f4f5;--sidebar-border:#27272a;--sidebar-ring:#3b82f6}::-webkit-scrollbar{width:10px;height:10px}::-webkit-scrollbar-thumb{background-color:#ccc;border-radius:5px}::-webkit-scrollbar-track{background-color:#f2f2f2}.dark ::-webkit-scrollbar-thumb{background-color:#e6e6e6}.dark ::-webkit-scrollbar-track{background-color:#000}@keyframes 
enter{0%{opacity:var(--tw-enter-opacity,1);transform:translate3d(var(--tw-enter-translate-x,0),var(--tw-enter-translate-y,0),0)scale3d(var(--tw-enter-scale,1),var(--tw-enter-scale,1),var(--tw-enter-scale,1))rotate(var(--tw-enter-rotate,0))}}@keyframes exit{to{opacity:var(--tw-exit-opacity,1);transform:translate3d(var(--tw-exit-translate-x,0),var(--tw-exit-translate-y,0),0)scale3d(var(--tw-exit-scale,1),var(--tw-exit-scale,1),var(--tw-exit-scale,1))rotate(var(--tw-exit-rotate,0))}}@keyframes spin{to{transform:rotate(360deg)}}@keyframes pulse{50%{opacity:.5}}@property --tw-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-y{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-z{syntax:"*";inherits:false;initial-value:0}@property --tw-scale-x{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-y{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-z{syntax:"*";inherits:false;initial-value:1}@property --tw-rotate-x{syntax:"*";inherits:false;initial-value:rotateX(0)}@property --tw-rotate-y{syntax:"*";inherits:false;initial-value:rotateY(0)}@property --tw-rotate-z{syntax:"*";inherits:false;initial-value:rotateZ(0)}@property --tw-skew-x{syntax:"*";inherits:false;initial-value:skewX(0)}@property --tw-skew-y{syntax:"*";inherits:false;initial-value:skewY(0)}@property --tw-space-y-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-space-x-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-border-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-gradient-position{syntax:"*";inherits:false}@property --tw-gradient-from{syntax:"<color>";inherits:false;initial-value:#0000}@property --tw-gradient-via{syntax:"<color>";inherits:false;initial-value:#0000}@property --tw-gradient-to{syntax:"<color>";inherits:false;initial-value:#0000}@property --tw-gradient-stops{syntax:"*";inherits:false}@property --tw-gradient-via-stops{syntax:"*";inherits:false}@property --tw-gradient-from-position{syntax:"<length-percentage>";inherits:false;initial-value:0%}@property --tw-gradient-via-position{syntax:"<length-percentage>";inherits:false;initial-value:50%}@property --tw-gradient-to-position{syntax:"<length-percentage>";inherits:false;initial-value:100%}@property --tw-leading{syntax:"*";inherits:false}@property --tw-font-weight{syntax:"*";inherits:false}@property --tw-tracking{syntax:"*";inherits:false}@property --tw-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-shadow-color{syntax:"*";inherits:false}@property --tw-inset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-shadow-color{syntax:"*";inherits:false}@property --tw-ring-color{syntax:"*";inherits:false}@property --tw-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-ring-color{syntax:"*";inherits:false}@property --tw-inset-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-ring-inset{syntax:"*";inherits:false}@property --tw-ring-offset-width{syntax:"<length>";inherits:false;initial-value:0}@property --tw-ring-offset-color{syntax:"*";inherits:false;initial-value:#fff}@property --tw-ring-offset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-outline-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-blur{syntax:"*";inherits:false}@property --tw-brightness{syntax:"*";inherits:false}@property --tw-contrast{syntax:"*";inherits:false}@property --tw-grayscale{syntax:"*";inherits:false}@property --tw-hue-rotate{syntax:"*";inherits:false}@property --tw-invert{syntax:"*";inherits:false}@property 
--tw-opacity{syntax:"*";inherits:false}@property --tw-saturate{syntax:"*";inherits:false}@property --tw-sepia{syntax:"*";inherits:false}@property --tw-drop-shadow{syntax:"*";inherits:false}@property --tw-backdrop-blur{syntax:"*";inherits:false}@property --tw-backdrop-brightness{syntax:"*";inherits:false}@property --tw-backdrop-contrast{syntax:"*";inherits:false}@property --tw-backdrop-grayscale{syntax:"*";inherits:false}@property --tw-backdrop-hue-rotate{syntax:"*";inherits:false}@property --tw-backdrop-invert{syntax:"*";inherits:false}@property --tw-backdrop-opacity{syntax:"*";inherits:false}@property --tw-backdrop-saturate{syntax:"*";inherits:false}@property --tw-backdrop-sepia{syntax:"*";inherits:false}@property --tw-duration{syntax:"*";inherits:false}:root{--sigma-background-color:#fff;--sigma-controls-background-color:#fff;--sigma-controls-background-color-hover:rgba(0,0,0,.2);--sigma-controls-border-color:rgba(0,0,0,.2);--sigma-controls-color:#000;--sigma-controls-zindex:100;--sigma-controls-margin:5px;--sigma-controls-size:30px}div.react-sigma{height:100%;width:100%;position:relative;background:var(--sigma-background-color)}div.sigma-container{height:100%;width:100%}.react-sigma-controls{position:absolute;z-index:var(--sigma-controls-zindex);border:2px solid var(--sigma-controls-border-color);border-radius:4px;color:var(--sigma-controls-color);background-color:var(--sigma-controls-background-color)}.react-sigma-controls.bottom-right{bottom:var(--sigma-controls-margin);right:var(--sigma-controls-margin)}.react-sigma-controls.bottom-left{bottom:var(--sigma-controls-margin);left:var(--sigma-controls-margin)}.react-sigma-controls.top-right{top:var(--sigma-controls-margin);right:var(--sigma-controls-margin)}.react-sigma-controls.top-left{top:var(--sigma-controls-margin);left:var(--sigma-controls-margin)}.react-sigma-controls:first-child{border-top-left-radius:2px;border-top-right-radius:2px}.react-sigma-controls:last-child{border-bottom-left-radius:2px;border-bottom-right-radius:2px}.react-sigma-control{width:var(--sigma-controls-size);height:var(--sigma-controls-size);line-height:var(--sigma-controls-size);background-color:var(--sigma-controls-background-color);border-bottom:1px solid var(--sigma-controls-border-color)}.react-sigma-control:last-child{border-bottom:none}.react-sigma-control>*{box-sizing:border-box}.react-sigma-control>button{display:block;border:none;margin:0;padding:0;width:var(--sigma-controls-size);height:var(--sigma-controls-size);line-height:var(--sigma-controls-size);background-position:center;background-size:50%;background-repeat:no-repeat;background-color:var(--sigma-controls-background-color);clip:rect(0,0,0,0)}.react-sigma-control>button:hover{background-color:var(--sigma-controls-background-color-hover)}.react-sigma-search{background-color:var(--sigma-controls-background-color)}.react-sigma-search label{visibility:hidden}.react-sigma-search input{color:var(--sigma-controls-color);background-color:var(--sigma-controls-background-color);font-size:1em;width:100%;margin:0;border:none;padding:var(--sigma-controls-margin);box-sizing:border-box}:root{--sigma-grey-color:#ccc}.react-sigma .option.hoverable{cursor:pointer!important}.react-sigma .text-ellipsis{white-space:nowrap;overflow:hidden;text-overflow:ellipsis}.react-sigma .react-select__clear-indicator{cursor:pointer!important}.react-sigma .text-muted{color:var(--sigma-grey-color)}.react-sigma .text-italic{font-style:italic}.react-sigma .text-center{text-align:center}.react-sigma 
.graph-search{min-width:250px}.react-sigma .graph-search .option{padding:2px 8px}.react-sigma .graph-search .dropdown-indicator{font-size:1.25em;padding:4px}.react-sigma .graph-search .option.selected{background-color:var(--sigma-grey-color)}.react-sigma .node .render{position:relative;display:inline-block;width:1em;height:1em;border-radius:1em;background-color:var(--sigma-grey-color);margin-right:8px}.react-sigma .node{display:flex;flex-direction:row;align-items:center}.react-sigma .node .render{flex-grow:0;flex-shrink:0;margin-right:0 .25em}.react-sigma .node .label{flex-grow:1;flex-shrink:1}.react-sigma .edge{display:flex;flex-direction:column;align-items:flex-start;flex-grow:0;flex-shrink:0;flex-wrap:nowrap}.react-sigma .edge .node{font-size:.7em}.react-sigma .edge .body{display:flex;flex-direction:row;flex-grow:1;flex-shrink:1;min-height:.6em}.react-sigma .edge .body .render{display:flex;flex-direction:column;margin:0 2px}.react-sigma .edge .body .render .dash,.react-sigma .edge .body .render .dotted{display:inline-block;width:0;margin:0 2px;border:2px solid #ccc;flex-grow:1;flex-shrink:1}.react-sigma .edge .body .render .dotted{border-style:dotted}.react-sigma .edge .body .render .arrow{width:0;height:0;border-left:.3em solid transparent;border-right:.3em solid transparent;border-top:.6em solid red;flex-shrink:0;flex-grow:0;border-left-width:.3em;border-right-width:.3em}.react-sigma .edge .body .label{flex-grow:1;flex-shrink:1;text-align:center} diff --git a/lightrag/api/webui/assets/index-CAa7yEmm.css b/lightrag/api/webui/assets/index-CAa7yEmm.css deleted file mode 100644 index 2798c5f395..0000000000 --- a/lightrag/api/webui/assets/index-CAa7yEmm.css +++ /dev/null @@ -1 +0,0 @@ -/*! tailwindcss v4.0.8 | MIT License | https://tailwindcss.com */@layer theme{:root,:host{--font-sans:ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-mono:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--color-red-100:oklch(.936 .032 17.717);--color-red-400:oklch(.704 .191 22.216);--color-red-500:oklch(.637 .237 25.331);--color-red-600:oklch(.577 .245 27.325);--color-red-700:oklch(.505 .213 27.518);--color-red-950:oklch(.258 .092 26.042);--color-amber-100:oklch(.962 .059 95.617);--color-amber-200:oklch(.924 .12 95.746);--color-amber-700:oklch(.555 .163 48.998);--color-amber-800:oklch(.473 .137 46.201);--color-amber-900:oklch(.414 .112 45.904);--color-yellow-600:oklch(.681 .162 75.834);--color-green-500:oklch(.723 .219 149.579);--color-green-600:oklch(.627 .194 149.214);--color-emerald-50:oklch(.979 .021 166.113);--color-emerald-400:oklch(.765 .177 163.223);--color-emerald-700:oklch(.508 .118 165.612);--color-teal-100:oklch(.953 .051 180.801);--color-blue-600:oklch(.546 .245 262.881);--color-blue-700:oklch(.488 .243 264.376);--color-violet-700:oklch(.491 .27 292.581);--color-gray-100:oklch(.967 .003 264.542);--color-gray-200:oklch(.928 .006 264.531);--color-gray-300:oklch(.872 .01 258.338);--color-gray-400:oklch(.707 .022 261.325);--color-gray-600:oklch(.446 .03 256.802);--color-gray-700:oklch(.373 .034 259.733);--color-gray-800:oklch(.278 .033 256.848);--color-gray-900:oklch(.21 .034 264.665);--color-zinc-50:oklch(.985 0 0);--color-black:#000;--color-white:#fff;--spacing:.25rem;--container-xs:20rem;--container-lg:32rem;--container-xl:36rem;--text-xs:.75rem;--text-xs--line-height:calc(1/.75);--text-sm:.875rem;--text-sm--line-height:calc(1.25/.875);--text-base:1rem;--text-base--line-height: 1.5 
;--text-lg:1.125rem;--text-lg--line-height:calc(1.75/1.125);--text-3xl:1.875rem;--text-3xl--line-height: 1.2 ;--font-weight-medium:500;--font-weight-semibold:600;--font-weight-bold:700;--tracking-tight:-.025em;--tracking-wide:.025em;--tracking-widest:.1em;--leading-relaxed:1.625;--radius-xs:.125rem;--ease-in-out:cubic-bezier(.4,0,.2,1);--animate-spin:spin 1s linear infinite;--animate-pulse:pulse 2s cubic-bezier(.4,0,.6,1)infinite;--blur-sm:8px;--blur-lg:16px;--default-transition-duration:.15s;--default-transition-timing-function:cubic-bezier(.4,0,.2,1);--default-font-family:var(--font-sans);--default-font-feature-settings:var(--font-sans--font-feature-settings);--default-font-variation-settings:var(--font-sans--font-variation-settings);--default-mono-font-family:var(--font-mono);--default-mono-font-feature-settings:var(--font-mono--font-feature-settings);--default-mono-font-variation-settings:var(--font-mono--font-variation-settings)}}@layer base{*,:after,:before,::backdrop{box-sizing:border-box;border:0 solid;margin:0;padding:0}::file-selector-button{box-sizing:border-box;border:0 solid;margin:0;padding:0}html,:host{-webkit-text-size-adjust:100%;-moz-tab-size:4;tab-size:4;line-height:1.5;font-family:var(--default-font-family,ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji");font-feature-settings:var(--default-font-feature-settings,normal);font-variation-settings:var(--default-font-variation-settings,normal);-webkit-tap-highlight-color:transparent}body{line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;-webkit-text-decoration:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:var(--default-mono-font-family,ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace);font-feature-settings:var(--default-mono-font-feature-settings,normal);font-variation-settings:var(--default-mono-font-variation-settings,normal);font-size:1em}small{font-size:80%}sub,sup{vertical-align:baseline;font-size:75%;line-height:0;position:relative}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}:-moz-focusring{outline:auto}progress{vertical-align:baseline}summary{display:list-item}ol,ul,menu{list-style:none}img,svg,video,canvas,audio,iframe,embed,object{vertical-align:middle;display:block}img,video{max-width:100%;height:auto}button,input,select,optgroup,textarea{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}::file-selector-button{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}:where(select:is([multiple],[size])) optgroup{font-weight:bolder}:where(select:is([multiple],[size])) optgroup option{padding-inline-start:20px}::file-selector-button{margin-inline-end:4px}::placeholder{opacity:1;color:color-mix(in oklab,currentColor 
50%,transparent)}textarea{resize:vertical}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-date-and-time-value{min-height:1lh;text-align:inherit}::-webkit-datetime-edit{display:inline-flex}::-webkit-datetime-edit-fields-wrapper{padding:0}::-webkit-datetime-edit{padding-block:0}::-webkit-datetime-edit-year-field{padding-block:0}::-webkit-datetime-edit-month-field{padding-block:0}::-webkit-datetime-edit-day-field{padding-block:0}::-webkit-datetime-edit-hour-field{padding-block:0}::-webkit-datetime-edit-minute-field{padding-block:0}::-webkit-datetime-edit-second-field{padding-block:0}::-webkit-datetime-edit-millisecond-field{padding-block:0}::-webkit-datetime-edit-meridiem-field{padding-block:0}:-moz-ui-invalid{box-shadow:none}button,input:where([type=button],[type=reset],[type=submit]){-webkit-appearance:button;-moz-appearance:button;appearance:button}::file-selector-button{-webkit-appearance:button;-moz-appearance:button;appearance:button}::-webkit-inner-spin-button{height:auto}::-webkit-outer-spin-button{height:auto}[hidden]:where(:not([hidden=until-found])){display:none!important}*{border-color:var(--border);outline-color:color-mix(in oklab,var(--ring)50%,transparent)}body{background-color:var(--background);color:var(--foreground)}*{scrollbar-color:initial;scrollbar-width:initial}}@layer components;@layer utilities{.pointer-events-none{pointer-events:none}.visible{visibility:visible}.sr-only{clip:rect(0,0,0,0);white-space:nowrap;border-width:0;width:1px;height:1px;margin:-1px;padding:0;position:absolute;overflow:hidden}.absolute{position:absolute}.fixed{position:fixed}.relative{position:relative}.sticky{position:sticky}.inset-0{inset:calc(var(--spacing)*0)}.top-0{top:calc(var(--spacing)*0)}.top-1\/2{top:50%}.top-2{top:calc(var(--spacing)*2)}.top-4{top:calc(var(--spacing)*4)}.top-12{top:calc(var(--spacing)*12)}.top-\[50\%\]{top:50%}.right-0{right:calc(var(--spacing)*0)}.right-2{right:calc(var(--spacing)*2)}.right-4{right:calc(var(--spacing)*4)}.bottom-0{bottom:calc(var(--spacing)*0)}.bottom-2{bottom:calc(var(--spacing)*2)}.bottom-4{bottom:calc(var(--spacing)*4)}.left-0{left:calc(var(--spacing)*0)}.left-1\/2{left:50%}.left-2{left:calc(var(--spacing)*2)}.left-\[50\%\]{left:50%}.left-\[calc\(1rem\+2\.5rem\)\]{left:3.5rem}.z-10{z-index:10}.z-50{z-index:50}.z-60{z-index:60}.container{width:100%}@media (width>=40rem){.container{max-width:40rem}}@media (width>=48rem){.container{max-width:48rem}}@media (width>=64rem){.container{max-width:64rem}}@media (width>=80rem){.container{max-width:80rem}}@media 
(width>=96rem){.container{max-width:96rem}}.\!m-0{margin:calc(var(--spacing)*0)!important}.m-0{margin:calc(var(--spacing)*0)}.-mx-1{margin-inline:calc(var(--spacing)*-1)}.mx-1{margin-inline:calc(var(--spacing)*1)}.mx-4{margin-inline:calc(var(--spacing)*4)}.my-1{margin-block:calc(var(--spacing)*1)}.mt-2{margin-top:calc(var(--spacing)*2)}.mt-4{margin-top:calc(var(--spacing)*4)}.mr-1{margin-right:calc(var(--spacing)*1)}.mr-2{margin-right:calc(var(--spacing)*2)}.mr-4{margin-right:calc(var(--spacing)*4)}.mr-6{margin-right:calc(var(--spacing)*6)}.mb-1{margin-bottom:calc(var(--spacing)*1)}.mb-2{margin-bottom:calc(var(--spacing)*2)}.mb-4{margin-bottom:calc(var(--spacing)*4)}.ml-1{margin-left:calc(var(--spacing)*1)}.ml-2{margin-left:calc(var(--spacing)*2)}.ml-auto{margin-left:auto}.line-clamp-1{-webkit-line-clamp:1;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.\!inline{display:inline!important}.block{display:block}.flex{display:flex}.grid{display:grid}.hidden{display:none}.inline{display:inline}.inline-flex{display:inline-flex}.table{display:table}.aspect-square{aspect-ratio:1}.\!size-full{width:100%!important;height:100%!important}.size-4{width:calc(var(--spacing)*4);height:calc(var(--spacing)*4)}.size-6{width:calc(var(--spacing)*6);height:calc(var(--spacing)*6)}.size-7{width:calc(var(--spacing)*7);height:calc(var(--spacing)*7)}.size-8{width:calc(var(--spacing)*8);height:calc(var(--spacing)*8)}.size-10{width:calc(var(--spacing)*10);height:calc(var(--spacing)*10)}.size-full{width:100%;height:100%}.h-1\/2{height:50%}.h-2\.5{height:calc(var(--spacing)*2.5)}.h-3{height:calc(var(--spacing)*3)}.h-3\.5{height:calc(var(--spacing)*3.5)}.h-4{height:calc(var(--spacing)*4)}.h-5{height:calc(var(--spacing)*5)}.h-6{height:calc(var(--spacing)*6)}.h-7{height:calc(var(--spacing)*7)}.h-8{height:calc(var(--spacing)*8)}.h-9{height:calc(var(--spacing)*9)}.h-10{height:calc(var(--spacing)*10)}.h-11{height:calc(var(--spacing)*11)}.h-12{height:calc(var(--spacing)*12)}.h-24{height:calc(var(--spacing)*24)}.h-52{height:calc(var(--spacing)*52)}.h-\[1px\]{height:1px}.h-\[var\(--radix-select-trigger-height\)\]{height:var(--radix-select-trigger-height)}.h-fit{height:fit-content}.h-full{height:100%}.h-px{height:1px}.h-screen{height:100vh}.max-h-8{max-height:calc(var(--spacing)*8)}.max-h-48{max-height:calc(var(--spacing)*48)}.max-h-96{max-height:calc(var(--spacing)*96)}.max-h-\[60vh\]{max-height:60vh}.max-h-\[300px\]{max-height:300px}.max-h-full{max-height:100%}.min-h-0{min-height:calc(var(--spacing)*0)}.w-2\.5{width:calc(var(--spacing)*2.5)}.w-3{width:calc(var(--spacing)*3)}.w-3\.5{width:calc(var(--spacing)*3.5)}.w-4{width:calc(var(--spacing)*4)}.w-5{width:calc(var(--spacing)*5)}.w-6{width:calc(var(--spacing)*6)}.w-7{width:calc(var(--spacing)*7)}.w-8{width:calc(var(--spacing)*8)}.w-9{width:calc(var(--spacing)*9)}.w-12{width:calc(var(--spacing)*12)}.w-16{width:calc(var(--spacing)*16)}.w-24{width:calc(var(--spacing)*24)}.w-56{width:calc(var(--spacing)*56)}.w-\[1px\]{width:1px}.w-auto{width:auto}.w-full{width:100%}.w-screen{width:100vw}.max-w-80{max-width:calc(var(--spacing)*80)}.max-w-\[80\%\]{max-width:80%}.max-w-\[480px\]{max-width:480px}.max-w-lg{max-width:var(--container-lg)}.max-w-none{max-width:none}.max-w-xs{max-width:var(--container-xs)}.min-w-0{min-width:calc(var(--spacing)*0)}.min-w-24{min-width:calc(var(--spacing)*24)}.min-w-\[8rem\]{min-width:8rem}.min-w-\[180px\]{min-width:180px}.min-w-\[300px\]{min-width:300px}.min-w-\[var\(--radix-select-trigger-width\)\]{min-width:var(--radix-select-trigger-width)}.
flex-1{flex:1}.flex-auto{flex:auto}.shrink-0{flex-shrink:0}.grow{flex-grow:1}.caption-bottom{caption-side:bottom}.-translate-x-1\/2{--tw-translate-x: -50% ;translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-x-\[-50\%\]{--tw-translate-x:-50%;translate:var(--tw-translate-x)var(--tw-translate-y)}.-translate-y-1\/2{--tw-translate-y: -50% ;translate:var(--tw-translate-x)var(--tw-translate-y)}.-translate-y-20{--tw-translate-y:calc(var(--spacing)*-20);translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-y-0{--tw-translate-y:calc(var(--spacing)*0);translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-y-\[-50\%\]{--tw-translate-y:-50%;translate:var(--tw-translate-x)var(--tw-translate-y)}.scale-125{--tw-scale-x:125%;--tw-scale-y:125%;--tw-scale-z:125%;scale:var(--tw-scale-x)var(--tw-scale-y)}.transform{transform:var(--tw-rotate-x)var(--tw-rotate-y)var(--tw-rotate-z)var(--tw-skew-x)var(--tw-skew-y)}.animate-pulse{animation:var(--animate-pulse)}.animate-spin{animation:var(--animate-spin)}.cursor-default{cursor:default}.cursor-help{cursor:help}.cursor-not-allowed{cursor:not-allowed}.cursor-pointer{cursor:pointer}.touch-none{touch-action:none}.\[appearance\:textfield\]{-webkit-appearance:textfield;-moz-appearance:textfield;appearance:textfield}.grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.flex-col{flex-direction:column}.flex-col-reverse{flex-direction:column-reverse}.place-items-center{place-items:center}.items-center{align-items:center}.items-start{align-items:flex-start}.justify-between{justify-content:space-between}.justify-center{justify-content:center}.justify-end{justify-content:flex-end}.justify-start{justify-content:flex-start}.gap-1{gap:calc(var(--spacing)*1)}.gap-1\.5{gap:calc(var(--spacing)*1.5)}.gap-2{gap:calc(var(--spacing)*2)}.gap-2\.5{gap:calc(var(--spacing)*2.5)}.gap-3{gap:calc(var(--spacing)*3)}.gap-4{gap:calc(var(--spacing)*4)}.gap-6{gap:calc(var(--spacing)*6)}.gap-px{gap:1px}:where(.space-y-1>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-1\.5>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1.5)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1.5)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-2>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*2)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*2)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-3>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*3)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*3)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-4>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*4)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*4)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-6>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*6)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*6)*calc(1 - 
var(--tw-space-y-reverse)))}.self-center{align-self:center}.truncate{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.overflow-auto{overflow:auto}.overflow-hidden{overflow:hidden}.overflow-visible{overflow:visible}.overflow-x-hidden{overflow-x:hidden}.overflow-y-auto{overflow-y:auto}.\!rounded-none{border-radius:0!important}.rounded{border-radius:.25rem}.rounded-\[inherit\]{border-radius:inherit}.rounded-full{border-radius:3.40282e38px}.rounded-lg{border-radius:var(--radius)}.rounded-md{border-radius:calc(var(--radius) - 2px)}.rounded-sm{border-radius:calc(var(--radius) - 4px)}.rounded-xl{border-radius:calc(var(--radius) + 4px)}.rounded-xs{border-radius:var(--radius-xs)}.rounded-l-none{border-top-left-radius:0;border-bottom-left-radius:0}.rounded-tr-none{border-top-right-radius:0}.rounded-br-none{border-bottom-right-radius:0}.border,.border-1{border-style:var(--tw-border-style);border-width:1px}.border-2{border-style:var(--tw-border-style);border-width:2px}.border-4{border-style:var(--tw-border-style);border-width:4px}.border-t{border-top-style:var(--tw-border-style);border-top-width:1px}.border-b{border-bottom-style:var(--tw-border-style);border-bottom-width:1px}.border-l{border-left-style:var(--tw-border-style);border-left-width:1px}.\!border-none{--tw-border-style:none;border-style:none!important}.border-dashed{--tw-border-style:dashed;border-style:dashed}.\!border-input{border-color:var(--input)!important}.border-border\/40{border-color:color-mix(in oklab,var(--border)40%,transparent)}.border-destructive\/50{border-color:color-mix(in oklab,var(--destructive)50%,transparent)}.border-gray-400{border-color:var(--color-gray-400)}.border-input{border-color:var(--input)}.border-muted-foreground\/25{border-color:color-mix(in oklab,var(--muted-foreground)25%,transparent)}.border-muted-foreground\/50{border-color:color-mix(in oklab,var(--muted-foreground)50%,transparent)}.border-primary{border-color:var(--primary)}.border-transparent{border-color:#0000}.border-t-transparent{border-top-color:#0000}.border-l-transparent{border-left-color:#0000}.\!bg-background{background-color:var(--background)!important}.\!bg-emerald-400{background-color:var(--color-emerald-400)!important}.bg-amber-100{background-color:var(--color-amber-100)}.bg-background{background-color:var(--background)}.bg-background\/60{background-color:color-mix(in oklab,var(--background)60%,transparent)}.bg-background\/80{background-color:color-mix(in oklab,var(--background)80%,transparent)}.bg-background\/90{background-color:color-mix(in oklab,var(--background)90%,transparent)}.bg-background\/95{background-color:color-mix(in oklab,var(--background)95%,transparent)}.bg-black\/10{background-color:color-mix(in oklab,var(--color-black)10%,transparent)}.bg-black\/80{background-color:color-mix(in oklab,var(--color-black)80%,transparent)}.bg-border{background-color:var(--border)}.bg-card{background-color:var(--card)}.bg-destructive{background-color:var(--destructive)}.bg-foreground\/10{background-color:color-mix(in oklab,var(--foreground)10%,transparent)}.bg-green-500{background-color:var(--color-green-500)}.bg-muted{background-color:var(--muted)}.bg-muted\/50{background-color:color-mix(in oklab,var(--muted)50%,transparent)}.bg-popover{background-color:var(--popover)}.bg-primary{background-color:var(--primary)}.bg-primary-foreground\/60{background-color:color-mix(in oklab,var(--primary-foreground)60%,transparent)}.bg-primary\/5{background-color:color-mix(in 
oklab,var(--primary)5%,transparent)}.bg-red-100{background-color:var(--color-red-100)}.bg-red-500{background-color:var(--color-red-500)}.bg-red-700{background-color:var(--color-red-700)}.bg-secondary{background-color:var(--secondary)}.bg-transparent{background-color:#0000}.bg-white\/30{background-color:color-mix(in oklab,var(--color-white)30%,transparent)}.bg-gradient-to-br{--tw-gradient-position:to bottom right in oklab;background-image:linear-gradient(var(--tw-gradient-stops))}.from-emerald-50{--tw-gradient-from:var(--color-emerald-50);--tw-gradient-stops:var(--tw-gradient-via-stops,var(--tw-gradient-position),var(--tw-gradient-from)var(--tw-gradient-from-position),var(--tw-gradient-to)var(--tw-gradient-to-position))}.to-teal-100{--tw-gradient-to:var(--color-teal-100);--tw-gradient-stops:var(--tw-gradient-via-stops,var(--tw-gradient-position),var(--tw-gradient-from)var(--tw-gradient-from-position),var(--tw-gradient-to)var(--tw-gradient-to-position))}.object-cover{object-fit:cover}.\!p-0{padding:calc(var(--spacing)*0)!important}.\!p-2{padding:calc(var(--spacing)*2)!important}.p-0{padding:calc(var(--spacing)*0)}.p-1{padding:calc(var(--spacing)*1)}.p-2{padding:calc(var(--spacing)*2)}.p-3{padding:calc(var(--spacing)*3)}.p-4{padding:calc(var(--spacing)*4)}.p-6{padding:calc(var(--spacing)*6)}.p-16{padding:calc(var(--spacing)*16)}.p-\[1px\]{padding:1px}.px-1{padding-inline:calc(var(--spacing)*1)}.px-2{padding-inline:calc(var(--spacing)*2)}.px-2\.5{padding-inline:calc(var(--spacing)*2.5)}.px-3{padding-inline:calc(var(--spacing)*3)}.px-4{padding-inline:calc(var(--spacing)*4)}.px-5{padding-inline:calc(var(--spacing)*5)}.px-8{padding-inline:calc(var(--spacing)*8)}.py-0\.5{padding-block:calc(var(--spacing)*.5)}.py-1{padding-block:calc(var(--spacing)*1)}.py-1\.5{padding-block:calc(var(--spacing)*1.5)}.py-2{padding-block:calc(var(--spacing)*2)}.py-2\.5{padding-block:calc(var(--spacing)*2.5)}.py-3{padding-block:calc(var(--spacing)*3)}.py-6{padding-block:calc(var(--spacing)*6)}.pt-0{padding-top:calc(var(--spacing)*0)}.pt-1{padding-top:calc(var(--spacing)*1)}.pt-4{padding-top:calc(var(--spacing)*4)}.pt-6{padding-top:calc(var(--spacing)*6)}.pr-1{padding-right:calc(var(--spacing)*1)}.pr-2{padding-right:calc(var(--spacing)*2)}.pb-1{padding-bottom:calc(var(--spacing)*1)}.pb-2{padding-bottom:calc(var(--spacing)*2)}.pb-8{padding-bottom:calc(var(--spacing)*8)}.pb-12{padding-bottom:calc(var(--spacing)*12)}.pl-1{padding-left:calc(var(--spacing)*1)}.pl-8{padding-left:calc(var(--spacing)*8)}.text-center{text-align:center}.text-left{text-align:left}.align-middle{vertical-align:middle}.font-mono{font-family:var(--font-mono)}.text-3xl{font-size:var(--text-3xl);line-height:var(--tw-leading,var(--text-3xl--line-height))}.text-base{font-size:var(--text-base);line-height:var(--tw-leading,var(--text-base--line-height))}.text-lg{font-size:var(--text-lg);line-height:var(--tw-leading,var(--text-lg--line-height))}.text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.text-xs{font-size:var(--text-xs);line-height:var(--tw-leading,var(--text-xs--line-height))}.leading-none{--tw-leading:1;line-height:1}.font-bold{--tw-font-weight:var(--font-weight-bold);font-weight:var(--font-weight-bold)}.font-medium{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.font-semibold{--tw-font-weight:var(--font-weight-semibold);font-weight:var(--font-weight-semibold)}.tracking-tight{--tw-tracking:var(--tracking-tight);letter-spacing:var(--tracking-tight)}.tracking-wide{--tw-tracking
:var(--tracking-wide);letter-spacing:var(--tracking-wide)}.tracking-widest{--tw-tracking:var(--tracking-widest);letter-spacing:var(--tracking-widest)}.break-words{overflow-wrap:break-word}.text-ellipsis{text-overflow:ellipsis}.whitespace-nowrap{white-space:nowrap}.whitespace-pre-wrap{white-space:pre-wrap}.\!text-zinc-50{color:var(--color-zinc-50)!important}.text-amber-700{color:var(--color-amber-700)}.text-amber-800{color:var(--color-amber-800)}.text-blue-600{color:var(--color-blue-600)}.text-blue-700{color:var(--color-blue-700)}.text-card-foreground{color:var(--card-foreground)}.text-current{color:currentColor}.text-destructive{color:var(--destructive)}.text-destructive-foreground{color:var(--destructive-foreground)}.text-emerald-400{color:var(--color-emerald-400)}.text-emerald-700{color:var(--color-emerald-700)}.text-foreground{color:var(--foreground)}.text-foreground\/80{color:color-mix(in oklab,var(--foreground)80%,transparent)}.text-gray-400{color:var(--color-gray-400)}.text-gray-700{color:var(--color-gray-700)}.text-gray-900{color:var(--color-gray-900)}.text-green-600{color:var(--color-green-600)}.text-muted-foreground{color:var(--muted-foreground)}.text-muted-foreground\/70{color:color-mix(in oklab,var(--muted-foreground)70%,transparent)}.text-popover-foreground{color:var(--popover-foreground)}.text-primary{color:var(--primary)}.text-primary-foreground{color:var(--primary-foreground)}.text-primary\/60{color:color-mix(in oklab,var(--primary)60%,transparent)}.text-red-500{color:var(--color-red-500)}.text-red-600{color:var(--color-red-600)}.text-secondary-foreground{color:var(--secondary-foreground)}.text-violet-700{color:var(--color-violet-700)}.text-white{color:var(--color-white)}.text-yellow-600{color:var(--color-yellow-600)}.underline-offset-4{text-underline-offset:4px}.opacity-0{opacity:0}.opacity-20{opacity:.2}.opacity-50{opacity:.5}.opacity-60{opacity:.6}.opacity-70{opacity:.7}.opacity-80{opacity:.8}.opacity-100{opacity:1}.shadow{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-\[0_0_8px_rgba\(0\,0\,0\,0\.2\)\]{--tw-shadow:0 0 8px var(--tw-shadow-color,#0003);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-\[0_0_12px_rgba\(34\,197\,94\,0\.4\)\]{--tw-shadow:0 0 12px var(--tw-shadow-color,#22c55e66);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-\[0_0_12px_rgba\(239\,68\,68\,0\.4\)\]{--tw-shadow:0 0 12px var(--tw-shadow-color,#ef444466);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-lg{--tw-shadow:0 10px 15px -3px var(--tw-shadow-color,#0000001a),0 4px 6px -4px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-md{--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-sm{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px 
[… remainder of minified Web UI stylesheet omitted: generated Tailwind utility classes (transition, focus/hover/disabled and data-state variants), :root and .dark theme CSS variables, scrollbar styling, enter/exit keyframes, @property registrations, and react-sigma graph-control styles — regenerated build artifact, not hand-written code …]
diff --git a/lightrag/api/webui/assets/index-CKfcfaVR.js b/lightrag/api/webui/assets/index-DHRLzVNB.js
similarity index 51%
rename from lightrag/api/webui/assets/index-CKfcfaVR.js
rename to lightrag/api/webui/assets/index-DHRLzVNB.js
index a7800f2f3b..e203160df9 100644
--- a/lightrag/api/webui/assets/index-CKfcfaVR.js
+++ b/lightrag/api/webui/assets/index-DHRLzVNB.js
[… minified JavaScript bundle diff omitted: the renamed bundle is a regenerated Vite production build of the Web UI, containing the React production runtime (react-jsx-runtime, react, scheduler, react-dom, react-dom-client license headers) plus minified application code — regenerated build artifact, not hand-written code …]
u_(i,l,f){l=ur(f,l),l=zg(i.stateNode,l,2),i=oo(i,l,2),i!==null&&(In(i,2),Kr(i))}function Nt(i,l,f){if(i.tag===3)u_(i,i,f);else for(;l!==null;){if(l.tag===3){u_(l,i,f);break}else if(l.tag===1){var h=l.stateNode;if(typeof l.type.getDerivedStateFromError=="function"||typeof h.componentDidCatch=="function"&&(fo===null||!fo.has(h))){i=ur(f,i),f=fR(2),h=oo(l,f,2),h!==null&&(pR(f,h,l,i),In(h,2),Kr(h));break}}l=l.return}}function Eh(i,l,f){var h=i.pingCache;if(h===null){h=i.pingCache=new g6;var v=new Set;h.set(l,v)}else v=h.get(l),v===void 0&&(v=new Set,h.set(l,v));v.has(f)||(ch=!0,v.add(f),i=S6.bind(null,i,l,f),l.then(i,i))}function S6(i,l,f){var h=i.pingCache;h!==null&&h.delete(l),i.pingedLanes|=i.suspendedLanes&f,i.warmLanes&=~f,It===i&&(yt&f)===f&&(qt===4||qt===3&&(yt&62914560)===yt&&300>he()-ph?!(Ut&2)&&es(i,0):dh|=f,Ji===yt&&(Ji=0)),Kr(i)}function c_(i,l){l===0&&(l=Dn()),i=Za(i,l),i!==null&&(In(i,l),Kr(i))}function E6(i){var l=i.memoizedState,f=0;l!==null&&(f=l.retryLane),c_(i,f)}function w6(i,l){var f=0;switch(i.tag){case 13:var h=i.stateNode,v=i.memoizedState;v!==null&&(f=v.retryLane);break;case 19:h=i.stateNode;break;case 22:h=i.stateNode._retryCache;break;default:throw Error(r(314))}h!==null&&h.delete(l),c_(i,f)}function x6(i,l){return Ee(i,l)}var Zc=null,rs=null,wh=!1,Qc=!1,xh=!1,ni=0;function Kr(i){i!==rs&&i.next===null&&(rs===null?Zc=rs=i:rs=rs.next=i),Qc=!0,wh||(wh=!0,T6(k6))}function Bl(i,l){if(!xh&&Qc){xh=!0;do for(var f=!1,h=Zc;h!==null;){if(i!==0){var v=h.pendingLanes;if(v===0)var T=0;else{var P=h.suspendedLanes,q=h.pingedLanes;T=(1<<31-Et(42|i)+1)-1,T&=v&~(P&~q),T=T&201326677?T&201326677|1:T?T|2:0}T!==0&&(f=!0,p_(h,T))}else T=yt,T=fa(h,h===It?T:0),!(T&3)||Xe(h,T)||(f=!0,p_(h,T));h=h.next}while(f);xh=!1}}function k6(){Qc=wh=!1;var i=0;ni!==0&&(I6()&&(i=ni),ni=0);for(var l=he(),f=null,h=Zc;h!==null;){var v=h.next,T=d_(h,l);T===0?(h.next=null,f===null?Zc=v:f.next=v,v===null&&(rs=f)):(f=h,(i!==0||T&3)&&(Qc=!0)),h=v}Bl(i)}function d_(i,l){for(var f=i.suspendedLanes,h=i.pingedLanes,v=i.expirationTimes,T=i.pendingLanes&-62914561;0"u"?null:document;function __(i,l,f){var h=os;if(h&&typeof l=="string"&&l){var v=sr(l);v='link[rel="'+i+'"][href="'+v+'"]',typeof f=="string"&&(v+='[crossorigin="'+f+'"]'),R_.has(v)||(R_.add(v),i={rel:i,crossOrigin:f,href:l},h.querySelector(v)===null&&(l=h.createElement("link"),hn(l,"link",i),rn(l),h.head.appendChild(l)))}}function j6(i){Na.D(i),__("dns-prefetch",i,null)}function G6(i,l){Na.C(i,l),__("preconnect",i,l)}function H6(i,l,f){Na.L(i,l,f);var h=os;if(h&&i&&l){var v='link[rel="preload"][as="'+sr(l)+'"]';l==="image"&&f&&f.imageSrcSet?(v+='[imagesrcset="'+sr(f.imageSrcSet)+'"]',typeof f.imageSizes=="string"&&(v+='[imagesizes="'+sr(f.imageSizes)+'"]')):v+='[href="'+sr(i)+'"]';var T=v;switch(l){case"style":T=is(i);break;case"script":T=ss(i)}br.has(T)||(i=I({rel:"preload",href:l==="image"&&f&&f.imageSrcSet?void 0:i,as:l},f),br.set(T,i),h.querySelector(v)!==null||l==="style"&&h.querySelector(Gl(T))||l==="script"&&h.querySelector(Hl(T))||(l=h.createElement("link"),hn(l,"link",i),rn(l),h.head.appendChild(l)))}}function $6(i,l){Na.m(i,l);var f=os;if(f&&i){var h=l&&typeof 
l.as=="string"?l.as:"script",v='link[rel="modulepreload"][as="'+sr(h)+'"][href="'+sr(i)+'"]',T=v;switch(h){case"audioworklet":case"paintworklet":case"serviceworker":case"sharedworker":case"worker":case"script":T=ss(i)}if(!br.has(T)&&(i=I({rel:"modulepreload",href:i},l),br.set(T,i),f.querySelector(v)===null)){switch(h){case"audioworklet":case"paintworklet":case"serviceworker":case"sharedworker":case"worker":case"script":if(f.querySelector(Hl(T)))return}h=f.createElement("link"),hn(h,"link",i),rn(h),f.head.appendChild(h)}}}function q6(i,l,f){Na.S(i,l,f);var h=os;if(h&&i){var v=Ci(h).hoistableStyles,T=is(i);l=l||"default";var P=v.get(T);if(!P){var q={loading:0,preload:null};if(P=h.querySelector(Gl(T)))q.loading=5;else{i=I({rel:"stylesheet",href:i,"data-precedence":l},f),(f=br.get(T))&&Ph(i,f);var X=P=h.createElement("link");rn(X),hn(X,"link",i),X._p=new Promise(function(ne,be){X.onload=ne,X.onerror=be}),X.addEventListener("load",function(){q.loading|=1}),X.addEventListener("error",function(){q.loading|=2}),q.loading|=4,ad(P,l,h)}P={type:"stylesheet",instance:P,count:1,state:q},v.set(T,P)}}}function V6(i,l){Na.X(i,l);var f=os;if(f&&i){var h=Ci(f).hoistableScripts,v=ss(i),T=h.get(v);T||(T=f.querySelector(Hl(v)),T||(i=I({src:i,async:!0},l),(l=br.get(v))&&zh(i,l),T=f.createElement("script"),rn(T),hn(T,"link",i),f.head.appendChild(T)),T={type:"script",instance:T,count:1,state:null},h.set(v,T))}}function W6(i,l){Na.M(i,l);var f=os;if(f&&i){var h=Ci(f).hoistableScripts,v=ss(i),T=h.get(v);T||(T=f.querySelector(Hl(v)),T||(i=I({src:i,async:!0,type:"module"},l),(l=br.get(v))&&zh(i,l),T=f.createElement("script"),rn(T),hn(T,"link",i),f.head.appendChild(T)),T={type:"script",instance:T,count:1,state:null},h.set(v,T))}}function C_(i,l,f,h){var v=(v=Ie.current)?rd(v):null;if(!v)throw Error(r(446));switch(i){case"meta":case"title":return null;case"style":return typeof f.precedence=="string"&&typeof f.href=="string"?(l=is(f.href),f=Ci(v).hoistableStyles,h=f.get(l),h||(h={type:"style",instance:null,count:0,state:null},f.set(l,h)),h):{type:"void",instance:null,count:0,state:null};case"link":if(f.rel==="stylesheet"&&typeof f.href=="string"&&typeof f.precedence=="string"){i=is(f.href);var T=Ci(v).hoistableStyles,P=T.get(i);if(P||(v=v.ownerDocument||v,P={type:"stylesheet",instance:null,count:0,state:{loading:0,preload:null}},T.set(i,P),(T=v.querySelector(Gl(i)))&&!T._p&&(P.instance=T,P.state.loading=5),br.has(i)||(f={rel:"preload",as:"style",href:f.href,crossOrigin:f.crossOrigin,integrity:f.integrity,media:f.media,hrefLang:f.hrefLang,referrerPolicy:f.referrerPolicy},br.set(i,f),T||Y6(v,i,f,P.state))),l&&h===null)throw Error(r(528,""));return P}if(l&&h!==null)throw Error(r(529,""));return null;case"script":return l=f.async,f=f.src,typeof f=="string"&&l&&typeof l!="function"&&typeof l!="symbol"?(l=ss(f),f=Ci(v).hoistableScripts,h=f.get(l),h||(h={type:"script",instance:null,count:0,state:null},f.set(l,h)),h):{type:"void",instance:null,count:0,state:null};default:throw Error(r(444,i))}}function is(i){return'href="'+sr(i)+'"'}function Gl(i){return'link[rel="stylesheet"]['+i+"]"}function N_(i){return I({},i,{"data-precedence":i.precedence,precedence:null})}function Y6(i,l,f,h){i.querySelector('link[rel="preload"][as="style"]['+l+"]")?h.loading=1:(l=i.createElement("link"),h.preload=l,l.addEventListener("load",function(){return h.loading|=1}),l.addEventListener("error",function(){return h.loading|=2}),hn(l,"link",f),rn(l),i.head.appendChild(l))}function ss(i){return'[src="'+sr(i)+'"]'}function 
Hl(i){return"script[async]"+i}function O_(i,l,f){if(l.count++,l.instance===null)switch(l.type){case"style":var h=i.querySelector('style[data-href~="'+sr(f.href)+'"]');if(h)return l.instance=h,rn(h),h;var v=I({},f,{"data-href":f.href,"data-precedence":f.precedence,href:null,precedence:null});return h=(i.ownerDocument||i).createElement("style"),rn(h),hn(h,"style",v),ad(h,f.precedence,i),l.instance=h;case"stylesheet":v=is(f.href);var T=i.querySelector(Gl(v));if(T)return l.state.loading|=4,l.instance=T,rn(T),T;h=N_(f),(v=br.get(v))&&Ph(h,v),T=(i.ownerDocument||i).createElement("link"),rn(T);var P=T;return P._p=new Promise(function(q,X){P.onload=q,P.onerror=X}),hn(T,"link",h),l.state.loading|=4,ad(T,f.precedence,i),l.instance=T;case"script":return T=ss(f.src),(v=i.querySelector(Hl(T)))?(l.instance=v,rn(v),v):(h=f,(v=br.get(T))&&(h=I({},f),zh(h,v)),i=i.ownerDocument||i,v=i.createElement("script"),rn(v),hn(v,"link",h),i.head.appendChild(v),l.instance=v);case"void":return null;default:throw Error(r(443,l.type))}else l.type==="stylesheet"&&!(l.state.loading&4)&&(h=l.instance,l.state.loading|=4,ad(h,f.precedence,i));return l.instance}function ad(i,l,f){for(var h=f.querySelectorAll('link[rel="stylesheet"][data-precedence],style[data-precedence]'),v=h.length?h[h.length-1]:null,T=v,P=0;P title"):null)}function K6(i,l,f){if(f===1||l.itemProp!=null)return!1;switch(i){case"meta":case"title":return!0;case"style":if(typeof l.precedence!="string"||typeof l.href!="string"||l.href==="")break;return!0;case"link":if(typeof l.rel!="string"||typeof l.href!="string"||l.href===""||l.onLoad||l.onError)break;switch(l.rel){case"stylesheet":return i=l.disabled,typeof l.precedence=="string"&&i==null;default:return!0}case"script":if(l.async&&typeof l.async!="function"&&typeof l.async!="symbol"&&!l.onLoad&&!l.onError&&l.src&&typeof l.src=="string")return!0}return!1}function L_(i){return!(i.type==="stylesheet"&&!(i.state.loading&3))}var $l=null;function X6(){}function Z6(i,l,f){if($l===null)throw Error(r(475));var h=$l;if(l.type==="stylesheet"&&(typeof f.media!="string"||matchMedia(f.media).matches!==!1)&&!(l.state.loading&4)){if(l.instance===null){var v=is(f.href),T=i.querySelector(Gl(v));if(T){i=T._p,i!==null&&typeof i=="object"&&typeof i.then=="function"&&(h.count++,h=id.bind(h),i.then(h,h)),l.state.loading|=4,l.instance=T,rn(T);return}T=i.ownerDocument||i,f=N_(f),(v=br.get(v))&&Ph(f,v),T=T.createElement("link"),rn(T);var P=T;P._p=new Promise(function(q,X){P.onload=q,P.onerror=X}),hn(T,"link",f),l.instance=T}h.stylesheets===null&&(h.stylesheets=new Map),h.stylesheets.set(l,i),(i=l.state.preload)&&!(l.state.loading&3)&&(h.count++,l=id.bind(h),i.addEventListener("load",l),i.addEventListener("error",l))}}function Q6(){if($l===null)throw Error(r(475));var i=$l;return i.stylesheets&&i.count===0&&Bh(i,i.stylesheets),0"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e)}catch(t){console.error(t)}}return e(),Yh.exports=v9(),Yh.exports}var E9=S9(),Ql={},nC;function w9(){if(nC)return Ql;nC=1,Object.defineProperty(Ql,"__esModule",{value:!0}),Ql.parse=s,Ql.serialize=d;const e=/^[\u0021-\u003A\u003C\u003E-\u007E]+$/,t=/^[\u0021-\u003A\u003C-\u007E]*$/,n=/^([.]?[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?)([.][a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?)*$/i,r=/^[\u0020-\u003A\u003D-\u007E]*$/,a=Object.prototype.toString,o=(()=>{const m=function(){};return m.prototype=Object.create(null),m})();function s(m,b){const y=new o,S=m.length;if(S<2)return y;const x=(b==null?void 0:b.decode)||p;let 
R=0;do{const k=m.indexOf("=",R);if(k===-1)break;const A=m.indexOf(";",R),C=A===-1?S:A;if(k>C){R=m.lastIndexOf(";",k-1)+1;continue}const N=u(m,R,k),_=c(m,k,N),O=m.slice(N,_);if(y[O]===void 0){let F=u(m,k+1,C),L=c(m,C,F);const I=x(m.slice(F,L));y[O]=I}R=C+1}while(Ry;){const S=m.charCodeAt(--b);if(S!==32&&S!==9)return b+1}return y}function d(m,b,y){const S=(y==null?void 0:y.encode)||encodeURIComponent;if(!e.test(m))throw new TypeError(`argument name is invalid: ${m}`);const x=S(b);if(!t.test(x))throw new TypeError(`argument val is invalid: ${b}`);let R=m+"="+x;if(!y)return R;if(y.maxAge!==void 0){if(!Number.isInteger(y.maxAge))throw new TypeError(`option maxAge is invalid: ${y.maxAge}`);R+="; Max-Age="+y.maxAge}if(y.domain){if(!n.test(y.domain))throw new TypeError(`option domain is invalid: ${y.domain}`);R+="; Domain="+y.domain}if(y.path){if(!r.test(y.path))throw new TypeError(`option path is invalid: ${y.path}`);R+="; Path="+y.path}if(y.expires){if(!g(y.expires)||!Number.isFinite(y.expires.valueOf()))throw new TypeError(`option expires is invalid: ${y.expires}`);R+="; Expires="+y.expires.toUTCString()}if(y.httpOnly&&(R+="; HttpOnly"),y.secure&&(R+="; Secure"),y.partitioned&&(R+="; Partitioned"),y.priority)switch(typeof y.priority=="string"?y.priority.toLowerCase():void 0){case"low":R+="; Priority=Low";break;case"medium":R+="; Priority=Medium";break;case"high":R+="; Priority=High";break;default:throw new TypeError(`option priority is invalid: ${y.priority}`)}if(y.sameSite)switch(typeof y.sameSite=="string"?y.sameSite.toLowerCase():y.sameSite){case!0:case"strict":R+="; SameSite=Strict";break;case"lax":R+="; SameSite=Lax";break;case"none":R+="; SameSite=None";break;default:throw new TypeError(`option sameSite is invalid: ${y.sameSite}`)}return R}function p(m){if(m.indexOf("%")===-1)return m;try{return decodeURIComponent(m)}catch{return m}}function g(m){return a.call(m)==="[object Date]"}return Ql}w9();/** + */var u_;function yq(){if(u_)return nc;u_=1;var e=mq(),t=$p(),n=Kz();function r(i){var c="https://react.dev/errors/"+i;if(1)":-1S||X[h]!==re[S]){var ve=` +`+X[h].replace(" at new "," at ");return i.displayName&&ve.includes("")&&(ve=ve.replace("",i.displayName)),ve}while(1<=h&&0<=S);break}}}finally{W=!1,Error.prepareStackTrace=p}return(p=i?i.displayName||i.name:"")?B(p):""}function G(i){switch(i.tag){case 26:case 27:case 5:return B(i.type);case 16:return B("Lazy");case 13:return B("Suspense");case 19:return B("SuspenseList");case 0:case 15:return i=K(i.type,!1),i;case 11:return i=K(i.type.render,!1),i;case 1:return i=K(i.type,!0),i;default:return""}}function H(i){try{var c="";do c+=G(i),i=i.return;while(i);return c}catch(p){return` +Error generating stack: `+p.message+` +`+p.stack}}function F(i){var c=i,p=i;if(i.alternate)for(;c.return;)c=c.return;else{i=c;do c=i,c.flags&4098&&(p=c.return),i=c.return;while(i)}return c.tag===3?p:null}function Y(i){if(i.tag===13){var c=i.memoizedState;if(c===null&&(i=i.alternate,i!==null&&(c=i.memoizedState)),c!==null)return c.dehydrated}return null}function M(i){if(F(i)!==i)throw Error(r(188))}function V(i){var c=i.alternate;if(!c){if(c=F(i),c===null)throw Error(r(188));return c!==i?null:i}for(var p=i,h=c;;){var S=p.return;if(S===null)break;var A=S.alternate;if(A===null){if(h=S.return,h!==null){p=h;continue}break}if(S.child===A.child){for(A=S.child;A;){if(A===p)return M(S),i;if(A===h)return M(S),c;A=A.sibling}throw Error(r(188))}if(p.return!==h.return)p=S,h=A;else{for(var 
z=!1,q=S.child;q;){if(q===p){z=!0,p=S,h=A;break}if(q===h){z=!0,h=S,p=A;break}q=q.sibling}if(!z){for(q=A.child;q;){if(q===p){z=!0,p=A,h=S;break}if(q===h){z=!0,h=A,p=S;break}q=q.sibling}if(!z)throw Error(r(189))}}if(p.alternate!==h)throw Error(r(190))}if(p.tag!==3)throw Error(r(188));return p.stateNode.current===p?i:c}function j(i){var c=i.tag;if(c===5||c===26||c===27||c===6)return i;for(i=i.child;i!==null;){if(c=j(i),c!==null)return c;i=i.sibling}return null}var P=Array.isArray,Z=n.__DOM_INTERNALS_DO_NOT_USE_OR_WARN_USERS_THEY_CANNOT_UPGRADE,Q={pending:!1,data:null,method:null,action:null},oe=[],ae=-1;function ce(i){return{current:i}}function Re(i){0>ae||(i.current=oe[ae],oe[ae]=null,ae--)}function ie(i,c){ae++,oe[ae]=i.current,i.current=c}var Te=ce(null),ne=ce(null),xe=ce(null),Se=ce(null);function be(i,c){switch(ie(xe,c),ie(ne,i),ie(Te,null),i=c.nodeType,i){case 9:case 11:c=(c=c.documentElement)&&(c=c.namespaceURI)?OC(c):0;break;default:if(i=i===8?c.parentNode:c,c=i.tagName,i=i.namespaceURI)i=OC(i),c=IC(i,c);else switch(c){case"svg":c=1;break;case"math":c=2;break;default:c=0}}Re(Te),ie(Te,c)}function J(){Re(Te),Re(ne),Re(xe)}function fe(i){i.memoizedState!==null&&ie(Se,i);var c=Te.current,p=IC(c,i.type);c!==p&&(ie(ne,i),ie(Te,p))}function ke(i){ne.current===i&&(Re(Te),Re(ne)),Se.current===i&&(Re(Se),Xl._currentValue=Q)}var he=Object.prototype.hasOwnProperty,we=e.unstable_scheduleCallback,se=e.unstable_cancelCallback,Be=e.unstable_shouldYield,je=e.unstable_requestPaint,ye=e.unstable_now,Oe=e.unstable_getCurrentPriorityLevel,ee=e.unstable_ImmediatePriority,de=e.unstable_UserBlockingPriority,Ne=e.unstable_NormalPriority,ze=e.unstable_LowPriority,We=e.unstable_IdlePriority,wt=e.log,Tt=e.unstable_setDisableYieldValue,yt=null,et=null;function Rt(i){if(et&&typeof et.onCommitFiberRoot=="function")try{et.onCommitFiberRoot(yt,i,void 0,(i.current.flags&128)===128)}catch{}}function st(i){if(typeof wt=="function"&&Tt(i),et&&typeof et.setStrictMode=="function")try{et.setStrictMode(yt,i)}catch{}}var Et=Math.clz32?Math.clz32:zt,Ht=Math.log,gn=Math.LN2;function zt(i){return i>>>=0,i===0?32:31-(Ht(i)/gn|0)|0}var sr=128,Vr=4194304;function Jt(i){var c=i&42;if(c!==0)return c;switch(i&-i){case 1:return 1;case 2:return 2;case 4:return 4;case 8:return 8;case 16:return 16;case 32:return 32;case 64:return 64;case 128:case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return i&4194176;case 4194304:case 8388608:case 16777216:case 33554432:return i&62914560;case 67108864:return 67108864;case 134217728:return 134217728;case 268435456:return 268435456;case 536870912:return 536870912;case 1073741824:return 0;default:return i}}function fa(i,c){var p=i.pendingLanes;if(p===0)return 0;var h=0,S=i.suspendedLanes,A=i.pingedLanes,z=i.warmLanes;i=i.finishedLanes!==0;var q=p&134217727;return q!==0?(p=q&~S,p!==0?h=Jt(p):(A&=q,A!==0?h=Jt(A):i||(z=q&~z,z!==0&&(h=Jt(z))))):(q=p&~S,q!==0?h=Jt(q):A!==0?h=Jt(A):i||(z=p&~z,z!==0&&(h=Jt(z)))),h===0?0:c!==0&&c!==h&&!(c&S)&&(S=h&-h,z=c&-c,S>=z||S===32&&(z&4194176)!==0)?c:h}function Xe(i,c){return(i.pendingLanes&~(i.suspendedLanes&~i.pingedLanes)&c)===0}function vt(i,c){switch(i){case 1:case 2:case 4:case 8:return c+250;case 16:case 32:case 64:case 128:case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return c+5e3;case 4194304:case 8388608:case 16777216:case 33554432:return-1;case 
67108864:case 134217728:case 268435456:case 536870912:case 1073741824:return-1;default:return-1}}function Ot(){var i=sr;return sr<<=1,!(sr&4194176)&&(sr=128),i}function Ln(){var i=Vr;return Vr<<=1,!(Vr&62914560)&&(Vr=4194304),i}function Nn(i){for(var c=[],p=0;31>p;p++)c.push(i);return c}function Mn(i,c){i.pendingLanes|=c,c!==268435456&&(i.suspendedLanes=0,i.pingedLanes=0,i.warmLanes=0)}function ga(i,c,p,h,S,A){var z=i.pendingLanes;i.pendingLanes=p,i.suspendedLanes=0,i.pingedLanes=0,i.warmLanes=0,i.expiredLanes&=p,i.entangledLanes&=p,i.errorRecoveryDisabledLanes&=p,i.shellSuspendCounter=0;var q=i.entanglements,X=i.expirationTimes,re=i.hiddenUpdates;for(p=z&~p;0"u"||typeof window.document>"u"||typeof window.document.createElement>"u"),e$=RegExp("^[:A-Z_a-z\\u00C0-\\u00D6\\u00D8-\\u00F6\\u00F8-\\u02FF\\u0370-\\u037D\\u037F-\\u1FFF\\u200C-\\u200D\\u2070-\\u218F\\u2C00-\\u2FEF\\u3001-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFFD][:A-Z_a-z\\u00C0-\\u00D6\\u00D8-\\u00F6\\u00F8-\\u02FF\\u0370-\\u037D\\u037F-\\u1FFF\\u200C-\\u200D\\u2070-\\u218F\\u2C00-\\u2FEF\\u3001-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFFD\\-.0-9\\u00B7\\u0300-\\u036F\\u203F-\\u2040]*$"),IA={},DA={};function t$(i){return he.call(DA,i)?!0:he.call(IA,i)?!1:e$.test(i)?DA[i]=!0:(IA[i]=!0,!1)}function uu(i,c,p){if(t$(c))if(p===null)i.removeAttribute(c);else{switch(typeof p){case"undefined":case"function":case"symbol":i.removeAttribute(c);return;case"boolean":var h=c.toLowerCase().slice(0,5);if(h!=="data-"&&h!=="aria-"){i.removeAttribute(c);return}}i.setAttribute(c,""+p)}}function du(i,c,p){if(p===null)i.removeAttribute(c);else{switch(typeof p){case"undefined":case"function":case"symbol":case"boolean":i.removeAttribute(c);return}i.setAttribute(c,""+p)}}function ma(i,c,p,h){if(h===null)i.removeAttribute(p);else{switch(typeof h){case"undefined":case"function":case"symbol":case"boolean":i.removeAttribute(p);return}i.setAttributeNS(c,p,""+h)}}function lr(i){switch(typeof i){case"bigint":case"boolean":case"number":case"string":case"undefined":return i;case"object":return i;default:return""}}function LA(i){var c=i.type;return(i=i.nodeName)&&i.toLowerCase()==="input"&&(c==="checkbox"||c==="radio")}function n$(i){var c=LA(i)?"checked":"value",p=Object.getOwnPropertyDescriptor(i.constructor.prototype,c),h=""+i[c];if(!i.hasOwnProperty(c)&&typeof p<"u"&&typeof p.get=="function"&&typeof p.set=="function"){var S=p.get,A=p.set;return Object.defineProperty(i,c,{configurable:!0,get:function(){return S.call(this)},set:function(z){h=""+z,A.call(this,z)}}),Object.defineProperty(i,c,{enumerable:p.enumerable}),{getValue:function(){return h},setValue:function(z){h=""+z},stopTracking:function(){i._valueTracker=null,delete i[c]}}}}function pu(i){i._valueTracker||(i._valueTracker=n$(i))}function MA(i){if(!i)return!1;var c=i._valueTracker;if(!c)return!0;var p=c.getValue(),h="";return i&&(h=LA(i)?i.checked?"true":"false":i.value),i=h,i!==p?(c.setValue(i),!0):!1}function fu(i){if(i=i||(typeof document<"u"?document:void 0),typeof i>"u")return null;try{return i.activeElement||i.body}catch{return i.body}}var r$=/[\n"\\]/g;function cr(i){return i.replace(r$,function(c){return"\\"+c.charCodeAt(0).toString(16)+" "})}function qf(i,c,p,h,S,A,z,q){i.name="",z!=null&&typeof z!="function"&&typeof z!="symbol"&&typeof 
z!="boolean"?i.type=z:i.removeAttribute("type"),c!=null?z==="number"?(c===0&&i.value===""||i.value!=c)&&(i.value=""+lr(c)):i.value!==""+lr(c)&&(i.value=""+lr(c)):z!=="submit"&&z!=="reset"||i.removeAttribute("value"),c!=null?Vf(i,z,lr(c)):p!=null?Vf(i,z,lr(p)):h!=null&&i.removeAttribute("value"),S==null&&A!=null&&(i.defaultChecked=!!A),S!=null&&(i.checked=S&&typeof S!="function"&&typeof S!="symbol"),q!=null&&typeof q!="function"&&typeof q!="symbol"&&typeof q!="boolean"?i.name=""+lr(q):i.removeAttribute("name")}function PA(i,c,p,h,S,A,z,q){if(A!=null&&typeof A!="function"&&typeof A!="symbol"&&typeof A!="boolean"&&(i.type=A),c!=null||p!=null){if(!(A!=="submit"&&A!=="reset"||c!=null))return;p=p!=null?""+lr(p):"",c=c!=null?""+lr(c):p,q||c===i.value||(i.value=c),i.defaultValue=c}h=h??S,h=typeof h!="function"&&typeof h!="symbol"&&!!h,i.checked=q?i.checked:!!h,i.defaultChecked=!!h,z!=null&&typeof z!="function"&&typeof z!="symbol"&&typeof z!="boolean"&&(i.name=z)}function Vf(i,c,p){c==="number"&&fu(i.ownerDocument)===i||i.defaultValue===""+p||(i.defaultValue=""+p)}function Di(i,c,p,h){if(i=i.options,c){c={};for(var S=0;S=gl),KA=" ",XA=!1;function ZA(i,c){switch(i){case"keyup":return O$.indexOf(c.keyCode)!==-1;case"keydown":return c.keyCode!==229;case"keypress":case"mousedown":case"focusout":return!0;default:return!1}}function QA(i){return i=i.detail,typeof i=="object"&&"data"in i?i.data:null}var Fi=!1;function D$(i,c){switch(i){case"compositionend":return QA(c);case"keypress":return c.which!==32?null:(XA=!0,KA);case"textInput":return i=c.data,i===KA&&XA?null:i;default:return null}}function L$(i,c){if(Fi)return i==="compositionend"||!rg&&ZA(i,c)?(i=HA(),hu=Qf=Ya=null,Fi=!1,i):null;switch(i){case"paste":return null;case"keypress":if(!(c.ctrlKey||c.altKey||c.metaKey)||c.ctrlKey&&c.altKey){if(c.char&&1=c)return{node:p,offset:c-i};i=h}e:{for(;p;){if(p.nextSibling){p=p.nextSibling;break e}p=p.parentNode}p=void 0}p=i1(p)}}function l1(i,c){return i&&c?i===c?!0:i&&i.nodeType===3?!1:c&&c.nodeType===3?l1(i,c.parentNode):"contains"in i?i.contains(c):i.compareDocumentPosition?!!(i.compareDocumentPosition(c)&16):!1:!1}function c1(i){i=i!=null&&i.ownerDocument!=null&&i.ownerDocument.defaultView!=null?i.ownerDocument.defaultView:window;for(var c=fu(i.document);c instanceof i.HTMLIFrameElement;){try{var p=typeof c.contentWindow.location.href=="string"}catch{p=!1}if(p)i=c.contentWindow;else break;c=fu(i.document)}return c}function ig(i){var c=i&&i.nodeName&&i.nodeName.toLowerCase();return c&&(c==="input"&&(i.type==="text"||i.type==="search"||i.type==="tel"||i.type==="url"||i.type==="password")||c==="textarea"||i.contentEditable==="true")}function G$(i,c){var p=c1(c);c=i.focusedElem;var h=i.selectionRange;if(p!==c&&c&&c.ownerDocument&&l1(c.ownerDocument.documentElement,c)){if(h!==null&&ig(c)){if(i=h.start,p=h.end,p===void 0&&(p=i),"selectionStart"in c)c.selectionStart=i,c.selectionEnd=Math.min(p,c.value.length);else if(p=(i=c.ownerDocument||document)&&i.defaultView||window,p.getSelection){p=p.getSelection();var S=c.textContent.length,A=Math.min(h.start,S);h=h.end===void 0?A:Math.min(h.end,S),!p.extend&&A>h&&(S=h,h=A,A=S),S=s1(c,A);var 
z=s1(c,h);S&&z&&(p.rangeCount!==1||p.anchorNode!==S.node||p.anchorOffset!==S.offset||p.focusNode!==z.node||p.focusOffset!==z.offset)&&(i=i.createRange(),i.setStart(S.node,S.offset),p.removeAllRanges(),A>h?(p.addRange(i),p.extend(z.node,z.offset)):(i.setEnd(z.node,z.offset),p.addRange(i)))}}for(i=[],p=c;p=p.parentNode;)p.nodeType===1&&i.push({element:p,left:p.scrollLeft,top:p.scrollTop});for(typeof c.focus=="function"&&c.focus(),c=0;c=document.documentMode,zi=null,sg=null,yl=null,lg=!1;function u1(i,c,p){var h=p.window===p?p.document:p.nodeType===9?p:p.ownerDocument;lg||zi==null||zi!==fu(h)||(h=zi,"selectionStart"in h&&ig(h)?h={start:h.selectionStart,end:h.selectionEnd}:(h=(h.ownerDocument&&h.ownerDocument.defaultView||window).getSelection(),h={anchorNode:h.anchorNode,anchorOffset:h.anchorOffset,focusNode:h.focusNode,focusOffset:h.focusOffset}),yl&&bl(yl,h)||(yl=h,h=nd(sg,"onSelect"),0>=z,S-=z,ba=1<<32-Et(c)+S|p<Qe?(ln=Ye,Ye=null):ln=Ye.sibling;var kt=pe(le,Ye,ue[Qe],Ae);if(kt===null){Ye===null&&(Ye=ln);break}i&&Ye&&kt.alternate===null&&c(le,Ye),te=A(kt,te,Qe),ut===null?Ge=kt:ut.sibling=kt,ut=kt,Ye=ln}if(Qe===ue.length)return p(le,Ye),xt&&Ho(le,Qe),Ge;if(Ye===null){for(;QeQe?(ln=Ye,Ye=null):ln=Ye.sibling;var ho=pe(le,Ye,kt.value,Ae);if(ho===null){Ye===null&&(Ye=ln);break}i&&Ye&&ho.alternate===null&&c(le,Ye),te=A(ho,te,Qe),ut===null?Ge=ho:ut.sibling=ho,ut=ho,Ye=ln}if(kt.done)return p(le,Ye),xt&&Ho(le,Qe),Ge;if(Ye===null){for(;!kt.done;Qe++,kt=ue.next())kt=Ce(le,kt.value,Ae),kt!==null&&(te=A(kt,te,Qe),ut===null?Ge=kt:ut.sibling=kt,ut=kt);return xt&&Ho(le,Qe),Ge}for(Ye=h(Ye);!kt.done;Qe++,kt=ue.next())kt=me(Ye,le,Qe,kt.value,Ae),kt!==null&&(i&&kt.alternate!==null&&Ye.delete(kt.key===null?Qe:kt.key),te=A(kt,te,Qe),ut===null?Ge=kt:ut.sibling=kt,ut=kt);return i&&Ye.forEach(function(iq){return c(le,iq)}),xt&&Ho(le,Qe),Ge}function Vt(le,te,ue,Ae){if(typeof ue=="object"&&ue!==null&&ue.type===u&&ue.key===null&&(ue=ue.props.children),typeof ue=="object"&&ue!==null){switch(ue.$$typeof){case s:e:{for(var Ge=ue.key;te!==null;){if(te.key===Ge){if(Ge=ue.type,Ge===u){if(te.tag===7){p(le,te.sibling),Ae=S(te,ue.props.children),Ae.return=le,le=Ae;break e}}else if(te.elementType===Ge||typeof Ge=="object"&&Ge!==null&&Ge.$$typeof===k&&R1(Ge)===te.type){p(le,te.sibling),Ae=S(te,ue.props),Tl(Ae,ue),Ae.return=le,le=Ae;break e}p(le,te);break}else c(le,te);te=te.sibling}ue.type===u?(Ae=ei(ue.props.children,le.mode,Ae,ue.key),Ae.return=le,le=Ae):(Ae=Vu(ue.type,ue.key,ue.props,null,le.mode,Ae),Tl(Ae,ue),Ae.return=le,le=Ae)}return z(le);case l:e:{for(Ge=ue.key;te!==null;){if(te.key===Ge)if(te.tag===4&&te.stateNode.containerInfo===ue.containerInfo&&te.stateNode.implementation===ue.implementation){p(le,te.sibling),Ae=S(te,ue.children||[]),Ae.return=le,le=Ae;break e}else{p(le,te);break}else c(le,te);te=te.sibling}Ae=uh(ue,le.mode,Ae),Ae.return=le,le=Ae}return z(le);case k:return Ge=ue._init,ue=Ge(ue._payload),Vt(le,te,ue,Ae)}if(P(ue))return qe(le,te,ue,Ae);if(C(ue)){if(Ge=C(ue),typeof Ge!="function")throw Error(r(150));return ue=Ge.call(ue),rt(le,te,ue,Ae)}if(typeof ue.then=="function")return Vt(le,te,Ru(ue),Ae);if(ue.$$typeof===b)return Vt(le,te,Hu(le,ue),Ae);Cu(le,ue)}return typeof ue=="string"&&ue!==""||typeof ue=="number"||typeof ue=="bigint"?(ue=""+ue,te!==null&&te.tag===6?(p(le,te.sibling),Ae=S(te,ue),Ae.return=le,le=Ae):(p(le,te),Ae=ch(ue,le.mode,Ae),Ae.return=le,le=Ae),z(le)):p(le,te)}return function(le,te,ue,Ae){try{kl=0;var Ge=Vt(le,te,ue,Ae);return $i=null,Ge}catch(Ye){if(Ye===El)throw Ye;var 
ut=br(29,Ye,null,le.mode);return ut.lanes=Ae,ut.return=le,ut}finally{}}}var qo=C1(!0),_1=C1(!1),qi=ce(null),_u=ce(0);function N1(i,c){i=_a,ie(_u,i),ie(qi,c),_a=i|c.baseLanes}function mg(){ie(_u,_a),ie(qi,qi.current)}function bg(){_a=_u.current,Re(qi),Re(_u)}var gr=ce(null),Yr=null;function Xa(i){var c=i.alternate;ie(en,en.current&1),ie(gr,i),Yr===null&&(c===null||qi.current!==null||c.memoizedState!==null)&&(Yr=i)}function O1(i){if(i.tag===22){if(ie(en,en.current),ie(gr,i),Yr===null){var c=i.alternate;c!==null&&c.memoizedState!==null&&(Yr=i)}}else Za()}function Za(){ie(en,en.current),ie(gr,gr.current)}function va(i){Re(gr),Yr===i&&(Yr=null),Re(en)}var en=ce(0);function Nu(i){for(var c=i;c!==null;){if(c.tag===13){var p=c.memoizedState;if(p!==null&&(p=p.dehydrated,p===null||p.data==="$?"||p.data==="$!"))return c}else if(c.tag===19&&c.memoizedProps.revealOrder!==void 0){if(c.flags&128)return c}else if(c.child!==null){c.child.return=c,c=c.child;continue}if(c===i)break;for(;c.sibling===null;){if(c.return===null||c.return===i)return null;c=c.return}c.sibling.return=c.return,c=c.sibling}return null}var W$=typeof AbortController<"u"?AbortController:function(){var i=[],c=this.signal={aborted:!1,addEventListener:function(p,h){i.push(h)}};this.abort=function(){c.aborted=!0,i.forEach(function(p){return p()})}},Y$=e.unstable_scheduleCallback,K$=e.unstable_NormalPriority,tn={$$typeof:b,Consumer:null,Provider:null,_currentValue:null,_currentValue2:null,_threadCount:0};function yg(){return{controller:new W$,data:new Map,refCount:0}}function Al(i){i.refCount--,i.refCount===0&&Y$(K$,function(){i.controller.abort()})}var Rl=null,vg=0,Vi=0,Wi=null;function X$(i,c){if(Rl===null){var p=Rl=[];vg=0,Vi=Th(),Wi={status:"pending",value:void 0,then:function(h){p.push(h)}}}return vg++,c.then(I1,I1),c}function I1(){if(--vg===0&&Rl!==null){Wi!==null&&(Wi.status="fulfilled");var i=Rl;Rl=null,Vi=0,Wi=null;for(var c=0;cA?A:8;var z=D.T,q={};D.T=q,Pg(i,!1,c,p);try{var X=S(),re=D.S;if(re!==null&&re(q,X),X!==null&&typeof X=="object"&&typeof X.then=="function"){var ve=Z$(X,h);Nl(i,c,ve,Qn(i))}else Nl(i,c,h,Qn(i))}catch(Ce){Nl(i,c,{then:function(){},status:"rejected",reason:Ce},Qn())}finally{Z.p=A,D.T=z}}function n6(){}function Lg(i,c,p,h){if(i.tag!==5)throw Error(r(476));var S=cR(i).queue;lR(i,S,c,Q,p===null?n6:function(){return uR(i),p(h)})}function cR(i){var c=i.memoizedState;if(c!==null)return c;c={memoizedState:Q,baseState:Q,baseQueue:null,queue:{pending:null,lanes:0,dispatch:null,lastRenderedReducer:Sa,lastRenderedState:Q},next:null};var p={};return c.next={memoizedState:p,baseState:p,baseQueue:null,queue:{pending:null,lanes:0,dispatch:null,lastRenderedReducer:Sa,lastRenderedState:p},next:null},i.memoizedState=c,i=i.alternate,i!==null&&(i.memoizedState=c),c}function uR(i){var c=cR(i).next.queue;Nl(i,c,{},Qn())}function Mg(){return xn(Xl)}function dR(){return Xt().memoizedState}function pR(){return Xt().memoizedState}function r6(i){for(var c=i.return;c!==null;){switch(c.tag){case 24:case 3:var p=Qn();i=no(p);var h=ro(c,i,p);h!==null&&(In(h,c,p),Dl(h,c,p)),c={cache:yg()},i.payload=c;return}c=c.return}}function a6(i,c,p){var h=Qn();p={lane:h,revertLane:0,action:p,hasEagerState:!1,eagerState:null,next:null},Bu(i)?gR(c,p):(p=dg(i,c,p,h),p!==null&&(In(p,i,h),hR(p,c,h)))}function fR(i,c,p){var h=Qn();Nl(i,c,p,h)}function Nl(i,c,p,h){var S={lane:h,revertLane:0,action:p,hasEagerState:!1,eagerState:null,next:null};if(Bu(i))gR(c,S);else{var 
A=i.alternate;if(i.lanes===0&&(A===null||A.lanes===0)&&(A=c.lastRenderedReducer,A!==null))try{var z=c.lastRenderedState,q=A(z,p);if(S.hasEagerState=!0,S.eagerState=q,Yn(q,z))return Eu(i,c,S,0),Mt===null&&wu(),!1}catch{}finally{}if(p=dg(i,c,S,h),p!==null)return In(p,i,h),hR(p,c,h),!0}return!1}function Pg(i,c,p,h){if(h={lane:2,revertLane:Th(),action:h,hasEagerState:!1,eagerState:null,next:null},Bu(i)){if(c)throw Error(r(479))}else c=dg(i,p,h,2),c!==null&&In(c,i,2)}function Bu(i){var c=i.alternate;return i===ct||c!==null&&c===ct}function gR(i,c){Yi=Iu=!0;var p=i.pending;p===null?c.next=c:(c.next=p.next,p.next=c),i.pending=c}function hR(i,c,p){if(p&4194176){var h=c.lanes;h&=i.pendingLanes,p|=h,c.lanes=p,_r(i,p)}}var Kr={readContext:xn,use:Mu,useCallback:Wt,useContext:Wt,useEffect:Wt,useImperativeHandle:Wt,useLayoutEffect:Wt,useInsertionEffect:Wt,useMemo:Wt,useReducer:Wt,useRef:Wt,useState:Wt,useDebugValue:Wt,useDeferredValue:Wt,useTransition:Wt,useSyncExternalStore:Wt,useId:Wt};Kr.useCacheRefresh=Wt,Kr.useMemoCache=Wt,Kr.useHostTransitionStatus=Wt,Kr.useFormState=Wt,Kr.useActionState=Wt,Kr.useOptimistic=Wt;var Yo={readContext:xn,use:Mu,useCallback:function(i,c){return Bn().memoizedState=[i,c===void 0?null:c],i},useContext:xn,useEffect:eR,useImperativeHandle:function(i,c,p){p=p!=null?p.concat([i]):null,Fu(4194308,4,rR.bind(null,c,i),p)},useLayoutEffect:function(i,c){return Fu(4194308,4,i,c)},useInsertionEffect:function(i,c){Fu(4,2,i,c)},useMemo:function(i,c){var p=Bn();c=c===void 0?null:c;var h=i();if(Wo){st(!0);try{i()}finally{st(!1)}}return p.memoizedState=[h,c],h},useReducer:function(i,c,p){var h=Bn();if(p!==void 0){var S=p(c);if(Wo){st(!0);try{p(c)}finally{st(!1)}}}else S=c;return h.memoizedState=h.baseState=S,i={pending:null,lanes:0,dispatch:null,lastRenderedReducer:i,lastRenderedState:S},h.queue=i,i=i.dispatch=a6.bind(null,ct,i),[h.memoizedState,i]},useRef:function(i){var c=Bn();return i={current:i},c.memoizedState=i},useState:function(i){i=_g(i);var c=i.queue,p=fR.bind(null,ct,c);return c.dispatch=p,[i.memoizedState,p]},useDebugValue:Ig,useDeferredValue:function(i,c){var p=Bn();return Dg(p,i,c)},useTransition:function(){var i=_g(!1);return i=lR.bind(null,ct,i.queue,!0,!1),Bn().memoizedState=i,[!1,i]},useSyncExternalStore:function(i,c,p){var h=ct,S=Bn();if(xt){if(p===void 0)throw Error(r(407));p=p()}else{if(p=c(),Mt===null)throw Error(r(349));St&60||z1(h,c,p)}S.memoizedState=p;var A={value:p,getSnapshot:c};return S.queue=A,eR(j1.bind(null,h,A,i),[i]),h.flags|=2048,Xi(9,B1.bind(null,h,A,p,c),{destroy:void 0},null),p},useId:function(){var i=Bn(),c=Mt.identifierPrefix;if(xt){var p=ya,h=ba;p=(h&~(1<<32-Et(h)-1)).toString(32)+p,c=":"+c+"R"+p,p=Du++,0 title"))),bn(A,h,p),A[En]=i,an(A),h=A;break e;case"link":var z=GC("link","href",S).get(h+(p.href||""));if(z){for(var q=0;q<\/script>",i=i.removeChild(i.firstChild);break;case"select":i=typeof h.is=="string"?S.createElement("select",{is:h.is}):S.createElement("select"),h.multiple?i.multiple=!0:h.size&&(i.size=h.size);break;default:i=typeof h.is=="string"?S.createElement(p,{is:h.is}):S.createElement(p)}}i[En]=c,i[Fn]=h;e:for(S=c.child;S!==null;){if(S.tag===5||S.tag===6)i.appendChild(S.stateNode);else if(S.tag!==4&&S.tag!==27&&S.child!==null){S.child.return=S,S=S.child;continue}if(S===c)break e;for(;S.sibling===null;){if(S.return===null||S.return===c)break e;S=S.return}S.sibling.return=S.return,S=S.sibling}c.stateNode=i;e:switch(bn(i,p,h),p){case"button":case"input":case"select":case"textarea":i=!!h.autoFocus;break e;case"img":i=!0;break 
e;default:i=!1}i&&Ra(c)}}return Bt(c),c.flags&=-16777217,null;case 6:if(i&&c.stateNode!=null)i.memoizedProps!==h&&Ra(c);else{if(typeof h!="string"&&c.stateNode===null)throw Error(r(166));if(i=xe.current,vl(c)){if(i=c.stateNode,p=c.memoizedProps,h=null,S=On,S!==null)switch(S.tag){case 27:case 5:h=S.memoizedProps}i[En]=c,i=!!(i.nodeValue===p||h!==null&&h.suppressHydrationWarning===!0||NC(i.nodeValue,p)),i||$o(c)}else i=ad(i).createTextNode(h),i[En]=c,c.stateNode=i}return Bt(c),null;case 13:if(h=c.memoizedState,i===null||i.memoizedState!==null&&i.memoizedState.dehydrated!==null){if(S=vl(c),h!==null&&h.dehydrated!==null){if(i===null){if(!S)throw Error(r(318));if(S=c.memoizedState,S=S!==null?S.dehydrated:null,!S)throw Error(r(317));S[En]=c}else Sl(),!(c.flags&128)&&(c.memoizedState=null),c.flags|=4;Bt(c),S=!1}else Or!==null&&(yh(Or),Or=null),S=!0;if(!S)return c.flags&256?(va(c),c):(va(c),null)}if(va(c),c.flags&128)return c.lanes=p,c;if(p=h!==null,i=i!==null&&i.memoizedState!==null,p){h=c.child,S=null,h.alternate!==null&&h.alternate.memoizedState!==null&&h.alternate.memoizedState.cachePool!==null&&(S=h.alternate.memoizedState.cachePool.pool);var A=null;h.memoizedState!==null&&h.memoizedState.cachePool!==null&&(A=h.memoizedState.cachePool.pool),A!==S&&(h.flags|=2048)}return p!==i&&p&&(c.child.flags|=8192),Wu(c,c.updateQueue),Bt(c),null;case 4:return J(),i===null&&_h(c.stateNode.containerInfo),Bt(c),null;case 10:return xa(c.type),Bt(c),null;case 19:if(Re(en),S=c.memoizedState,S===null)return Bt(c),null;if(h=(c.flags&128)!==0,A=S.rendering,A===null)if(h)jl(S,!1);else{if(qt!==0||i!==null&&i.flags&128)for(i=c.child;i!==null;){if(A=Nu(i),A!==null){for(c.flags|=128,jl(S,!1),i=A.updateQueue,c.updateQueue=i,Wu(c,i),c.subtreeFlags=0,i=p,p=c.child;p!==null;)aC(p,i),p=p.sibling;return ie(en,en.current&1|2),c.child}i=i.sibling}S.tail!==null&&ye()>Yu&&(c.flags|=128,h=!0,jl(S,!1),c.lanes=4194304)}else{if(!h)if(i=Nu(A),i!==null){if(c.flags|=128,h=!0,i=i.updateQueue,c.updateQueue=i,Wu(c,i),jl(S,!0),S.tail===null&&S.tailMode==="hidden"&&!A.alternate&&!xt)return Bt(c),null}else 2*ye()-S.renderingStartTime>Yu&&p!==536870912&&(c.flags|=128,h=!0,jl(S,!1),c.lanes=4194304);S.isBackwards?(A.sibling=c.child,c.child=A):(i=S.last,i!==null?i.sibling=A:c.child=A,S.last=A)}return S.tail!==null?(c=S.tail,S.rendering=c,S.tail=c.sibling,S.renderingStartTime=ye(),c.sibling=null,i=en.current,ie(en,h?i&1|2:i&1),c):(Bt(c),null);case 22:case 23:return va(c),bg(),h=c.memoizedState!==null,i!==null?i.memoizedState!==null!==h&&(c.flags|=8192):h&&(c.flags|=8192),h?p&536870912&&!(c.flags&128)&&(Bt(c),c.subtreeFlags&6&&(c.flags|=8192)):Bt(c),p=c.updateQueue,p!==null&&Wu(c,p.retryQueue),p=null,i!==null&&i.memoizedState!==null&&i.memoizedState.cachePool!==null&&(p=i.memoizedState.cachePool.pool),h=null,c.memoizedState!==null&&c.memoizedState.cachePool!==null&&(h=c.memoizedState.cachePool.pool),h!==p&&(c.flags|=2048),i!==null&&Re(Vo),null;case 24:return p=null,i!==null&&(p=i.memoizedState.cache),c.memoizedState.cache!==p&&(c.flags|=2048),xa(tn),Bt(c),null;case 25:return null}throw Error(r(156,c.tag))}function d6(i,c){switch(fg(c),c.tag){case 1:return i=c.flags,i&65536?(c.flags=i&-65537|128,c):null;case 3:return xa(tn),J(),i=c.flags,i&65536&&!(i&128)?(c.flags=i&-65537|128,c):null;case 26:case 27:case 5:return ke(c),null;case 13:if(va(c),i=c.memoizedState,i!==null&&i.dehydrated!==null){if(c.alternate===null)throw Error(r(340));Sl()}return i=c.flags,i&65536?(c.flags=i&-65537|128,c):null;case 19:return Re(en),null;case 4:return J(),null;case 
10:return xa(c.type),null;case 22:case 23:return va(c),bg(),i!==null&&Re(Vo),i=c.flags,i&65536?(c.flags=i&-65537|128,c):null;case 24:return xa(tn),null;case 25:return null;default:return null}}function sC(i,c){switch(fg(c),c.tag){case 3:xa(tn),J();break;case 26:case 27:case 5:ke(c);break;case 4:J();break;case 13:va(c);break;case 19:Re(en);break;case 10:xa(c.type);break;case 22:case 23:va(c),bg(),i!==null&&Re(Vo);break;case 24:xa(tn)}}var p6={getCacheForType:function(i){var c=xn(tn),p=c.data.get(i);return p===void 0&&(p=i(),c.data.set(i,p)),p}},f6=typeof WeakMap=="function"?WeakMap:Map,jt=0,Mt=null,pt=null,St=0,Pt=0,Zn=null,Ca=!1,es=!1,dh=!1,_a=0,qt=0,lo=0,ti=0,ph=0,yr=0,ts=0,Ul=null,Xr=null,fh=!1,gh=0,Yu=1/0,Ku=null,co=null,Xu=!1,ni=null,Gl=0,hh=0,mh=null,Hl=0,bh=null;function Qn(){if(jt&2&&St!==0)return St&-St;if(D.T!==null){var i=Vi;return i!==0?i:Th()}return CA()}function lC(){yr===0&&(yr=!(St&536870912)||xt?Ot():536870912);var i=gr.current;return i!==null&&(i.flags|=32),yr}function In(i,c,p){(i===Mt&&Pt===2||i.cancelPendingCommit!==null)&&(ns(i,0),Na(i,St,yr,!1)),Mn(i,p),(!(jt&2)||i!==Mt)&&(i===Mt&&(!(jt&2)&&(ti|=p),qt===4&&Na(i,St,yr,!1)),Zr(i))}function cC(i,c,p){if(jt&6)throw Error(r(327));var h=!p&&(c&60)===0&&(c&i.expiredLanes)===0||Xe(i,c),S=h?m6(i,c):wh(i,c,!0),A=h;do{if(S===0){es&&!h&&Na(i,c,0,!1);break}else if(S===6)Na(i,c,0,!Ca);else{if(p=i.current.alternate,A&&!g6(p)){S=wh(i,c,!1),A=!1;continue}if(S===2){if(A=c,i.errorRecoveryDisabledLanes&A)var z=0;else z=i.pendingLanes&-536870913,z=z!==0?z:z&536870912?536870912:0;if(z!==0){c=z;e:{var q=i;S=Ul;var X=q.current.memoizedState.isDehydrated;if(X&&(ns(q,z).flags|=256),z=wh(q,z,!1),z!==2){if(dh&&!X){q.errorRecoveryDisabledLanes|=A,ti|=A,S=4;break e}A=Xr,Xr=S,A!==null&&yh(A)}S=z}if(A=!1,S!==2)continue}}if(S===1){ns(i,0),Na(i,c,0,!0);break}e:{switch(h=i,S){case 0:case 1:throw Error(r(345));case 4:if((c&4194176)===c){Na(h,c,yr,!Ca);break e}break;case 2:Xr=null;break;case 3:case 5:break;default:throw Error(r(329))}if(h.finishedWork=p,h.finishedLanes=c,(c&62914560)===c&&(A=gh+300-ye(),10p?32:p,D.T=null,ni===null)var A=!1;else{p=mh,mh=null;var z=ni,q=Gl;if(ni=null,Gl=0,jt&6)throw Error(r(331));var X=jt;if(jt|=4,nC(z.current),JR(z,z.current,q,p),jt=X,$l(0,!1),et&&typeof et.onPostCommitFiberRoot=="function")try{et.onPostCommitFiberRoot(yt,z)}catch{}A=!0}return A}finally{Z.p=S,D.T=h,yC(i,c)}}return!1}function vC(i,c,p){c=dr(p,c),c=Bg(i.stateNode,c,2),i=ro(i,c,2),i!==null&&(Mn(i,2),Zr(i))}function It(i,c,p){if(i.tag===3)vC(i,i,p);else for(;c!==null;){if(c.tag===3){vC(c,i,p);break}else if(c.tag===1){var h=c.stateNode;if(typeof c.type.getDerivedStateFromError=="function"||typeof h.componentDidCatch=="function"&&(co===null||!co.has(h))){i=dr(p,i),p=ER(2),h=ro(c,p,2),h!==null&&(xR(p,h,c,i),Mn(h,2),Zr(h));break}}c=c.return}}function Eh(i,c,p){var h=i.pingCache;if(h===null){h=i.pingCache=new f6;var S=new Set;h.set(c,S)}else S=h.get(c),S===void 0&&(S=new Set,h.set(c,S));S.has(p)||(dh=!0,S.add(p),i=v6.bind(null,i,c,p),c.then(i,i))}function v6(i,c,p){var h=i.pingCache;h!==null&&h.delete(c),i.pingedLanes|=i.suspendedLanes&p,i.warmLanes&=~p,Mt===i&&(St&p)===p&&(qt===4||qt===3&&(St&62914560)===St&&300>ye()-gh?!(jt&2)&&ns(i,0):ph|=p,ts===St&&(ts=0)),Zr(i)}function SC(i,c){c===0&&(c=Ln()),i=Ka(i,c),i!==null&&(Mn(i,c),Zr(i))}function S6(i){var c=i.memoizedState,p=0;c!==null&&(p=c.retryLane),SC(i,p)}function w6(i,c){var p=0;switch(i.tag){case 13:var h=i.stateNode,S=i.memoizedState;S!==null&&(p=S.retryLane);break;case 19:h=i.stateNode;break;case 
22:h=i.stateNode._retryCache;break;default:throw Error(r(314))}h!==null&&h.delete(c),SC(i,p)}function E6(i,c){return we(i,c)}var Ju=null,os=null,xh=!1,ed=!1,kh=!1,ri=0;function Zr(i){i!==os&&i.next===null&&(os===null?Ju=os=i:os=os.next=i),ed=!0,xh||(xh=!0,k6(x6))}function $l(i,c){if(!kh&&ed){kh=!0;do for(var p=!1,h=Ju;h!==null;){if(i!==0){var S=h.pendingLanes;if(S===0)var A=0;else{var z=h.suspendedLanes,q=h.pingedLanes;A=(1<<31-Et(42|i)+1)-1,A&=S&~(z&~q),A=A&201326677?A&201326677|1:A?A|2:0}A!==0&&(p=!0,xC(h,A))}else A=St,A=fa(h,h===Mt?A:0),!(A&3)||Xe(h,A)||(p=!0,xC(h,A));h=h.next}while(p);kh=!1}}function x6(){ed=xh=!1;var i=0;ri!==0&&(I6()&&(i=ri),ri=0);for(var c=ye(),p=null,h=Ju;h!==null;){var S=h.next,A=wC(h,c);A===0?(h.next=null,p===null?Ju=S:p.next=S,S===null&&(os=p)):(p=h,(i!==0||A&3)&&(ed=!0)),h=S}$l(i)}function wC(i,c){for(var p=i.suspendedLanes,h=i.pingedLanes,S=i.expirationTimes,A=i.pendingLanes&-62914561;0"u"?null:document;function zC(i,c,p){var h=ss;if(h&&typeof c=="string"&&c){var S=cr(c);S='link[rel="'+i+'"][href="'+S+'"]',typeof p=="string"&&(S+='[crossorigin="'+p+'"]'),FC.has(S)||(FC.add(S),i={rel:i,crossOrigin:p,href:c},h.querySelector(S)===null&&(c=h.createElement("link"),bn(c,"link",i),an(c),h.head.appendChild(c)))}}function j6(i){Oa.D(i),zC("dns-prefetch",i,null)}function U6(i,c){Oa.C(i,c),zC("preconnect",i,c)}function G6(i,c,p){Oa.L(i,c,p);var h=ss;if(h&&i&&c){var S='link[rel="preload"][as="'+cr(c)+'"]';c==="image"&&p&&p.imageSrcSet?(S+='[imagesrcset="'+cr(p.imageSrcSet)+'"]',typeof p.imageSizes=="string"&&(S+='[imagesizes="'+cr(p.imageSizes)+'"]')):S+='[href="'+cr(i)+'"]';var A=S;switch(c){case"style":A=ls(i);break;case"script":A=cs(i)}vr.has(A)||(i=I({rel:"preload",href:c==="image"&&p&&p.imageSrcSet?void 0:i,as:c},p),vr.set(A,i),h.querySelector(S)!==null||c==="style"&&h.querySelector(Wl(A))||c==="script"&&h.querySelector(Yl(A))||(c=h.createElement("link"),bn(c,"link",i),an(c),h.head.appendChild(c)))}}function H6(i,c){Oa.m(i,c);var p=ss;if(p&&i){var h=c&&typeof c.as=="string"?c.as:"script",S='link[rel="modulepreload"][as="'+cr(h)+'"][href="'+cr(i)+'"]',A=S;switch(h){case"audioworklet":case"paintworklet":case"serviceworker":case"sharedworker":case"worker":case"script":A=cs(i)}if(!vr.has(A)&&(i=I({rel:"modulepreload",href:i},c),vr.set(A,i),p.querySelector(S)===null)){switch(h){case"audioworklet":case"paintworklet":case"serviceworker":case"sharedworker":case"worker":case"script":if(p.querySelector(Yl(A)))return}h=p.createElement("link"),bn(h,"link",i),an(h),p.head.appendChild(h)}}}function $6(i,c,p){Oa.S(i,c,p);var h=ss;if(h&&i){var S=Oi(h).hoistableStyles,A=ls(i);c=c||"default";var z=S.get(A);if(!z){var q={loading:0,preload:null};if(z=h.querySelector(Wl(A)))q.loading=5;else{i=I({rel:"stylesheet",href:i,"data-precedence":c},p),(p=vr.get(A))&&zh(i,p);var X=z=h.createElement("link");an(X),bn(X,"link",i),X._p=new Promise(function(re,ve){X.onload=re,X.onerror=ve}),X.addEventListener("load",function(){q.loading|=1}),X.addEventListener("error",function(){q.loading|=2}),q.loading|=4,id(z,c,h)}z={type:"stylesheet",instance:z,count:1,state:q},S.set(A,z)}}}function q6(i,c){Oa.X(i,c);var p=ss;if(p&&i){var h=Oi(p).hoistableScripts,S=cs(i),A=h.get(S);A||(A=p.querySelector(Yl(S)),A||(i=I({src:i,async:!0},c),(c=vr.get(S))&&Bh(i,c),A=p.createElement("script"),an(A),bn(A,"link",i),p.head.appendChild(A)),A={type:"script",instance:A,count:1,state:null},h.set(S,A))}}function V6(i,c){Oa.M(i,c);var p=ss;if(p&&i){var 
[Minified build output omitted: this portion of the diff touches the compiled web UI bundle, i.e. machine-generated JavaScript covering React DOM's hoistable style/script handling, a vendored cookie parse/serialize helper, and react-router v7.3.0 internals. The minified code is not meaningfully reviewable line by line; a stray hunk fragment rendering {message}, whose file header was lost in extraction, is likewise unrecoverable.]
diff --git a/lightrag_webui/src/components/AppSettings.tsx index a1ac140393..09312794ea 100644 --- a/lightrag_webui/src/components/AppSettings.tsx +++ b/lightrag_webui/src/components/AppSettings.tsx @@ -22,7 +22,7 @@ export default function AppSettings({ className }: AppSettingsProps) { const setTheme = useSettingsStore.use.setTheme() const handleLanguageChange = useCallback((value: string) => { - setLanguage(value as 'en' | 'zh') + setLanguage(value as 'en' | 'zh' | 'fr' | 'ar' | 'zh_TW') }, [setLanguage]) const handleThemeChange = useCallback((value: string) => { @@ -47,6 +47,9 @@ export default function AppSettings({ className }: AppSettingsProps) { English 中文 + Français + العربية + 繁體中文 diff --git a/lightrag_webui/src/components/MessageAlert.tsx deleted file mode 100644 index cd23bbd92e..0000000000 --- a/lightrag_webui/src/components/MessageAlert.tsx +++ /dev/null @@ -1,56 +0,0 @@ -import { Alert, AlertDescription, AlertTitle } from '@/components/ui/Alert' -import { useBackendState } from '@/stores/state' -import { useEffect, useState } from 'react' -import { cn } from '@/lib/utils' - -// import Button from '@/components/ui/Button' -// import { controlButtonVariant } from '@/lib/constants' - -import { AlertCircle } from 'lucide-react' - -const MessageAlert = () => { - const health = useBackendState.use.health() - const message = useBackendState.use.message() - const messageTitle = useBackendState.use.messageTitle() - const [isMounted, setIsMounted] = useState(false) - - useEffect(() => { - setTimeout(() => { - setIsMounted(true) - }, 50) - }, []) - - return ( - [remainder of the deleted JSX lost to tag stripping: an Alert that conditionally rendered an AlertCircle indicator while the backend was unhealthy ({!health && ...}), showed {messageTitle} as the title and {message} as the description, and ended with a commented-out block matching the commented-out Button import above]
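The AppSettings hunk above widens the select-value cast to 'en' | 'zh' | 'fr' | 'ar' | 'zh_TW'. A pattern that keeps such a cast from drifting as locales are added is to derive the union from a single array and narrow with a type guard. A minimal sketch under that assumption (SUPPORTED_LANGUAGES and isLanguage are illustrative names, not part of this codebase):

```typescript
// Illustrative only: derive the Language union from one source of truth
// so the change handler can narrow instead of using an unchecked cast.
const SUPPORTED_LANGUAGES = ['en', 'zh', 'fr', 'ar', 'zh_TW'] as const
type Language = (typeof SUPPORTED_LANGUAGES)[number] // 'en' | 'zh' | 'fr' | 'ar' | 'zh_TW'

function isLanguage(value: string): value is Language {
  return (SUPPORTED_LANGUAGES as readonly string[]).includes(value)
}

function handleLanguageChange(value: string, setLanguage: (lang: Language) => void): void {
  if (isLanguage(value)) setLanguage(value) // silently ignore unknown values
}
```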
*/} - - ) -} - -export default MessageAlert diff --git a/lightrag_webui/src/components/documents/ClearDocumentsDialog.tsx b/lightrag_webui/src/components/documents/ClearDocumentsDialog.tsx index 58841ff6da..b84380119e 100644 --- a/lightrag_webui/src/components/documents/ClearDocumentsDialog.tsx +++ b/lightrag_webui/src/components/documents/ClearDocumentsDialog.tsx @@ -1,4 +1,4 @@ -import { useState, useCallback } from 'react' +import { useState, useCallback, useEffect } from 'react' import Button from '@/components/ui/Button' import { Dialog, @@ -6,32 +6,88 @@ import { DialogDescription, DialogHeader, DialogTitle, - DialogTrigger + DialogTrigger, + DialogFooter } from '@/components/ui/Dialog' +import Input from '@/components/ui/Input' +import Checkbox from '@/components/ui/Checkbox' import { toast } from 'sonner' import { errorMessage } from '@/lib/utils' -import { clearDocuments } from '@/api/lightrag' +import { clearDocuments, clearCache } from '@/api/lightrag' -import { EraserIcon } from 'lucide-react' +import { EraserIcon, AlertTriangleIcon } from 'lucide-react' import { useTranslation } from 'react-i18next' -export default function ClearDocumentsDialog() { +// 简单的Label组件 +const Label = ({ + htmlFor, + className, + children, + ...props +}: React.LabelHTMLAttributes) => ( + +) + +interface ClearDocumentsDialogProps { + onDocumentsCleared?: () => Promise +} + +export default function ClearDocumentsDialog({ onDocumentsCleared }: ClearDocumentsDialogProps) { const { t } = useTranslation() const [open, setOpen] = useState(false) + const [confirmText, setConfirmText] = useState('') + const [clearCacheOption, setClearCacheOption] = useState(false) + const isConfirmEnabled = confirmText.toLowerCase() === 'yes' + + // 重置状态当对话框关闭时 + useEffect(() => { + if (!open) { + setConfirmText('') + setClearCacheOption(false) + } + }, [open]) const handleClear = useCallback(async () => { + if (!isConfirmEnabled) return + try { const result = await clearDocuments() - if (result.status === 'success') { - toast.success(t('documentPanel.clearDocuments.success')) - setOpen(false) - } else { + + if (result.status !== 'success') { toast.error(t('documentPanel.clearDocuments.failed', { message: result.message })) + setConfirmText('') + return + } + + toast.success(t('documentPanel.clearDocuments.success')) + + if (clearCacheOption) { + try { + await clearCache() + toast.success(t('documentPanel.clearDocuments.cacheCleared')) + } catch (cacheErr) { + toast.error(t('documentPanel.clearDocuments.cacheClearFailed', { error: errorMessage(cacheErr) })) + } + } + + // Refresh document list if provided + if (onDocumentsCleared) { + onDocumentsCleared().catch(console.error) } + + // 所有操作成功后关闭对话框 + setOpen(false) } catch (err) { toast.error(t('documentPanel.clearDocuments.error', { error: errorMessage(err) })) + setConfirmText('') } - }, [setOpen]) + }, [isConfirmEnabled, clearCacheOption, setOpen, t, onDocumentsCleared]) return ( @@ -42,12 +98,60 @@ export default function ClearDocumentsDialog() { e.preventDefault()}> - {t('documentPanel.clearDocuments.title')} - {t('documentPanel.clearDocuments.confirm')} + + + {t('documentPanel.clearDocuments.title')} + + + {t('documentPanel.clearDocuments.description')} + - + +
+ {t('documentPanel.clearDocuments.warning')} +
+
+ {t('documentPanel.clearDocuments.confirm')} +
+ +
+
+ + ) => setConfirmText(e.target.value)} + placeholder={t('documentPanel.clearDocuments.confirmPlaceholder')} + className="w-full" + /> +
+ +
+ setClearCacheOption(checked === true)} + /> + +
+
+ + + + +
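ClearDocumentsDialog above replaces the one-click clear with a type-to-confirm flow: the destructive call is gated behind typing "yes", the cache clear is optional, and the dialog only closes once everything succeeded. Stripped of the Dialog/Input/Checkbox markup, the control flow is roughly the following sketch (API result shapes simplified; the real component reports each step through i18n toasts):

```typescript
// Sketch of the confirm-gated clear flow; shapes simplified for clarity.
type ClearResult = { status: string; message?: string }

async function confirmAndClear(opts: {
  confirmText: string
  clearCacheOption: boolean
  clearDocuments: () => Promise<ClearResult>
  clearCache: () => Promise<void>
  onDocumentsCleared?: () => Promise<void>
}): Promise<boolean> {
  // Gate: the user must literally type "yes" (case-insensitive).
  if (opts.confirmText.toLowerCase() !== 'yes') return false

  const result = await opts.clearDocuments()
  if (result.status !== 'success') return false // keep the dialog open

  if (opts.clearCacheOption) {
    try {
      await opts.clearCache()
    } catch {
      // A cache failure is reported but does not undo the document clear.
    }
  }

  await opts.onDocumentsCleared?.() // let the parent refresh its list
  return true // the caller closes the dialog only on success
}
```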
) diff --git a/lightrag_webui/src/components/documents/PipelineStatusDialog.tsx b/lightrag_webui/src/components/documents/PipelineStatusDialog.tsx new file mode 100644 index 0000000000..58d1843452 --- /dev/null +++ b/lightrag_webui/src/components/documents/PipelineStatusDialog.tsx @@ -0,0 +1,193 @@ +import { useState, useEffect, useRef } from 'react' +import { useTranslation } from 'react-i18next' +import { toast } from 'sonner' +import { AlignLeft, AlignCenter, AlignRight } from 'lucide-react' + +import { + Dialog, + DialogContent, + DialogHeader, + DialogTitle, + DialogDescription +} from '@/components/ui/Dialog' +import Button from '@/components/ui/Button' +import { getPipelineStatus, PipelineStatusResponse } from '@/api/lightrag' +import { errorMessage } from '@/lib/utils' +import { cn } from '@/lib/utils' + +type DialogPosition = 'left' | 'center' | 'right' + +interface PipelineStatusDialogProps { + open: boolean + onOpenChange: (open: boolean) => void +} + +export default function PipelineStatusDialog({ + open, + onOpenChange +}: PipelineStatusDialogProps) { + const { t } = useTranslation() + const [status, setStatus] = useState(null) + const [position, setPosition] = useState('center') + const [isUserScrolled, setIsUserScrolled] = useState(false) + const historyRef = useRef(null) + + // Reset position when dialog opens + useEffect(() => { + if (open) { + setPosition('center') + setIsUserScrolled(false) + } + }, [open]) + + // Handle scroll position + useEffect(() => { + const container = historyRef.current + if (!container || isUserScrolled) return + + container.scrollTop = container.scrollHeight + }, [status?.history_messages, isUserScrolled]) + + const handleScroll = () => { + const container = historyRef.current + if (!container) return + + const isAtBottom = Math.abs( + (container.scrollHeight - container.scrollTop) - container.clientHeight + ) < 1 + + if (isAtBottom) { + setIsUserScrolled(false) + } else { + setIsUserScrolled(true) + } + } + + // Refresh status every 2 seconds + useEffect(() => { + if (!open) return + + const fetchStatus = async () => { + try { + const data = await getPipelineStatus() + setStatus(data) + } catch (err) { + toast.error(t('documentPanel.pipelineStatus.errors.fetchFailed', { error: errorMessage(err) })) + } + } + + fetchStatus() + const interval = setInterval(fetchStatus, 2000) + return () => clearInterval(interval) + }, [open, t]) + + return ( + + + + {status?.job_name + ? `${t('documentPanel.pipelineStatus.jobName')}: ${status.job_name}, ${t('documentPanel.pipelineStatus.progress')}: ${status.cur_batch}/${status.batchs}` + : t('documentPanel.pipelineStatus.noActiveJob') + } + + + + {t('documentPanel.pipelineStatus.title')} + + + {/* Position control buttons */} +
+ + + +
+
+ + {/* Status Content */} +
+ {/* Pipeline Status */} +
+
+
{t('documentPanel.pipelineStatus.busy')}:
+
+
+
+
{t('documentPanel.pipelineStatus.requestPending')}:
+
+
+
+ + {/* Job Information */} +
+
{t('documentPanel.pipelineStatus.jobName')}: {status?.job_name || '-'}
+
+ {t('documentPanel.pipelineStatus.startTime')}: {status?.job_start ? new Date(status.job_start).toLocaleString() : '-'} + {t('documentPanel.pipelineStatus.progress')}: {status ? `${status.cur_batch}/${status.batchs} ${t('documentPanel.pipelineStatus.unit')}` : '-'} +
+
+ + {/* Latest Message */} +
+
{t('documentPanel.pipelineStatus.latestMessage')}:
+
+ {status?.latest_message || '-'} +
+
+ + {/* History Messages */} +
+
{t('documentPanel.pipelineStatus.historyMessages')}:
+
+ {status?.history_messages?.length ? ( + status.history_messages.map((msg, idx) => ( +
{msg}
+ )) + ) : '-'} +
+
+
+ +
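Two details of PipelineStatusDialog above are easy to miss inside the JSX: the effect that polls getPipelineStatus every two seconds and clears the interval when the dialog closes, and the history pane that stays pinned to the bottom unless the user has scrolled away. A rough standalone sketch of both (getStatus stands in for the real API call):

```typescript
// Poll on an interval and hand back a cleanup function, mirroring the
// useEffect that runs while the dialog is open.
function startPolling<T>(getStatus: () => Promise<T>, onStatus: (status: T) => void): () => void {
  const tick = () => {
    getStatus().then(onStatus).catch(() => {
      // The real component surfaces failures via an i18n toast.
    })
  }
  tick() // fetch once immediately, then every two seconds
  const interval = setInterval(tick, 2000)
  return () => clearInterval(interval)
}

// Keep auto-scrolling only while the view is at (or within 1px of) the
// bottom; once the user scrolls up, leave their position alone.
function isPinnedToBottom(el: { scrollHeight: number; scrollTop: number; clientHeight: number }): boolean {
  return Math.abs(el.scrollHeight - el.scrollTop - el.clientHeight) < 1
}
```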
+ ) +} diff --git a/lightrag_webui/src/components/documents/UploadDocumentsDialog.tsx b/lightrag_webui/src/components/documents/UploadDocumentsDialog.tsx index 7f17393cee..5785a7d32a 100644 --- a/lightrag_webui/src/components/documents/UploadDocumentsDialog.tsx +++ b/lightrag_webui/src/components/documents/UploadDocumentsDialog.tsx @@ -1,4 +1,5 @@ import { useState, useCallback } from 'react' +import { FileRejection } from 'react-dropzone' import Button from '@/components/ui/Button' import { Dialog, @@ -16,54 +17,170 @@ import { uploadDocument } from '@/api/lightrag' import { UploadIcon } from 'lucide-react' import { useTranslation } from 'react-i18next' -export default function UploadDocumentsDialog() { +interface UploadDocumentsDialogProps { + onDocumentsUploaded?: () => Promise +} + +export default function UploadDocumentsDialog({ onDocumentsUploaded }: UploadDocumentsDialogProps) { const { t } = useTranslation() const [open, setOpen] = useState(false) const [isUploading, setIsUploading] = useState(false) const [progresses, setProgresses] = useState>({}) + const [fileErrors, setFileErrors] = useState>({}) + + const handleRejectedFiles = useCallback( + (rejectedFiles: FileRejection[]) => { + // Process rejected files and add them to fileErrors + rejectedFiles.forEach(({ file, errors }) => { + // Get the first error message + let errorMsg = errors[0]?.message || t('documentPanel.uploadDocuments.fileUploader.fileRejected', { name: file.name }) + + // Simplify error message for unsupported file types + if (errorMsg.includes('file-invalid-type')) { + errorMsg = t('documentPanel.uploadDocuments.fileUploader.unsupportedType') + } + + // Set progress to 100% to display error message + setProgresses((pre) => ({ + ...pre, + [file.name]: 100 + })) + + // Add error message to fileErrors + setFileErrors(prev => ({ + ...prev, + [file.name]: errorMsg + })) + }) + }, + [setProgresses, setFileErrors, t] + ) const handleDocumentsUpload = useCallback( async (filesToUpload: File[]) => { setIsUploading(true) + let hasSuccessfulUpload = false + + // Only clear errors for files that are being uploaded, keep errors for rejected files + setFileErrors(prev => { + const newErrors = { ...prev }; + filesToUpload.forEach(file => { + delete newErrors[file.name]; + }); + return newErrors; + }); + + // Show uploading toast + const toastId = toast.loading(t('documentPanel.uploadDocuments.batch.uploading')) try { + // Track errors locally to ensure we have the final state + const uploadErrors: Record = {} + await Promise.all( filesToUpload.map(async (file) => { try { + // Initialize upload progress + setProgresses((pre) => ({ + ...pre, + [file.name]: 0 + })) + const result = await uploadDocument(file, (percentCompleted: number) => { - console.debug(t('documentPanel.uploadDocuments.uploading', { name: file.name, percent: percentCompleted })) + console.debug(t('documentPanel.uploadDocuments.single.uploading', { name: file.name, percent: percentCompleted })) setProgresses((pre) => ({ ...pre, [file.name]: percentCompleted })) }) - if (result.status === 'success') { - toast.success(t('documentPanel.uploadDocuments.success', { name: file.name })) + + if (result.status === 'duplicated') { + uploadErrors[file.name] = t('documentPanel.uploadDocuments.fileUploader.duplicateFile') + setFileErrors(prev => ({ + ...prev, + [file.name]: t('documentPanel.uploadDocuments.fileUploader.duplicateFile') + })) + } else if (result.status !== 'success') { + uploadErrors[file.name] = result.message + setFileErrors(prev => ({ + ...prev, + 
[file.name]: result.message + })) } else { - toast.error(t('documentPanel.uploadDocuments.failed', { name: file.name, message: result.message })) + // Mark that we had at least one successful upload + hasSuccessfulUpload = true } } catch (err) { - toast.error(t('documentPanel.uploadDocuments.error', { name: file.name, error: errorMessage(err) })) + console.error(`Upload failed for ${file.name}:`, err) + + // Handle HTTP errors, including 400 errors + let errorMsg = errorMessage(err) + + // If it's an axios error with response data, try to extract more detailed error info + if (err && typeof err === 'object' && 'response' in err) { + const axiosError = err as { response?: { status: number, data?: { detail?: string } } } + if (axiosError.response?.status === 400) { + // Extract specific error message from backend response + errorMsg = axiosError.response.data?.detail || errorMsg + } + + // Set progress to 100% to display error message + setProgresses((pre) => ({ + ...pre, + [file.name]: 100 + })) + } + + // Record error message in both local tracking and state + uploadErrors[file.name] = errorMsg + setFileErrors(prev => ({ + ...prev, + [file.name]: errorMsg + })) } }) ) + + // Check if any files failed to upload using our local tracking + const hasErrors = Object.keys(uploadErrors).length > 0 + + // Update toast status + if (hasErrors) { + toast.error(t('documentPanel.uploadDocuments.batch.error'), { id: toastId }) + } else { + toast.success(t('documentPanel.uploadDocuments.batch.success'), { id: toastId }) + } + + // Only update if at least one file was uploaded successfully + if (hasSuccessfulUpload) { + // Refresh document list + if (onDocumentsUploaded) { + onDocumentsUploaded().catch(err => { + console.error('Error refreshing documents:', err) + }) + } + } } catch (err) { - toast.error(t('documentPanel.uploadDocuments.generalError', { error: errorMessage(err) })) + console.error('Unexpected error during upload:', err) + toast.error(t('documentPanel.uploadDocuments.generalError', { error: errorMessage(err) }), { id: toastId }) } finally { setIsUploading(false) - // setOpen(false) } }, - [setIsUploading, setProgresses] + [setIsUploading, setProgresses, setFileErrors, t, onDocumentsUploaded] ) return ( { - if (isUploading && !open) { + if (isUploading) { return } + if (!open) { + setProgresses({}) + setFileErrors({}) + } setOpen(open) }} > @@ -84,7 +201,9 @@ export default function UploadDocumentsDialog() { maxSize={200 * 1024 * 1024} description={t('documentPanel.uploadDocuments.fileTypes')} onUpload={handleDocumentsUpload} + onReject={handleRejectedFiles} progresses={progresses} + fileErrors={fileErrors} disabled={isUploading} /> diff --git a/lightrag_webui/src/components/graph/EditablePropertyRow.tsx b/lightrag_webui/src/components/graph/EditablePropertyRow.tsx new file mode 100644 index 0000000000..a1d0c23cc0 --- /dev/null +++ b/lightrag_webui/src/components/graph/EditablePropertyRow.tsx @@ -0,0 +1,123 @@ +import { useState, useEffect } from 'react' +import { useTranslation } from 'react-i18next' +import { toast } from 'sonner' +import { updateEntity, updateRelation, checkEntityNameExists } from '@/api/lightrag' +import { updateGraphNode, updateGraphEdge } from '@/utils/graphOperations' +import { PropertyName, EditIcon, PropertyValue } from './PropertyRowComponents' +import PropertyEditDialog from './PropertyEditDialog' + +/** + * Interface for the EditablePropertyRow component props + */ +interface EditablePropertyRowProps { + name: string // Property name to display and edit + value: 
any // Initial value of the property + onClick?: () => void // Optional click handler for the property value + entityId?: string // ID of the entity (for node type) + entityType?: 'node' | 'edge' // Type of graph entity + sourceId?: string // Source node ID (for edge type) + targetId?: string // Target node ID (for edge type) + onValueChange?: (newValue: any) => void // Optional callback when value changes + isEditable?: boolean // Whether this property can be edited + tooltip?: string // Optional tooltip to display on hover +} + +/** + * EditablePropertyRow component that supports editing property values + * This component is used in the graph properties panel to display and edit entity properties + */ +const EditablePropertyRow = ({ + name, + value: initialValue, + onClick, + entityId, + entityType, + sourceId, + targetId, + onValueChange, + isEditable = false, + tooltip +}: EditablePropertyRowProps) => { + const { t } = useTranslation() + const [isEditing, setIsEditing] = useState(false) + const [isSubmitting, setIsSubmitting] = useState(false) + const [currentValue, setCurrentValue] = useState(initialValue) + + useEffect(() => { + setCurrentValue(initialValue) + }, [initialValue]) + + const handleEditClick = () => { + if (isEditable && !isEditing) { + setIsEditing(true) + } + } + + const handleCancel = () => { + setIsEditing(false) + } + + const handleSave = async (value: string) => { + if (isSubmitting || value === String(currentValue)) { + setIsEditing(false) + return + } + + setIsSubmitting(true) + + try { + if (entityType === 'node' && entityId) { + let updatedData = { [name]: value } + + if (name === 'entity_id') { + const exists = await checkEntityNameExists(value) + if (exists) { + toast.error(t('graphPanel.propertiesView.errors.duplicateName')) + return + } + updatedData = { 'entity_name': value } + } + + await updateEntity(entityId, updatedData, true) + await updateGraphNode(entityId, name, value) + toast.success(t('graphPanel.propertiesView.success.entityUpdated')) + } else if (entityType === 'edge' && sourceId && targetId) { + const updatedData = { [name]: value } + await updateRelation(sourceId, targetId, updatedData) + await updateGraphEdge(sourceId, targetId, name, value) + toast.success(t('graphPanel.propertiesView.success.relationUpdated')) + } + + setIsEditing(false) + setCurrentValue(value) + onValueChange?.(value) + } catch (error) { + console.error('Error updating property:', error) + toast.error(t('graphPanel.propertiesView.errors.updateFailed')) + } finally { + setIsSubmitting(false) + } + } + + return ( +
+ + : + + +
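The handleSave above branches on whether a node or an edge is being edited, and a rename through entity_id is first checked for collisions. Reduced to its decision logic, it looks roughly like this (updateEntity, updateRelation, and checkEntityNameExists are the imports from the diff; the meaning of updateEntity's third argument mirrors the `true` flag passed above and is otherwise an assumption; toast and graph-refresh plumbing omitted):

```typescript
// Condensed sketch of the save flow's branching.
async function saveProperty(
  args: {
    name: string
    value: string
    entityType: 'node' | 'edge'
    entityId?: string
    sourceId?: string
    targetId?: string
  },
  api: {
    checkEntityNameExists: (name: string) => Promise<boolean>
    updateEntity: (id: string, data: Record<string, string>, allowRename: boolean) => Promise<void>
    updateRelation: (src: string, tgt: string, data: Record<string, string>) => Promise<void>
  }
): Promise<'saved' | 'duplicate' | 'skipped'> {
  if (args.entityType === 'node' && args.entityId) {
    let updatedData: Record<string, string> = { [args.name]: args.value }
    if (args.name === 'entity_id') {
      // Renaming an entity: refuse if the target name is already taken.
      if (await api.checkEntityNameExists(args.value)) return 'duplicate'
      updatedData = { entity_name: args.value }
    }
    await api.updateEntity(args.entityId, updatedData, true)
    return 'saved'
  }
  if (args.entityType === 'edge' && args.sourceId && args.targetId) {
    await api.updateRelation(args.sourceId, args.targetId, { [args.name]: args.value })
    return 'saved'
  }
  return 'skipped' // missing IDs: nothing to persist
}
```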
+ ) +} + +export default EditablePropertyRow diff --git a/lightrag_webui/src/components/graph/GraphControl.tsx b/lightrag_webui/src/components/graph/GraphControl.tsx index baa98bfe2f..8211178ad6 100644 --- a/lightrag_webui/src/components/graph/GraphControl.tsx +++ b/lightrag_webui/src/components/graph/GraphControl.tsx @@ -36,6 +36,8 @@ const GraphControl = ({ disableHoverEffect }: { disableHoverEffect?: boolean }) const enableEdgeEvents = useSettingsStore.use.enableEdgeEvents() const renderEdgeLabels = useSettingsStore.use.showEdgeLabel() const renderLabels = useSettingsStore.use.showNodeLabel() + const minEdgeSize = useSettingsStore.use.minEdgeSize() + const maxEdgeSize = useSettingsStore.use.maxEdgeSize() const selectedNode = useGraphStore.use.selectedNode() const focusedNode = useGraphStore.use.focusedNode() const selectedEdge = useGraphStore.use.selectedEdge() @@ -97,7 +99,10 @@ const GraphControl = ({ disableHoverEffect }: { disableHoverEffect?: boolean }) const events: Record = { enterNode: (event: NodeEvent) => { if (!isButtonPressed(event.event.original)) { - setFocusedNode(event.node) + const graph = sigma.getGraph() + if (graph.hasNode(event.node)) { + setFocusedNode(event.node) + } } }, leaveNode: (event: NodeEvent) => { @@ -106,8 +111,11 @@ const GraphControl = ({ disableHoverEffect }: { disableHoverEffect?: boolean }) } }, clickNode: (event: NodeEvent) => { - setSelectedNode(event.node) - setSelectedEdge(null) + const graph = sigma.getGraph() + if (graph.hasNode(event.node)) { + setSelectedNode(event.node) + setSelectedEdge(null) + } }, clickStage: () => clearSelection() } @@ -136,6 +144,51 @@ const GraphControl = ({ disableHoverEffect }: { disableHoverEffect?: boolean }) registerEvents(events) }, [registerEvents, enableEdgeEvents]) + /** + * When edge size settings change, recalculate edge sizes and refresh the sigma instance + * to ensure changes take effect immediately + */ + useEffect(() => { + if (sigma && sigmaGraph) { + // Get the graph from sigma + const graph = sigma.getGraph() + + // Find min and max weight values + let minWeight = Number.MAX_SAFE_INTEGER + let maxWeight = 0 + + graph.forEachEdge(edge => { + // Get original weight (before scaling) + const weight = graph.getEdgeAttribute(edge, 'originalWeight') || 1 + if (typeof weight === 'number') { + minWeight = Math.min(minWeight, weight) + maxWeight = Math.max(maxWeight, weight) + } + }) + + // Scale edge sizes based on weight range and current min/max edge size settings + const weightRange = maxWeight - minWeight + if (weightRange > 0) { + const sizeScale = maxEdgeSize - minEdgeSize + graph.forEachEdge(edge => { + const weight = graph.getEdgeAttribute(edge, 'originalWeight') || 1 + if (typeof weight === 'number') { + const scaledSize = minEdgeSize + sizeScale * Math.pow((weight - minWeight) / weightRange, 0.5) + graph.setEdgeAttribute(edge, 'size', scaledSize) + } + }) + } else { + // If all weights are the same, use default size + graph.forEachEdge(edge => { + graph.setEdgeAttribute(edge, 'size', minEdgeSize) + }) + } + + // Refresh the sigma instance to apply changes + sigma.refresh() + } + }, [sigma, sigmaGraph, minEdgeSize, maxEdgeSize]) + /** * When component mount or hovered node change * => Setting the sigma reducers diff --git a/lightrag_webui/src/components/graph/GraphLabels.tsx b/lightrag_webui/src/components/graph/GraphLabels.tsx index 305f63bd6a..2b1e3cd777 100644 --- a/lightrag_webui/src/components/graph/GraphLabels.tsx +++ b/lightrag_webui/src/components/graph/GraphLabels.tsx @@ -1,4 +1,4 @@ 
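The GraphControl effect above recomputes every edge's size from its stored originalWeight whenever the min/max edge-size settings change, then calls sigma.refresh(). The square-root curve compresses the high end so heavy edges do not dwarf light ones. A worked example of the formula:

```typescript
// size = sizeMin + (sizeMax - sizeMin) * sqrt((w - wMin) / (wMax - wMin))
function scaleEdgeSize(weight: number, wMin: number, wMax: number, sizeMin: number, sizeMax: number): number {
  const range = wMax - wMin
  if (range <= 0) return sizeMin // all weights equal: fall back to the minimum size
  return sizeMin + (sizeMax - sizeMin) * Math.pow((weight - wMin) / range, 0.5)
}

scaleEdgeSize(1, 1, 9, 1, 5) // 1 (lightest edge)
scaleEdgeSize(5, 1, 9, 1, 5) // 1 + 4 * sqrt(0.5) ≈ 3.83, above the linear midpoint of 3
scaleEdgeSize(9, 1, 9, 1, 5) // 5 (heaviest edge)
```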
-import { useCallback, useEffect, useRef } from 'react' +import { useCallback, useEffect } from 'react' import { AsyncSelect } from '@/components/ui/AsyncSelect' import { useSettingsStore } from '@/stores/settings' import { useGraphStore } from '@/stores/graph' @@ -12,44 +12,9 @@ const GraphLabels = () => { const { t } = useTranslation() const label = useSettingsStore.use.queryLabel() const allDatabaseLabels = useGraphStore.use.allDatabaseLabels() - const rawGraph = useGraphStore.use.rawGraph() - const labelsLoadedRef = useRef(false) + const labelsFetchAttempted = useGraphStore.use.labelsFetchAttempted() - // Track if a fetch is in progress to prevent multiple simultaneous fetches - const fetchInProgressRef = useRef(false) - - // Fetch labels and trigger initial data load - useEffect(() => { - // Check if we've already attempted to fetch labels in this session - const labelsFetchAttempted = useGraphStore.getState().labelsFetchAttempted - - // Only fetch if we haven't attempted in this session and no fetch is in progress - if (!labelsFetchAttempted && !fetchInProgressRef.current) { - fetchInProgressRef.current = true - // Set global flag to indicate we've attempted to fetch in this session - useGraphStore.getState().setLabelsFetchAttempted(true) - - useGraphStore.getState().fetchAllDatabaseLabels() - .then(() => { - labelsLoadedRef.current = true - fetchInProgressRef.current = false - }) - .catch((error) => { - console.error('Failed to fetch labels:', error) - fetchInProgressRef.current = false - // Reset global flag to allow retry - useGraphStore.getState().setLabelsFetchAttempted(false) - }) - } - }, []) // Empty dependency array ensures this only runs once on mount - - // Trigger data load when labels are loaded - useEffect(() => { - if (labelsLoadedRef.current) { - // Reset the fetch attempted flag to force a new data fetch - useGraphStore.getState().setGraphDataFetchAttempted(false) - } - }, [label]) + // Remove initial label fetch effect as it's now handled by fetchGraph based on lastSuccessfulQueryLabel const getSearchEngine = useCallback(() => { // Create search engine @@ -81,8 +46,30 @@ const GraphLabels = () => { let result: string[] = labels if (query) { - // Search labels + // Search labels using MiniSearch result = searchEngine.search(query).map((r: { id: number }) => labels[r.id]) + + // Add middle-content matching if results are few + // This enables matching content in the middle of text, not just from the beginning + if (result.length < 5) { + // Get already matched labels to avoid duplicates + const matchedLabels = new Set(result) + + // Perform middle-content matching on all labels + const middleMatchResults = labels.filter(label => { + // Skip already matched labels + if (matchedLabels.has(label)) return false + + // Match if label contains query string but doesn't start with it + return label && + typeof label === 'string' && + !label.toLowerCase().startsWith(query.toLowerCase()) && + label.toLowerCase().includes(query.toLowerCase()) + }) + + // Merge results + result = [...result, ...middleMatchResults] + } } return result.length <= labelListLimit @@ -92,43 +79,63 @@ const GraphLabels = () => { [getSearchEngine] ) + // Validate label + useEffect(() => { + + if (labelsFetchAttempted) { + if (allDatabaseLabels.length > 1) { + if (label && label !== '*' && !allDatabaseLabels.includes(label)) { + console.log(`Label "${label}" not in available labels, setting to "*"`); + useSettingsStore.getState().setQueryLabel('*'); + } else { + console.log(`Label "${label}" is valid`); + 
} + } else if (label && allDatabaseLabels.length <= 1 && label !== '*') { + console.log('Available labels list is empty, setting label to empty'); + useSettingsStore.getState().setQueryLabel(''); + } + useGraphStore.getState().setLabelsFetchAttempted(false) + } + + }, [allDatabaseLabels, label, labelsFetchAttempted]); + const handleRefresh = useCallback(() => { - // Reset labels fetch status to allow fetching labels again + // Reset fetch status flags useGraphStore.getState().setLabelsFetchAttempted(false) - - // Reset graph data fetch status directly, not depending on allDatabaseLabels changes useGraphStore.getState().setGraphDataFetchAttempted(false) - // Fetch all labels again - useGraphStore.getState().fetchAllDatabaseLabels() - .then(() => { - // Trigger a graph data reload by changing the query label back and forth - const currentLabel = useSettingsStore.getState().queryLabel - useSettingsStore.getState().setQueryLabel('') - setTimeout(() => { - useSettingsStore.getState().setQueryLabel(currentLabel) - }, 0) - }) - .catch((error) => { - console.error('Failed to refresh labels:', error) - }) - }, []) + // Clear last successful query label to ensure labels are fetched + useGraphStore.getState().setLastSuccessfulQueryLabel('') + + // Get current label + const currentLabel = useSettingsStore.getState().queryLabel + + // If current label is empty, use default label '*' + if (!currentLabel) { + useSettingsStore.getState().setQueryLabel('*') + } else { + // Trigger data reload + useSettingsStore.getState().setQueryLabel('') + setTimeout(() => { + useSettingsStore.getState().setQueryLabel(currentLabel) + }, 0) + } + }, []);
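Both GraphLabels above and GraphSearch below pad sparse MiniSearch results with a substring pass: when the engine returns fewer than five hits, labels that contain the query somewhere other than the start are appended, deduplicated against what the engine already matched. The shared shape of that fallback as a standalone sketch:

```typescript
// Engine hits keep their ranking; middle-of-string matches are appended.
// Prefix matches are skipped here because the engine already covers them.
function withMiddleMatches(engineHits: string[], allLabels: string[], query: string): string[] {
  if (engineHits.length >= 5) return engineHits
  const seen = new Set(engineHits)
  const q = query.toLowerCase()
  const middleMatches = allLabels.filter(
    (label) =>
      !seen.has(label) &&
      !label.toLowerCase().startsWith(q) &&
      label.toLowerCase().includes(q)
  )
  return [...engineHits, ...middleMatches]
}
```

return (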
- {rawGraph && ( - - )} + {/* Always show refresh button */} + - className="ml-2" + className="min-w-[300px]" triggerClassName="max-h-8" searchInputClassName="max-h-8" triggerTooltip={t('graphPanel.graphLabels.selectTooltip')} @@ -141,20 +148,23 @@ const GraphLabels = () => { placeholder={t('graphPanel.graphLabels.placeholder')} value={label !== null ? label : '*'} onChange={(newLabel) => { - const currentLabel = useSettingsStore.getState().queryLabel + const currentLabel = useSettingsStore.getState().queryLabel; // select the last item means query all if (newLabel === '...') { - newLabel = '*' + newLabel = '*'; } // Handle reselecting the same label if (newLabel === currentLabel && newLabel !== '*') { - newLabel = '*' + newLabel = '*'; } - // Update the label, which will trigger the useEffect to handle data loading - useSettingsStore.getState().setQueryLabel(newLabel) + // Reset graphDataFetchAttempted flag to ensure data fetch is triggered + useGraphStore.getState().setGraphDataFetchAttempted(false); + + // Update the label to trigger data loading + useSettingsStore.getState().setQueryLabel(newLabel); }} clearable={false} // Prevent clearing value on reselect /> diff --git a/lightrag_webui/src/components/graph/GraphSearch.tsx b/lightrag_webui/src/components/graph/GraphSearch.tsx index 51e76a0bf4..40840f5979 100644 --- a/lightrag_webui/src/components/graph/GraphSearch.tsx +++ b/lightrag_webui/src/components/graph/GraphSearch.tsx @@ -123,13 +123,42 @@ export const GraphSearchInput = ({ } // If has query, search nodes and verify they still exist - const result: OptionItem[] = searchEngine.search(query) + let result: OptionItem[] = searchEngine.search(query) .filter((r: { id: string }) => graph.hasNode(r.id)) .map((r: { id: string }) => ({ id: r.id, type: 'nodes' })) + // Add middle-content matching if results are few + // This enables matching content in the middle of text, not just from the beginning + if (result.length < 5) { + // Get already matched IDs to avoid duplicates + const matchedIds = new Set(result.map(item => item.id)) + + // Perform middle-content matching on all nodes + const middleMatchResults = graph.nodes() + .filter(id => { + // Skip already matched nodes + if (matchedIds.has(id)) return false + + // Get node label + const label = graph.getNodeAttribute(id, 'label') + // Match if label contains query string but doesn't start with it + return label && + typeof label === 'string' && + !label.toLowerCase().startsWith(query.toLowerCase()) && + label.toLowerCase().includes(query.toLowerCase()) + }) + .map(id => ({ + id, + type: 'nodes' as const + })) + + // Merge results + result = [...result, ...middleMatchResults] + } + // prettier-ignore return result.length <= searchResultLimit ? 
result diff --git a/lightrag_webui/src/components/graph/LayoutsControl.tsx b/lightrag_webui/src/components/graph/LayoutsControl.tsx index 2f0cc50a01..c4ec67bedd 100644 --- a/lightrag_webui/src/components/graph/LayoutsControl.tsx +++ b/lightrag_webui/src/components/graph/LayoutsControl.tsx @@ -218,8 +218,8 @@ const LayoutsControl = () => { maxIterations: maxIterations, settings: { attraction: 0.0003, // Lower attraction force to reduce oscillation - repulsion: 0.05, // Lower repulsion force to reduce oscillation - gravity: 0.01, // Increase gravity to make nodes converge to center faster + repulsion: 0.02, // Lower repulsion force to reduce oscillation + gravity: 0.02, // Increase gravity to make nodes converge to center faster inertia: 0.4, // Lower inertia to add damping effect maxMove: 100 // Limit maximum movement per step to prevent large jumps } @@ -289,7 +289,7 @@ const LayoutsControl = () => { ) return ( - <> +
{layouts[layout] && 'worker' in layouts[layout] && ( { - + @@ -331,7 +338,7 @@ const LayoutsControl = () => {
- +
) } diff --git a/lightrag_webui/src/components/graph/Legend.tsx b/lightrag_webui/src/components/graph/Legend.tsx new file mode 100644 index 0000000000..d26d11a3a6 --- /dev/null +++ b/lightrag_webui/src/components/graph/Legend.tsx @@ -0,0 +1,41 @@ +import React from 'react' +import { useTranslation } from 'react-i18next' +import { useGraphStore } from '@/stores/graph' +import { Card } from '@/components/ui/Card' +import { ScrollArea } from '@/components/ui/ScrollArea' + +interface LegendProps { + className?: string +} + +const Legend: React.FC = ({ className }) => { + const { t } = useTranslation() + const typeColorMap = useGraphStore.use.typeColorMap() + + if (!typeColorMap || typeColorMap.size === 0) { + return null + } + + return ( + +

{t('graphPanel.legend')}

+ +
+ {Array.from(typeColorMap.entries()).map(([type, color]) => ( +
+
+ + {t(`graphPanel.nodeTypes.${type.toLowerCase()}`, type)} + +
+ ))} +
+ + + ) +} + +export default Legend diff --git a/lightrag_webui/src/components/graph/LegendButton.tsx b/lightrag_webui/src/components/graph/LegendButton.tsx new file mode 100644 index 0000000000..cf036721e4 --- /dev/null +++ b/lightrag_webui/src/components/graph/LegendButton.tsx @@ -0,0 +1,32 @@ +import { useCallback } from 'react' +import { BookOpenIcon } from 'lucide-react' +import Button from '@/components/ui/Button' +import { controlButtonVariant } from '@/lib/constants' +import { useSettingsStore } from '@/stores/settings' +import { useTranslation } from 'react-i18next' + +/** + * Component that toggles legend visibility. + */ +const LegendButton = () => { + const { t } = useTranslation() + const showLegend = useSettingsStore.use.showLegend() + const setShowLegend = useSettingsStore.use.setShowLegend() + + const toggleLegend = useCallback(() => { + setShowLegend(!showLegend) + }, [showLegend, setShowLegend]) + + return ( + + ) +} + +export default LegendButton diff --git a/lightrag_webui/src/components/graph/PropertiesView.tsx b/lightrag_webui/src/components/graph/PropertiesView.tsx index 627049ea1a..b63f1b093a 100644 --- a/lightrag_webui/src/components/graph/PropertiesView.tsx +++ b/lightrag_webui/src/components/graph/PropertiesView.tsx @@ -5,6 +5,7 @@ import Button from '@/components/ui/Button' import useLightragGraph from '@/hooks/useLightragGraph' import { useTranslation } from 'react-i18next' import { GitBranchPlus, Scissors } from 'lucide-react' +import EditablePropertyRow from './EditablePropertyRow' /** * Component that view properties of elements in graph. @@ -169,12 +170,22 @@ const PropertyRow = ({ name, value, onClick, - tooltip + tooltip, + entityId, + entityType, + sourceId, + targetId, + isEditable = false }: { name: string value: any onClick?: () => void tooltip?: string + entityId?: string + entityType?: 'node' | 'edge' + sourceId?: string + targetId?: string + isEditable?: boolean }) => { const { t } = useTranslation() @@ -184,9 +195,27 @@ const PropertyRow = ({ return translation === translationKey ? name : translation } + // Use EditablePropertyRow for editable fields (description, entity_id and keywords) + if (isEditable && (name === 'description' || name === 'entity_id' || name === 'keywords')) { + return ( + + ) + } + + // For non-editable fields, use the regular Text component return (
- : + {getPropertyNameTranslation(name)}: { return (
- +

{t('graphPanel.propertiesView.node.title')}

- +

{t('graphPanel.propertiesView.node.properties')}

{Object.keys(node.properties) .sort() .map((name) => { - return + return ( + + ) })}
{node.relationships.length > 0 && ( <> -