diff --git a/CodeGen/docker_compose/intel/cpu/xeon/README.md b/CodeGen/docker_compose/intel/cpu/xeon/README.md
index fa0093e0ad..70db1bfbef 100644
--- a/CodeGen/docker_compose/intel/cpu/xeon/README.md
+++ b/CodeGen/docker_compose/intel/cpu/xeon/README.md
@@ -100,16 +100,90 @@ export host_ip=${your_ip_address}
 export HUGGINGFACEHUB_API_TOKEN=your_huggingface_token
 ```

-2. Set Netowork Proxy
+2. Set Network Proxy

 **If you access the public network through a proxy, set the network proxy; otherwise, skip this step**

 ```bash
-export no_proxy=${your_no_proxy}
+export no_proxy=${no_proxy},${host_ip}
 export http_proxy=${your_http_proxy}
 export https_proxy=${your_https_proxy}
 ```

+## 🚀 Build Docker Images
+
+If the Docker images you need are not yet available on Docker Hub, you can build them locally.
+
+### 1. Build the LLM Docker Image
+
+```bash
+git clone https://github.com/opea-project/GenAIComps.git
+cd GenAIComps
+docker build -t opea/llm-textgen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
+```
+
+### 2. Build the Retriever Image
+
+```bash
+docker build -t opea/retriever:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/retrievers/src/Dockerfile .
+```
+
+### 3. Build the Dataprep Image
+
+```bash
+docker build -t opea/dataprep:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/dataprep/src/Dockerfile .
+```
+
+### 4. Build the MegaService Docker Image
+
+To construct the MegaService, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `codegen.py` Python script. Build the MegaService Docker image via the command below:
+
+```bash
+git clone https://github.com/opea-project/GenAIExamples
+cd GenAIExamples/CodeGen
+docker build -t opea/codegen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+```
+
+### 5. Build the Gradio UI Image (Recommended)
+
+Build the frontend Gradio image via the command below:
+
+```bash
+cd GenAIExamples/CodeGen/ui
+docker build -t opea/codegen-gradio-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile.gradio .
+```
+
+### 5a. Build the CodeGen React UI Docker Image (Optional)
+
+Build the React frontend Docker image via the command below:
+
+**Export the value of the public IP address of your Xeon server to the `host_ip` environment variable**
+
+```bash
+cd GenAIExamples/CodeGen/ui
+docker build --no-cache -t opea/codegen-react-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile.react .
+```
+
+### 5b. Build the Svelte UI Docker Image (Optional)
+
+Build the Svelte frontend Docker image via the command below:
+
+```bash
+cd GenAIExamples/CodeGen/ui
+docker build -t opea/codegen-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
+```
+
+Then run the command `docker images`; you should see the following Docker images:
+
+- `opea/llm-textgen:latest`
+- `opea/retriever:latest`
+- `opea/dataprep:latest`
+- `opea/codegen:latest`
+- `opea/codegen-gradio-ui:latest` (Recommended)
+- `opea/codegen-ui:latest` (Optional)
+- `opea/codegen-react-ui:latest` (Optional)
+
+
 ### Start the Docker Containers for All Services

 CodeGen supports both a TGI service and a vLLM service; you can choose to start either one of them.
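+Before starting the containers, you can optionally verify that all images are present. A quick hedged check, assuming the default `opea/` image names built above:
+
+```bash
+# List only the locally built OPEA images.
+docker images --filter=reference='opea/*'
+```
+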
@@ -139,8 +213,8 @@ docker compose --profile codegen-xeon-vllm up -d

   ```bash
   curl http://${host_ip}:8028/v1/chat/completions \
     -X POST \
-    -d '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception."}], "max_tokens":32}' \
-    -H 'Content-Type: application/json'
+    -H 'Content-Type: application/json' \
+    -d '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception."}], "max_tokens":32}'
   ```

 2. LLM Microservices
@@ -148,28 +223,59 @@ docker compose --profile codegen-xeon-vllm up -d

   ```bash
   curl http://${host_ip}:9000/v1/chat/completions\
     -X POST \
-    -d '{"query":"Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception.","max_tokens":256,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"stream":true}' \
-    -H 'Content-Type: application/json'
+    -H 'Content-Type: application/json' \
+    -d '{"query":"Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception.","max_tokens":256,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"stream":true}'
   ```

-3. MegaService
+3. Dataprep Microservice
+
+   Replace the file name placeholders below with your actual file names.
+
+   ```bash
+   curl http://${host_ip}:6007/v1/dataprep/ingest \
+     -X POST \
+     -H "Content-Type: multipart/form-data" \
+     -F "files=@./file1.pdf" \
+     -F "files=@./file2.txt" \
+     -F "index_name=my_API_document"
+   ```
+
+4. MegaService

   ```bash
-  curl http://${host_ip}:7778/v1/codegen -H "Content-Type: application/json" -d '{
-    "messages": "Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception."
-    }'
+  curl http://${host_ip}:7778/v1/codegen \
+    -H "Content-Type: application/json" \
+    -d '{"messages": "Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception."}'
   ```

-  If the user wants a CodeGen service with RAG and Agents based on dedicated documentation.
+  To use the CodeGen service with RAG and agents enabled, reference a previously ingested index:

   ```bash
-  curl http://localhost:7778/v1/codegen \
+  curl http://${host_ip}:7778/v1/codegen \
    -H "Content-Type: application/json" \
    -d '{"agents_flag": "True", "index_name": "my_API_document", "messages": "Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception."}'
   ```
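+
+   To inspect a complete streamed answer offline, you can capture the raw response to a file. A hedged sketch: `codegen_response.log` is an arbitrary local file name and the prompt is only illustrative.
+
+   ```bash
+   curl --no-buffer -s http://${host_ip}:7778/v1/codegen \
+     -H "Content-Type: application/json" \
+     -d '{"messages": "Write a Python function that checks whether a number is prime."}' \
+     -o codegen_response.log
+   ```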

-## 🚀 Launch the UI
+## 🚀 Launch the Gradio Based UI (Recommended)
+
+To access the Gradio frontend, follow the steps in [this README](../../../../ui/gradio/README.md).
+
+Code Generation Tab
+![project-screenshot](../../../../assets/img/codegen_gradio_ui_main.png)
+
+Resource Management Tab
+![project-screenshot](../../../../assets/img/codegen_gradio_ui_main.png)
+
+Uploading a Knowledge Index
+
+![project-screenshot](../../../../assets/img/codegen_gradio_ui_dataprep.png)
+
+Here is an example of running a query in the Gradio UI using an index:
+
+![project-screenshot](../../../../assets/img/codegen_gradio_ui_query.png)
+
+
+## 🚀 Launch the Svelte Based UI (Optional)

 To access the frontend, open the following URL in your browser: `http://{host_ip}:5173`. By default, the UI runs on port 5173 internally. If you prefer to use a different host port to access the frontend, you can modify the port mapping in the `compose.yaml` file as shown below:
@@ -282,54 +388,3 @@ For example:

 - Ask question and get answer
   ![qna](../../../../assets/img/codegen_qna.png)
-
-## 🚀 Download or Build Docker Images
-
-Should the Docker image you seek not yet be available on Docker Hub, you can build the Docker image locally.
-
-### 1. Build the LLM Docker Image
-
-```bash
-git clone https://github.com/opea-project/GenAIComps.git
-cd GenAIComps
-docker build -t opea/llm-textgen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
-```
-
-### 2. Build the MegaService Docker Image
-
-To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `codegen.py` Python script. Build MegaService Docker image via the command below:
-
-```bash
-git clone https://github.com/opea-project/GenAIExamples
-cd GenAIExamples/CodeGen
-docker build --no-cache -t opea/codegen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
-```
-
-### 3. Build the UI Docker Image
-
-Build the frontend Docker image via the command below:
-
-```bash
-cd GenAIExamples/CodeGen/ui
-docker build --no-cache -t opea/codegen-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
-```
-
-### 4. Build CodeGen React UI Docker Image (Optional)
-
-Build react frontend Docker image via below command:
-
-**Export the value of the public IP address of your Xeon server to the `host_ip` environment variable**
-
-```bash
-cd GenAIExamples/CodeGen/ui
-docker build --no-cache -t opea/codegen-react-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile.react .
-```
-
-Then run the command `docker images`, you will have the following Docker Images:
-
-- `opea/llm-textgen:latest`
-- `opea/codegen:latest`
-- `opea/codegen-ui:latest`
-- `opea/codegen-gradio-ui:latest`
-- `opea/codegen-react-ui:latest` (optional)
-
diff --git a/CodeGen/docker_compose/intel/hpu/gaudi/README.md b/CodeGen/docker_compose/intel/hpu/gaudi/README.md
index 5408e33654..bc1444dc7b 100644
--- a/CodeGen/docker_compose/intel/hpu/gaudi/README.md
+++ b/CodeGen/docker_compose/intel/hpu/gaudi/README.md
@@ -93,16 +93,89 @@ export host_ip=${your_ip_address}
 export HUGGINGFACEHUB_API_TOKEN=your_huggingface_token
 ```

-2. Set Netowork Proxy
+2. Set Network Proxy

 **If you access the public network through a proxy, set the network proxy; otherwise, skip this step**

 ```bash
-export no_proxy=${your_no_proxy}
+export no_proxy=${no_proxy},${host_ip}
 export http_proxy=${your_http_proxy}
 export https_proxy=${your_https_proxy}
 ```

+## 🚀 Build Docker Images
+
+If the Docker images are not yet published to Docker Hub, build them locally first.
+
+### 1. Build the LLM Docker Image
+
+```bash
+git clone https://github.com/opea-project/GenAIComps.git
+cd GenAIComps
+docker build -t opea/llm-textgen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
+```
+
+### 2. Build the Retriever Image
+
+```bash
+docker build -t opea/retriever:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/retrievers/src/Dockerfile .
+```
+
+### 3. Build the Dataprep Image
+
+```bash
+docker build -t opea/dataprep:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/dataprep/src/Dockerfile .
+```
+
+### 4. Build the MegaService Docker Image
+
+To construct the MegaService, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `codegen.py` Python script. Build the MegaService Docker image via the command below:
+
+```bash
+git clone https://github.com/opea-project/GenAIExamples
+cd GenAIExamples/CodeGen
+docker build -t opea/codegen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+```
+
+### 5. Build the Gradio UI Image (Recommended)
+
+Build the frontend Gradio image via the command below:
+
+```bash
+cd GenAIExamples/CodeGen/ui
+docker build -t opea/codegen-gradio-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile.gradio .
+```
+
+### 5a. Build the CodeGen React UI Docker Image (Optional)
+
+Build the React frontend Docker image via the command below:
+
+**Export the value of the public IP address of your Gaudi server to the `host_ip` environment variable**
+
+```bash
+cd GenAIExamples/CodeGen/ui
+docker build --no-cache -t opea/codegen-react-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile.react .
+```
+
+### 5b. Build the Svelte UI Docker Image (Optional)
+
+Build the Svelte frontend Docker image via the command below:
+
+```bash
+cd GenAIExamples/CodeGen/ui
+docker build -t opea/codegen-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
+```
+
+Then run the command `docker images`; you should see the following Docker images:
+
+- `opea/llm-textgen:latest`
+- `opea/retriever:latest`
+- `opea/dataprep:latest`
+- `opea/codegen:latest`
+- `opea/codegen-gradio-ui:latest` (Recommended)
+- `opea/codegen-ui:latest` (Optional)
+- `opea/codegen-react-ui:latest` (Optional)
+
 ### Start the Docker Containers for All Services

 CodeGen supports both a TGI service and a vLLM service; you can choose to start either one of them.
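+Before starting the containers, you can optionally verify that all images are present. A quick hedged check, assuming the default `opea/` image names built above:
+
+```bash
+# List only the locally built OPEA images.
+docker images --filter=reference='opea/*'
+```
+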
@@ -132,8 +205,8 @@ docker compose --profile codegen-gaudi-vllm up -d

   ```bash
   curl http://${host_ip}:8028/v1/chat/completions \
     -X POST \
-    -d '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception."}], "max_tokens":32}' \
-    -H 'Content-Type: application/json'
+    -H 'Content-Type: application/json' \
+    -d '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception."}], "max_tokens":32}'
   ```

 2. LLM Microservices
@@ -141,28 +215,58 @@ docker compose --profile codegen-gaudi-vllm up -d

   ```bash
   curl http://${host_ip}:9000/v1/chat/completions\
     -X POST \
-    -d '{"query":"Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception.","max_tokens":256,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"stream":true}' \
-    -H 'Content-Type: application/json'
+    -H 'Content-Type: application/json' \
+    -d '{"query":"Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception.","max_tokens":256,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"stream":true}'
   ```

-3. MegaService
+3. Dataprep Microservice
+
+   Replace the file name placeholders below with your actual file names.
+
+   ```bash
+   curl http://${host_ip}:6007/v1/dataprep/ingest \
+     -X POST \
+     -H "Content-Type: multipart/form-data" \
+     -F "files=@./file1.pdf" \
+     -F "files=@./file2.txt" \
+     -F "index_name=my_API_document"
+   ```
+
+4. MegaService

   ```bash
-  curl http://${host_ip}:7778/v1/codegen -H "Content-Type: application/json" -d '{
-    "messages": "Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception."
-    }'
+  curl http://${host_ip}:7778/v1/codegen \
+    -H "Content-Type: application/json" \
+    -d '{"messages": "Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception."}'
   ```

-  If the user wants a CodeGen service with RAG and Agents based on dedicated documentation.
+  To use the CodeGen service with RAG and agents enabled, reference a previously ingested index:

   ```bash
-  curl http://localhost:7778/v1/codegen \
+  curl http://${host_ip}:7778/v1/codegen \
    -H "Content-Type: application/json" \
    -d '{"agents_flag": "True", "index_name": "my_API_document", "messages": "Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception."}'
   ```
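+
+   To inspect a complete streamed answer offline, you can capture the raw response to a file. A hedged sketch: `codegen_response.log` is an arbitrary local file name and the prompt is only illustrative.
+
+   ```bash
+   curl --no-buffer -s http://${host_ip}:7778/v1/codegen \
+     -H "Content-Type: application/json" \
+     -d '{"messages": "Write a Python function that checks whether a number is prime."}' \
+     -o codegen_response.log
+   ```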

-
-## 🚀 Launch the Svelte Based UI
+## 🚀 Launch the Gradio Based UI (Recommended)
+
+To access the Gradio frontend, follow the steps in [this README](../../../../ui/gradio/README.md).
+
+Code Generation Tab
+![project-screenshot](../../../../assets/img/codegen_gradio_ui_main.png)
+
+Resource Management Tab
+![project-screenshot](../../../../assets/img/codegen_gradio_ui_main.png)
+
+Uploading a Knowledge Index
+
+![project-screenshot](../../../../assets/img/codegen_gradio_ui_dataprep.png)
+
+Here is an example of running a query in the Gradio UI using an index:
+
+![project-screenshot](../../../../assets/img/codegen_gradio_ui_query.png)
+
+
+## 🚀 Launch the Svelte Based UI (Optional)

 To access the frontend, open the following URL in your browser: `http://{host_ip}:5173`. By default, the UI runs on port 5173 internally. If you prefer to use a different host port to access the frontend, you can modify the port mapping in the `compose.yaml` file as shown below:
@@ -272,51 +376,3 @@ For example:

 ![qna](../../../../assets/img/codegen_qna.png)

-## 🚀 Build Docker Images
-
-First of all, you need to build the Docker images locally. This step can be ignored after the Docker images published to the Docker Hub.
-
-### 1. Build the LLM Docker Image
-
-```bash
-git clone https://github.com/opea-project/GenAIComps.git
-cd GenAIComps
-docker build -t opea/llm-textgen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
-```
-
-### 2. Build the MegaService Docker Image
-
-To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `codegen.py` Python script. Build the MegaService Docker image via the command below:
-
-```bash
-git clone https://github.com/opea-project/GenAIExamples
-cd GenAIExamples/CodeGen
-docker build -t opea/codegen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
-```
-
-### 3. Build the UI Docker Image
-
-Construct the frontend Docker image via the command below:
-
-```bash
-cd GenAIExamples/CodeGen/ui
-docker build -t opea/codegen-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
-```
-
-### 4. Build CodeGen React UI Docker Image (Optional)
-
-Build react frontend Docker image via below command:
-
-**Export the value of the public IP address of your Xeon server to the `host_ip` environment variable**
-
-```bash
-cd GenAIExamples/CodeGen/ui
-docker build --no-cache -t opea/codegen-react-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile.react .
-``` - -Then run the command `docker images`, you will have the following Docker images: - -- `opea/llm-textgen:latest` -- `opea/codegen:latest` -- `opea/codegen-ui:latest` -- `opea/codegen-react-ui:latest` diff --git a/CodeGen/ui/gradio/codegen_ui_gradio.py b/CodeGen/ui/gradio/codegen_ui_gradio.py index d0e351960a..84e2a5d652 100644 --- a/CodeGen/ui/gradio/codegen_ui_gradio.py +++ b/CodeGen/ui/gradio/codegen_ui_gradio.py @@ -15,14 +15,11 @@ import uvicorn import json import argparse -# from utils import build_logger, make_temp_image, server_error_msg, split_video from urllib.parse import urlparse from pathlib import Path from fastapi import FastAPI -# from fastapi.responses import JSONResponse, StreamingResponse from fastapi.staticfiles import StaticFiles -# logger = build_logger("gradio_web_server", "gradio_web_server.log") logflag = os.getenv("LOGFLAG", False) # create a FastAPI app @@ -56,7 +53,6 @@ # Define the functions that will be used in the app def conversation_history(prompt, index, use_agent, history): - # Print the language and prompt, and return a placeholder code print(f"Generating code for prompt: {prompt} using index: {index} and use_agent is {use_agent}") history.append([prompt, ""]) response_generator = generate_code(prompt, index, use_agent) @@ -67,7 +63,6 @@ def conversation_history(prompt, index, use_agent, history): def upload_media(media, index=None, chunk_size=1500, chunk_overlap=100): media = media.strip().split("\n") - print("Files passed is ", media, flush=True) if not chunk_size: chunk_size = 1500 if not chunk_overlap: @@ -78,8 +73,6 @@ def upload_media(media, index=None, chunk_size=1500, chunk_overlap=100): for file in media: file_ext = os.path.splitext(file)[-1] if is_valid_url(file): - print(file, " is valid URL") - print("Ingesting URL...") yield ( gr.Textbox( visible=True, @@ -90,7 +83,6 @@ def upload_media(media, index=None, chunk_size=1500, chunk_overlap=100): requests.append(value) yield value elif file_ext in ['.pdf', '.txt']: - print("Ingesting File...") yield ( gr.Textbox( visible=True, @@ -101,7 +93,6 @@ def upload_media(media, index=None, chunk_size=1500, chunk_overlap=100): requests.append(value) yield value else: - print(file, "File type not supported") yield ( gr.Textbox( visible=True, @@ -117,12 +108,9 @@ def upload_media(media, index=None, chunk_size=1500, chunk_overlap=100): value = ingest_url(media, index, chunk_size, chunk_overlap) yield value elif file_ext in ['.pdf', '.txt']: - print("Ingesting File...") value = ingest_file(media, index, chunk_size, chunk_overlap) - # print("Return value is: ", value, flush=True) yield value else: - print(media, "File type not supported") yield ( gr.Textbox( visible=True, @@ -180,16 +168,11 @@ def ingest_file(file, index=None, chunk_size=100, chunk_overlap=150): else: data = {"chunk_size": chunk_size, "chunk_overlap": chunk_overlap} - print("Calling Request Now!") response = requests.post(url=dataprep_ingest_endpoint, headers=headers, files=file_input, data=data) - # print("Ingest Files", response) - print(response.text) - - # table = update_table() + return response.text def ingest_url(url, index=None, chunk_size=100, chunk_overlap=150): - print("URL is ", url) url = str(url) if not is_valid_url(url): return "Invalid URL entered. 
Please enter a valid URL" @@ -203,8 +186,7 @@ def ingest_url(url, index=None, chunk_size=100, chunk_overlap=150): else: url_input = {"link_list": json.dumps([url]), "chunk_size": chunk_size, "chunk_overlap": chunk_overlap} response = requests.post(url=dataprep_ingest_endpoint, headers=headers, data=url_input) - # print("Ingest URL", response) - # table = update_table() + return response.text @@ -216,18 +198,6 @@ def is_valid_url(url): except ValueError: return False - - -# Initialize the file list -file_list = [] - -# def update_files(file): -# # Add the uploaded file to the file list -# file_list.append(file.name) -# file_df["Files"] = file_list -# return file_df - - def get_files(index=None): headers = { # "Content-Type: multipart/form-data" @@ -238,13 +208,10 @@ def get_files(index=None): if index: index = {"index_name": index} response = requests.post(url=dataprep_get_files_endpoint, headers=headers, data=index) - print("Get files with ", index, response) table = response.json() return table else: - # print("URL IS ", dataprep_get_files_endpoint) response = requests.post(url=dataprep_get_files_endpoint, headers=headers) - print("Get files ", response) table = response.json() return table @@ -252,7 +219,6 @@ def update_table(index=None): if index == "All Files": index = None files = get_files(index) - print("Files is ", files) if len(files) == 0: df = pd.DataFrame(files, columns=["Files"]) return df @@ -270,13 +236,11 @@ def delete_file(file, index=None): headers = { # "Content-Type: application/json" } - print("URL IS ", dataprep_delete_files_endpoint) if index: file_input = {"files": open(file, "rb"), "index_name": index} else: file_input = {"files": open(file, "rb")} response = requests.post(url=dataprep_delete_files_endpoint, headers=headers, data=file_input) - print("Delete file ", response) table = update_table() return response.text @@ -286,7 +250,6 @@ def delete_all_files(index=None): # "Content-Type: application/json" } response = requests.post(url=dataprep_delete_files_endpoint, headers=headers, data='{"file_path": "all"}') - print("Delete all files ", response) table = update_table() return "Delete All status: " + response.text @@ -297,7 +260,6 @@ def get_indices(): } response = requests.post(url=dataprep_get_indices_endpoint, headers=headers) indices = ["None"] - print("Get Indices", response) indices += response.json() return indices @@ -325,22 +287,16 @@ def get_file_names(files): prompt_input = gr.Textbox(label="Enter your query") with gr.Column(): with gr.Row(equal_height=True): - # indices = ["None"] + get_indices() database_dropdown = gr.Dropdown(choices=get_indices(), label="Select Index", value="None", scale=10) db_refresh_button = gr.Button("Refresh Dropdown", scale=0.1) db_refresh_button.click(update_indices_dropdown, outputs=database_dropdown) use_agent = gr.Checkbox(label="Use Agent", container=False) - # with gr.Row(scale=1): - generate_button = gr.Button("Generate Code") - - # Connect the generate button to the conversation_history function generate_button.click(conversation_history, inputs=[prompt_input, database_dropdown, use_agent, chatbot], outputs=chatbot) with gr.Tab("Resource Management"): # File management components - # url_button = gr.Button("Process") with gr.Row(): with gr.Column(scale=1): index_name_input = gr.Textbox(label="Index Name") @@ -353,26 +309,13 @@ def get_file_names(files): upload_status = gr.Textbox(label="Upload Status") file_upload.change(get_file_names, inputs=file_upload, outputs=url_input) with gr.Column(scale=1): - # 
table_dropdown = gr.Dropdown(indices) - # file_table = gr.Dataframe(interactive=False, value=update_table()) file_table = gr.Dataframe(interactive=False, value=update_indices()) refresh_button = gr.Button("Refresh", variant="primary", size="sm") refresh_button.click(update_indices, outputs=file_table) - # refresh_button.click(update_indices, outputs=database_dropdown) - # table_dropdown.change(fn=update_table, inputs=table_dropdown, outputs=file_table) - # upload_button.click(upload_media, inputs=[file_upload, index_name_input, chunk_size_input, chunk_overlap_input], outputs=file_table) upload_button.click(upload_media, inputs=[url_input, index_name_input, chunk_size_input, chunk_overlap_input], outputs=upload_status) delete_all_button = gr.Button("Delete All", variant="primary", size="sm") delete_all_button.click(delete_all_files, outputs=upload_status) - - - - # delete_button = gr.Button("Delete Index") - - # selected_file_output = gr.Textbox(label="Selected File") - # delete_button.click(delete_file, inputs=indices, outputs=upload_status) - ui.queue() @@ -397,14 +340,7 @@ def get_file_names(files): "BACKEND_SERVICE_ENDPOINT", f"http://{host_ip}:{MEGA_SERVICE_PORT}/v1/codegen" ) - # dataprep_ingest_endpoint = f"{DATAPREP_ENDPOINT}/ingest" - # dataprep_get_files_endpoint = f"{DATAPREP_ENDPOINT}/get" - # dataprep_delete_files_endpoint = f"{DATAPREP_ENDPOINT}/delete" - # dataprep_get_indices_endpoint = f"{DATAPREP_ENDPOINT}/indices" - - args = parser.parse_args() - # logger.info(f"args: {args}") global gateway_addr gateway_addr = backend_service_endpoint global dataprep_ingest_addr
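
For reference, a minimal launch sketch for the cleaned-up Gradio UI. This is a hedged example: it assumes the `BACKEND_SERVICE_ENDPOINT` environment variable read in the `__main__` block above, and the IP address is only a placeholder.

```bash
# Point the UI at a running CodeGen MegaService, then start it.
export host_ip=192.168.1.100  # placeholder address
export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:7778/v1/codegen"
python codegen_ui_gradio.py
```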