Skip to content

Commit

Permalink
ADDED: streaming response endpoint and chatbot endpoint
Browse files Browse the repository at this point in the history
  • Loading branch information
AquibPy committed Apr 23, 2024
1 parent 7e0e48a commit 777df6a
Show file tree
Hide file tree
Showing 3 changed files with 140 additions and 4 deletions.
22 changes: 18 additions & 4 deletions api.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
import os
from fastapi import FastAPI,Form,File,UploadFile
from fastapi import FastAPI,Form,File,UploadFile, Request
from fastapi.templating import Jinja2Templates
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse,RedirectResponse
from fastapi.responses import JSONResponse,RedirectResponse,StreamingResponse
from typing import List,Optional
from pydantic import BaseModel
import google.generativeai as genai
Expand All @@ -10,7 +11,7 @@
from mongo import MongoDB
from helper_functions import get_qa_chain,get_gemini_response,get_url_doc_qa,extract_transcript_details,\
get_gemini_response_health,get_gemini_pdf,read_sql_query,remove_substrings,questions_generator,groq_pdf,\
summarize_audio
summarize_audio,chatbot_send_message
from langchain_groq import ChatGroq
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
Expand All @@ -19,6 +20,8 @@
app = FastAPI(title="Genify By Mohd Aquib",
summary="This API contains routes of different Gen AI usecases")

templates = Jinja2Templates(directory="templates")

app.allow_dangerous_deserialization = True

app.add_middleware(
Expand All @@ -37,6 +40,11 @@ class ResponseText(BaseModel):
async def home():
return RedirectResponse("/docs")


@app.get("/chatbot",description=" Talk to chatbot")
async def chat(request: Request):
    """Serve the chatbot web UI.

    Renders ``templates/index.html``; the request object is passed in the
    context because Jinja2Templates requires it for URL generation.
    """
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)

@app.post("/invoice_extractor",description="This route extracts information from invoices based on provided images and prompts.")
async def gemini(image_file: UploadFile = File(...), prompt: str = Form(...)):
image = image_file.file.read()
Expand Down Expand Up @@ -325,4 +333,10 @@ async def summarize_audio_endpoint(audio_file: UploadFile = File(...)):
print(result)
return ResponseText(response=summary_text)
except Exception as e:
return {"error": str(e)}
return {"error": str(e)}


@app.post("/stream_chat",description="This route provide the data from LLM in the streaming response.")
async def stream_chat(message: str = Form(...)):
    """Stream the LLM reply to *message* back to the client.

    Wraps the async token generator from ``chatbot_send_message`` in a
    StreamingResponse so tokens are flushed as they arrive.
    """
    token_stream = chatbot_send_message(message)
    return StreamingResponse(token_stream, media_type="text/event-stream")
30 changes: 30 additions & 0 deletions helper_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,10 @@
import sqlite3
from langchain_community.embeddings import GooglePalmEmbeddings
import tempfile
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable
import asyncio
from langchain.schema import HumanMessage

load_dotenv()
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
Expand Down Expand Up @@ -235,6 +239,32 @@ async def summarize_audio(audio_file):

return response.text


async def chatbot_send_message(content: str) -> AsyncIterable[str]:
    """Yield LLM response tokens for *content* as they are generated.

    Runs a streaming ChatGroq completion in a background task and relays
    each token through an AsyncIteratorCallbackHandler, so the caller
    (the /stream_chat endpoint) can forward tokens to the client
    incrementally instead of waiting for the full response.

    Args:
        content: The user's chat message.

    Yields:
        str: Individual response tokens, in generation order.
    """
    # Bridge between the LangChain callback API and async iteration:
    # the model pushes tokens into this handler, we pull them out below.
    callback = AsyncIteratorCallbackHandler()
    model = ChatGroq(
        temperature=0,
        # NOTE(review): raises KeyError at call time if GROQ_API_KEY is
        # unset — presumably acceptable here; verify deployment env.
        groq_api_key=os.environ['GROQ_API_KEY'],
        model_name="llama3-70b-8192",
        streaming=True,  # required so tokens arrive via the callback
        verbose=True,
        callbacks=[callback],
    )

    # Start generation in the background; tokens flow through `callback`
    # while this coroutine consumes them concurrently.
    task = asyncio.create_task(
        model.agenerate(messages=[[HumanMessage(content=content)]])
    )

    try:
        # Relay tokens to the caller as they stream in.
        async for token in callback.aiter():
            yield token
    except Exception as e:
        # Best-effort: log and stop streaming rather than crash the response.
        print(f"Caught exception: {e}")
    finally:
        # Unblock any pending aiter() consumer so the iterator terminates.
        callback.done.set()

    # Surface any error the generation task raised after streaming ends.
    await task

if __name__ == "__main__":
create_vector_db()
chain = get_qa_chain()
Expand Down
92 changes: 92 additions & 0 deletions templates/index.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
<!DOCTYPE html>
<html>

<head>
    <style>
        body {
            font-family: Arial, sans-serif;
            margin: 0;
            padding: 0;
            display: flex;
            justify-content: center;
            align-items: center;
            height: 100vh;
            background-color: #f2f2f2;
        }

        .chat-container {
            border-radius: 5px;
            background-color: white;
            padding: 20px;
            max-width: 500px;
            width: 100%;
            box-shadow: 0px 0px 10px rgba(0, 0, 0, 0.1);
        }

        #result {
            height: 400px;
            overflow-y: auto;
            border: 1px solid #ccc;
            padding: 10px;
            margin-bottom: 10px;
        }

        #message {
            width: 70%;
            padding: 10px;
            margin-right: 10px;
            border-radius: 5px;
            border: 1px solid #ccc;
        }

        button {
            padding: 10px 20px;
            border: none;
            border-radius: 5px;
            background-color: #007BFF;
            color: white;
        }
    </style>
</head>

<body>

    <div class="chat-container">
        <h1>Chat with AI</h1>
        <div id="result"></div>

        <input type="text" id="message" placeholder="Type your message here">
        <button onclick="sendMessage()">Send Message</button>
    </div>

    <script>
        // POST the message to the streaming endpoint and append tokens to
        // the chat box as they arrive.
        async function sendMessage() {
            var message = document.getElementById("message").value;
            // Relative URL: works whatever host/port the app is served on
            // (the previous hard-coded http://localhost:8000 broke any
            // non-local deployment).
            var response = await fetch('/stream_chat', {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/x-www-form-urlencoded' // URL-encoded form data
                },
                body: 'message=' + encodeURIComponent(message)
            });

            var reader = response.body.getReader();
            var decoder = new TextDecoder('utf-8');
            var resultBox = document.getElementById("result");

            // Append model output as TEXT nodes, never innerHTML: the LLM
            // response is untrusted, and innerHTML would let it inject
            // markup or script into the page (XSS).
            function appendToken(token, lineBreak) {
                resultBox.appendChild(document.createTextNode(lineBreak ? token : token + ' '));
                if (lineBreak) {
                    resultBox.appendChild(document.createElement('br'));
                }
            }

            for (;;) {
                var chunk = await reader.read();
                if (chunk.done) {
                    break;
                }
                // {stream: true} buffers partial multi-byte UTF-8 sequences
                // split across chunks instead of emitting U+FFFD garbage.
                var token = decoder.decode(chunk.value, { stream: true });
                var endsSentence = token.endsWith('.') || token.endsWith('!') || token.endsWith('?');
                appendToken(token, endsSentence);
            }
        }

    </script>

</body>

</html>

0 comments on commit 777df6a

Please sign in to comment.