Skip to content

Commit

Permalink
ADDED: GEMMA 2 model
Browse files Browse the repository at this point in the history
  • Loading branch information
AquibPy committed Jul 4, 2024
1 parent 5cffc53 commit e450487
Show file tree
Hide file tree
Showing 2 changed files with 7 additions and 7 deletions.
12 changes: 6 additions & 6 deletions api.py
Original file line number Diff line number Diff line change
Expand Up @@ -407,10 +407,10 @@ async def pdf_questions_generator(pdf: UploadFile = File(...)):
return ResponseText(response=f"Error: {str(e)}")

@app.post("/chat_groq", description= """This route uses groq for faster response using Language Processing Unit(LPU).
\n In model input default is mixtral-8x7b-32768 but you can choose gemma-7b-it, llama3-70b-8192 and llama3-8b-8192.
\n In model input default is gemma2-9b-it but you can choose gemma-7b-it, mixtral-8x7b-32768, llama3-70b-8192 and llama3-8b-8192.
\n conversational_memory_length ranges from 1 to 10. It keeps a list of the interactions of the conversation over time.
It only uses the last K interactions """)
async def groq_chatbot(question: str = Form(...), model: Optional[str] = Form('mixtral-8x7b-32768'),
async def groq_chatbot(question: str = Form(...), model: Optional[str] = Form('gemma2-9b-it'),
conversational_memory_length: Optional[int] = Form(5)):
try:
memory=ConversationBufferWindowMemory(k=conversational_memory_length)
Expand Down Expand Up @@ -462,7 +462,7 @@ async def groq_text_summary(input_text: str = Form(...)):
return ResponseText(response=f"Error: {str(e)}")

@app.post("/RAG_PDF_Groq",description="The endpoint uses the pdf and give the answer based on the prompt provided using groq\
In model input default is mixtral-8x7b-32768 but you can choose gemma-7b-it, llama3-70b-8192 and llama3-8b-8192.")
In model input default is mixtral-8x7b-32768 but you can choose gemma-7b-it, gemma2-9b-it, llama3-70b-8192 and llama3-8b-8192.")
async def talk_pdf_groq(pdf: UploadFile = File(...),prompt: str = Form(...),
model: Optional[str] = Form('llama3-70b-8192')):
try:
Expand Down Expand Up @@ -558,7 +558,7 @@ async def ats(resume_pdf: UploadFile = File(...), job_description: str = Form(..
return ResponseText(response=f"Error: {str(e)}")

@app.post("/advance_rag_llama_index",description="The endpoint build a Router that can choose whether to do vector search or summarization\
In model input default is llama3-70b-8192 but you can choose mixtral-8x7b-32768, gemma-7b-it and llama3-8b-8192.")
In model input default is llama3-70b-8192 but you can choose mixtral-8x7b-32768, gemma-7b-it, gemma2-9b-it and llama3-8b-8192.")
async def llama_index_rag(pdf: UploadFile = File(...),question: str = Form(...),
model: Optional[str] = Form('llama3-70b-8192')):
try:
Expand Down Expand Up @@ -998,9 +998,9 @@ async def run_job_agent(request:Request,
Upload a CSV file and describe your machine learning problem.
The API will process the file and input to provide problem definition, data assessment, model recommendation, and starter code.
NOTE: In model input default is llama3-70b-8192 but you can choose mixtral-8x7b-32768, gemma-7b-it and llama3-8b-8192."
NOTE: In model input default is gemma2-9b-it but you can choose mixtral-8x7b-32768, gemma-7b-it, llama3-70b-8192 and llama3-8b-8192."
""")
async def ml_crew(file: UploadFile = File(...),user_question: str = Form(...),model: str = Form("llama3-70b-8192"),token: str = Depends(oauth2_scheme)):
async def ml_crew(file: UploadFile = File(...),user_question: str = Form(...),model: str = Form("gemma2-9b-it"),token: str = Depends(oauth2_scheme)):
try:
payload = jwt.decode(token, os.getenv("TOKEN_SECRET_KEY"), algorithms=[settings.ALGORITHM])
email = payload.get("sub")
Expand Down
2 changes: 1 addition & 1 deletion templates/index.html
Original file line number Diff line number Diff line change
Expand Up @@ -459,7 +459,7 @@ <h1>Chat with LLAMA 3 🦙🦙🦙</h1>
"llama3-70b-8192": "LLAMA 3-70b 🦙🦙🦙",
"llama3-8b-8192": "LLAMA 3-8b 🦙🦙🦙",
"mixtral-8x7b-32768": "MIXTRAL 8x7b",
// "llama2-70b-4096": "LLAMA 2-70b 🦙🦙",
"gemma2-9b-it": "GEMMA 2 9b-it",
"gemma-7b-it": "GEMMA 7b-it"
};
var selectedModelText = modelOptions[selectedModel];
Expand Down

0 comments on commit e450487

Please sign in to comment.