From 6143a892eae0249c27afd028496ac5ca597f7460 Mon Sep 17 00:00:00 2001
From: AquibPy
Date: Sat, 4 May 2024 18:22:24 +0530
Subject: [PATCH] REMOVED: LLAMA2 from groq

---
 api.py           | 6 +++---
 requirements.txt | 7 ++++---
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/api.py b/api.py
index f5406a6..7e81fd2 100644
--- a/api.py
+++ b/api.py
@@ -258,7 +258,7 @@ async def pdf_questions_generator(pdf: UploadFile = File(...)):
         return ResponseText(response=f"Error: {str(e)}")


 @app.post("/chat_groq", description= """This route uses groq for faster response using Language Processing Unit(LPU).
-          \n In model input default is mixtral-8x7b-32768 but you can choose llama2-70b-4096, gemma-7b-it, llama3-70b-8192 and llama3-8b-8192.
+          \n In model input default is mixtral-8x7b-32768 but you can choose gemma-7b-it, llama3-70b-8192 and llama3-8b-8192.
           \n conversational_memory_length ranges from 1 to 10. It keeps a list of the interactions of the conversation over time. It only uses the last K interactions """)
 async def groq_chatbot(question: str = Form(...), model: Optional[str] = Form('mixtral-8x7b-32768'),
@@ -313,9 +313,9 @@ async def groq_text_summary(input_text: str = Form(...)):
         return ResponseText(response=f"Error: {str(e)}")

 @app.post("/RAG_PDF_Groq",description="The endpoint uses the pdf and give the answer based on the prompt provided using groq\
-          In model input default is mixtral-8x7b-32768 but you can choose llama2-70b-4096, gemma-7b-it, llama3-70b-8192 and llama3-8b-8192.")
+          In model input default is mixtral-8x7b-32768 but you can choose gemma-7b-it, llama3-70b-8192 and llama3-8b-8192.")
 async def talk_pdf_groq(pdf: UploadFile = File(...),prompt: str = Form(...),
-                        model: Optional[str] = Form('llama2-70b-4096')):
+                        model: Optional[str] = Form('llama3-70b-8192')):
     try:
         rag_chain = groq_pdf(pdf.file,model)
         out = rag_chain.invoke(prompt)
diff --git a/requirements.txt b/requirements.txt
index aa8b77a..1cd81bf 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,10 +10,11 @@ python-multipart
 PyPDF2
 unstructured
 youtube_transcript_api
-# llama-index
-# pypdf
+llama-index
+pypdf
 pymongo
 pytest
 groq
 langchain-groq
-jinja2
\ No newline at end of file
+jinja2
+tiktoken
\ No newline at end of file
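
Usage note (not part of the patch): after this change the Groq routes accept gemma-7b-it, llama3-70b-8192, and llama3-8b-8192, and /RAG_PDF_Groq defaults to llama3-70b-8192 instead of llama2-70b-4096. The snippet below is a minimal client sketch for the updated route; the base URL, PDF file name, and prompt are illustrative assumptions only.

# Minimal client sketch for the updated /RAG_PDF_Groq route.
# Assumptions (not part of the patch): the API is served at http://localhost:8000
# and "sample.pdf" is any local PDF file.
import requests

with open("sample.pdf", "rb") as f:
    resp = requests.post(
        "http://localhost:8000/RAG_PDF_Groq",
        files={"pdf": ("sample.pdf", f, "application/pdf")},
        data={
            "prompt": "Summarize the key points of this document.",
            "model": "llama3-70b-8192",  # llama2-70b-4096 is no longer offered
        },
    )
print(resp.json())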