Commit

ADDED: Text2image endpoint
AquibPy committed May 8, 2024
1 parent 29a01ca commit a97495a
Showing 3 changed files with 70 additions and 3 deletions.
20 changes: 20 additions & 0 deletions README.md
@@ -114,6 +114,26 @@ percentage, missing keywords, and profile summary.
- **Description:** Provides a simple web interface to interact with the smart ats.
- **Try ATS:** [Smart ATS](https://llm-pgc4.onrender.com/blog_generator_ui)

### 19. Text to Image using Diffusion Models

- **Route:** `/text2image`
- **Description:** This route allows you to generate images using various diffusion models available on Hugging Face.

You can choose from the following models:

- **DreamShaper v7**: A highly capable and versatile text-to-image model, suitable for a wide range of image generation tasks.
- **Animagine XL**: A specialized model for generating high-quality anime-style images from text prompts.
- **Stable Diffusion Base**: The Stable Diffusion XL base model, suitable for general-purpose image generation.
- **Stable Diffusion v2**: Stable Diffusion 2.1, with improved performance and quality compared to earlier releases.

To generate an image, send a POST request to the `/text2image` endpoint with the desired model name and prompt as form fields.

**Request Body (form fields):** `model=DreamShaper_v7`, `prompt=An astronaut riding a horse on the moon`. The response is the generated image in PNG format.
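For illustration, a minimal Python client sketch is shown below; the base URL is a placeholder for wherever the API is deployed, and the output filename is arbitrary.

```python
# Minimal sketch of calling the /text2image endpoint.
import requests

BASE_URL = "http://localhost:8000"  # placeholder: replace with your deployment URL

resp = requests.post(
    f"{BASE_URL}/text2image",
    data={  # form fields, matching the endpoint's Form(...) parameters
        "model": "DreamShaper_v7",
        "prompt": "An astronaut riding a horse on the moon",
    },
    timeout=120,
)
resp.raise_for_status()

# The endpoint returns raw PNG bytes.
with open("generated.png", "wb") as f:
    f.write(resp.content)
```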


## Usage

Each endpoint accepts specific parameters as described in the respective endpoint documentation. Users can make POST requests to these endpoints with the required parameters to perform the desired tasks.
44 changes: 42 additions & 2 deletions api.py
@@ -1,14 +1,17 @@
import os
import uvicorn
from fastapi import FastAPI,Form,File,UploadFile, Request
import io
import requests
from PIL import Image
from fastapi import FastAPI,Form,File,UploadFile, Request ,Response
from fastapi.templating import Jinja2Templates
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse,RedirectResponse,StreamingResponse
from typing import List,Optional
from pydantic import BaseModel
import google.generativeai as genai
from fastapi.middleware.cors import CORSMiddleware
from settings import invoice_prompt,youtube_transcribe_prompt,text2sql_prompt,EMPLOYEE_DB,GEMINI_PRO,GEMINI_PRO_1_5
from settings import invoice_prompt,youtube_transcribe_prompt,text2sql_prompt,EMPLOYEE_DB,GEMINI_PRO,GEMINI_PRO_1_5, diffusion_models
from mongo import MongoDB
from helper_functions import get_qa_chain,get_gemini_response,get_url_doc_qa,extract_transcript_details,\
get_gemini_response_health,get_gemini_pdf,read_sql_query,remove_substrings,questions_generator,groq_pdf,\
@@ -395,6 +398,43 @@ async def ats(resume_pdf: UploadFile = File(...),job_description: str = Form(...
except Exception as e:
return ResponseText(response=f"Error: {str(e)}")

@app.post("/text2image",description=
"""
This API provides access to the following diffusion models for generating images from text prompts.
Models you can use for generating image are:
1. DreamShaper_v7 - A highly capable and versatile text-to-image model, suitable for a wide range of image generation tasks.
2. Animagine_xl - A specialized model for generating high-quality anime-style images from text prompts.
3. Stable_Diffusion_base - The Stable Diffusion XL base model, suitable for general-purpose image generation.
4. Stable_Diffusion_v2 - Stable Diffusion 2.1, with improved performance and quality compared to earlier releases.
""")
def generate_image(prompt: str = Form("Astronaut riding a horse"), model: str = Form("DreamShaper_v7")):
try:
if model in diffusion_models:
def query(payload):
api_key = os.getenv("HUGGINGFACE_API_KEY")
headers = {"Authorization": f"Bearer {api_key}"}
response = requests.post(diffusion_models[model], headers=headers, json=payload)
return response.content

image_bytes = query({"inputs": prompt})
image = Image.open(io.BytesIO(image_bytes))
bytes_io = io.BytesIO()
image.save(bytes_io, format="PNG")
bytes_io.seek(0)
return Response(bytes_io.getvalue(), media_type="image/png")
else:
return ResponseText(response="Invalid model name")
# except requests.exceptions.RequestException as e:
# print(f"Request Exception: {str(e)}")
# return ResponseText(response="Busy server: Please try later")
except Exception as e:
return ResponseText(response="Busy server: Please try later")

if __name__ == '__main__':
import uvicorn
uvicorn.run(app)
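A minimal smoke test sketch for the new route, assuming `HUGGINGFACE_API_KEY` is set and outbound access to the Hugging Face Inference API is available; the test module and test name here are hypothetical.

```python
# Hypothetical smoke test for the /text2image route using FastAPI's TestClient.
# Assumes HUGGINGFACE_API_KEY is set, since the route calls the Hugging Face Inference API.
from fastapi.testclient import TestClient

from api import app

client = TestClient(app)

def test_text2image_returns_png():
    # Send the model name and prompt as form fields, as the endpoint expects.
    response = client.post(
        "/text2image",
        data={"model": "DreamShaper_v7", "prompt": "A lighthouse at sunset"},
    )
    assert response.status_code == 200
    assert response.headers["content-type"] == "image/png"
```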
9 changes: 8 additions & 1 deletion settings.py
@@ -73,4 +73,11 @@
If the context is not helpful, please provide the original questions.
QUESTIONS:
"""
)
)

diffusion_models = {
"DreamShaper_v7" : "https://api-inference.huggingface.co/models/SimianLuo/LCM_Dreamshaper_v7",
"Animagine_xl" : "https://api-inference.huggingface.co/models/cagliostrolab/animagine-xl-3.0",
"Stable_Diffusion_base" : "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0",
"Stable_Diffusion_v2" : "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1",
}
