added stable-diffusion pipeline (#266)
* added stable-diffusion pipeline
* readme file added
1 parent 06908de · commit 0bd5335
Showing 14 changed files with 680 additions and 0 deletions.
@@ -0,0 +1,23 @@
# Stable Diffusion Image Generation Pipeline

This repository contains a Python function `compute` to generate images based on textual descriptions using the Stable Diffusion model. The function utilizes the `diffusers` library and GPU acceleration for efficient image generation.

## Features

- Generate high-quality images from textual prompts.
- Supports inference using the Euler Discrete Scheduler for diffusion.
- Utilizes GPU acceleration with `float16` for enhanced performance.

---
## Usage
The `compute` function accepts a textual description (`prompt`) and the number of inference steps, and generates an image. The generated image is saved as `result.png` in the current working directory.

## Function Parameters
- `prompt` (str): Text description of the desired image.
- `inference_steps` (int): Number of diffusion steps for the generation process.

## Return Value
The function returns a dictionary containing the path to the generated image: `{"generated_image_path": "result.png"}`.
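A minimal usage sketch (assuming `computations.py` is importable from the working directory, the packages from the Dockerfile are installed, and a CUDA-capable GPU is available; the model weights are downloaded on first use):

```python
from computations import compute

# Example prompt; more inference steps generally improve quality at the cost of runtime.
result = compute("a photograph of an astronaut riding a horse", inference_steps=25)
print(result["generated_image_path"])  # -> "result.png"
```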
## Model used
- https://huggingface.co/stabilityai/stable-diffusion-2
8 changes: 8 additions & 0 deletions
frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/Dockerfile
@@ -0,0 +1,8 @@
FROM python:3.9

WORKDIR /app

RUN pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
RUN pip install diffusers transformers accelerate scipy safetensors

COPY computations.py .
25 changes: 25 additions & 0 deletions
frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/chatHistory.json
@@ -0,0 +1,25 @@
{
  "index": 0,
  "history": [
    {
      "timestamp": 1731600898832,
      "prompt": "Code Template",
      "response": "def compute(in1, in2):\r\n \"\"\"A textual description of the compute function.\r\n\r\n Inputs:\r\n in1 (all): Textual description of in1\r\n in2 (all): Textual description of in2\r\n\r\n Outputs:\r\n out1 (all): Textual description of out1\r\n out2 (all): Textual description of out2\r\n\r\n Requirements:\r\n \"\"\"\r\n # some code\r\n out1 = 2 * in1\r\n out2 = \"This is the in2 string:\" + in2\r\n\r\n return {\"out1\": out1, \"out2\": out2}\r\n\r\n\r\ndef test():\r\n \"\"\"Test the compute function.\"\"\"\r\n\r\n print(\"Running test\")\r\n"
    },
    {
      "timestamp": 1731601223108,
      "prompt": "Manual Edit of computations.py",
      "response": "def compute(prompt, inference_steps):\n \"\"\"\n prompt: text description of image\n inference_steps: difussion steps\n\n output: generated image path\n \"\"\"\n from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler\n\n model_id = \"stabilityai/stable-diffusion-2\"\n \n # Use the Euler scheduler here instead\n scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder=\"scheduler\")\n pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16)\n pipe = pipe.to(\"cuda\")\n \n image = pipe(prompt, num_inference_steps=inference_steps).images[0]\n \n image.save(\"result.png\")\n\n return {\"generated_image_path\": \"result.png\"}\n\n\ndef test():\n \"\"\"Test the compute function.\"\"\"\n\n print(\"Running test\")\n"
    },
    {
      "timestamp": 1731601351634,
      "prompt": "Manual Edit of computations.py",
      "response": "def compute(prompt, inference_steps):\n \"\"\"\n prompt: text description of image\n inference_steps: difussion steps\n\n output: generated image path\n\n use GPU to run this pipeline, we are using float16 dtype\n \"\"\"\n from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler\n\n model_id = \"stabilityai/stable-diffusion-2\"\n \n # Use the Euler scheduler here instead\n scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder=\"scheduler\")\n pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16)\n pipe = pipe.to(\"cuda\")\n \n image = pipe(prompt, num_inference_steps=inference_steps).images[0]\n \n image.save(\"result.png\")\n\n return {\"generated_image_path\": \"result.png\"}\n\n\ndef test():\n \"\"\"Test the compute function.\"\"\"\n\n print(\"Running test\")\n"
    },
    {
      "timestamp": 1731603317095,
      "prompt": "Manual Edit of computations.py",
      "response": "def compute(prompt, inference_steps):\n \"\"\"\n prompt: text description of image\n inference_steps: difussion steps\n\n output: generated image path\n\n use GPU to run this pipeline, we are using float16 dtype\n \"\"\"\n from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler\n import torch \n model_id = \"stabilityai/stable-diffusion-2\"\n \n # Use the Euler scheduler here instead\n scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder=\"scheduler\")\n pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16)\n pipe = pipe.to(\"cuda\")\n \n image = pipe(prompt, num_inference_steps=inference_steps).images[0]\n \n image.save(\"result.png\")\n\n return {\"generated_image_path\": \"result.png\"}\n\n\ndef test():\n \"\"\"Test the compute function.\"\"\"\n\n print(\"Running test\")\n"
    }
  ]
}
31 changes: 31 additions & 0 deletions
frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/computations.py
@@ -0,0 +1,31 @@
def compute(prompt, inference_steps):
    """
    Pipeline to generate an image from a textual description. Takes a text prompt and the number of inference steps as inputs and returns the path of the generated image.

    prompt: text description of the image
    inference_steps: number of diffusion steps

    output: generated image path

    This pipeline runs on the GPU and uses the float16 dtype.
    """
    from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
    import torch

    model_id = "stabilityai/stable-diffusion-2"

    # Use the Euler scheduler here instead of the default one
    scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
    pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16)
    pipe = pipe.to("cuda")

    # Run the diffusion loop and keep the first generated image
    image = pipe(prompt, num_inference_steps=inference_steps).images[0]

    image.save("result.png")

    return {"generated_image_path": "result.png"}


def test():
    """Test the compute function."""

    print("Running test")
Binary file added: +16.7 KB
frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/cover-image.png
Empty file.
92 changes: 92 additions & 0 deletions
frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/specs.json
@@ -0,0 +1,92 @@
{
  "information": {
    "id": "stable-diffusion-2",
    "name": "Stable Diffusion 2",
    "description": "Pipeline to generate the images based on textual description. takes text and inference step as an inputs and return the genreated image.\nprompt: text description of image\ninference_steps: difussion steps\n\noutput: generated image path\n\nuse GPU to run this pipeline, we are using float16 dtype",
    "system_versions": [
      "0.1"
    ],
    "block_version": "block version number",
    "block_source": "core/blocks/stable-diffusion-2",
    "block_type": "compute"
  },
  "inputs": {
    "prompt": {
      "type": "Any",
      "connections": [
        {
          "block": "parameter-hhz20qr4vytg",
          "variable": "parameter"
        }
      ]
    },
    "inference_steps": {
      "type": "Any",
      "connections": [
        {
          "block": "parameter-cuha2229jdsa",
          "variable": "parameter"
        }
      ]
    }
  },
  "outputs": {
    "generated_image_path": {
      "type": "Any",
      "connections": [
        {
          "block": "view-images-gwly233ys03y",
          "variable": "image_paths_view"
        }
      ]
    }
  },
  "action": {
    "container": {
      "image": "stable-diffusion-2",
      "version": "stable-diffusion-2-1k08gnfkag96",
      "command_line": [
        "python",
        "-u",
        "entrypoint.py"
      ]
    },
    "resources": {
      "cpu": {
        "request": "",
        "limit": ""
      },
      "memory": {
        "request": "",
        "limit": ""
      },
      "gpu": {
        "count": 1
      }
    }
  },
  "views": {
    "node": {
      "active": "True or False",
      "title_bar": {
        "background_color": "#6b2be0"
      },
      "preview": {},
      "html": "",
      "pos_x": "786",
      "pos_y": "188",
      "pos_z": "999",
      "behavior": "modal",
      "order": {
        "input": [
          "prompt",
          "inference_steps"
        ],
        "output": [
          "generated_image_path"
        ]
      }
    }
  },
  "events": {}
}
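The `action.command_line` in specs.json launches `entrypoint.py`, which is not part of this commit. Purely to illustrate how the declared inputs and outputs line up with `compute()`, a hypothetical entrypoint might look like the sketch below; the file names and JSON layout here are assumptions, not the platform's actual contract:

```python
# hypothetical_entrypoint.py -- illustrative only; the real entrypoint.py is not in this diff.
import json

from computations import compute

if __name__ == "__main__":
    # Assume the two declared inputs arrive as a JSON file of parameter values.
    with open("inputs.json") as f:
        params = json.load(f)

    outputs = compute(params["prompt"], int(params["inference_steps"]))

    # Persist the declared output so a downstream block (e.g. the image viewer) can read it.
    with open("outputs.json", "w") as f:
        json.dump(outputs, f)
```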