[Dev] Add vllm example
lshmouse committed Oct 9, 2024
1 parent b85a994 commit ea0bdb7
Showing 4 changed files with 38 additions and 0 deletions.
1 change: 1 addition & 0 deletions bazel/python/requirements.txt
@@ -3,3 +3,4 @@ Werkzeug==2.2.2
Flask==2.0.2
kubernetes==27.2.0
depyf
vllm
11 changes: 11 additions & 0 deletions experimental/vllm_example/BUILD
@@ -0,0 +1,11 @@
load("@pip//:requirements.bzl", "requirement")
load("@rules_python//python:defs.bzl", "py_binary", "py_library")

py_binary(
    name = "simple_inference",
    srcs = ["simple_inference.py"],
    main = "simple_inference.py",
    deps = [
        requirement("vllm"),
    ],
)
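
With this target defined, the example would typically be launched through Bazel. The invocation below is an illustrative sketch, assuming the @pip repository above resolves the vllm wheel for the host platform:

bazel run //experimental/vllm_example:simple_inference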
4 changes: 4 additions & 0 deletions experimental/vllm_example/README.md
@@ -0,0 +1,4 @@
## vllm

### References
- [vllm](https://github.com/vllm/vllm)
22 changes: 22 additions & 0 deletions experimental/vllm_example/simple_inference.py
@@ -0,0 +1,22 @@
from vllm import LLM, SamplingParams

# Sample prompts.
prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
# Create a sampling params object.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

# Create an LLM.
llm = LLM(model="facebook/opt-125m")
# Generate texts from the prompts. The output is a list of RequestOutput objects
# that contain the prompt, generated text, and other information.
outputs = llm.generate(prompts, sampling_params)
# Print the outputs.
for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
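
When run (for example via the Bazel target above), the script downloads facebook/opt-125m from the Hugging Face Hub on first use, so network access is assumed, and prints one sampled completion per prompt. To bound completion length, SamplingParams also accepts a max_tokens argument, e.g. SamplingParams(temperature=0.8, top_p=0.95, max_tokens=64).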
