```python
import os

from vllm import LLM, SamplingParams

# Enable the torch profiler; this can also be set on the command line.
os.environ["VLLM_TORCH_PROFILER_DIR"] = "./vllm_profile"

# Sample prompts.
prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
# Create a sampling params object.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

# Create an LLM.
llm = LLM(model="facebook/opt-125m")

llm.start_profile()

# Generate texts from the prompts. The output is a list of RequestOutput objects
# that contain the prompt, generated text, and other information.
outputs = llm.generate(prompts, sampling_params)

llm.stop_profile()

# Print the outputs.
for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
```
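With `VLLM_TORCH_PROFILER_DIR` set, the `start_profile()` / `stop_profile()` calls bracket the requests to be traced, and the trace files land in `./vllm_profile`. As a quick sanity check, the sketch below shows one way to confirm a trace was written and get a rough sense of its size; it assumes the profiler emitted a standard Chrome-trace JSON (torch's profiler typically writes `*.pt.trace.json`, optionally gzipped, with a generated filename, so the glob pattern here is illustrative rather than exact):

```python
import glob
import gzip
import json

# Find the newest trace file from the run above. The filename pattern is an
# assumption about torch.profiler's output naming, not a vLLM guarantee.
traces = sorted(glob.glob("./vllm_profile/*.json*"))
if traces:
    path = traces[-1]
    opener = gzip.open if path.endswith(".gz") else open
    with opener(path, "rt") as f:
        trace = json.load(f)
    # Chrome-trace files store profiler events under the "traceEvents" key.
    print(f"{path}: {len(trace.get('traceEvents', []))} trace events")
else:
    print("No trace files found in ./vllm_profile")
```

For actual analysis, loading the trace in a viewer that understands the Chrome trace format, such as chrome://tracing or https://ui.perfetto.dev/, is far more practical than parsing it by hand.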