vllm/examples/llm_engine_example.py

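"""Example of driving LLMEngine directly: requests are added to the engine
and decoded step by step via `engine.step()` instead of going through the
high-level `LLM` entrypoint.

Usage sketch (the CLI flags come from `EngineArgs.add_cli_args`; `--model`
and the model name below are just illustrative):

    python examples/llm_engine_example.py --model facebook/opt-125m
"""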
import argparse

from vllm import EngineArgs, LLMEngine, SamplingParams


def main(args: argparse.Namespace):
    # Parse the CLI arguments and initialize the engine.
    engine_args = EngineArgs.from_cli_args(args)
    engine = LLMEngine.from_engine_args(engine_args)

    # Test the following prompts.
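    # Each prompt is paired with its own SamplingParams so that different
    # decoding strategies get exercised: library-default sampling, top-k/top-p
    # sampling with repetition penalties, multiple parallel candidates
    # (n, best_of), and beam search.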
    test_prompts = [
        ("A robot may not injure a human being", SamplingParams()),
        ("To be or not to be,",
         SamplingParams(temperature=0.8, top_k=5, presence_penalty=0.2)),
        ("What is the meaning of life?",
         SamplingParams(n=2, best_of=5, temperature=0.8, top_p=0.95,
                        frequency_penalty=0.1)),
        ("It is only with the heart that one can see rightly",
         SamplingParams(n=3, best_of=3, use_beam_search=True,
                        temperature=0.0)),
    ]

    # Run the engine by calling `engine.step()` manually.
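    # Each call to step() performs one decoding iteration (schedule a batch,
    # execute the model, process the outputs) and returns the newly generated
    # results as RequestOutput objects.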
    request_id = 0
    while True:
        # To test iteration-level scheduling, we add one request at each step.
        if test_prompts:
            prompt, sampling_params = test_prompts.pop(0)
            engine.add_request(str(request_id), prompt, sampling_params)
            request_id += 1

        request_outputs = engine.step()
        for request_output in request_outputs:
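            # A request is reported as finished once all of its sequences have
            # completed, so each request is printed here exactly once.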
            if request_output.finished():
                print(request_output)

        if not (engine.has_unfinished_requests() or test_prompts):
            break


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Demo on using the LLMEngine class directly')
    parser = EngineArgs.add_cli_args(parser)
    args = parser.parse_args()
    main(args)