vllm/examples/openai_completion_client.py

import openai
# Modify OpenAI's API key and API base to use vLLM's API server.
openai.api_key = "EMPTY"
openai.api_base = "http://localhost:8000/v1"
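# Note: this client assumes a vLLM OpenAI-compatible API server is already
# running locally. In vLLM releases of this era it could typically be started
# with something like `python -m vllm.entrypoints.openai.api_server --model <model-name>`
# (the exact command and flags are an assumption here, not part of this file).
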
# List models API
models = openai.Model.list()
print("Models:", models)
model = models["data"][0]["id"]
# Completion API
stream = False
completion = openai.Completion.create(
    model=model,
    prompt="A robot may not injure a human being",
    echo=False,
    n=2,
    stream=stream,
    logprobs=3)
print("Completion results:")
if stream:
    for c in completion:
        print(c)
else:
    print(completion)
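
# A minimal sketch (not part of the original example) of pulling the generated
# text out of the non-streaming response, assuming the pre-1.0 openai response
# layout used above, where each choice exposes its output under the "text" key.
if not stream:
    for choice in completion["choices"]:
        print("Generated text:", choice["text"])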