vllm/examples/gradio_webserver.py

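"""Gradio frontend that streams text completions from a vLLM API server.

Assumes a server exposing a /generate endpoint is already running, e.g. one
started with `python -m vllm.entrypoints.api_server`; point --model-url at it.
"""
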
import argparse
import json

import gradio as gr
import requests


def http_bot(prompt):
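    # Send the prompt to the vLLM server and stream back partial completions.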
    headers = {"User-Agent": "vLLM Client"}
    pload = {
        "prompt": prompt,
        "stream": True,
"max_tokens": 128,
    }
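    # POST with stream=True so the connection stays open for incremental output.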
    response = requests.post(args.model_url,
                             headers=headers,
                             json=pload,
                             stream=True)
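
    # The server delimits the JSON messages in the stream with null bytes.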
    for chunk in response.iter_lines(chunk_size=8192,
                                     decode_unicode=False,
                                     delimiter=b"\0"):
        if chunk:
            data = json.loads(chunk.decode("utf-8"))
            output = data["text"][0]
            yield output


def build_demo():
    with gr.Blocks() as demo:
        gr.Markdown(
"# vLLM text completion demo\n"
        )
        inputbox = gr.Textbox(label="Input",
                              placeholder="Enter text and press ENTER")
        outputbox = gr.Textbox(label="Output",
                               placeholder="Generated result from the model")
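        # Submitting the input box streams http_bot's partial outputs
        # into the output box.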
        inputbox.submit(http_bot, [inputbox], [outputbox])
    return demo
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--port", type=int, default=8001)
parser.add_argument("--model-url", type=str, default="http://localhost:8000/generate")
    args = parser.parse_args()

    demo = build_demo()
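    # Allow up to 100 concurrent workers for queued requests; share=True also
    # creates a public Gradio link in addition to the local server.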
    demo.queue(concurrency_count=100).launch(server_name=args.host,
                                             server_port=args.port,
                                             share=True)
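
# Example usage (a sketch, assuming a vLLM API server is already running):
#   python -m vllm.entrypoints.api_server --host localhost --port 8000
#   python gradio_webserver.py --model-url http://localhost:8000/generate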