"""Gradio demo client that streams text completions from a vLLM API server."""
import argparse
import json

import gradio as gr
import requests


def http_bot(prompt):
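    """Stream the text generated for `prompt` from the server at --model-url."""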
    headers = {"User-Agent": "vLLM Client"}
    pload = {
        "prompt": prompt,
        "stream": True,
        "max_tokens": 128,
    }
    # `args` is a module-level global parsed in the __main__ block below.
    response = requests.post(args.model_url,
                             headers=headers,
                             json=pload,
                             stream=True)
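    # The response is read incrementally as null-delimited JSON chunks (hence
    # delimiter=b"\0" below). Assumption: each chunk's "text" list holds the
    # full generation so far, so every yield simply replaces the contents of
    # the output box.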
    for chunk in response.iter_lines(chunk_size=8192,
                                     decode_unicode=False,
                                     delimiter=b"\0"):
        if chunk:
            data = json.loads(chunk.decode("utf-8"))
            output = data["text"][0]
            yield output


def build_demo():
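    """Build the Gradio Blocks UI: an input box streamed into an output box."""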
    with gr.Blocks() as demo:
        gr.Markdown("# vLLM text completion demo\n")
        inputbox = gr.Textbox(label="Input",
                              placeholder="Enter text and press ENTER")
        outputbox = gr.Textbox(label="Output",
                               placeholder="Generated result from the model")
        inputbox.submit(http_bot, [inputbox], [outputbox])
    return demo


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
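    # --host/--port control where this Gradio UI is served; --model-url points
    # at the vLLM server endpoint that http_bot posts prompts to.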
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=8001)
    parser.add_argument("--model-url",
                        type=str,
                        default="http://localhost:8000/generate")
    args = parser.parse_args()

    demo = build_demo()
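    # concurrency_count is the Gradio 3.x queue argument capping how many
    # requests are processed in parallel (it is not accepted by Gradio 4.x).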
    demo.queue(concurrency_count=100).launch(server_name=args.host,
                                             server_port=args.port,
                                             share=True)
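# Usage sketch (assumptions: this file is saved as gradio_webserver.py and a
# vLLM API server exposing /generate is already running on port 8000, e.g.
# started with `python -m vllm.entrypoints.api_server`):
#
#   python gradio_webserver.py --model-url http://localhost:8000/generate
#
# Then open the local or public Gradio URL printed by launch(share=True).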