"""
This example shows how to use vLLM for running offline inference
with the correct prompt format on audio language models.

For most models, the prompt format should follow the corresponding examples
on the HuggingFace model repository.
"""
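
# Example invocations (the script file name used below is an assumption for
# illustration; substitute the actual path of this example in your checkout):
#   python audio_language.py --model-type ultravox --num-audios 1
#   python audio_language.py --model-type qwen2_audio --num-prompts 2 --num-audios 2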
from transformers import AutoTokenizer

from vllm import LLM, SamplingParams
from vllm.assets.audio import AudioAsset
from vllm.utils import FlexibleArgumentParser

audio_assets = [AudioAsset("mary_had_lamb"), AudioAsset("winning_call")]
question_per_audio_count = {
    0: "What is 1+1?",
    1: "What is recited in the audio?",
    2: "What sport and what nursery rhyme are referenced?"
}

# NOTE: The default `max_num_seqs` and `max_model_len` may result in OOM on
# lower-end GPUs.
# Unless specified, these settings have been tested to work on a single L4.


# Ultravox 0.3
def run_ultravox(question: str, audio_count: int):
    model_name = "fixie-ai/ultravox-v0_3"

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    messages = [{
        'role': 'user',
        'content': "<|audio|>\n" * audio_count + question
    }]
    prompt = tokenizer.apply_chat_template(messages,
                                           tokenize=False,
                                           add_generation_prompt=True)

    llm = LLM(model=model_name,
              max_model_len=4096,
              max_num_seqs=5,
              trust_remote_code=True,
              limit_mm_per_prompt={"audio": audio_count})
    stop_token_ids = None
    return llm, prompt, stop_token_ids


# Qwen2-Audio
def run_qwen2_audio(question: str, audio_count: int):
    model_name = "Qwen/Qwen2-Audio-7B-Instruct"

    llm = LLM(model=model_name,
              max_model_len=4096,
              max_num_seqs=5,
              limit_mm_per_prompt={"audio": audio_count})

    audio_in_prompt = "".join([
        f"Audio {idx+1}: "
        f"<|audio_bos|><|AUDIO|><|audio_eos|>\n" for idx in range(audio_count)
    ])

    prompt = ("<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
              "<|im_start|>user\n"
              f"{audio_in_prompt}{question}<|im_end|>\n"
              "<|im_start|>assistant\n")
    stop_token_ids = None
    return llm, prompt, stop_token_ids


# MiniCPM-o
def run_minicpmo(question: str, audio_count: int):
    model_name = "openbmb/MiniCPM-o-2_6"
    tokenizer = AutoTokenizer.from_pretrained(model_name,
                                              trust_remote_code=True)
    llm = LLM(model=model_name,
              trust_remote_code=True,
              max_model_len=4096,
              max_num_seqs=5,
              limit_mm_per_prompt={"audio": audio_count})

    stop_tokens = ['<|im_end|>', '<|endoftext|>']
    stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens]

    audio_placeholder = "(<audio>./</audio>)" * audio_count
    audio_chat_template = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n<|spk_bos|><|spk|><|spk_eos|><|tts_bos|>' }}{% endif %}"  # noqa: E501
    messages = [{
        'role': 'user',
        'content': f'{audio_placeholder}\n{question}'
    }]
    prompt = tokenizer.apply_chat_template(messages,
                                           tokenize=False,
                                           add_generation_prompt=True,
                                           chat_template=audio_chat_template)
    return llm, prompt, stop_token_ids


model_example_map = {
    "ultravox": run_ultravox,
    "qwen2_audio": run_qwen2_audio,
    "minicpmo": run_minicpmo
}


def main(args):
    model = args.model_type
    if model not in model_example_map:
        raise ValueError(f"Model type {model} is not supported.")

    audio_count = args.num_audios
    llm, prompt, stop_token_ids = model_example_map[model](
        question_per_audio_count[audio_count], audio_count)

    # Set temperature to 0.2 so that outputs can differ across identical
    # prompts when running batch inference.
    sampling_params = SamplingParams(temperature=0.2,
                                     max_tokens=64,
                                     stop_token_ids=stop_token_ids)

    mm_data = {}
    if audio_count > 0:
        mm_data = {
            "audio": [
                asset.audio_and_sample_rate
                for asset in audio_assets[:audio_count]
            ]
        }

    assert args.num_prompts > 0
    inputs = {"prompt": prompt, "multi_modal_data": mm_data}
    if args.num_prompts > 1:
        # Batch inference
        inputs = [inputs] * args.num_prompts

    outputs = llm.generate(inputs, sampling_params=sampling_params)

    for o in outputs:
        generated_text = o.outputs[0].text
        print(generated_text)


if __name__ == "__main__":
    parser = FlexibleArgumentParser(
        description='Demo on using vLLM for offline inference with '
        'audio language models')
    parser.add_argument('--model-type',
                        '-m',
                        type=str,
                        default="ultravox",
                        choices=model_example_map.keys(),
                        help='Huggingface "model_type".')
    parser.add_argument('--num-prompts',
                        type=int,
                        default=1,
                        help='Number of prompts to run.')
    parser.add_argument("--num-audios",
                        type=int,
                        default=1,
                        choices=[0, 1, 2],
                        help="Number of audio items per prompt.")

    args = parser.parse_args()
    main(args)