# SPDX-License-Identifier: Apache-2.0
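"""Offline embedding example.

Loads an embedding model (intfloat/e5-mistral-7b-instruct) with vLLM,
embeds a few sample prompts, and prints a truncated view of each vector.
"""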
from vllm import LLM

# Sample prompts.
prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]

# Create an LLM.
# Pass task="embed" so the model is loaded in embedding mode rather than
# for generation.
model = LLM(
    model="intfloat/e5-mistral-7b-instruct",
    task="embed",
    # enforce_eager=True skips CUDA graph capture: faster startup at
    # some cost to throughput.
    enforce_eager=True,
)

# Generate embeddings. The output is a list of EmbeddingRequestOutput
# objects; each holds its embedding vector in `outputs.embedding`.
outputs = model.embed(prompts)

# Print the outputs.
for prompt, output in zip(prompts, outputs):
    embeds = output.outputs.embedding
    # Truncate long vectors to the first 16 values to keep the printout
    # readable.
    embeds_trimmed = (
        (str(embeds[:16])[:-1] + ", ...]") if len(embeds) > 16 else embeds
    )
    print(f"Prompt: {prompt!r} | "
          f"Embeddings: {embeds_trimmed} (size={len(embeds)})")