[Misc] refactor examples series - lmcache (#16758)

Signed-off-by: reidliu41 <reid201711@gmail.com>
Co-authored-by: reidliu41 <reid201711@gmail.com>
Reid 2025-04-17 19:02:35 +08:00 committed by GitHub
parent 207da28186
commit 99ed526101


@@ -3,9 +3,12 @@
 This file demonstrates the example usage of cpu offloading
 with LMCache.
 
-Note that `pip install lmcache` is needed to run this example.
-Learn more about LMCache in https://github.com/LMCache/LMCache.
+Note that `lmcache` is needed to run this example.
+Requirements: Linux, Python: 3.10 or higher, CUDA: 12.1
+Learn more about LMCache environment setup, please refer to:
+https://docs.lmcache.ai/getting_started/installation.html
 """
+import contextlib
 import os
 import time
 
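The updated docstring spells out the runtime prerequisites (Linux, Python 3.10 or higher, CUDA 12.1, and an lmcache install). As a rough illustration only, a preflight check along those lines could look like the sketch below; the helper name check_prerequisites and the exact checks are illustrative and not part of this commit:

import importlib.util
import sys


def check_prerequisites():
    # Mirrors the prerequisites stated in the example's docstring.
    if sys.version_info < (3, 10):
        raise RuntimeError("This example expects Python 3.10 or higher.")
    # lmcache must be importable; see the installation guide linked above.
    if importlib.util.find_spec("lmcache") is None:
        raise RuntimeError(
            "lmcache is not installed, see "
            "https://docs.lmcache.ai/getting_started/installation.html")
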
@@ -15,51 +18,83 @@ from lmcache.integration.vllm.utils import ENGINE_NAME
 from vllm import LLM, SamplingParams
 from vllm.config import KVTransferConfig
 
-# LMCache-related environment variables
-# Use experimental features in LMCache
-os.environ["LMCACHE_USE_EXPERIMENTAL"] = "True"
-# LMCache is set to use 256 tokens per chunk
-os.environ["LMCACHE_CHUNK_SIZE"] = "256"
-# Enable local CPU backend in LMCache
-os.environ["LMCACHE_LOCAL_CPU"] = "True"
-# Set local CPU memory limit to 5.0 GB
-os.environ["LMCACHE_MAX_LOCAL_CPU_SIZE"] = "5.0"
-
-# This example script runs two requests with a shared prefix.
-shared_prompt = "Hello, how are you?" * 1000
-first_prompt = [
-    shared_prompt + "Hello, my name is",
-]
-second_prompt = [
-    shared_prompt + "Tell me a very long story",
-]
-
-sampling_params = SamplingParams(temperature=0, top_p=0.95, max_tokens=10)
-
-ktc = KVTransferConfig.from_cli(
-    '{"kv_connector":"LMCacheConnector", "kv_role":"kv_both"}')
-# Set GPU memory utilization to 0.8 for an A40 GPU with 40GB
-# memory. Reduce the value if your GPU has less memory.
-# Note that LMCache is not compatible with chunked prefill for now.
-llm = LLM(model="mistralai/Mistral-7B-Instruct-v0.2",
-          kv_transfer_config=ktc,
-          max_model_len=8000,
-          enable_chunked_prefill=False,
-          gpu_memory_utilization=0.8)
-
-outputs = llm.generate(first_prompt, sampling_params)
-for output in outputs:
-    generated_text = output.outputs[0].text
-    print(f"Generated text: {generated_text!r}")
-print("First request done.")
-
-time.sleep(1)
-
-outputs = llm.generate(second_prompt, sampling_params)
-for output in outputs:
-    generated_text = output.outputs[0].text
-    print(f"Generated text: {generated_text!r}")
-print("Second request done.")
-
-# Clean up lmcache backend
-LMCacheEngineBuilder.destroy(ENGINE_NAME)
+
+def setup_environment_variables():
+    # LMCache-related environment variables
+    # Use experimental features in LMCache
+    os.environ["LMCACHE_USE_EXPERIMENTAL"] = "True"
+    # LMCache is set to use 256 tokens per chunk
+    os.environ["LMCACHE_CHUNK_SIZE"] = "256"
+    # Enable local CPU backend in LMCache
+    os.environ["LMCACHE_LOCAL_CPU"] = "True"
+    # Set local CPU memory limit to 5.0 GB
+    os.environ["LMCACHE_MAX_LOCAL_CPU_SIZE"] = "5.0"
+
+
+@contextlib.contextmanager
+def build_llm_with_lmcache():
+    ktc = KVTransferConfig.from_cli(
+        '{"kv_connector":"LMCacheConnector", "kv_role":"kv_both"}')
+    # Set GPU memory utilization to 0.8 for an A40 GPU with 40GB
+    # memory. Reduce the value if your GPU has less memory.
+    # Note that LMCache is not compatible with chunked prefill for now.
+    llm = LLM(model="mistralai/Mistral-7B-Instruct-v0.2",
+              kv_transfer_config=ktc,
+              max_model_len=8000,
+              enable_chunked_prefill=False,
+              gpu_memory_utilization=0.8)
+
+    try:
+        yield llm
+    finally:
+        # Clean up lmcache backend
+        LMCacheEngineBuilder.destroy(ENGINE_NAME)
+
+
+def print_output(
+    llm: LLM,
+    prompt: list[str],
+    sampling_params: SamplingParams,
+    req_str: str,
+):
+    start = time.time()
+    outputs = llm.generate(prompt, sampling_params)
+    print("-" * 50)
+    for output in outputs:
+        generated_text = output.outputs[0].text
+        print(f"Generated text: {generated_text!r}")
+    print(f"Generation took {time.time() - start:.2f} seconds, "
+          f"{req_str} request done.")
+    print("-" * 50)
+
+
+def main():
+    setup_environment_variables()
+
+    with build_llm_with_lmcache() as llm:
+
+        # This example script runs two requests with a shared prefix.
+        # Define the shared prompt and specific prompts
+        shared_prompt = "Hello, how are you?" * 1000
+        first_prompt = [
+            shared_prompt + "Hello, my name is",
+        ]
+        second_prompt = [
+            shared_prompt + "Tell me a very long story",
+        ]
+
+        sampling_params = SamplingParams(temperature=0,
+                                         top_p=0.95,
+                                         max_tokens=10)
+
+        # Print the first output
+        print_output(llm, first_prompt, sampling_params, "first")
+
+        time.sleep(1)
+
+        # print the second output
+        print_output(llm, second_prompt, sampling_params, "second")
+
+
+if __name__ == "__main__":
+    main()
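
Since the refactor exposes setup_environment_variables, build_llm_with_lmcache, and print_output as reusable functions, another script could drive them with its own prompts. A minimal reuse sketch, assuming the example file is importable as a module named cpu_offload_lmcache (the module name and import path are assumptions, not part of the commit):

from vllm import SamplingParams

# Hypothetical import path; adjust to wherever the example file lives.
from cpu_offload_lmcache import (build_llm_with_lmcache, print_output,
                                 setup_environment_variables)


def run_custom_prompt():
    # Environment variables must be set before the LLM (and its LMCache
    # connector) is constructed, matching the order used in main().
    setup_environment_variables()
    with build_llm_with_lmcache() as llm:
        params = SamplingParams(temperature=0, top_p=0.95, max_tokens=10)
        print_output(llm, ["Hello, my name is"], params, "custom")
        # The LMCache backend is destroyed when the context manager
        # exits, even if generation raises.


if __name__ == "__main__":
    run_custom_prompt()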