[Bugfix] Make the deviceprofiler include LoRA memory. (#14469)
Signed-off-by: Jee Jee Li <pandaleefree@gmail.com>
parent c908a07f57
commit b8b0ccbd2d
@@ -1111,25 +1111,20 @@ class GPUModelRunnerBase(ModelRunnerBase[TModelInputForGPU]):
         with DeviceMemoryProfiler(self.device) as m:
             time_before_load = time.perf_counter()
             self.model = get_model(vllm_config=self.vllm_config)
-            time_after_load = time.perf_counter()
-
-        self.model_memory_usage = m.consumed_memory
-        logger.info("Model loading took %.4f GB and %.6f seconds",
-                    self.model_memory_usage / float(2**30),
-                    time_after_load - time_before_load)
-
             if self.lora_config:
                 assert supports_lora(
                     self.model
                 ), f"{self.model.__class__.__name__} does not support LoRA yet."
 
                 if supports_multimodal(self.model):
-                    logger.warning("Regarding multimodal models, vLLM currently "
+                    logger.warning(
+                        "Regarding multimodal models, vLLM currently "
                         "only supports adding LoRA to language model.")
-                # It's necessary to distinguish between the max_position_embeddings
-                # of VLMs and LLMs.
+                # It's necessary to distinguish between the
+                # max_position_embeddings of VLMs and LLMs.
                 if hasattr(self.model.config, "max_position_embeddings"):
-                    max_pos_embeddings = self.model.config.max_position_embeddings
+                    max_pos_embeddings = (
+                        self.model.config.max_position_embeddings)
                 else:
                     max_pos_embeddings = (
                         self.model.config.text_config.max_position_embeddings)
@@ -1145,7 +1140,12 @@ class GPUModelRunnerBase(ModelRunnerBase[TModelInputForGPU]):
                     max_position_embeddings=max_pos_embeddings,
                 )
                 self.model = self.lora_manager.create_lora_manager(self.model)
 
+        time_after_load = time.perf_counter()
+        self.model_memory_usage = m.consumed_memory
+        logger.info("Model loading took %.4f GB and %.6f seconds",
+                    self.model_memory_usage / float(2**30),
+                    time_after_load - time_before_load)
         if self.prompt_adapter_config:
             self.prompt_adapter_manager = LRUCacheWorkerPromptAdapterManager(
                 self.scheduler_config.max_num_seqs,
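
Why this placement matters: DeviceMemoryProfiler is a context manager that snapshots allocated device memory on entry and computes the delta on exit, so only allocations made inside the `with` block are counted, and `consumed_memory` is only final after the block closes. Moving the LoRA manager creation inside the block, and the read of `m.consumed_memory` after it, is what makes the reported figure include LoRA memory. Below is a minimal sketch of that pattern, assuming a CUDA device; `_MemoryProfilerSketch` and the dummy tensors are illustrative stand-ins, not vLLM's actual implementation.

    import time

    import torch


    class _MemoryProfilerSketch:
        """Illustrative stand-in for a device memory profiler: snapshot
        allocated memory on entry, record the delta on exit."""

        def __init__(self, device: torch.device) -> None:
            self.device = device
            self.consumed_memory = 0

        def __enter__(self):
            torch.cuda.synchronize(self.device)
            self._baseline = torch.cuda.memory_allocated(self.device)
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            torch.cuda.synchronize(self.device)
            # Set only on exit, which is why the patch reads
            # m.consumed_memory after the `with` block closes.
            self.consumed_memory = (
                torch.cuda.memory_allocated(self.device) - self._baseline)


    device = torch.device("cuda:0")
    with _MemoryProfilerSketch(device) as m:
        time_before_load = time.perf_counter()
        # Dummy allocations standing in for the base model weights and
        # the LoRA buffers; both now land inside the profiled region.
        base_weights = torch.empty(256 << 20, dtype=torch.uint8, device=device)
        lora_weights = torch.empty(16 << 20, dtype=torch.uint8, device=device)

    time_after_load = time.perf_counter()
    print("Model loading took %.4f GB and %.6f seconds" %
          (m.consumed_memory / float(2**30),
           time_after_load - time_before_load))

With the previous placement, the `with` block closed before the LoRA manager was created, so allocations like `lora_weights` above were invisible to the profiler and `self.model_memory_usage` under-reported what the loaded model actually holds on the device.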