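"""Factories for CacheFlow models and their memory analyzers."""
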
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from transformers import AutoConfig
from cacheflow.models.memory_analyzer import CacheFlowMemoryAnalyzer
from cacheflow.models.memory_analyzer import LlamaMemoryAnalyzer
from cacheflow.models.memory_analyzer import OPTMemoryAnalyzer
from cacheflow.models.llama import LlamaForCausalLM
from cacheflow.models.opt import OPTForCausalLM
from cacheflow.models.utils import get_torch_dtype


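# Map a substring of the Hugging Face model name (e.g., the 'opt' in
# 'facebook/opt-13b') to the model class that implements it.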
_MODELS = {
    'llama': LlamaForCausalLM,
    'opt': OPTForCausalLM,
}
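# Map the same model-name substrings to their memory analyzers.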
_MEMORY_ANALYZERS = {
    'llama': LlamaMemoryAnalyzer,
    'opt': OPTMemoryAnalyzer,
}


def get_model(
    model_name: str,
    dtype: Union[torch.dtype, str],
    path: str,
    use_dummy_weights: bool,
) -> Tuple[nn.Module, torch.dtype]:
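    """Create the model whose registry key appears in `model_name`.

    Returns the model in eval mode on the GPU, together with the torch dtype
    that was used. If `use_dummy_weights` is set, the weights are randomly
    initialized instead of downloaded, which is useful for performance
    measurements.
    """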
    torch_dtype = get_torch_dtype(dtype)
    # Parameters created during model construction below will default to
    # this dtype.
    torch.set_default_dtype(torch_dtype)
    config = AutoConfig.from_pretrained(model_name)
    for model_class_name, model_class in _MODELS.items():
        if model_class_name in model_name:
            if use_dummy_weights:
                # Create a model instance.
                # The weights will be initialized as empty tensors.
                model = model_class(config)
                model = model.cuda()
                # NOTE(woosuk): For precise performance evaluation, we assign
                # random values to the weights.
                model.initialize_dummy_weights()
            else:
                # Download the model weights if they are not already cached.
                weights_dir = model_class.get_weights(model_name, path=path)
                # Create a model instance.
                model = model_class(config)
                # Load the weights from the cached or downloaded files.
                model.load_weights(weights_dir)
                model = model.cuda()
            return model.eval(), torch_dtype
    raise ValueError(f'Unsupported model name: {model_name}')
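
# Example usage of `get_model` (a minimal sketch; the model name, weights
# path, and the presence of a CUDA GPU are illustrative assumptions):
#
#   model, torch_dtype = get_model(
#       model_name='facebook/opt-125m',
#       dtype=torch.float16,
#       path='/tmp/cacheflow-weights',
#       use_dummy_weights=True,
#   )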


def get_memory_analyzer(
    model_name: str,
    block_size: int,
    dtype: Union[torch.dtype, str],
    gpu_memory: int,
    cpu_memory: int,
    tensor_parallel_size: int = 1,
) -> CacheFlowMemoryAnalyzer:
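    """Return the memory analyzer whose registry key appears in `model_name`.

    The lookup uses the same substring matching as `get_model`.
    """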
    torch_dtype = get_torch_dtype(dtype)
    for model_class_name, memory_analyzer in _MEMORY_ANALYZERS.items():
        if model_class_name in model_name:
            return memory_analyzer(
                model_name, block_size, torch_dtype, gpu_memory, cpu_memory,
                tensor_parallel_size)
    raise ValueError(f'Unsupported model name: {model_name}')
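
# Example usage of `get_memory_analyzer` (a sketch; the memory budgets are
# illustrative values, assumed to be byte counts):
#
#   analyzer = get_memory_analyzer(
#       model_name='facebook/opt-125m',
#       block_size=16,
#       dtype=torch.float16,
#       gpu_memory=16 * (1 << 30),  # 16 GiB
#       cpu_memory=32 * (1 << 30),  # 32 GiB
#   )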