"""Utilities for selecting and loading the target model."""
from typing import Optional, Tuple

import torch
import torch.nn as nn
from transformers import AutoConfig, PretrainedConfig

from cacheflow.model_executor.memory_analyzer import (
    CacheFlowMemoryAnalyzer, GPT2MemoryAnalyzer, GPTNeoXMemoryAnalyzer,
    LlamaMemoryAnalyzer, OPTMemoryAnalyzer)
from cacheflow.model_executor.models import (
    GPT2LMHeadModel, GPTNeoXForCausalLM, LlamaForCausalLM, OPTForCausalLM)
from cacheflow.model_executor.utils import get_torch_dtype
from cacheflow.model_executor.weight_utils import initialize_dummy_weights

# TODO(woosuk): Lazy-load the model classes.
_MODEL_REGISTRY = {
    "GPT2LMHeadModel": GPT2LMHeadModel,
    "GPTNeoXForCausalLM": GPTNeoXForCausalLM,
    "LlamaForCausalLM": LlamaForCausalLM,
    "OPTForCausalLM": OPTForCausalLM,
}

_MEMORY_ANALYZERS = {
    "GPT2LMHeadModel": GPT2MemoryAnalyzer,
    "GPTNeoXForCausalLM": GPTNeoXMemoryAnalyzer,
    "LlamaForCausalLM": LlamaMemoryAnalyzer,
    "OPTForCausalLM": OPTMemoryAnalyzer,
}


def _get_model_architecture(config: PretrainedConfig) -> nn.Module:
    architectures = getattr(config, "architectures", [])
    for arch in architectures:
        if arch in _MODEL_REGISTRY:
            return _MODEL_REGISTRY[arch]
    raise ValueError(
        f"Model architectures {architectures} are not supported for now. "
        f"Supported architectures: {list(_MODEL_REGISTRY.keys())}"
    )


def _get_memory_analyzer(config: PretrainedConfig) -> CacheFlowMemoryAnalyzer:
    architectures = getattr(config, "architectures", [])
    for arch in architectures:
        if arch in _MEMORY_ANALYZERS:
            return _MEMORY_ANALYZERS[arch]
    raise ValueError(
        f"Model architectures {architectures} are not supported for now. "
        f"Supported architectures: {list(_MEMORY_ANALYZERS.keys())}"
    )


def _get_dtype(config: PretrainedConfig, dtype: str) -> torch.dtype:
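    """Resolves the torch dtype to load the model with.

    "default" keeps the dtype from the model config, except that float32
    checkpoints are downcast to float16. An explicitly requested dtype must
    match the config dtype unless the config dtype is float32.
    """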
    # NOTE: getattr(config, "torch_dtype", torch.float32) is not correct
    # because config.torch_dtype can be None.
    config_dtype = getattr(config, "torch_dtype", None)
    if config_dtype is None:
        config_dtype = torch.float32
    if dtype == "default":
        if config_dtype == torch.float32:
            # Following the common practice, we use float16 for float32 models.
            torch_dtype = torch.float16
        else:
            torch_dtype = config_dtype
    else:
        torch_dtype = get_torch_dtype(dtype)
        if torch_dtype != config_dtype and config_dtype != torch.float32:
            # TODO(woosuk): Allow using float16 for bfloat16 models and
            # vice versa. Print a warning message and continue.
            raise ValueError(
                f"Cannot use {torch_dtype} for {config_dtype} model.")
    return torch_dtype


def get_model(
    model_name: str,
    dtype: str,
    cache_dir: Optional[str],
    use_dummy_weights: bool,
    use_np_cache: bool,
) -> Tuple[nn.Module, torch.dtype]:
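    """Creates a model, loads (or randomizes) its weights, and moves it to GPU.

    Returns the model in eval mode together with the torch dtype selected
    for it.
    """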
    config = AutoConfig.from_pretrained(model_name)
    torch_dtype = _get_dtype(config, dtype)
    torch.set_default_dtype(torch_dtype)
    model_class = _get_model_architecture(config)

    # Create a model instance.
    # The weights will be initialized as empty tensors.
    model = model_class(config)
    if use_dummy_weights:
        model = model.cuda()
        # NOTE(woosuk): For accurate performance evaluation, we assign
        # random values to the weights.
        initialize_dummy_weights(model)
    else:
        # Load the weights from the cached or downloaded files.
        model.load_weights(model_name, cache_dir, use_np_cache)
        model = model.cuda()
    return model.eval(), torch_dtype


def get_memory_analyzer(
    model_name: str,
    block_size: int,
    dtype: str,
    gpu_memory: int,
    cpu_memory: int,
    tensor_parallel_size: int = 1,
) -> CacheFlowMemoryAnalyzer:
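    """Builds the memory analyzer that matches the model's architecture."""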
    config = AutoConfig.from_pretrained(model_name)
    torch_dtype = _get_dtype(config, dtype)
    memory_analyzer = _get_memory_analyzer(config)
    return memory_analyzer(
        model_name, block_size, torch_dtype, gpu_memory, cpu_memory,
        tensor_parallel_size)
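

if __name__ == "__main__":
    # Illustrative sketch only (not part of the library surface): the model
    # name, block size, and memory sizes below are placeholder assumptions,
    # and running this requires a CUDA device plus access to the model config
    # on the Hugging Face Hub.
    model, torch_dtype = get_model(
        model_name="facebook/opt-125m",
        dtype="default",
        cache_dir=None,
        use_dummy_weights=True,  # skip downloading real weights
        use_np_cache=False,
    )
    print(f"Loaded {type(model).__name__} with dtype {torch_dtype}")

    analyzer = get_memory_analyzer(
        model_name="facebook/opt-125m",
        block_size=16,  # placeholder block size
        dtype="default",
        gpu_memory=16 * 1024 ** 3,  # assume a 16 GiB GPU
        cpu_memory=4 * 1024 ** 3,   # assume 4 GiB of CPU swap space
    )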