# coding=utf-8
# Copyright 2023 The CacheFlow team.
# Adapted from https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py
#
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""1D LLaMA model compatible with HuggingFace weights."""
from typing import Dict, List, Optional, Tuple

import torch
from torch import nn
from transformers import LlamaConfig

from cacheflow.model_executor.input_metadata import InputMetadata
from cacheflow.model_executor.layers.activation import SiluAndMul
from cacheflow.model_executor.layers.layernorm import RMSNorm
from cacheflow.model_executor.layers.attention import GPTNeoXCacheFlowAttention
from cacheflow.model_executor.layers.sampler import Sampler
from cacheflow.model_executor.weight_utils import (hf_model_weights_iterator,
                                                   load_tensor_parallel_weights)
from cacheflow.model_executor.parallel_utils.parallel_state import (
    get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size)
from cacheflow.model_executor.parallel_utils.tensor_parallel import (
    VocabParallelEmbedding, ColumnParallelLinear, RowParallelLinear)
from cacheflow.sequence import SequenceOutputs

KVCache = Tuple[torch.Tensor, torch.Tensor]
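# Each KVCache pairs the key cache with the value cache of one decoder layer.
# The block layout of these tensors is defined by the CacheFlow attention
# kernels; this file only threads them through to the attention module.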


class LlamaMLP(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
    ):
        super().__init__()
        # The gate and up projections are fused into one column-parallel GEMM;
        # SiluAndMul later splits the output and computes silu(gate) * up.
        self.gate_up_proj = ColumnParallelLinear(hidden_size, 2 * intermediate_size,
                                                 bias=False, gather_output=False,
                                                 perform_initialization=False)
        self.down_proj = RowParallelLinear(intermediate_size, hidden_size,
                                           bias=False, input_is_parallel=True,
                                           perform_initialization=False)
        if hidden_act != 'silu':
            raise ValueError(f'Unsupported activation: {hidden_act}. '
                             'Only silu is supported for now.')
        self.act_fn = SiluAndMul()

    def forward(self, x):
        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x


class LlamaAttention(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        tensor_model_parallel_world_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tensor_model_parallel_world_size == 0
        self.num_heads = self.total_num_heads // tensor_model_parallel_world_size
        self.head_dim = hidden_size // self.total_num_heads
        self.scaling = self.head_dim ** -0.5

        # The Q, K, and V projections are fused into a single column-parallel
        # GEMM and split back apart in forward().
        self.qkv_proj = ColumnParallelLinear(
            hidden_size,
            3 * self.total_num_heads * self.head_dim,
            bias=False,
            gather_output=False,
            perform_initialization=False,
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            input_is_parallel=True,
            perform_initialization=False,
        )
        self.attn = GPTNeoXCacheFlowAttention(self.scaling, self.head_dim)

    def forward(
        self,
        positions: torch.LongTensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.chunk(chunks=3, dim=-1)
        k_cache, v_cache = kv_cache
        attn_output = self.attn(
            positions, q, k, v, k_cache, v_cache, input_metadata, cache_event)
        output, _ = self.o_proj(attn_output)
        return output


class LlamaDecoderLayer(nn.Module):

    def __init__(self, config: LlamaConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = LlamaAttention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
        )
        self.mlp = LlamaMLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
        )
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(config.hidden_size,
                                                eps=config.rms_norm_eps)

    def forward(
        self,
        positions: torch.LongTensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        # Self Attention
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            input_metadata=input_metadata,
            cache_event=cache_event,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states


class LlamaModel(nn.Module):

    def __init__(self, config: LlamaConfig):
        super().__init__()
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = VocabParallelEmbedding(config.vocab_size,
                                                   config.hidden_size,
                                                   perform_initialization=False)
        self.layers = nn.ModuleList([
            LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        input_ids: torch.LongTensor,
        positions: torch.LongTensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> torch.Tensor:
        hidden_states = self.embed_tokens(input_ids)
        for i in range(len(self.layers)):
            if cache_events is None:
                cache_event = None
            else:
                cache_event = cache_events[i]
            layer = self.layers[i]
            hidden_states = layer(
                positions,
                hidden_states,
                kv_caches[i],
                input_metadata,
                cache_event,
            )
        hidden_states = self.norm(hidden_states)
        return hidden_states


class LlamaForCausalLM(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.model = LlamaModel(config)
        self.lm_head = ColumnParallelLinear(config.hidden_size,
                                            config.vocab_size,
                                            bias=False,
                                            gather_output=False,
                                            perform_initialization=False)
        self.sampler = Sampler(config.vocab_size)

    def forward(
        self,
        input_ids: torch.LongTensor,
        positions: torch.LongTensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> Dict[int, SequenceOutputs]:
        hidden_states = self.model(
            input_ids, positions, kv_caches, input_metadata, cache_events)
        next_tokens = self.sampler(
            self.lm_head.weight, hidden_states, input_metadata)
        return next_tokens

    _column_parallel_weights = ["embed_tokens.weight", "lm_head.weight",
                                "qkv_proj.weight", "gate_proj.weight",
                                "up_proj.weight"]
    _row_parallel_weights = ["o_proj.weight", "down_proj.weight"]

    def load_weights(self, model_name_or_path: str,
                     cache_dir: Optional[str] = None,
                     use_np_cache: bool = False):
        tensor_model_parallel_rank = get_tensor_model_parallel_rank()
        state_dict = self.state_dict()

        for name, loaded_weight in hf_model_weights_iterator(
                model_name_or_path, cache_dir, use_np_cache):
            # Rotary embedding frequencies are recomputed at runtime.
            if "rotary_emb.inv_freq" in name:
                continue

            # The HuggingFace checkpoint stores separate q/k/v projections;
            # copy each one into its stripe of the fused qkv_proj weight,
            # keeping only this rank's shard of the output dimension.
            is_attention_weight = False
            for stride_id, att_weight_name in enumerate(
                    ["q_proj", "k_proj", "v_proj"]):
                if att_weight_name not in name:
                    continue
                param = state_dict[name.replace(att_weight_name, "qkv_proj")]
                shard_size = param.shape[0] // 3
                loaded_weight = loaded_weight[
                    shard_size * tensor_model_parallel_rank
                    :shard_size * (tensor_model_parallel_rank + 1)]
                param_slice = param.data[shard_size * stride_id
                                         :shard_size * (stride_id + 1)]
                assert param_slice.shape == loaded_weight.shape
                param_slice.copy_(loaded_weight)
                is_attention_weight = True
                break
            if is_attention_weight:
                continue

            # Likewise, fuse the separate gate_proj/up_proj weights into
            # gate_up_proj.
            is_gate_up_weight = False
            for stride_id, weight_name in enumerate(["gate_proj", "up_proj"]):
                if weight_name not in name:
                    continue
                param = state_dict[name.replace(weight_name, "gate_up_proj")]
                shard_size = param.shape[0] // 2
                loaded_weight = loaded_weight[
                    shard_size * tensor_model_parallel_rank
                    :shard_size * (tensor_model_parallel_rank + 1)]
                param_slice = param.data[shard_size * stride_id
                                         :shard_size * (stride_id + 1)]
                assert param_slice.shape == loaded_weight.shape
                param_slice.copy_(loaded_weight)
                is_gate_up_weight = True
                break
            if is_gate_up_weight:
                continue

            # All remaining weights map one-to-one onto the checkpoint; shard
            # them according to the column/row-parallel weight lists above.
            param = state_dict[name]
            load_tensor_parallel_weights(param, loaded_weight, name,
                                         self._column_parallel_weights,
                                         self._row_parallel_weights,
                                         tensor_model_parallel_rank)
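
# A minimal usage sketch (hypothetical, for illustration only; it assumes the
# tensor-model-parallel process group has already been initialized by the
# model executor, which is a precondition not shown in this file):
#
#     config = LlamaConfig.from_pretrained("/path/to/llama")
#     model = LlamaForCausalLM(config)
#     model.load_weights("/path/to/llama")
#
# KV caches, InputMetadata, and cache events are constructed by the serving
# engine for each batch before calling forward().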