[BugFix] Fix tokenizer out of vocab size (#3685)

Roy authored on 2024-03-29 23:18:59 +08:00, committed by GitHub
parent d8658c8cc1
commit 6110c39dc8
4 changed files with 14 additions and 19 deletions
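In short: a sampled token id can fall outside the tokenizer's vocabulary (the removed warnings below were complaining about exactly this kind of tokenizer/model vocabulary-size mismatch), and incremental detokenization now maps such ids to an empty string instead of warning at engine startup. A minimal numeric sketch of the situation, using illustrative values that are not taken from this commit:

    # Illustrative values only (not from this commit): when the model's vocabulary
    # (config.json) is larger than the tokenizer's, sampling can in principle
    # produce an id the tokenizer cannot map back to a token.
    tokenizer_len = 32000      # len(tokenizer): valid ids are 0..31999
    model_vocab_size = 32064   # model reports a larger (e.g. padded) vocabulary
    sampled_id = 32005         # lands in the gap between the two

    assert sampled_id >= tokenizer_len  # the "out of vocab" case this commit handles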

View File

@@ -83,6 +83,13 @@ def test_decode_streaming(tokenizer_id, truth, with_prompt,
     assert decoded_text == generated
+    decoded_text = _run_incremental_decode(
+        tokenizer, [len(tokenizer)],
+        skip_special_tokens=skip_special_tokens,
+        starting_index=starting_index)
+    assert decoded_text == ''
 @pytest.fixture
 def detokenizer(tokenizer_name: str) -> Detokenizer:
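The new assertion feeds len(tokenizer), the first id past the vocabulary, through the incremental decoder and expects an empty string back. A rough sketch of the same situation against a plain Hugging Face tokenizer (the model name and the exact out-of-range behaviour are assumptions, not part of this diff):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("gpt2")  # hypothetical tokenizer for illustration

    out_of_vocab_id = len(tok)  # one past the largest valid token id
    # Depending on the backend, convert_ids_to_tokens may return None, a placeholder,
    # or raise for such an id; the fixed decoder avoids the call entirely and yields ''.
    print(tok.convert_ids_to_tokens([out_of_vocab_id]))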

View File

@@ -222,14 +222,6 @@ class LLMEngine:
         self.tokenizer: BaseTokenizerGroup = get_tokenizer_group(
             self.parallel_config.tokenizer_pool_config, **init_kwargs)
-        if len(self.get_tokenizer()) != self.model_config.get_vocab_size():
-            logger.warning(
-                f"The tokenizer's vocabulary size {len(self.get_tokenizer())}"
-                f" does not match the model's vocabulary size "
-                f"{self.model_config.get_vocab_size()}. This might "
-                f"cause an error in decoding. Please change config.json "
-                "to match the tokenizer's vocabulary size.")
     def _verify_args(self) -> None:
         self.model_config.verify_with_parallel_config(self.parallel_config)
         self.cache_config.verify_with_parallel_config(self.parallel_config)
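With the decode-time guard (last hunk) in place, the startup warning is dropped here and in the OpenAI serving layer below. If you still want to surface the mismatch yourself, a hedged sketch along these lines reuses the same accessors the removed code called; `engine` is assumed to be an initialized LLMEngine:

    import logging

    logger = logging.getLogger(__name__)

    def warn_on_vocab_mismatch(engine) -> None:
        # Compare the tokenizer's size with the vocabulary the model reports.
        tokenizer_size = len(engine.get_tokenizer())
        model_vocab_size = engine.model_config.get_vocab_size()
        if tokenizer_size != model_vocab_size:
            logger.warning(
                "Tokenizer has %d entries but the model reports a vocabulary of %d; "
                "out-of-range ids will decode to an empty string.", tokenizer_size,
                model_vocab_size)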

View File

@@ -68,14 +68,6 @@ class OpenAIServing:
             tokenizer_mode=engine_model_config.tokenizer_mode,
             trust_remote_code=engine_model_config.trust_remote_code)
-        if len(self.tokenizer) != engine_model_config.get_vocab_size():
-            logger.warning(
-                f"The tokenizer's vocabulary size {len(self.tokenizer)}"
-                f" does not match the model's vocabulary size "
-                f"{engine_model_config.get_vocab_size()}. This might "
-                f"cause an error in decoding. Please change config.json "
-                "to match the tokenizer's vocabulary size.")
     async def show_available_models(self) -> ModelList:
         """Show available models. Right now we only have one model."""
         model_cards = [

View File

@@ -232,9 +232,13 @@ def detokenize_incrementally(
             all_input_ids[:-1],
             skip_special_tokens=skip_special_tokens)
-    # Put new_token_id in a list so skip_special_tokens is respected
-    new_tokens = tokenizer.convert_ids_to_tokens(
-        [new_token_id], skip_special_tokens=skip_special_tokens)
+    # If the new token id is out of bounds, return an empty string.
+    if new_token_id >= len(tokenizer):
+        new_tokens = [""]
+    else:
+        # Put new_token_id in a list so skip_special_tokens is respected
+        new_tokens = tokenizer.convert_ids_to_tokens(
+            [new_token_id], skip_special_tokens=skip_special_tokens)
     output_tokens = prev_tokens + new_tokens
     # If this is the first iteration, return all tokens.
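For reference, the guard added above in isolation, as a minimal standalone sketch (the helper name and signature are illustrative, not vLLM API):

    from typing import List

    def convert_new_token(tokenizer, new_token_id: int,
                          skip_special_tokens: bool = False) -> List[str]:
        # Ids at or beyond len(tokenizer) have no vocabulary entry, so decode
        # them to an empty string instead of asking the tokenizer to resolve them.
        if new_token_id >= len(tokenizer):
            return [""]
        # Put new_token_id in a list so skip_special_tokens is respected.
        return tokenizer.convert_ids_to_tokens(
            [new_token_id], skip_special_tokens=skip_special_tokens)

Used as `output_tokens = prev_tokens + convert_new_token(tokenizer, all_input_ids[-1], skip_special_tokens)`, it mirrors the new code path line for line.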