[Bugfix] Fix parameter name in get_tokenizer (#4107)

Author: Cyrus Leung, 2024-04-26 10:10:48 +08:00 (committed by GitHub)
parent cf29b7eda4
commit a74dee9b62
2 changed files with 26 additions and 5 deletions
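Why this is a bugfix: get_tokenizer previously took the revision as tokenizer_revision and forwarded it under that name, but AutoTokenizer.from_pretrained expects the keyword revision, so the requested revision was most likely silently ignored. The commit renames the parameter to revision and forwards it consistently (including to ModelScope's snapshot_download), and adds a regression test (the new file's path is not shown on this page) that loads a tokenizer from the "main" branch and expects an OSError for a nonexistent one.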


@@ -0,0 +1,20 @@
+import pytest
+from transformers import PreTrainedTokenizerBase
+
+from vllm.transformers_utils.tokenizer import get_tokenizer
+
+TOKENIZER_NAMES = [
+    "facebook/opt-125m",
+    "gpt2",
+]
+
+
+@pytest.mark.parametrize("tokenizer_name", TOKENIZER_NAMES)
+def test_tokenizer_revision(tokenizer_name: str):
+    # Assume that the "main" branch always exists
+    tokenizer = get_tokenizer(tokenizer_name, revision="main")
+    assert isinstance(tokenizer, PreTrainedTokenizerBase)
+
+    # Assume that the "never" branch never exists
+    with pytest.raises(OSError, match='not a valid git identifier'):
+        get_tokenizer(tokenizer_name, revision="never")
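For reference, the failure mode the new test asserts can be reproduced directly against transformers; a minimal sketch, assuming network access to the Hugging Face Hub ("gpt2" is one of the models the test already parametrizes over):

from transformers import AutoTokenizer

# Requesting a revision that does not exist on the Hub raises an OSError
# whose message contains "not a valid git identifier"; the test's
# pytest.raises(..., match=...) applies that pattern with re.search.
try:
    AutoTokenizer.from_pretrained("gpt2", revision="never")
except OSError as exc:
    print(exc)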

vllm/transformers_utils/tokenizer.py

@@ -58,11 +58,12 @@ def get_tokenizer(
     *args,
     tokenizer_mode: str = "auto",
     trust_remote_code: bool = False,
-    tokenizer_revision: Optional[str] = None,
+    revision: Optional[str] = None,
     download_dir: Optional[str] = None,
     **kwargs,
 ) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]:
-    """Gets a tokenizer for the given model name via Huggingface/modelscope."""
+    """Gets a tokenizer for the given model name via HuggingFace or ModelScope.
+    """
     if VLLM_USE_MODELSCOPE:
         # download model from ModelScope hub,
         # lazy import so that modelscope is not required for normal use.
@@ -74,7 +75,7 @@ def get_tokenizer(
         tokenizer_path = snapshot_download(
             model_id=tokenizer_name,
             cache_dir=download_dir,
-            revision=tokenizer_revision,
+            revision=revision,
             # Ignore weights - we only need the tokenizer.
             ignore_file_pattern=["*.pt", "*.safetensors", "*.bin"])
         tokenizer_name = tokenizer_path
@@ -90,7 +91,7 @@ def get_tokenizer(
             tokenizer_name,
             *args,
             trust_remote_code=trust_remote_code,
-            tokenizer_revision=tokenizer_revision,
+            revision=revision,
             **kwargs)
     except ValueError as e:
         # If the error pertains to the tokenizer class not existing or not
@@ -114,7 +115,7 @@ def get_tokenizer(
                 tokenizer_name,
                 *args,
                 trust_remote_code=trust_remote_code,
-                tokenizer_revision=tokenizer_revision,
+                revision=revision,
                 **kwargs)
         else:
             raise e
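After the rename, callers pin a tokenizer to a Hub revision through the public parameter name; a minimal usage sketch, assuming a vLLM build that includes this commit:

from vllm.transformers_utils.tokenizer import get_tokenizer

# `revision` is now forwarded as-is to AutoTokenizer.from_pretrained (and to
# ModelScope's snapshot_download when VLLM_USE_MODELSCOPE is set), so the
# pinned branch, tag, or commit hash is actually honored.
tokenizer = get_tokenizer("facebook/opt-125m", revision="main")
print(type(tokenizer).__name__)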