vllm/cacheflow/sampling_params.py

"""Sampling parameters for text generation."""
from typing import List, Optional, Union


class SamplingParams:
    """Sampling parameters for text generation.

    Overall, we follow the sampling parameters from the OpenAI text completion
    API (https://platform.openai.com/docs/api-reference/completions/create).
    In addition, we support beam search, which is not supported by OpenAI.

    Args:
        n: Number of output sequences to return for the given prompt.
        best_of: Number of output sequences that are generated from the prompt.
            From these `best_of` sequences, the top `n` sequences are returned.
            `best_of` must be greater than or equal to `n`. This is treated as
            the beam width when `use_beam_search` is True. By default,
            `best_of` is set to `n`.
        presence_penalty: Float that penalizes new tokens based on whether they
            appear in the generated text so far. Values > 0 encourage the model
            to use new tokens, while values < 0 encourage the model to repeat
            tokens.
        frequency_penalty: Float that penalizes new tokens based on their
            frequency in the generated text so far. Values > 0 encourage the
            model to use new tokens, while values < 0 encourage the model to
            repeat tokens.
        temperature: Float that controls the randomness of the sampling. Lower
            values make the model more deterministic, while higher values make
            the model more random. Zero means greedy sampling.
        top_p: Float that controls the cumulative probability of the top tokens
            to consider. Must be in (0, 1]. Set to 1 to consider all tokens.
        top_k: Integer that controls the number of top tokens to consider. Set
            to -1 to consider all tokens.
        use_beam_search: Whether to use beam search instead of sampling.
        stop: List of strings that stop the generation when they are generated.
            The returned output will not contain the stop strings.
        ignore_eos: Whether to ignore the EOS token and continue generating
            tokens after the EOS token is generated.
        max_tokens: Maximum number of tokens to generate per output sequence.
        logprobs: Number of log probabilities to return per output token.
    """
    def __init__(
        self,
        n: int = 1,
        best_of: Optional[int] = None,
        presence_penalty: float = 0.0,
        frequency_penalty: float = 0.0,
        temperature: float = 1.0,
        top_p: float = 1.0,
        top_k: int = -1,
        use_beam_search: bool = False,
        stop: Union[str, List[str]] = [],
        ignore_eos: bool = False,
        max_tokens: int = 16,
        logprobs: Optional[int] = None,
    ) -> None:
        self.n = n
        self.best_of = best_of if best_of is not None else n
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.temperature = temperature
        self.top_p = top_p
        self.top_k = top_k
        self.use_beam_search = use_beam_search
        self.stop = [stop] if isinstance(stop, str) else list(stop)
        self.ignore_eos = ignore_eos
        self.max_tokens = max_tokens
        self.logprobs = logprobs

        self._verify_args()
        if self.use_beam_search:
            self._verify_beam_search()
        elif self.temperature == 0.0:
            # Zero temperature means greedy sampling.
            self._verify_greedy_sampling()

    def _verify_args(self) -> None:
        if self.n < 1:
            raise ValueError(f"n must be at least 1, got {self.n}.")
        if self.best_of < self.n:
            raise ValueError(f"best_of must be greater than or equal to n, "
                             f"got n={self.n} and best_of={self.best_of}.")
        if not -2.0 <= self.presence_penalty <= 2.0:
            raise ValueError("presence_penalty must be in [-2, 2], got "
                             f"{self.presence_penalty}.")
        if not -2.0 <= self.frequency_penalty <= 2.0:
            raise ValueError("frequency_penalty must be in [-2, 2], got "
                             f"{self.frequency_penalty}.")
        if self.temperature < 0.0:
            raise ValueError(
                f"temperature must be non-negative, got {self.temperature}.")
        if not 0.0 < self.top_p <= 1.0:
            raise ValueError(f"top_p must be in (0, 1], got {self.top_p}.")
        if self.top_k < -1 or self.top_k == 0:
            raise ValueError(f"top_k must be -1 (disable), or at least 1, "
                             f"got {self.top_k}.")
        if self.max_tokens < 1:
            raise ValueError(
                f"max_tokens must be at least 1, got {self.max_tokens}.")
        if self.logprobs is not None and self.logprobs < 0:
            raise ValueError(
                f"logprobs must be non-negative, got {self.logprobs}.")

    def _verify_beam_search(self) -> None:
        if self.best_of == 1:
            raise ValueError("best_of must be greater than 1 when using beam "
                             f"search. Got {self.best_of}.")
        if self.temperature > 0.0:
            raise ValueError("temperature must be 0 when using beam search.")
        if self.top_p < 1.0:
            raise ValueError("top_p must be 1 when using beam search.")
        if self.top_k != -1:
            raise ValueError("top_k must be -1 when using beam search.")

    def _verify_greedy_sampling(self) -> None:
        if self.best_of > 1:
            raise ValueError("best_of must be 1 when using greedy sampling. "
                             f"Got {self.best_of}.")
        if self.top_p < 1.0:
            raise ValueError("top_p must be 1 when using greedy sampling.")
        if self.top_k != -1:
            raise ValueError("top_k must be -1 when using greedy sampling.")

    def __repr__(self) -> str:
        return (f"SamplingParams(n={self.n}, "
                f"best_of={self.best_of}, "
                f"presence_penalty={self.presence_penalty}, "
                f"frequency_penalty={self.frequency_penalty}, "
                f"temperature={self.temperature}, "
                f"top_p={self.top_p}, "
                f"top_k={self.top_k}, "
                f"use_beam_search={self.use_beam_search}, "
                f"stop={self.stop}, "
                f"ignore_eos={self.ignore_eos}, "
                f"max_tokens={self.max_tokens}, "
                f"logprobs={self.logprobs})")