# Adapted from https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/protocol/openai_api_protocol.py
import time
from typing import Dict, List, Literal, Optional, Union

from pydantic import BaseModel, Field

from cacheflow.utils import random_uuid


class ErrorResponse(BaseModel):
    object: str = "error"
    message: str
    type: str
    param: Optional[str] = None
    code: Optional[str] = None


class ModelPermission(BaseModel):
    id: str = Field(default_factory=lambda: f"modelperm-{random_uuid()}")
    object: str = "model_permission"
    created: int = Field(default_factory=lambda: int(time.time()))
    allow_create_engine: bool = False
    allow_sampling: bool = True
    allow_logprobs: bool = True
    allow_search_indices: bool = False
    allow_view: bool = True
    allow_fine_tuning: bool = False
    organization: str = "*"
    group: Optional[str] = None
    is_blocking: bool = False


class ModelCard(BaseModel):
    id: str
    object: str = "model"
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: str = "cacheflow"
    root: Optional[str] = None
    parent: Optional[str] = None
    permission: List[ModelPermission] = Field(default_factory=list)


class ModelList(BaseModel):
    object: str = "list"
    data: List[ModelCard] = Field(default_factory=list)


class UsageInfo(BaseModel):
    prompt_tokens: int = 0
    total_tokens: int = 0
    completion_tokens: Optional[int] = 0


class ChatCompletionRequest(BaseModel):
    model: str
    messages: List[Dict[str, str]]
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 1.0
    n: Optional[int] = 1
    max_tokens: Optional[int] = None
    stop: Optional[Union[str, List[str]]] = None
    stream: Optional[bool] = False
    presence_penalty: Optional[float] = 0.0
    frequency_penalty: Optional[float] = 0.0
    user: Optional[str] = None


class CompletionRequest(BaseModel):
    model: str
    prompt: str
    suffix: Optional[str] = None
    max_tokens: Optional[int] = 16
    temperature: Optional[float] = 1.0
    top_p: Optional[float] = 1.0
    n: Optional[int] = 1
    stream: Optional[bool] = False
    logprobs: Optional[int] = None
    echo: Optional[bool] = False
    stop: Optional[Union[str, List[str]]] = Field(default_factory=list)
    presence_penalty: Optional[float] = 0.0
    frequency_penalty: Optional[float] = 0.0
    best_of: Optional[int] = None
    logit_bias: Optional[Dict[str, float]] = None
    user: Optional[str] = None
    # Additional parameters supported by cacheflow
    top_k: Optional[int] = -1
    ignore_eos: Optional[bool] = False
    use_beam_search: Optional[bool] = False


class LogProbs(BaseModel):
    text_offset: List[int] = Field(default_factory=list)
    token_logprobs: List[Optional[float]] = Field(default_factory=list)
    tokens: List[str] = Field(default_factory=list)
    top_logprobs: List[Optional[Dict[str, float]]] = Field(default_factory=list)


class CompletionResponseChoice(BaseModel):
    index: int
    text: str
    logprobs: Optional[LogProbs] = None
    finish_reason: Optional[Literal["stop", "length"]] = None


class CompletionResponse(BaseModel):
    id: str = Field(default_factory=lambda: f"cmpl-{random_uuid()}")
    object: str = "text_completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: List[CompletionResponseChoice]
    usage: UsageInfo


class CompletionResponseStreamChoice(BaseModel):
    index: int
    text: str
    logprobs: Optional[LogProbs] = None
    finish_reason: Optional[Literal["stop", "length"]] = None


class CompletionStreamResponse(BaseModel):
    id: str = Field(default_factory=lambda: f"cmpl-{random_uuid()}")
    object: str = "text_completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: List[CompletionResponseStreamChoice]
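

# Illustrative usage sketch (not part of the original protocol file): build a
# CompletionRequest as an API handler would receive it, then assemble the
# matching CompletionResponse. The model name, prompt, and token counts are
# made-up placeholders; serialization assumes pydantic v1's .json() (on
# pydantic v2, use model_dump_json() instead).
if __name__ == "__main__":
    request = CompletionRequest(
        model="facebook/opt-125m",
        prompt="Hello, my name is",
        max_tokens=8,
        temperature=0.0,
        # cacheflow-specific extensions defined above
        top_k=40,
    )
    response = CompletionResponse(
        model=request.model,
        choices=[
            CompletionResponseChoice(index=0, text=" CacheFlow.",
                                     finish_reason="stop"),
        ],
        usage=UsageInfo(prompt_tokens=5, completion_tokens=2, total_tokens=7),
    )
    print(response.json())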