
# Co-authored-by: constellate <constellate@1-ai-appserver-staging.codereach.com>
# Co-authored-by: Kyle Mistele <kyle@constellate.ai>
psutil
sentencepiece # Required for LLaMA tokenizer.
numpy < 2.0.0
requests
tqdm
py-cpuinfo
transformers >= 4.43.2 # Required for Chameleon and Llama 3.1 hotfix.
tokenizers >= 0.19.1 # Required for Llama 3.
protobuf # Required by LlamaTokenizer.
fastapi
aiohttp
openai >= 1.0 # Ensure modern openai package (ensure types module present)
uvicorn[standard]
pydantic >= 2.8 # Required for OpenAI server.
pillow # Required for image processing
prometheus_client >= 0.18.0
prometheus-fastapi-instrumentator >= 7.0.0
tiktoken >= 0.6.0 # Required for DBRX tokenizer
lm-format-enforcer == 0.10.6
outlines >= 0.0.43, < 0.1 # Requires torch >= 2.1.0
typing_extensions >= 4.10
filelock >= 3.10.4 # filelock starts to support `mode` argument from 3.10.4
partial-json-parser # used for parsing partial JSON outputs
pyzmq
msgspec
gguf == 0.9.1
importlib_metadata
mistral_common >= 1.3.4
pyyaml