vllm/requirements/test.txt

# This file was autogenerated by uv via the following command:
# uv pip compile requirements/test.in -o requirements/test.txt
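#
# To install these pinned test dependencies, something like the following
# should work (a sketch; add any index or platform flags your environment
# needs, since only the regeneration command above comes from this file):
#
#   uv pip install -r requirements/test.txt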
absl-py==2.1.0
# via rouge-score
accelerate==1.0.1
# via
# lm-eval
# peft
aiohappyeyeballs==2.4.3
# via aiohttp
aiohttp==3.10.11
# via
# datasets
# fsspec
# lm-eval
aiosignal==1.3.1
# via
# aiohttp
# ray
annotated-types==0.7.0
# via pydantic
anyio==4.6.2.post1
# via
# httpx
# starlette
argcomplete==3.5.1
# via datamodel-code-generator
arrow==1.3.0
# via isoduration
attrs==24.2.0
# via
# aiohttp
# hypothesis
# jsonlines
# jsonschema
# pytest-subtests
# referencing
audioread==3.0.1
# via librosa
awscli==1.35.23
# via -r requirements/test.in
backoff==2.2.1
# via
# -r requirements/test.in
# schemathesis
bitsandbytes==0.45.3
# via -r requirements/test.in
black==24.10.0
# via datamodel-code-generator
blobfile==3.0.0
# via -r requirements/test.in
boto3==1.35.57
# via tensorizer
botocore==1.35.57
# via
# awscli
# boto3
# s3transfer
bounded-pool-executor==0.0.3
# via pqdm
buildkite-test-collector==0.1.9
# via -r requirements/test.in
certifi==2024.8.30
# via
# httpcore
# httpx
# requests
cffi==1.17.1
# via soundfile
chardet==5.2.0
# via mbstrdecoder
charset-normalizer==3.4.0
# via requests
click==8.1.7
# via
# black
# jiwer
# nltk
# ray
# schemathesis
# typer
colorama==0.4.6
# via
# awscli
# sacrebleu
# schemathesis
# tqdm-multiprocess
contourpy==1.3.0
# via matplotlib
cramjam==2.9.0
# via fastparquet
cupy-cuda12x==13.3.0
# via ray
cycler==0.12.1
# via matplotlib
datamodel-code-generator==0.26.3
# via -r requirements/test.in
dataproperty==1.0.1
# via
# pytablewriter
# tabledata
datasets==3.0.2
# via
# evaluate
# lm-eval
decorator==5.1.1
# via librosa
dill==0.3.8
# via
# datasets
# evaluate
# lm-eval
# multiprocess
dnspython==2.7.0
# via email-validator
docopt==0.6.2
# via num2words
docutils==0.16
# via awscli
einops==0.8.0
# via
# -r requirements/test.in
# encodec
# mamba-ssm
# vector-quantize-pytorch
# vocos
einx==0.3.0
# via vector-quantize-pytorch
email-validator==2.2.0
# via pydantic
encodec==0.1.1
# via vocos
evaluate==0.4.3
# via lm-eval
fastparquet==2024.11.0
# via genai-perf
fastrlock==0.8.2
# via cupy-cuda12x
fastsafetensors==0.1.10
# via -r requirements/test.in
filelock==3.16.1
# via
# blobfile
# datasets
# huggingface-hub
# ray
# torch
# transformers
fonttools==4.54.1
# via matplotlib
fqdn==1.5.1
# via jsonschema
frozendict==2.4.6
# via einx
frozenlist==1.5.0
# via
# aiohttp
# aiosignal
# ray
fsspec==2024.9.0
# via
# datasets
# evaluate
# fastparquet
# huggingface-hub
# torch
genai-perf==0.0.8
# via -r requirements/test.in
genson==1.3.0
# via datamodel-code-generator
graphql-core==3.2.6
# via hypothesis-graphql
h11==0.14.0
# via httpcore
harfile==0.3.0
# via schemathesis
hf-xet==0.1.4
# via huggingface-hub
hiredis==3.0.0
# via tensorizer
httpcore==1.0.6
# via httpx
httpx==0.27.2
# via
# -r requirements/test.in
# schemathesis
huggingface-hub==0.30.1
# via
# -r requirements/test.in
# accelerate
# datasets
# evaluate
# peft
# sentence-transformers
# timm
# tokenizers
# transformers
# vocos
humanize==4.11.0
# via runai-model-streamer
hypothesis==6.131.0
# via
# hypothesis-graphql
# hypothesis-jsonschema
# schemathesis
hypothesis-graphql==0.11.1
# via schemathesis
hypothesis-jsonschema==0.23.1
# via schemathesis
idna==3.10
# via
# anyio
# email-validator
# httpx
# jsonschema
# requests
# yarl
inflect==5.6.2
# via datamodel-code-generator
iniconfig==2.0.0
# via pytest
isoduration==20.11.0
# via jsonschema
isort==5.13.2
# via datamodel-code-generator
jinja2==3.1.6
# via
# datamodel-code-generator
# torch
jiwer==3.0.5
# via -r requirements/test.in
jmespath==1.0.1
# via
# boto3
# botocore
joblib==1.4.2
# via
# librosa
# nltk
# scikit-learn
jsonlines==4.0.0
# via lm-eval
jsonpointer==3.0.0
# via jsonschema
jsonschema==4.23.0
# via
# hypothesis-jsonschema
# mistral-common
# ray
# schemathesis
jsonschema-specifications==2024.10.1
# via jsonschema
junit-xml==1.9
# via schemathesis
kaleido==0.2.1
# via genai-perf
kiwisolver==1.4.7
# via matplotlib
lazy-loader==0.4
# via librosa
libnacl==2.1.0
# via tensorizer
librosa==0.10.2.post1
# via -r requirements/test.in
llvmlite==0.44.0
# via numba
lm-eval==0.4.8
# via -r requirements/test.in
lxml==5.3.0
# via
# blobfile
# sacrebleu
mamba-ssm==2.2.4
# via -r requirements/test.in
markdown-it-py==3.0.0
# via rich
markupsafe==3.0.2
# via
# jinja2
# werkzeug
matplotlib==3.9.2
# via -r requirements/test.in
mbstrdecoder==1.1.3
# via
# dataproperty
# pytablewriter
# typepy
mdurl==0.1.2
# via markdown-it-py
mistral-common==1.5.4
# via -r requirements/test.in
more-itertools==10.5.0
# via lm-eval
mpmath==1.3.0
# via sympy
msgpack==1.1.0
# via
# librosa
# ray
multidict==6.1.0
# via
# aiohttp
# yarl
multiprocess==0.70.16
# via
# datasets
# evaluate
mypy-extensions==1.0.0
# via black
networkx==3.2.1
# via torch
ninja==1.11.1.3
# via mamba-ssm
nltk==3.9.1
# via rouge-score
num2words==0.5.14
# via -r requirements/test.in
numba==0.61.2
# via
# -r requirements/test.in
# librosa
numexpr==2.10.1
# via lm-eval
numpy==1.26.4
# via
# -r requirements/test.in
# accelerate
# bitsandbytes
# contourpy
# cupy-cuda12x
# datasets
# einx
# encodec
# evaluate
# fastparquet
# genai-perf
# librosa
# matplotlib
# mistral-common
# numba
# numexpr
# opencv-python-headless
# pandas
# patsy
# peft
# rouge-score
# runai-model-streamer
# sacrebleu
# scikit-learn
# scipy
# soxr
# statsmodels
# tensorizer
# torchvision
# transformers
# tritonclient
# vocos
nvidia-cublas-cu12==12.4.5.8
# via
# nvidia-cudnn-cu12
# nvidia-cusolver-cu12
# torch
nvidia-cuda-cupti-cu12==12.4.127
# via torch
nvidia-cuda-nvrtc-cu12==12.4.127
# via torch
nvidia-cuda-runtime-cu12==12.4.127
# via torch
nvidia-cudnn-cu12==9.1.0.70
# via torch
nvidia-cufft-cu12==11.2.1.3
# via torch
nvidia-curand-cu12==10.3.5.147
# via torch
nvidia-cusolver-cu12==11.6.1.9
# via torch
nvidia-cusparse-cu12==12.3.1.170
# via
# nvidia-cusolver-cu12
# torch
nvidia-cusparselt-cu12==0.6.2
# via torch
nvidia-nccl-cu12==2.21.5
# via torch
nvidia-nvjitlink-cu12==12.4.127
# via
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
# torch
nvidia-nvtx-cu12==12.4.127
# via torch
opencv-python-headless==4.11.0.86
# via
# -r requirements/test.in
# mistral-common
packaging==24.1
# via
# accelerate
# black
# datamodel-code-generator
# datasets
# evaluate
# fastparquet
# huggingface-hub
# lazy-loader
# mamba-ssm
# matplotlib
# peft
# plotly
# pooch
# pytest
# pytest-rerunfailures
# ray
# statsmodels
# transformers
# typepy
pandas==2.2.3
# via
# datasets
# evaluate
# fastparquet
# genai-perf
# statsmodels
pathspec==0.12.1
# via black
pathvalidate==3.2.1
# via pytablewriter
patsy==1.0.1
# via statsmodels
peft==0.13.2
# via
# -r requirements/test.in
# lm-eval
pillow==10.4.0
# via
# genai-perf
# matplotlib
# mistral-common
# sentence-transformers
# torchvision
platformdirs==4.3.6
# via
# black
# pooch
plotly==5.24.1
# via genai-perf
pluggy==1.5.0
# via pytest
pooch==1.8.2
# via librosa
portalocker==2.10.1
# via sacrebleu
pqdm==0.2.0
# via -r requirements/test.in
propcache==0.2.0
# via yarl
protobuf==5.28.3
# via
# ray
# tensorizer
psutil==6.1.0
# via
# accelerate
# peft
# tensorizer
py==1.11.0
# via pytest-forked
pyarrow==18.0.0
# via
# datasets
# genai-perf
pyasn1==0.6.1
# via rsa
pybind11==2.13.6
# via lm-eval
pycparser==2.22
# via cffi
pycryptodomex==3.22.0
# via blobfile
pydantic==2.9.2
# via
# datamodel-code-generator
# mistral-common
pydantic-core==2.23.4
# via pydantic
pygments==2.18.0
# via rich
pyparsing==3.2.0
# via matplotlib
pyrate-limiter==3.7.0
# via schemathesis
pytablewriter==1.2.0
# via lm-eval
pytest==8.3.3
# via
# -r requirements/test.in
# buildkite-test-collector
# genai-perf
# pytest-asyncio
# pytest-forked
# pytest-mock
# pytest-rerunfailures
# pytest-shard
# pytest-subtests
# pytest-timeout
# schemathesis
pytest-asyncio==0.24.0
# via -r requirements/test.in
pytest-forked==1.6.0
# via -r requirements/test.in
pytest-mock==3.14.0
# via genai-perf
pytest-rerunfailures==14.0
# via -r requirements/test.in
pytest-shard==0.1.2
# via -r requirements/test.in
pytest-subtests==0.14.1
# via schemathesis
pytest-timeout==2.3.1
# via -r requirements/test.in
python-dateutil==2.9.0.post0
# via
# arrow
# botocore
# matplotlib
# pandas
# typepy
python-rapidjson==1.20
# via tritonclient
pytz==2024.2
# via
# pandas
# typepy
pyyaml==6.0.2
# via
# accelerate
# awscli
# datamodel-code-generator
# datasets
# genai-perf
# huggingface-hub
# peft
# ray
# responses
# schemathesis
# timm
# transformers
# vocos
rapidfuzz==3.12.1
# via jiwer
ray==2.43.0
# via -r requirements/test.in
redis==5.2.0
# via tensorizer
referencing==0.35.1
# via
# jsonschema
# jsonschema-specifications
regex==2024.9.11
# via
# nltk
# sacrebleu
# tiktoken
# transformers
requests==2.32.3
# via
# buildkite-test-collector
# datasets
# evaluate
# huggingface-hub
# lm-eval
# mistral-common
# pooch
# ray
# responses
# schemathesis
# starlette-testclient
# tiktoken
# transformers
responses==0.25.3
# via genai-perf
rfc3339-validator==0.1.4
# via jsonschema
rfc3987==1.3.8
# via jsonschema
rich==13.9.4
# via
# genai-perf
# typer
rouge-score==0.1.2
# via lm-eval
rpds-py==0.20.1
# via
# jsonschema
# referencing
rsa==4.7.2
# via awscli
runai-model-streamer==0.11.0
# via -r requirements/test.in
runai-model-streamer-s3==0.11.0
# via -r requirements/test.in
s3transfer==0.10.3
# via
# awscli
# boto3
sacrebleu==2.4.3
# via lm-eval
safetensors==0.4.5
# via
# accelerate
# peft
# timm
# transformers
schemathesis==3.39.15
# via -r requirements/test.in
scikit-learn==1.5.2
# via
# librosa
# lm-eval
# sentence-transformers
scipy==1.13.1
# via
# librosa
# scikit-learn
# sentence-transformers
# statsmodels
# vocos
sentence-transformers==3.2.1
# via -r requirements/test.in
sentencepiece==0.2.0
# via mistral-common
setuptools==75.8.0
# via
# mamba-ssm
# pytablewriter
# torch
shellingham==1.5.4
# via typer
six==1.16.0
# via
# junit-xml
# python-dateutil
# rfc3339-validator
# rouge-score
sniffio==1.3.1
# via
# anyio
# httpx
sortedcontainers==2.4.0
# via hypothesis
soundfile==0.12.1
# via
# -r requirements/test.in
# librosa
soxr==0.5.0.post1
# via librosa
sqlitedict==2.1.0
# via lm-eval
starlette==0.46.2
# via
# schemathesis
# starlette-testclient
starlette-testclient==0.4.1
# via schemathesis
statsmodels==0.14.4
# via genai-perf
sympy==1.13.1
# via
# einx
# torch
tabledata==1.3.3
# via pytablewriter
tabulate==0.9.0
# via sacrebleu
tcolorpy==0.1.6
# via pytablewriter
tenacity==9.0.0
# via
# lm-eval
# plotly
tensorizer==2.9.0
# via -r requirements/test.in
threadpoolctl==3.5.0
# via scikit-learn
tiktoken==0.7.0
# via
# lm-eval
# mistral-common
timm==1.0.11
# via -r requirements/test.in
tokenizers==0.21.1
# via
# -r requirements/test.in
# transformers
tomli==2.2.1
# via schemathesis
tomli-w==1.2.0
# via schemathesis
torch==2.6.0
# via
# -r requirements/test.in
# accelerate
# bitsandbytes
# encodec
# fastsafetensors
# lm-eval
# mamba-ssm
# peft
# runai-model-streamer
# sentence-transformers
# tensorizer
# timm
# torchaudio
# torchvision
# vector-quantize-pytorch
# vocos
torchaudio==2.6.0
# via
# -r requirements/test.in
# encodec
# vocos
torchvision==0.21.0
# via
# -r requirements/test.in
# timm
tqdm==4.66.6
# via
# datasets
# evaluate
# huggingface-hub
# lm-eval
# nltk
# peft
# pqdm
# sentence-transformers
# tqdm-multiprocess
# transformers
tqdm-multiprocess==0.0.11
# via lm-eval
transformers==4.51.1
# via
# -r requirements/test.in
# genai-perf
# lm-eval
# mamba-ssm
# peft
# sentence-transformers
# transformers-stream-generator
transformers-stream-generator==0.0.5
# via -r requirements/test.in
triton==3.2.0
# via torch
tritonclient==2.51.0
# via
# -r requirements/test.in
# genai-perf
typepy==1.3.2
# via
# dataproperty
# pytablewriter
# tabledata
typer==0.15.2
# via fastsafetensors
types-python-dateutil==2.9.0.20241206
# via arrow
typing-extensions==4.12.2
# via
# huggingface-hub
# librosa
# mistral-common
# pqdm
# pydantic
# pydantic-core
# torch
# typer
tzdata==2024.2
# via pandas
uri-template==1.3.0
# via jsonschema
urllib3==2.2.3
# via
# blobfile
# botocore
# requests
# responses
# tritonclient
vector-quantize-pytorch==1.21.2
# via -r requirements/test.in
vocos==0.1.0
# via -r requirements/test.in
webcolors==24.11.1
# via jsonschema
werkzeug==3.1.3
# via schemathesis
word2number==1.1
# via lm-eval
xxhash==3.5.0
# via
# datasets
# evaluate
yarl==1.17.1
# via
# aiohttp
# schemathesis
zstandard==0.23.0
# via lm-eval