[CI/Build] Adding timeout in CPU CI to avoid CPU test queue blocking (#6892)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Co-authored-by: DarkLight1337 <tlleungac@connect.ust.hk>
Li, Jiang authored on 2024-11-09 11:27:11 +08:00, committed by GitHub
parent 127c07480e
commit d7edca1dee
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
2 changed files with 79 additions and 69 deletions
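
Both CPU CI scripts get the same treatment: the existing test steps move, unchanged apart from indentation (and the newly enabled audio-language test), into a cpu_tests() shell function, which is then exported with export -f and run under timeout 25m so that a stuck test run is killed instead of blocking the CPU test queue.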


@@ -17,30 +17,35 @@ source /etc/environment
 #docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test cpu-test
 docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true --network host -e HF_TOKEN="$HF_TOKEN" --name cpu-test cpu-test
 
-# Run basic model test
-docker exec cpu-test bash -c "
-  set -e
-  pip install pytest pytest-asyncio \
-    decord einops librosa peft Pillow sentence-transformers soundfile \
-    transformers_stream_generator matplotlib datamodel_code_generator
-  pip install torchvision --index-url https://download.pytorch.org/whl/cpu
-  # Embedding models are not supported for CPU yet
-  # pytest -v -s tests/models/embedding/language
-  pytest -v -s tests/models/encoder_decoder/language
-  pytest -v -s tests/models/decoder_only/language/test_models.py
-  # Chunked prefill not supported for CPU yet
-  # pytest -v -s tests/models/decoder_only/audio_language -m cpu_model
-  pytest -v -s tests/models/decoder_only/vision_language -m cpu_model"
+function cpu_tests() {
+  # Run basic model test
+  docker exec cpu-test bash -c "
+    set -e
+    pip install pytest pytest-asyncio \
+      decord einops librosa peft Pillow sentence-transformers soundfile \
+      transformers_stream_generator matplotlib datamodel_code_generator
+    pip install torchvision --index-url https://download.pytorch.org/whl/cpu
+    # Embedding models are not supported for CPU yet
+    # pytest -v -s tests/models/embedding/language
+    pytest -v -s tests/models/encoder_decoder/language
+    pytest -v -s tests/models/decoder_only/language/test_models.py
+    pytest -v -s tests/models/decoder_only/audio_language -m cpu_model
+    pytest -v -s tests/models/decoder_only/vision_language -m cpu_model"
 
-# online inference
-docker exec cpu-test bash -c "
-  set -e
-  python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m &
-  timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1
-  python3 benchmarks/benchmark_serving.py \
-    --backend vllm \
-    --dataset-name random \
-    --model facebook/opt-125m \
-    --num-prompts 20 \
-    --endpoint /v1/completions \
-    --tokenizer facebook/opt-125m"
+  # online inference
+  docker exec cpu-test bash -c "
+    set -e
+    python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m &
+    timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1
+    python3 benchmarks/benchmark_serving.py \
+      --backend vllm \
+      --dataset-name random \
+      --model facebook/opt-125m \
+      --num-prompts 20 \
+      --endpoint /v1/completions \
+      --tokenizer facebook/opt-125m"
+}
+
+# All of CPU tests are expected to be finished less than 25 mins.
+export -f cpu_tests
+timeout 25m bash -c "cpu_tests"
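
Why export -f is needed here: timeout supervises a child process, and bash -c starts a fresh shell that cannot see ordinary shell functions, only exported ones. A minimal self-contained sketch of the pattern this commit introduces (the function name and its body are hypothetical placeholders, not part of the diff):

function my_tests() {
  echo "running tests..."
  sleep 2   # stand-in for the real test commands
  echo "done"
}
export -f my_tests                # bash-only: export the function into the environment
timeout 25m bash -c "my_tests"    # GNU timeout kills the child shell once 25 minutes elapse
echo "exit status: $?"            # 124 signals that the timeout fired

On a CI runner the non-zero 124 exit fails the step promptly, which is exactly what frees the queue.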


@@ -19,50 +19,55 @@ docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/hugg
 docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus=48-95 \
   --cpuset-mems=1 --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-avx2 cpu-test-avx2
 
-# offline inference
-docker exec cpu-test-avx2 bash -c "
-  set -e
-  python3 examples/offline_inference.py"
+function cpu_tests() {
+  # offline inference
+  docker exec cpu-test-avx2 bash -c "
+    set -e
+    python3 examples/offline_inference.py"
 
-# Run basic model test
-docker exec cpu-test bash -c "
-  set -e
-  pip install pytest pytest-asyncio \
-    decord einops librosa peft Pillow sentence-transformers soundfile \
-    transformers_stream_generator matplotlib datamodel_code_generator
-  pip install torchvision --index-url https://download.pytorch.org/whl/cpu
-  # Embedding models are not supported for CPU yet
-  # pytest -v -s tests/models/embedding/language
-  pytest -v -s tests/models/encoder_decoder/language
-  pytest -v -s tests/models/decoder_only/language/test_models.py
-  # Chunked prefill not supported for CPU yet
-  # pytest -v -s tests/models/decoder_only/audio_language -m cpu_model
-  pytest -v -s tests/models/decoder_only/vision_language -m cpu_model"
+  # Run basic model test
+  docker exec cpu-test bash -c "
+    set -e
+    pip install pytest pytest-asyncio \
+      decord einops librosa peft Pillow sentence-transformers soundfile \
+      transformers_stream_generator matplotlib datamodel_code_generator
+    pip install torchvision --index-url https://download.pytorch.org/whl/cpu
+    # Embedding models are not supported for CPU yet
+    # pytest -v -s tests/models/embedding/language
+    pytest -v -s tests/models/encoder_decoder/language
+    pytest -v -s tests/models/decoder_only/language/test_models.py
+    pytest -v -s tests/models/decoder_only/audio_language -m cpu_model
+    pytest -v -s tests/models/decoder_only/vision_language -m cpu_model"
 
-# Run compressed-tensor test
-docker exec cpu-test bash -c "
-  set -e
-  pytest -s -v \
-    tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_static_setup \
-    tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_dynamic_per_token"
+  # Run compressed-tensor test
+  docker exec cpu-test bash -c "
+    set -e
+    pytest -s -v \
+      tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_static_setup \
+      tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_dynamic_per_token"
 
-# Run AWQ test
-docker exec cpu-test bash -c "
-  set -e
-  pytest -s -v \
-    tests/quantization/test_ipex_quant.py"
+  # Run AWQ test
+  docker exec cpu-test bash -c "
+    set -e
+    pytest -s -v \
+      tests/quantization/test_ipex_quant.py"
 
-# online inference
-docker exec cpu-test bash -c "
-  set -e
-  export VLLM_CPU_KVCACHE_SPACE=10
-  export VLLM_CPU_OMP_THREADS_BIND=48-92
-  python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m --dtype half &
-  timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1
-  python3 benchmarks/benchmark_serving.py \
-    --backend vllm \
-    --dataset-name random \
-    --model facebook/opt-125m \
-    --num-prompts 20 \
-    --endpoint /v1/completions \
-    --tokenizer facebook/opt-125m"
+  # online inference
+  docker exec cpu-test bash -c "
+    set -e
+    export VLLM_CPU_KVCACHE_SPACE=10
+    export VLLM_CPU_OMP_THREADS_BIND=48-92
+    python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m --dtype half &
+    timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1
+    python3 benchmarks/benchmark_serving.py \
+      --backend vllm \
+      --dataset-name random \
+      --model facebook/opt-125m \
+      --num-prompts 20 \
+      --endpoint /v1/completions \
+      --tokenizer facebook/opt-125m"
+}
+
+# All of CPU tests are expected to be finished less than 25 mins.
+export -f cpu_tests
+timeout 25m bash -c "cpu_tests"
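
The same 25-minute guard closes out this script, and both files keep the finer-grained readiness gate before benchmarking: the server starts in the background, then timeout 600 bounds an until curl ... sleep 1 poll so a server that never comes up fails the job after 10 minutes rather than hanging it. A standalone sketch of that idiom (model and port copied from the scripts above; the -s flag, server_pid variable, and kill are illustrative additions):

# Start the OpenAI-compatible server in the background and remember its PID.
python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m &
server_pid=$!
# Poll the models endpoint once per second; abort after 600 s if it never answers.
timeout 600 bash -c 'until curl -s localhost:8000/v1/models; do sleep 1; done' || exit 1
echo "server is ready"
# ... benchmarks would run against localhost:8000 here ...
kill "$server_pid"   # stop the background server when finished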