#!/bin/bash
# This script builds the CPU docker image and runs the offline inference inside the container.
# It serves as a sanity check for compilation and basic model usage.
set -ex
# Allow binding to different cores and NUMA nodes.
CORE_RANGE=${CORE_RANGE:-48-95}
NUMA_NODE=${NUMA_NODE:-1}
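# Both can be overridden from the environment, e.g. CORE_RANGE=0-47 NUMA_NODE=0 (illustrative values for another socket).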
# Try building the docker images: a default build and a second build with AVX512 disabled (the -avx2 variant).
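# numactl pins the build to the selected cores and NUMA node so concurrent runs on the same host don't contend for resources.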
numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build -t cpu-test-"$BUILDKITE_BUILD_NUMBER" -f Dockerfile.cpu .
numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" -t cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2 -f Dockerfile.cpu .
# Set up cleanup: remove the test containers on exit, and clear any leftovers before starting.
remove_docker_container() { set -e; docker rm -f cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2-"$NUMA_NODE" || true; }
trap remove_docker_container EXIT
remove_docker_container
# Run the images, setting --shm-size=4g for tensor-parallel runs.
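# Both containers start detached (-itd) with a bash entrypoint; the test steps below drive them via docker exec.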
docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus="$CORE_RANGE" \
--cpuset-mems="$NUMA_NODE" --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" cpu-test-"$BUILDKITE_BUILD_NUMBER"
docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus="$CORE_RANGE" \
--cpuset-mems="$NUMA_NODE" --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2-"$NUMA_NODE" cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2
function cpu_tests() {
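# $1: core list for VLLM_CPU_OMP_THREADS_BIND, $2: NUMA node; both are passed in by the timeout invocation at the bottom of this script.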
set -e
export NUMA_NODE=$2
# Offline inference: smoke-test generation on the AVX2 image.
docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2-"$NUMA_NODE" bash -c "
set -e
python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m"
# Run basic model test
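# -m cpu_model restricts each suite to the tests marked as runnable on the CPU backend.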
docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
set -e
pip install -r vllm/requirements/test.txt
pytest -v -s tests/models/decoder_only/language -m cpu_model
pytest -v -s tests/models/embedding/language -m cpu_model
pytest -v -s tests/models/encoder_decoder/language -m cpu_model
pytest -v -s tests/models/decoder_only/audio_language -m cpu_model
pytest -v -s tests/models/decoder_only/vision_language -m cpu_model"
# Run compressed-tensors tests
docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
set -e
pytest -s -v \
tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_static_setup \
tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_dynamic_per_token"
# Run AWQ test
docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
set -e
pytest -s -v \
tests/quantization/test_ipex_quant.py"
# Run chunked-prefill and prefix-cache test
docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
set -e
pytest -s -v -k cpu_model \
tests/basic_correctness/test_chunked_prefill.py"
# Online serving: start the OpenAI-compatible API server, wait for it to come up, then run a short serving benchmark.
docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
set -e
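# VLLM_CPU_KVCACHE_SPACE sets the CPU KV cache size in GB.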
export VLLM_CPU_KVCACHE_SPACE=10
export VLLM_CPU_OMP_THREADS_BIND=$1
python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m --dtype half &
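# Wait up to 10 minutes for the server to answer /v1/models before running the benchmark.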
timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1
python3 benchmarks/benchmark_serving.py \
--backend vllm \
--dataset-name random \
--model facebook/opt-125m \
--num-prompts 20 \
--endpoint /v1/completions \
--tokenizer facebook/opt-125m"
# Run multi-lora tests
docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
set -e
pytest -s -v \
tests/lora/test_qwen2vl.py"
}
# All of the CPU tests are expected to finish in under 40 minutes.
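# export -f makes the cpu_tests function available to the child bash started by timeout below.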
export -f cpu_tests
timeout 40m bash -c "cpu_tests $CORE_RANGE $NUMA_NODE"