#!/bin/bash
#
# Launch a Hugging Face text-generation-inference (TGI) server in Docker.
#
# Usage: ./run_tgi.sh <model-id> <max-batch-total-tokens>
#   $1 - model id passed to --model-id (e.g. "mistralai/Mistral-7B-v0.1")
#   $2 - value for --max-batch-total-tokens
#
# Required environment:
#   HF_TOKEN - Hugging Face access token, forwarded into the container.
#
# Exposes the server on host port $PORT and caches model weights in ./data.

set -euo pipefail

PORT=8000

# Fail fast with a usage message if positional args are missing,
# instead of letting docker receive empty --model-id / token values.
MODEL=${1:?usage: $0 <model-id> <max-batch-total-tokens>}
TOKENS=${2:?usage: $0 <model-id> <max-batch-total-tokens>}

# ${HF_TOKEN:?} aborts with a clear error if the token is not set.
# -p maps host $PORT to the container's port 80; -v mounts ./data so
# downloaded model weights persist across container runs.
docker run -e HF_TOKEN="${HF_TOKEN:?HF_TOKEN must be set}" --gpus all --shm-size 1g -p "${PORT}:80" \
  -v "${PWD}/data:/data" \
  ghcr.io/huggingface/text-generation-inference:1.4.0 \
  --model-id "$MODEL" \
  --sharded false \
  --max-input-length 1024 \
  --max-total-tokens 2048 \
  --max-best-of 5 \
  --max-concurrent-requests 5000 \
  --max-batch-total-tokens "$TOKENS"