diff --git a/examples/gradio_webserver.py b/examples/gradio_webserver.py
index 42e74614..315fa7e9 100644
--- a/examples/gradio_webserver.py
+++ b/examples/gradio_webserver.py
@@ -39,7 +39,7 @@ def build_demo():
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument("--host", type=str, default="localhost")
+    parser.add_argument("--host", type=str, default=None)
     parser.add_argument("--port", type=int, default=8001)
     parser.add_argument("--model-url",
                         type=str,
diff --git a/vllm/entrypoints/api_server.py b/vllm/entrypoints/api_server.py
index 5e63a02c..4b22ba71 100644
--- a/vllm/entrypoints/api_server.py
+++ b/vllm/entrypoints/api_server.py
@@ -65,7 +65,7 @@ async def generate(request: Request) -> Response:
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument("--host", type=str, default="localhost")
+    parser.add_argument("--host", type=str, default=None)
    parser.add_argument("--port", type=int, default=8000)
     parser = AsyncEngineArgs.add_cli_args(parser)
     args = parser.parse_args()
diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py
index 64a4b865..80d6f271 100644
--- a/vllm/entrypoints/openai/api_server.py
+++ b/vllm/entrypoints/openai/api_server.py
@@ -567,10 +567,7 @@ async def create_completion(request: CompletionRequest, raw_request: Request):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         description="vLLM OpenAI-Compatible RESTful API server.")
-    parser.add_argument("--host",
-                        type=str,
-                        default="localhost",
-                        help="host name")
+    parser.add_argument("--host", type=str, default=None, help="host name")
     parser.add_argument("--port", type=int, default=8000, help="port number")
     parser.add_argument("--allow-credentials",
                         action="store_true",