[V1] Fix torch profiling for offline inference (#11125)
Signed-off-by: Roger Wang <ywang@roblox.com>
parent 85362f028c
commit 4816d20aa4
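Context for the diff below: the first two hunks update the offline profiling example, and the last two update the V1 engine-core clients. Offline profiling is driven by llm.start_profile() / llm.stop_profile(), and, as an assumption not shown in this diff, the torch profiler only writes traces when a trace directory is configured through the VLLM_TORCH_PROFILER_DIR environment variable. A minimal usage sketch under that assumption:

import os

# Assumption: VLLM_TORCH_PROFILER_DIR enables the torch profiler and names
# the directory where traces are written; set it before vLLM initializes.
os.environ["VLLM_TORCH_PROFILER_DIR"] = "./vllm_profile"

from vllm import LLM, SamplingParams

if __name__ == "__main__":
    llm = LLM(model="facebook/opt-125m")
    llm.start_profile()
    llm.generate(["Hello, my name is"], SamplingParams(max_tokens=16))
    llm.stop_profile()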
@@ -1,4 +1,5 @@
 import os
+import time
 
 from vllm import LLM, SamplingParams
 
@@ -15,19 +16,25 @@ prompts = [
 # Create a sampling params object.
 sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
 
-# Create an LLM.
-llm = LLM(model="facebook/opt-125m", tensor_parallel_size=1)
+if __name__ == "__main__":
 
-llm.start_profile()
+    # Create an LLM.
+    llm = LLM(model="facebook/opt-125m", tensor_parallel_size=1)
 
-# Generate texts from the prompts. The output is a list of RequestOutput objects
-# that contain the prompt, generated text, and other information.
-outputs = llm.generate(prompts, sampling_params)
+    llm.start_profile()
 
-llm.stop_profile()
+    # Generate texts from the prompts. The output is a list of RequestOutput
+    # objects that contain the prompt, generated text, and other information.
+    outputs = llm.generate(prompts, sampling_params)
 
-# Print the outputs.
-for output in outputs:
-    prompt = output.prompt
-    generated_text = output.outputs[0].text
-    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
+    llm.stop_profile()
+
+    # Print the outputs.
+    for output in outputs:
+        prompt = output.prompt
+        generated_text = output.outputs[0].text
+        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
+
+    # Add a buffer to wait for profiler in the background process
+    # (in case MP is on) to finish writing profiling output.
+    time.sleep(10)
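The if __name__ == "__main__": guard added above matters because, when multiprocessing is on, worker processes may be started with the spawn method, which re-imports the script; unguarded module-level engine construction would then run again in every child. A minimal sketch of the pitfall, independent of vLLM:

import multiprocessing as mp

def worker() -> None:
    print("worker running")

# Without the guard, a spawn-started child re-imports this module,
# executes the top level again, and launches yet another child.
if __name__ == "__main__":
    mp.set_start_method("spawn")
    p = mp.Process(target=worker)
    p.start()
    p.join()

The trailing time.sleep(10) addresses the same multiprocessing concern: it gives a background profiler process time to finish writing its trace before the script exits.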
@@ -105,7 +105,7 @@ class InprocClient(EngineCoreClient):
     def __del__(self):
         self.shutdown()
 
-    async def profile(self, is_start=True) -> None:
+    def profile(self, is_start=True) -> None:
         self.engine_core.profile(is_start)
 
 
@@ -212,7 +212,7 @@ class SyncMPClient(MPClient):
     def abort_requests(self, request_ids: List[str]) -> None:
         self._send_input(EngineCoreRequestType.ABORT, request_ids)
 
-    async def profile(self, is_start=True) -> None:
+    def profile(self, is_start=True) -> None:
         self._send_input(EngineCoreRequestType.PROFILE,
                          EngineCoreProfile(is_start))
 
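The last two hunks are the actual fix: InprocClient and SyncMPClient are synchronous clients, but their profile methods were declared async def, so calling them from the synchronous offline path only created coroutine objects that were never awaited, and the profiler never actually started or stopped. A minimal sketch of that failure mode, with hypothetical classes rather than the vLLM API:

class BrokenClient:
    # Declared async even though callers are synchronous: a plain call
    # builds a coroutine object and returns; the body never executes.
    async def profile(self, is_start=True) -> None:
        print("profiler", "started" if is_start else "stopped")

class FixedClient:
    # Plain method: the body runs immediately when called.
    def profile(self, is_start=True) -> None:
        print("profiler", "started" if is_start else "stopped")

coro = BrokenClient().profile()  # prints nothing
coro.close()                     # suppress the "never awaited" RuntimeWarning

FixedClient().profile()          # prints "profiler started"

Dropping async makes the signatures match how these EngineCoreClient subclasses are actually called on the synchronous path.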
|
Loading…
x
Reference in New Issue
Block a user