Use monotonic time where appropriate (#1249)
Yard1 authored Oct 3, 2023
1 parent 66d18a7 commit acbed3e
Showing 7 changed files with 18 additions and 17 deletions.
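For context (not part of the diff itself): time.time() reads the system wall clock, which can jump forward or backward when the clock is adjusted, so intervals computed from it are unreliable; time.monotonic() and time.perf_counter() are guaranteed never to go backwards and are meant for measuring elapsed time, with perf_counter() offering the highest available resolution. A minimal standalone sketch of the distinction this commit relies on:

import time

# Wall clock: meaningful as an absolute timestamp, but it can be adjusted
# (e.g. by NTP), so a difference between two readings may be wrong.
wall_start = time.time()

# Monotonic clocks: their absolute values are arbitrary, but they never go
# backwards, so differences between two readings are safe to use as durations.
mono_start = time.monotonic()
perf_start = time.perf_counter()

sum(range(1_000_000))  # some work to time

print(f"wall elapsed:         {time.time() - wall_start:.6f} s")
print(f"monotonic elapsed:    {time.monotonic() - mono_start:.6f} s")
print(f"perf_counter elapsed: {time.perf_counter() - perf_start:.6f} s")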
4 changes: 2 additions & 2 deletions benchmarks/benchmark_latency.py
@@ -40,13 +40,13 @@ def main(args: argparse.Namespace):
     def run_to_completion(profile: bool = False):
         if profile:
             torch.cuda.cudart().cudaProfilerStart()
-        start_time = time.time()
+        start_time = time.perf_counter()
 
         llm.generate(prompt_token_ids=dummy_prompt_token_ids,
                      sampling_params=sampling_params,
                      use_tqdm=False)
 
-        end_time = time.time()
+        end_time = time.perf_counter()
         latency = end_time - start_time
         if profile:
             torch.cuda.cudart().cudaProfilerStop()
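The timing pattern used in run_to_completion above, reduced to a standalone sketch; measure_latency and the dummy workload are illustrative stand-ins, not vLLM APIs:

import time
from typing import Callable

def measure_latency(fn: Callable[[], object]) -> float:
    """Run fn once and return its elapsed time from a monotonic, high-resolution clock."""
    start_time = time.perf_counter()
    fn()
    end_time = time.perf_counter()
    return end_time - start_time

# Stand-in for llm.generate(...) in the benchmark above.
latency = measure_latency(lambda: sum(range(10_000_000)))
print(f"Latency: {latency:.4f} s")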
8 changes: 4 additions & 4 deletions benchmarks/benchmark_serving.py
@@ -105,7 +105,7 @@ async def send_request(
     best_of: int,
     use_beam_search: bool,
 ) -> None:
-    request_start_time = time.time()
+    request_start_time = time.perf_counter()
 
     headers = {"User-Agent": "Benchmark Client"}
     if backend == "vllm":
@@ -148,7 +148,7 @@ async def send_request(
             if "error" not in output:
                 break
 
-    request_end_time = time.time()
+    request_end_time = time.perf_counter()
     request_latency = request_end_time - request_start_time
     REQUEST_LATENCY.append((prompt_len, output_len, request_latency))
 
@@ -180,10 +180,10 @@ def main(args: argparse.Namespace):
     tokenizer = get_tokenizer(args.tokenizer, trust_remote_code=args.trust_remote_code)
     input_requests = sample_requests(args.dataset, args.num_prompts, tokenizer)
 
-    benchmark_start_time = time.time()
+    benchmark_start_time = time.perf_counter()
     asyncio.run(benchmark(args.backend, api_url, input_requests, args.best_of,
                           args.use_beam_search, args.request_rate))
-    benchmark_end_time = time.time()
+    benchmark_end_time = time.perf_counter()
     benchmark_time = benchmark_end_time - benchmark_start_time
     print(f"Total time: {benchmark_time:.2f} s")
     print(f"Throughput: {args.num_prompts / benchmark_time:.2f} requests/s")
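A self-contained sketch of the same measurement structure as send_request and benchmark above, with asyncio.sleep standing in for the HTTP round trip so no aiohttp session or running server is needed:

import asyncio
import time
from typing import List, Tuple

REQUEST_LATENCY: List[Tuple[int, float]] = []

async def send_request(request_id: int, simulated_delay: float) -> None:
    # perf_counter() is unaffected by wall-clock adjustments, so per-request
    # latencies stay correct even during long benchmark runs.
    request_start_time = time.perf_counter()
    await asyncio.sleep(simulated_delay)  # stand-in for the real backend call
    request_latency = time.perf_counter() - request_start_time
    REQUEST_LATENCY.append((request_id, request_latency))

async def benchmark() -> None:
    await asyncio.gather(*(send_request(i, 0.05 * i) for i in range(1, 4)))

benchmark_start_time = time.perf_counter()
asyncio.run(benchmark())
benchmark_time = time.perf_counter() - benchmark_start_time
print(f"Total time: {benchmark_time:.2f} s")
print(f"Throughput: {len(REQUEST_LATENCY) / benchmark_time:.2f} requests/s")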
8 changes: 4 additions & 4 deletions benchmarks/benchmark_throughput.py
@@ -93,10 +93,10 @@ def run_vllm(
             sampling_params=sampling_params,
         )
 
-    start = time.time()
+    start = time.perf_counter()
     # FIXME(woosuk): Do not use internal method.
     llm._run_engine(use_tqdm=True)
-    end = time.time()
+    end = time.perf_counter()
     return end - start
 
 
@@ -118,7 +118,7 @@ def run_hf(
     llm = llm.cuda()
 
     pbar = tqdm(total=len(requests))
-    start = time.time()
+    start = time.perf_counter()
     batch: List[str] = []
     max_prompt_len = 0
     max_output_len = 0
@@ -156,7 +156,7 @@ def run_hf(
         batch = []
         max_prompt_len = 0
         max_output_len = 0
-    end = time.time()
+    end = time.perf_counter()
     return end - start
2 changes: 1 addition & 1 deletion vllm/core/scheduler.py
@@ -121,7 +121,7 @@ def _schedule(self) -> SchedulerOutputs:
         blocks_to_copy: Dict[int, List[int]] = {}
 
         # Fix the current time.
-        now = time.time()
+        now = time.monotonic()
 
         # Join waiting sequences if possible.
         if not self.swapped:
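The scheduler only uses now to compute how long sequences have been waiting, by subtracting arrival times recorded with the same clock, which is exactly what a monotonic clock is for. A simplified illustration; the queue and Request class here are stand-ins, not vLLM's actual data structures:

import time
from collections import deque
from dataclasses import dataclass
from typing import Deque

@dataclass
class Request:
    request_id: str
    arrival_time: float  # recorded with time.monotonic()

waiting: Deque[Request] = deque()
waiting.append(Request("a", time.monotonic()))
time.sleep(0.1)
waiting.append(Request("b", time.monotonic()))

# Fix the current time once per scheduling step, as _schedule does above.
now = time.monotonic()

# Both timestamps come from the same monotonic clock, so the waiting time
# can never be negative, even if the wall clock is adjusted in between.
for req in waiting:
    print(f"{req.request_id}: waiting for {now - req.arrival_time:.3f} s")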
3 changes: 2 additions & 1 deletion vllm/engine/async_llm_engine.py
@@ -417,7 +417,8 @@ async def generate(
             request.
         """
         # Preprocess the request.
-        arrival_time = time.time()
+        # This should not be used for logging, as it is monotonic time.
+        arrival_time = time.monotonic()
 
         try:
             stream = await self.add_request(request_id,
6 changes: 3 additions & 3 deletions vllm/engine/llm_engine.py
@@ -256,10 +256,10 @@ def add_request(
             prompt_token_ids: The token IDs of the prompt. If None, we
                 use the tokenizer to convert the prompts to token IDs.
             arrival_time: The arrival time of the request. If None, we use
-                the current time.
+                the current monotonic time.
         """
         if arrival_time is None:
-            arrival_time = time.time()
+            arrival_time = time.monotonic()
         if prompt_token_ids is None:
             assert prompt is not None
             prompt_token_ids = self.tokenizer.encode(prompt)
@@ -568,7 +568,7 @@ def _log_system_stats(
         prompt_run: bool,
         num_batched_tokens: int,
     ) -> None:
-        now = time.time()
+        now = time.monotonic()
         # Log the number of batched input tokens.
         if prompt_run:
             self.num_prompt_tokens.append((now, num_batched_tokens))
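With this change, add_request defaults arrival times to the monotonic clock and _log_system_stats reads the same clock, so every duration the engine computes subtracts two timestamps from one clock; as the comment added in async_llm_engine.py notes, such values are only meaningful as differences, not as wall-clock times for logging. A hedged sketch of the defaulting pattern, with a plain dict standing in for the engine's internal bookkeeping:

import time
from typing import Optional

def add_request(request_id: str, arrival_time: Optional[float] = None) -> dict:
    # Default to the monotonic clock so later subtractions are consistent.
    if arrival_time is None:
        arrival_time = time.monotonic()
    return {"request_id": request_id, "arrival_time": arrival_time}

req = add_request("cmpl-123")
time.sleep(0.05)
now = time.monotonic()
# Meaningful only as a difference, never as an absolute timestamp.
print(f"queued for {now - req['arrival_time']:.3f} s")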
4 changes: 2 additions & 2 deletions vllm/entrypoints/openai/api_server.py
@@ -210,7 +210,7 @@ async def create_chat_completion(request: ChatCompletionRequest,
 
     model_name = request.model
     request_id = f"cmpl-{random_uuid()}"
-    created_time = int(time.time())
+    created_time = int(time.monotonic())
     try:
         sampling_params = SamplingParams(
             n=request.n,
@@ -411,7 +411,7 @@ async def create_completion(request: CompletionRequest, raw_request: Request):
     if error_check_ret is not None:
         return error_check_ret
 
-    created_time = int(time.time())
+    created_time = int(time.monotonic())
     try:
         sampling_params = SamplingParams(
             n=request.n,
