Skip to content

Commit

Permalink
Use better value in timeout test L0_sequence_batcher (#5716)
Browse files Browse the repository at this point in the history
* Use better value in timeout test L0_sequence_batcher

* Format
  • Loading branch information
GuanLuo authored Apr 28, 2023
1 parent e6eda20 commit b795c01
Show file tree
Hide file tree
Showing 2 changed files with 5 additions and 5 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,6 @@ sequence_batching {
parameters [
{
key: "execute_delay_ms"
value: { string_value: "3000" }
value: { string_value: "5000" }
}
]
]
6 changes: 3 additions & 3 deletions qa/L0_sequence_batcher/sequence_batcher_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -2874,7 +2874,7 @@ def setUp(self):
def send_sequence_with_timeout(self,
seq_id,
callback,
timeout_us=2000000,
timeout_us=3000000,
request_pause_sec=0):
with grpcclient.InferenceServerClient(
self.server_address_) as triton_client:
Expand Down Expand Up @@ -2903,8 +2903,8 @@ def test_request_timeout(self):
# expect the timeout to expire only on the backlog sequence, and expect
# all requests of that sequence to be rejected once it has expired.
# Sending two sequences while the model can only process one sequence
# at a time. Each model execution takes 3 seconds and all requests have
# a 2-second timeout, so the second sequence will be rejected.
# at a time. Each model execution takes 5 seconds and all requests have
# a 3-second timeout, so the second sequence will be rejected.

# correlation ID is 1-index
seq1_res = []
Expand Down

0 comments on commit b795c01

Please sign in to comment.