@@ -13,6 +13,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// cargo test --test soak integration::main --features integration
+//!
+//! It will send a batch of requests to the runtime and measure the throughput.
+//!
+//! It will also measure the latency of the requests.
 #[cfg(feature = "integration")]
 mod integration {
 
@@ -22,13 +27,17 @@ mod integration {
         logging,
         pipeline::{
             async_trait, network::Ingress, AsyncEngine, AsyncEngineContextProvider, Error, ManyOut,
-            ResponseStream, SingleIn,
+            PushRouter, ResponseStream, SingleIn,
         },
         protocols::annotated::Annotated,
-        DistributedRuntime, ErrorContext, Result, Runtime, Worker,
+        stream, DistributedRuntime, ErrorContext, Result, Runtime, Worker,
     };
     use futures::StreamExt;
-    use std::{sync::Arc, time::Duration};
+    use std::{
+        sync::atomic::{AtomicU64, Ordering},
+        sync::Arc,
+        time::Duration,
+    };
     use tokio::time::Instant;
 
     #[test]
@@ -45,16 +54,29 @@ mod integration {
 
         client.await??;
         distributed.shutdown();
-        server.await??;
+        let handler = server.await??;
+
+        // Print final backend counter value
+        let final_count = handler.backend_counter.load(Ordering::Relaxed);
+        println!(
+            "Final RequestHandler backend_counter: {} requests processed",
+            final_count
+        );
 
         Ok(())
     }
 
-    struct RequestHandler {}
+    struct RequestHandler {
+        backend_counter: Arc<AtomicU64>,
+        normal_processing: bool,
+    }
 
     impl RequestHandler {
-        fn new() -> Arc<Self> {
-            Arc::new(Self {})
+        fn new(normal_processing: bool) -> Arc<Self> {
+            Arc::new(Self {
+                backend_counter: Arc::new(AtomicU64::new(0)),
+                normal_processing,
+            })
         }
     }
 
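The hunk above threads an `Arc<AtomicU64>` through `RequestHandler` and reads it with `Ordering::Relaxed`. The standalone sketch below (not part of the PR, and assuming tokio with the `macros` and `rt-multi-thread` features) illustrates why relaxed ordering is sufficient for this kind of counter: each task only needs its increment to be atomic, and the total is read once all tasks have been joined.

```rust
use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc,
};

#[tokio::main]
async fn main() {
    // Shared counter, cloned into each task the same way
    // RequestHandler::backend_counter is shared with the ingress.
    let counter = Arc::new(AtomicU64::new(0));

    let mut handles = Vec::new();
    for _ in 0..10 {
        let counter = counter.clone();
        handles.push(tokio::spawn(async move {
            // Relaxed is enough here: we only need an eventual total,
            // not ordering relative to other memory operations.
            counter.fetch_add(1, Ordering::Relaxed);
        }));
    }
    for handle in handles {
        handle.await.unwrap();
    }

    // All tasks have been joined, so the load observes every increment.
    println!("processed: {}", counter.load(Ordering::Relaxed));
}
```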
@@ -63,25 +85,39 @@ mod integration {
         async fn generate(&self, input: SingleIn<String>) -> Result<ManyOut<Annotated<String>>> {
             let (data, ctx) = input.into_parts();
 
+            // Increment backend counter
+            self.backend_counter.fetch_add(1, Ordering::Relaxed);
+
             let chars = data
                 .chars()
                 .map(|c| Annotated::from_data(c.to_string()))
                 .collect::<Vec<_>>();
 
-            let stream = async_stream::stream! {
-                for c in chars {
-                    yield c;
-                    tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
-                }
-            };
-
-            Ok(ResponseStream::new(Box::pin(stream), ctx.context()))
+            if self.normal_processing {
+                let iter_stream = stream::iter(chars);
+                Ok(ResponseStream::new(Box::pin(iter_stream), ctx.context()))
+            } else {
+                // delayed processing, just to saturate the queue
+                let async_stream = async_stream::stream! {
+                    for c in chars {
+                        yield c;
+                        tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
+                    }
+                };
+                Ok(ResponseStream::new(Box::pin(async_stream), ctx.context()))
+            }
         }
     }
 
-    async fn backend(runtime: DistributedRuntime) -> Result<()> {
+    async fn backend(runtime: DistributedRuntime) -> Result<Arc<RequestHandler>> {
+        // get the normal-processing setting from the env (false = delayed)
+        let normal_processing =
+            std::env::var("DYN_NORMAL_PROCESSING").unwrap_or("true".to_string());
+        let normal_processing: bool = normal_processing.parse().unwrap_or(true);
+
         // attach an ingress to an engine
-        let ingress = Ingress::for_engine(RequestHandler::new())?;
+        let handler = RequestHandler::new(normal_processing);
+        let ingress = Ingress::for_engine(handler.clone())?;
 
         // // make the ingress discoverable via a component service
         // // we must first create a service, then we can attach one or more endpoints
@@ -95,7 +131,9 @@ mod integration {
             .endpoint_builder()
             .handler(ingress)
             .start()
-            .await
+            .await?;
+
+        Ok(handler)
     }
 
     async fn client(runtime: DistributedRuntime) -> Result<()> {
@@ -105,29 +143,32 @@ mod integration {
             humantime::parse_duration(&run_duration).unwrap_or(Duration::from_secs(60));
 
         let batch_load = std::env::var("DYN_SOAK_BATCH_LOAD").unwrap_or("10000".to_string());
-        let batch_load: usize = batch_load.parse().unwrap_or(10000);
+        let batch_load: usize = batch_load.parse().unwrap_or(100);
 
         let client = runtime
             .namespace(DEFAULT_NAMESPACE)?
             .component("backend")?
             .endpoint("generate")
-            .client::<String, Annotated<String>>()
+            .client()
             .await?;
 
         client.wait_for_instances().await?;
-        let client = Arc::new(client);
+        let router =
+            PushRouter::<String, Annotated<String>>::from_client(client, Default::default())
+                .await?;
+        let router = Arc::new(router);
 
         let start = Instant::now();
         let mut count = 0;
 
         loop {
             let mut tasks = Vec::new();
             for _ in 0..batch_load {
-                let client = client.clone();
+                let router = router.clone();
                 tasks.push(tokio::spawn(async move {
                     let mut stream = tokio::time::timeout(
-                        Duration::from_secs(30),
-                        client.random("hello world".to_string().into()),
+                        Duration::from_secs(5),
+                        router.random("hello world".to_string().into()),
                     )
                     .await
                     .context("request timed out")??;
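The hunk ends before the code that awaits the spawned tasks. As a sketch only, a typical way to drain such a `Vec` of `JoinHandle`s looks like the hypothetical helper below; `anyhow::Result` stands in for the runtime's own `Result` alias, and `drain_batch` is not a name from this PR.

```rust
use anyhow::Result;
use tokio::task::JoinHandle;

// Hypothetical helper: wait for every spawned request task in a batch,
// surfacing the first failure. (The PR's actual join logic sits below this hunk.)
async fn drain_batch(tasks: Vec<JoinHandle<Result<()>>>) -> Result<()> {
    for task in tasks {
        // The first `?` covers a panicked or cancelled task (JoinError),
        // the second the error returned by the request itself.
        task.await??;
    }
    Ok(())
}
```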
@@ -147,7 +188,9 @@ mod integration {
 
             let elapsed = start.elapsed();
             count += batch_load;
-            println!("elapsed: {:?}; count: {}", elapsed, count);
+            if count % 1000 == 0 {
+                println!("elapsed: {:?}; count: {}", elapsed, count);
+            }
 
             if elapsed > run_duration {
                 println!("done");
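Taken together, the visible hunks configure the test through environment variables: `DYN_NORMAL_PROCESSING` selects the immediate or delayed stream in the backend, and `DYN_SOAK_BATCH_LOAD` sets the per-iteration request count, while the run duration is parsed with `humantime` from a value read above the client hunk. Below is a minimal sketch of collecting those settings in one place, mirroring the fallbacks visible in the diff; `SoakConfig` and `from_env` are hypothetical names, not part of the PR, and the `humantime` crate is the one the test already uses.

```rust
use std::time::Duration;

// Hypothetical helper; the test itself parses these values inline.
struct SoakConfig {
    normal_processing: bool, // false => each yielded char sleeps 100 ms (the delayed path)
    batch_load: usize,       // requests spawned per loop iteration
    run_duration: Duration,  // parsed with humantime from a value read above the client hunk
}

impl SoakConfig {
    fn from_env(raw_run_duration: &str) -> Self {
        // Mirrors the fallbacks visible in the diff: "true", 100, and 60 s.
        let normal_processing = std::env::var("DYN_NORMAL_PROCESSING")
            .unwrap_or_else(|_| "true".to_string())
            .parse()
            .unwrap_or(true);
        let batch_load = std::env::var("DYN_SOAK_BATCH_LOAD")
            .unwrap_or_else(|_| "10000".to_string())
            .parse()
            .unwrap_or(100);
        let run_duration =
            humantime::parse_duration(raw_run_duration).unwrap_or(Duration::from_secs(60));
        Self {
            normal_processing,
            batch_load,
            run_duration,
        }
    }
}
```

As in the diff, the batch-load default string stays "10000" while the parse fallback is 100.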