@@ -538,76 +538,75 @@ fn high_latency_handshake() {
538538 assert ! ( pair. server_conn_mut( server_ch) . using_ecn( ) ) ;
539539}
540540
541- // // Test to expose O(n²) behavior in SendBuffer with many small writes and delayed ACKs
542- // #[test]
543- // #[cfg(not(wasm_browser))]
544- // fn many_small_writes_delayed_acks() {
545- // let _guard = subscribe();
546- // let mut pair = Pair::default();
547-
548- // // Simulate high latency to delay ACKs
549- // pair.latency = Duration::from_millis(500);
550-
551- // let (client_ch, server_ch) = pair.connect();
552-
553- // let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap();
554-
555- // // Write many small messages (simulate fragmented buffer)
556- // const NUM_WRITES: usize = 100000;
557- // const WRITE_SIZE: usize = 10;
558-
559- // for i in 0..NUM_WRITES {
560- // let data = vec![i as u8; WRITE_SIZE];
561- // pair.client_send(client_ch, s).write(&data).unwrap();
562- // }
563-
564- // // The key insight: with high latency, the client will send many packets
565- // // before any ACKs arrive. This causes SendBuffer to accumulate many
566- // // unacked segments. We don't need to artificially limit driving -
567- // // the latency naturally creates the pathological state.
568-
569- // // The high latency means:
570- // // 1. Client sends many packets quickly (all 500 writes)
571- // // 2. ACKs are delayed by 500ms RTT
572- // // 3. SendBuffer accumulates many unacked segments
573- // // 4. When retransmission or late transmission happens, get() scans are expensive
574-
575- // let start = std::time::Instant::now();
576-
577- // // Drive to completion
578- // // With O(n²) get() behavior, this will be slow due to many segments
579- // pair.drive();
580-
581- // let elapsed = start.elapsed();
582-
583- // // With O(n²) behavior and 500 segments, this could take 10-100ms
584- // // With O(n) or O(1), should be < 5ms
585- // // This is a performance regression test
586- // info!(
587- // "Time to drive {} small writes with delayed ACKs: {:?}",
588- // NUM_WRITES, elapsed
589- // );
590-
591- // // Verify correctness - all data should be received
592- // let total_written = (NUM_WRITES * WRITE_SIZE) as u64;
593- // pair.client_send(client_ch, s).finish().unwrap();
594- // pair.drive();
595-
596- // let mut recv = pair.server_recv(server_ch, s);
597- // let mut chunks = recv.read(false).unwrap();
598- // let mut received = 0;
599-
600- // while let Ok(Some(chunk)) = chunks.next(usize::MAX) {
601- // received += chunk.bytes.len();
602- // }
603- // let _ = chunks.finalize();
604-
605- // assert_eq!(received, total_written as usize);
606-
607- // // This test exposes the pathology but doesn't strictly assert on timing
608- // // because timing tests are flaky in CI. The println! shows the issue.
609- // // To properly test, we'd need to instrument SendBuffer::get() to count scans.
610- // }
// Test to expose O(n²) behavior in SendBuffer with many small writes and delayed ACKs.
//
// With high simulated latency, the client transmits many packets before any ACK
// arrives, so the SendBuffer accumulates a large number of unacked segments. A
// quadratic `get()` scan then makes `pair.drive()` measurably slow. Timing is
// logged rather than asserted, because timing assertions are flaky in CI.
#[test]
#[cfg(not(wasm_browser))] // uses std::time::Instant, which is unavailable in browser wasm
fn many_small_writes_delayed_acks() {
    let _guard = subscribe();
    let mut pair = Pair::default();

    // Simulate high latency to delay ACKs so unacked segments pile up.
    pair.latency = Duration::from_millis(500);

    let (client_ch, server_ch) = pair.connect();

    let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap();

    // Write many small messages (simulate a fragmented send buffer).
    const NUM_WRITES: usize = 100_000;
    const WRITE_SIZE: usize = 10;

    for i in 0..NUM_WRITES {
        let data = vec![i as u8; WRITE_SIZE];
        pair.client_send(client_ch, s).write(&data).unwrap();
    }

    // The key insight: with high latency, the client will send many packets
    // before any ACKs arrive. This causes SendBuffer to accumulate many
    // unacked segments. We don't need to artificially limit driving -
    // the latency naturally creates the pathological state.
    //
    // The high latency means:
    // 1. Client performs all NUM_WRITES writes before any ACK arrives
    // 2. ACKs are delayed by the 500ms RTT
    // 3. SendBuffer accumulates many unacked segments
    // 4. When retransmission or late transmission happens, get() scans are expensive

    let start = std::time::Instant::now();

    // Drive to completion.
    // With O(n²) get() behavior, this will be slow due to many segments.
    pair.drive();

    let elapsed = start.elapsed();

    // Performance regression signal only: with O(n²) behavior this is much
    // slower than with O(n) or O(1); see the note at the end of the test.
    info!(
        "Time to drive {} small writes with delayed ACKs: {:?}",
        NUM_WRITES, elapsed
    );

    // Verify correctness - all data should be received.
    let total_written = NUM_WRITES * WRITE_SIZE;
    pair.client_send(client_ch, s).finish().unwrap();
    pair.drive();

    let mut recv = pair.server_recv(server_ch, s);
    let mut chunks = recv.read(false).unwrap();
    let mut received = 0;

    while let Ok(Some(chunk)) = chunks.next(usize::MAX) {
        received += chunk.bytes.len();
    }
    let _ = chunks.finalize();

    assert_eq!(received, total_written);

    // This test exposes the pathology but doesn't strictly assert on timing
    // because timing tests are flaky in CI. The info! log shows the issue.
    // To properly test, we'd need to instrument SendBuffer::get() to count scans.
}
611610
612611#[ test]
613612fn zero_rtt_happypath ( ) {
0 commit comments