forked from anacrolix/go-libutp
-
Notifications
You must be signed in to change notification settings - Fork 1
/
utp_internal.cpp
3489 lines (2906 loc) · 106 KB
/
utp_internal.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
* Copyright (c) 2010-2013 BitTorrent, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h> // for UINT_MAX
#include <time.h>
#include "utp_types.h"
#include "utp_packedsockaddr.h"
#include "utp_internal.h"
#include "utp_hash.h"
// interval (ms) between periodic timeout sweeps — TODO confirm against the caller of check_timeouts
#define TIMEOUT_CHECK_INTERVAL 500
// number of bytes to increase max window size by, per RTT. This is
// scaled down linearly proportional to off_target. i.e. if all packets
// in one window have 0 delay, window size will increase by this number.
// Typically it's less. TCP increases one MSS per RTT, which is 1500
#define MAX_CWND_INCREASE_BYTES_PER_RTT 3000
// number of recent delay samples kept in DelayHist::cur_delay_hist
#define CUR_DELAY_SIZE 3
// experiments suggest that a clock skew of 10 ms per 325 seconds
// is not impossible. Reset delay_base every 13 minutes. The clock
// skew is dealt with by observing the delay base in the other
// direction, and adjusting our own upwards if the opposite direction
// delay base keeps going down
#define DELAY_BASE_HISTORY 13
#define MAX_WINDOW_DECAY 100 // ms
#define REORDER_BUFFER_SIZE 32
#define REORDER_BUFFER_MAX_SIZE 1024
// upper bound on packets in the send queue (enforced in UTPSocket::is_full)
#define OUTGOING_BUFFER_MAX_SIZE 1024
#define PACKET_SIZE 1435
// this is the minimum max_window value. It can never drop below this
#define MIN_WINDOW_SIZE 10
// if we receive 4 or more duplicate acks, we resend the packet
// that hasn't been acked yet
#define DUPLICATE_ACKS_BEFORE_RESEND 3
// Allow a reception window of at least 3 ack_nrs behind seq_nr
// A non-SYN packet with an ack_nr difference greater than this is
// considered suspicious and ignored
#define ACK_NR_ALLOWED_WINDOW DUPLICATE_ACKS_BEFORE_RESEND
#define RST_INFO_TIMEOUT 10000
#define RST_INFO_LIMIT 1000
// 29 seconds determined from measuring many home NAT devices
#define KEEPALIVE_INTERVAL 29000
// seq_nr/ack_nr are 16 bits and wrap; timestamps are 32 bits and wrap.
// These masks are used for the wrapping arithmetic on those fields.
#define SEQ_NR_MASK 0xFFFF
#define ACK_NR_MASK 0xFFFF
#define TIMESTAMP_MASK 0xFFFFFFFF
#define DIV_ROUND_UP(num, denom) ((num + denom - 1) / denom)
// The totals are derived from the following data:
// 45: IPv6 address including embedded IPv4 address
// 11: Scope Id
// 2: Brackets around IPv6 address when port is present
// 6: Port (including colon)
// 1: Terminating null byte
// NOTE(review): this is a single global scratch buffer shared by every
// logging call site via addrfmt(); concurrent use from multiple threads
// would clobber it — confirm the library is single-threaded per context.
char addrbuf[65];
// Format PackedSockAddr `x` into the char array `s`. `s` must be a real
// array (not a pointer) because sizeof(s) is taken.
#define addrfmt(x, s) x.fmt(s, sizeof(s))
#if (defined(__SVR4) && defined(__sun))
#pragma pack(1)
#else
#pragma pack(push,1)
#endif
// these packet sizes are including the uTP header which
// is either 20 or 23 bytes depending on version
// The *_BUCKET values are indices into context_stats._nraw_send; the
// matching non-bucket value is the inclusive upper size bound for that
// bucket (see utp_register_sent_packet).
#define PACKET_SIZE_EMPTY_BUCKET 0
#define PACKET_SIZE_EMPTY 23
#define PACKET_SIZE_SMALL_BUCKET 1
#define PACKET_SIZE_SMALL 373
#define PACKET_SIZE_MID_BUCKET 2
#define PACKET_SIZE_MID 723
#define PACKET_SIZE_BIG_BUCKET 3
#define PACKET_SIZE_BIG 1400
#define PACKET_SIZE_HUGE_BUCKET 4
// On-the-wire uTP version-1 header (packed; 1+1+2+4+4+4+2+2 = 20 bytes).
// Multi-byte fields use big-endian wrapper types (uint16_big/uint32_big).
struct PACKED_ATTRIBUTE PacketFormatV1 {
// packet_type (4 high bits)
// protocol version (4 low bits)
byte ver_type;
byte version() const { return ver_type & 0xf; }
byte type() const { return ver_type >> 4; }
void set_version(byte v) { ver_type = (ver_type & 0xf0) | (v & 0xf); }
void set_type(byte t) { ver_type = (ver_type & 0xf) | (t << 4); }
// Type of the first extension header
byte ext;
// connection ID
uint16_big connid;
// sender's local clock when the packet was sent, truncated to 32 bits
// of microseconds (stamped in UTPSocket::send_data)
uint32_big tv_usec;
// the delay measurement echoed back to the peer (copied from the
// socket's reply_micro in send_data)
uint32_big reply_micro;
// receive window size in bytes
uint32_big windowsize;
// Sequence number
uint16_big seq_nr;
// Acknowledgment number
uint16_big ack_nr;
};
// An ST_STATE packet carrying one selective-ACK extension: `acks` is a
// 4-byte bitmask of packets received out of order beyond ack_nr + 1
// (built in UTPSocket::send_ack).
struct PACKED_ATTRIBUTE PacketFormatAckV1 {
PacketFormatV1 pf;
// type of the next extension header (0 = none)
byte ext_next;
// length in bytes of this extension's data (always 4 here)
byte ext_len;
byte acks[4];
};
#if (defined(__SVR4) && defined(__sun))
#pragma pack(0)
#else
#pragma pack(pop)
#endif
// Packet types, carried in the high nibble of PacketFormatV1::ver_type.
enum {
ST_DATA = 0, // Data packet.
ST_FIN = 1, // Finalize the connection. This is the last packet.
ST_STATE = 2, // State packet. Used to transmit an ACK with no data.
ST_RESET = 3, // Terminate connection forcefully.
ST_SYN = 4, // Connect SYN
ST_NUM_STATES, // used for bounds checking
};
// Printable names for the ST_* packet types, indexed by the type value
// (used by debug logging in UTPSocket::send_data).
static const cstr flagnames[] = {
"ST_DATA","ST_FIN","ST_STATE","ST_RESET","ST_SYN"
};
// Connection lifecycle states (stored in UTPSocket::state).
enum CONN_STATE {
CS_UNINITIALIZED = 0,
CS_IDLE,
CS_SYN_SENT,
CS_SYN_RECV,
CS_CONNECTED,
CS_CONNECTED_FULL,
CS_RESET,
CS_DESTROY
};
// Printable names for CONN_STATE, indexed by the enum value.
// BUG FIX: the table previously contained a stale "DESTROY_DELAY" entry
// (for a state that no longer exists in CONN_STATE), shifting the names
// of CS_RESET and CS_DESTROY by one: CS_RESET printed as "DESTROY_DELAY"
// and CS_DESTROY printed as "RESET". The table now matches the enum.
static const cstr statenames[] = {
	"UNINITIALIZED", "IDLE", "SYN_SENT", "SYN_RECV", "CONNECTED", "CONNECTED_FULL", "RESET", "DESTROY"
};
// One queued outgoing packet. Instances are allocated with extra space
// so the 1-byte `data` tail actually holds the full serialized packet.
struct OutgoingPacket {
// total serialized size of `data`, in bytes (headers + payload)
size_t length;
// payload byte count — presumably length minus header size; confirm at write_outgoing_packet
size_t payload;
uint64 time_sent; // microseconds
// how many times this packet has been transmitted so far
uint transmissions:31;
// set when the packet timed out and must be retransmitted
bool need_resend:1;
// flexible-array-style tail: the serialized packet bytes
byte data[1];
};
// A power-of-two sized circular buffer of void* slots. Indices are
// free-running; the mask wraps them into the underlying storage.
struct SizableCircularBuffer {
	// Index mask. The capacity is always a power of two, so the number
	// of slots is mask + 1.
	size_t mask;
	// Backing storage for the slot pointers.
	void **elements;
	// Return the slot for index i (wrapped through the mask).
	void *get(size_t i) const
	{
		assert(elements);
		if (!elements) return NULL;
		return elements[i & mask];
	}
	// Store data into the slot for index i (wrapped through the mask).
	void put(size_t i, void *data)
	{
		assert(elements);
		elements[i & mask] = data;
	}
	void grow(size_t item, size_t index);
	// Expand the buffer when index would not fit under the current mask.
	void ensure_size(size_t item, size_t index)
	{
		if (index > mask) grow(item, index);
	}
	// Number of slots.
	size_t size() { return mask + 1; }
};
// Item contains the element we want to make space for
// index is the index in the list.
// Reallocate the circular buffer so that `index` fits, preserving the
// logical position of every stored element. `item` is the element index
// we want to make space for; `index` is its offset within the window of
// elements that must be kept.
void SizableCircularBuffer::grow(size_t item, size_t index)
{
	// Double the capacity (at least once) until index fits.
	size_t new_size = mask + 1;
	do {
		new_size *= 2;
	} while (index >= new_size);

	// Allocate zero-initialized storage and derive the new mask.
	void **fresh = (void**)calloc(new_size, sizeof(void*));
	const size_t new_mask = new_size - 1;

	// Re-home every element from the old buffer, keeping its logical
	// index (item - index + i) stable under the new mask.
	for (size_t i = 0; i <= mask; i++) {
		fresh[(item - index + i) & new_mask] = get(item - index + i);
	}

	// Install the new storage and release the old.
	mask = new_mask;
	free(elements);
	elements = fresh;
}
// compare if lhs is less than rhs, taking wrapping
// into account. if lhs is close to UINT_MAX and rhs
// is close to 0, lhs is assumed to have wrapped and
// considered smaller
// Compare lhs < rhs under modular (wrapping) arithmetic: if lhs sits
// near the top of the range and rhs near the bottom, lhs is assumed to
// have wrapped and is therefore treated as the smaller value.
bool wrapping_compare_less(uint32 lhs, uint32 rhs, uint32 mask)
{
	// How far we travel from lhs down to rhs (modulo the mask)...
	const uint32 downward = (lhs - rhs) & mask;
	// ...and how far from lhs up to rhs.
	const uint32 upward = (rhs - lhs) & mask;
	// lhs is "less" exactly when the upward walk is the shorter one.
	return upward < downward;
}
// Tracks one direction's delay measurements: a short ring of recent
// queuing-delay samples plus a longer history of base (minimum) delays,
// used to factor clock offset out of the raw one-way delay samples.
struct DelayHist {
uint32 delay_base;
// this is the history of delay samples,
// normalized by using the delay_base. These
// values are always greater than 0 and measures
// the queuing delay in microseconds
uint32 cur_delay_hist[CUR_DELAY_SIZE];
size_t cur_delay_idx;
// this is the history of delay_base. It's
// a number that doesn't have an absolute meaning
// only relative. It doesn't make sense to initialize
// it to anything other than values relative to
// what's been seen in the real world.
uint32 delay_base_hist[DELAY_BASE_HISTORY];
size_t delay_base_idx;
// the time when we last stepped the delay_base_idx
uint64 delay_base_time;
bool delay_base_initialized;
// Reset all history; current_ms seeds the base-rotation timer.
void clear(uint64 current_ms)
{
delay_base_initialized = false;
delay_base = 0;
cur_delay_idx = 0;
delay_base_idx = 0;
delay_base_time = current_ms;
for (size_t i = 0; i < CUR_DELAY_SIZE; i++) {
cur_delay_hist[i] = 0;
}
for (size_t i = 0; i < DELAY_BASE_HISTORY; i++) {
delay_base_hist[i] = 0;
}
}
// Shift the whole base-delay history (and delay_base) upward by offset.
void shift(const uint32 offset)
{
// the offset should never be "negative"
// assert(offset < 0x10000000);
// increase all of our base delays by this amount
// this is used to take clock skew into account
// by observing the other side's changes in its base_delay
for (size_t i = 0; i < DELAY_BASE_HISTORY; i++) {
delay_base_hist[i] += offset;
}
delay_base += offset;
}
// Record one raw delay sample (wrapping 32-bit microseconds) taken at
// current_ms: update the base-delay minimums and append the normalized
// queuing delay to the recent-sample ring.
void add_sample(const uint32 sample, uint64 current_ms)
{
// The two clocks (in the two peers) are assumed not to
// progress at the exact same rate. They are assumed to be
// drifting, which causes the delay samples to contain
// a systematic error, either they are under-
// estimated or over-estimated. This is why we update the
// delay_base every two minutes, to adjust for this.
// This means the values will keep drifting and eventually wrap.
// We can cross the wrapping boundary in two directions, either
// going up, crossing the highest value, or going down, crossing 0.
// if the delay_base is close to the max value and sample actually
// wrapped on the other end we would see something like this:
// delay_base = 0xffffff00, sample = 0x00000400
// sample - delay_base = 0x500 which is the correct difference
// if the delay_base is instead close to 0, and we got an even lower
// sample (that will eventually update the delay_base), we may see
// something like this:
// delay_base = 0x00000400, sample = 0xffffff00
// sample - delay_base = 0xfffffb00
// this needs to be interpreted as a negative number and the actual
// recorded delay should be 0.
// It is important that all arithmetic that assume wrapping
// is done with unsigned integers. Signed integers are not guaranteed
// to wrap the way unsigned integers do. At least GCC takes advantage
// of this relaxed rule and won't necessarily wrap signed ints.
// remove the clock offset and propagation delay.
// delay base is min of the sample and the current
// delay base. This min-operation is subject to wrapping
// and care needs to be taken to correctly choose the
// true minimum.
// specifically the problem case is when delay_base is very small
// and sample is very large (because it wrapped past zero), sample
// needs to be considered the smaller
if (!delay_base_initialized) {
// delay_base being 0 suggests that we haven't initialized
// it or its history with any real measurements yet. Initialize
// everything with this sample.
for (size_t i = 0; i < DELAY_BASE_HISTORY; i++) {
// if we don't have a value, set it to the current sample
delay_base_hist[i] = sample;
continue;
}
delay_base = sample;
delay_base_initialized = true;
}
if (wrapping_compare_less(sample, delay_base_hist[delay_base_idx], TIMESTAMP_MASK)) {
// sample is smaller than the current delay_base_hist entry
// update it
delay_base_hist[delay_base_idx] = sample;
}
// is sample lower than delay_base? If so, update delay_base
if (wrapping_compare_less(sample, delay_base, TIMESTAMP_MASK)) {
// sample is smaller than the current delay_base
// update it
delay_base = sample;
}
// this operation may wrap, and is supposed to
const uint32 delay = sample - delay_base;
// sanity check. If this is triggered, something fishy is going on
// it means the measured sample was greater than 32 seconds!
//assert(delay < 0x2000000);
cur_delay_hist[cur_delay_idx] = delay;
cur_delay_idx = (cur_delay_idx + 1) % CUR_DELAY_SIZE;
// once every minute
if (current_ms - delay_base_time > 60 * 1000) {
delay_base_time = current_ms;
delay_base_idx = (delay_base_idx + 1) % DELAY_BASE_HISTORY;
// clear up the new delay base history spot by initializing
// it to the current sample, then update it
delay_base_hist[delay_base_idx] = sample;
delay_base = delay_base_hist[0];
// Assign the lowest delay in the last 2 minutes to delay_base
for (size_t i = 0; i < DELAY_BASE_HISTORY; i++) {
if (wrapping_compare_less(delay_base_hist[i], delay_base, TIMESTAMP_MASK))
delay_base = delay_base_hist[i];
}
}
}
// Smallest of the recent normalized delay samples.
uint32 get_value()
{
uint32 value = UINT_MAX;
for (size_t i = 0; i < CUR_DELAY_SIZE; i++) {
value = min<uint32>(cur_delay_hist[i], value);
}
// value could be UINT_MAX if we have no samples yet...
return value;
}
};
// Per-connection state: sequence/ack bookkeeping, congestion control
// (delay histories, max_window, slow start), MTU discovery, and the
// inbound reorder / outbound send circular buffers.
struct UTPSocket {
~UTPSocket();
PackedSockAddr addr;
utp_context *ctx;
int ida; //for ack socket list
uint16 retransmit_count;
uint16 reorder_count;
byte duplicate_ack;
// the number of packets in the send queue. Packets that haven't
// yet been sent count as well as packets marked as needing resend
// the oldest un-acked packet in the send queue is seq_nr - cur_window_packets
uint16 cur_window_packets;
// how much of the window is used, number of bytes in-flight
// packets that have not yet been sent do not count, packets
// that are marked as needing to be re-sent (due to a timeout)
// don't count either
size_t cur_window;
// maximum window size, in bytes
size_t max_window;
// UTP_SNDBUF setting, in bytes
size_t opt_sndbuf;
// UTP_RCVBUF setting, in bytes
size_t opt_rcvbuf;
// this is the target delay, in microseconds
// for this socket. defaults to 100000.
size_t target_delay;
// Is a FIN packet in the reassembly buffer?
bool got_fin:1;
// Have we reached the FIN?
bool got_fin_reached:1;
// Have we sent our FIN?
bool fin_sent:1;
// Has our fin been ACKed?
bool fin_sent_acked:1;
// Reading is disabled
bool read_shutdown:1;
// User called utp_close()
bool close_requested:1;
// Timeout procedure
bool fast_timeout:1;
// max receive window for other end, in bytes
size_t max_window_user;
CONN_STATE state;
// TickCount when we last decayed window (wraps)
int64 last_rwin_decay;
// the sequence number of the FIN packet. This field is only set
// when we have received a FIN, and the flag field has the FIN flag set.
// it is used to know when it is safe to destroy the socket, we must have
// received all packets up to this sequence number first.
uint16 eof_pkt;
// All sequence numbers up to including this have been properly received
// by us
uint16 ack_nr;
// This is the sequence number for the next packet to be sent.
uint16 seq_nr;
uint16 timeout_seq_nr;
// This is the sequence number of the next packet we're allowed to
// do a fast resend with. This makes sure we only do a fast-resend
// once per packet. We can resend the packet with this sequence number
// or any later packet (with a higher sequence number).
uint16 fast_resend_seq_nr;
// the delay measurement we echo back to the peer in every outgoing
// packet header (see send_data)
uint32 reply_micro;
uint64 last_got_packet;
uint64 last_sent_packet;
uint64 last_measured_delay;
// timestamp of the last time the cwnd was full
// this is used to prevent the congestion window
// from growing when we're not sending at capacity
mutable uint64 last_maxed_out_window;
void *userdata;
// Round trip time
uint rtt;
// Round trip time variance
uint rtt_var;
// Round trip timeout
uint rto;
DelayHist rtt_hist;
uint retransmit_timeout;
// The RTO timer will timeout here.
uint64 rto_timeout;
// When the window size is set to zero, start this timer. It will send a new packet every 30secs.
uint64 zerowindow_time;
uint32 conn_seed;
// Connection ID for packets I receive
uint32 conn_id_recv;
// Connection ID for packets I send
uint32 conn_id_send;
// Last rcv window we advertised, in bytes
size_t last_rcv_win;
DelayHist our_hist;
DelayHist their_hist;
// extension bytes from SYN packet
byte extensions[8];
// MTU Discovery
// time when we should restart the MTU discovery
uint64 mtu_discover_time;
// ceiling and floor of binary search. last is the mtu size
// we're currently using
uint32 mtu_ceiling, mtu_floor, mtu_last;
// we only ever have a single probe in flight at any given time.
// this is the sequence number of that probe, and the size of
// that packet
uint32 mtu_probe_seq, mtu_probe_size;
// this is the average delay samples, as compared to the initial
// sample. It's averaged over 5 seconds
int32 average_delay;
// this is the sum of all the delay samples
// we've made recently. The important distinction
// of these samples is that they are all made compared
// to the initial sample, this is to deal with
// wrapping in a simple way.
int64 current_delay_sum;
// number of samples in current_delay_sum
int current_delay_samples;
// initialized to 0, set to the first raw delay sample
// each sample that's added to current_delay_sum
// is subtracted from the value first, to make it
// a delay relative to this sample
uint32 average_delay_base;
// the next time we should add an average delay
// sample into average_delay_hist
uint64 average_sample_time;
// the estimated clock drift between our computer
// and the endpoint computer. The unit is microseconds
// per 5 seconds
int32 clock_drift;
// just used for logging
int32 clock_drift_raw;
// inbound reorder buffer and outbound send queue
SizableCircularBuffer inbuf, outbuf;
#ifdef _DEBUG
// Public per-socket statistics, returned by utp_get_stats()
utp_socket_stats _stats;
#endif
// true if we're in slow-start (exponential growth) phase
bool slow_start;
// the slow-start threshold, in bytes
size_t ssthresh;
// printf-style debug logging, prefixed with this socket's address and
// receive connection id. NOTE(review): formats the address into the
// global addrbuf scratch buffer — not reentrant.
void log(int level, char const *fmt, ...)
{
va_list va;
char buf[4096], buf2[4096];
// don't bother with vsnprintf() etc calls if we're not going to log.
if (!ctx->would_log(level)) {
return;
}
va_start(va, fmt);
vsnprintf(buf, 4096, fmt, va);
va_end(va);
buf[4095] = '\0';
snprintf(buf2, 4096, "%p %s %06u %s", this, addrfmt(addr, addrbuf), conn_id_recv, buf);
buf2[4095] = '\0';
ctx->log_unchecked(this, buf2);
}
void schedule_ack();
// called every time mtu_floor or mtu_ceiling are adjusted
void mtu_search_update();
void mtu_reset();
// Calculates the current receive window
size_t get_rcv_window()
{
// Trim window down according to what's already in buffer.
const size_t numbuf = utp_call_get_read_buffer_size(this->ctx, this);
assert((int)numbuf >= 0);
return opt_rcvbuf > numbuf ? opt_rcvbuf - numbuf : 0;
}
// Test if we're ready to decay max_window
// XXX this breaks when spaced by > INT_MAX/2, which is 49
// days; the failure mode in that case is we do an extra decay
// or fail to do one when we really shouldn't.
bool can_decay_win(int64 msec) const
{
return (msec - last_rwin_decay) >= MAX_WINDOW_DECAY;
}
// If enough time has passed since the last decay, halve max_window
// (clamped to MIN_WINDOW_SIZE) and leave slow start.
void maybe_decay_win(uint64 current_ms)
{
if (can_decay_win(current_ms)) {
// TCP uses 0.5
max_window = (size_t)(max_window * .5);
last_rwin_decay = current_ms;
if (max_window < MIN_WINDOW_SIZE)
max_window = MIN_WINDOW_SIZE;
slow_start = false;
ssthresh = max_window;
}
}
size_t get_header_size() const
{
return sizeof(PacketFormatV1);
}
// MTU of the underlying UDP path, as reported by the user callback.
size_t get_udp_mtu()
{
socklen_t len;
SOCKADDR_STORAGE sa = addr.get_sockaddr_storage(&len);
return utp_call_get_udp_mtu(this->ctx, this, (const struct sockaddr *)&sa, len);
}
// Per-packet UDP/IP overhead, as reported by the user callback.
size_t get_udp_overhead()
{
socklen_t len;
SOCKADDR_STORAGE sa = addr.get_sockaddr_storage(&len);
return utp_call_get_udp_overhead(this->ctx, this, (const struct sockaddr *)&sa, len);
}
// Total per-packet overhead: UDP/IP plus the uTP header.
size_t get_overhead()
{
return get_udp_overhead() + get_header_size();
}
void send_data(byte* b, size_t length, bandwidth_type_t type, uint32 flags = 0);
void send_ack(bool synack = false);
void send_keep_alive();
static void send_rst(utp_context *ctx,
const PackedSockAddr &addr, uint32 conn_id_send,
uint16 ack_nr, uint16 seq_nr);
void send_packet(OutgoingPacket *pkt);
bool is_full(int bytes = -1);
bool flush_packets();
void write_outgoing_packet(size_t payload, uint flags, struct utp_iovec *iovec, size_t num_iovecs);
#ifdef _DEBUG
void check_invariant();
#endif
void check_timeouts();
int ack_packet(uint16 seq);
size_t selective_ack_bytes(uint base, const byte* mask, byte len, int64& min_rtt);
void selective_ack(uint base, const byte *mask, byte len);
void apply_ccontrol(size_t bytes_acked, uint32 actual_delay, int64 min_rtt);
size_t get_packet_size() const;
};
// Remove `conn` from its context's pending-ACK list via swap-with-last,
// keeping every socket's `ida` slot index in sync. A socket whose ida is
// -1 is not in the list, so this is a no-op for it.
void removeSocketFromAckList(UTPSocket *conn)
{
	// not scheduled for an ACK — nothing to remove
	if (conn->ida < 0) return;

	UTPSocket *tail = conn->ctx->ack_sockets[conn->ctx->ack_sockets.GetCount() - 1];
	assert(tail->ida < (int)(conn->ctx->ack_sockets.GetCount()));
	assert(conn->ctx->ack_sockets[tail->ida] == tail);

	// move the last entry into the slot being vacated
	tail->ida = conn->ida;
	conn->ctx->ack_sockets[conn->ida] = tail;
	conn->ida = -1;

	// shrink the list by one
	conn->ctx->ack_sockets.SetCount(conn->ctx->ack_sockets.GetCount() - 1);
}
// Classify an outgoing datagram by total size and bump the matching
// per-context send counter (buckets defined by PACKET_SIZE_*).
static void utp_register_sent_packet(utp_context *ctx, size_t length)
{
	int bucket;
	if (length <= PACKET_SIZE_EMPTY) {
		bucket = PACKET_SIZE_EMPTY_BUCKET;
	} else if (length <= PACKET_SIZE_SMALL) {
		bucket = PACKET_SIZE_SMALL_BUCKET;
	} else if (length <= PACKET_SIZE_MID) {
		bucket = PACKET_SIZE_MID_BUCKET;
	} else if (length <= PACKET_SIZE_BIG) {
		bucket = PACKET_SIZE_BIG_BUCKET;
	} else {
		bucket = PACKET_SIZE_HUGE_BUCKET;
	}
	ctx->context_stats._nraw_send[bucket]++;
}
// Serialize `addr`, record the datagram in the context's size-bucket
// statistics, then hand the bytes to the user's sendto callback.
void send_to_addr(utp_context *ctx, utp_socket *socket, const byte *p, size_t len, const PackedSockAddr &addr, int flags = 0)
{
	socklen_t destlen;
	SOCKADDR_STORAGE dest = addr.get_sockaddr_storage(&destlen);
	utp_register_sent_packet(ctx, len);
	utp_call_sendto(ctx, socket, p, len, (const struct sockaddr *)&dest, destlen, flags);
}
// Queue this socket in the context's pending-ACK list, unless it is
// already there (ida records its slot; -1 means not queued).
void UTPSocket::schedule_ack()
{
	if (ida != -1) {
#if UTP_DEBUG_LOGGING
		log(UTP_LOG_DEBUG, "schedule_ack: already in list");
#endif
		return;
	}
#if UTP_DEBUG_LOGGING
	log(UTP_LOG_DEBUG, "schedule_ack");
#endif
	ida = ctx->ack_sockets.Append(this);
}
// Stamp the packet header with the current time and echoed delay, update
// bandwidth/overhead statistics, transmit the buffer, and drop any pending
// scheduled ACK (presumably redundant since every outgoing packet carries
// the latest ack_nr — confirm against the ack-list users).
void UTPSocket::send_data(byte* b, size_t length, bandwidth_type_t type, uint32 flags)
{
// time stamp this packet with local time, the stamp goes into
// the header of every packet at the 8th byte for 8 bytes :
// two integers, check packet.h for more
uint64 time = utp_call_get_microseconds(ctx, this);
PacketFormatV1* b1 = (PacketFormatV1*)b;
b1->tv_usec = (uint32)time;
b1->reply_micro = reply_micro;
last_sent_packet = ctx->current_ms;
#ifdef _DEBUG
_stats.nbytes_xmit += length;
++_stats.nxmit;
#endif
if (ctx->callbacks[UTP_ON_OVERHEAD_STATISTICS]) {
size_t n;
if (type == payload_bandwidth) {
// if this packet carries payload, just
// count the header as overhead
type = header_overhead;
n = get_overhead();
} else {
n = length + get_udp_overhead();
}
utp_call_on_overhead_statistics(ctx, this, true, n, type);
}
#if UTP_DEBUG_LOGGING
int flags2 = b1->type();
uint16 seq_nr = b1->seq_nr;
uint16 ack_nr = b1->ack_nr;
log(UTP_LOG_DEBUG, "send %s len:%u id:%u timestamp:" I64u " reply_micro:%u flags:%s seq_nr:%u ack_nr:%u",
addrfmt(addr, addrbuf), (uint)length, conn_id_send, time, reply_micro, flagnames[flags2],
seq_nr, ack_nr);
#endif
send_to_addr(ctx, this, b, length, addr, flags);
removeSocketFromAckList(this);
}
// Send an ST_STATE (ACK) packet. When packets have been received out of
// order (reorder_count > 0), attach a selective-ACK extension: a 4-byte
// bitmask where bit i means packet ack_nr + i + 2 is already buffered.
void UTPSocket::send_ack(bool synack)
{
PacketFormatAckV1 pfa;
zeromem(&pfa);
size_t len;
last_rcv_win = get_rcv_window();
pfa.pf.set_version(1);
pfa.pf.set_type(ST_STATE);
pfa.pf.ext = 0;
pfa.pf.connid = conn_id_send;
pfa.pf.ack_nr = ack_nr;
pfa.pf.seq_nr = seq_nr;
pfa.pf.windowsize = (uint32)last_rcv_win;
len = sizeof(PacketFormatV1);
// we never need to send EACK for connections
// that are shutting down
if (reorder_count != 0 && !got_fin_reached) {
// if reorder count > 0, send an EACK.
// reorder count should always be 0
// for synacks, so this should not be
// as synack
assert(!synack);
pfa.pf.ext = 1;
pfa.ext_next = 0;
pfa.ext_len = 4;
uint m = 0;
// reorder count should only be non-zero
// if the packet ack_nr + 1 has not yet
// been received
assert(inbuf.get(ack_nr + 1) == NULL);
size_t window = min<size_t>(14+16, inbuf.size());
// Generate bit mask of segments received.
for (size_t i = 0; i < window; i++) {
if (inbuf.get(ack_nr + i + 2) != NULL) {
m |= 1 << i;
#if UTP_DEBUG_LOGGING
log(UTP_LOG_DEBUG, "EACK packet [%u]", ack_nr + i + 2);
#endif
}
}
// serialize the mask little-endian into the extension bytes
pfa.acks[0] = (byte)m;
pfa.acks[1] = (byte)(m >> 8);
pfa.acks[2] = (byte)(m >> 16);
pfa.acks[3] = (byte)(m >> 24);
// 2 bytes for ext_next/ext_len plus the 4 mask bytes
len += 4 + 2;
#if UTP_DEBUG_LOGGING
log(UTP_LOG_DEBUG, "Sending EACK %u [%u] bits:[%032b]", ack_nr, conn_id_send, m);
#endif
} else {
#if UTP_DEBUG_LOGGING
log(UTP_LOG_DEBUG, "Sending ACK %u [%u]", ack_nr, conn_id_send);
#endif
}
send_data((byte*)&pfa, len, ack_overhead);
removeSocketFromAckList(this);
}
void UTPSocket::send_keep_alive()
{
ack_nr--;
#if UTP_DEBUG_LOGGING
log(UTP_LOG_DEBUG, "Sending KeepAlive ACK %u [%u]", ack_nr, conn_id_send);
#endif
send_ack();
ack_nr++;
}
// Build and transmit a bare ST_RESET header (no payload, zero window)
// for the given connection id and sequence numbers. Static: used to
// reset connections that may have no UTPSocket object.
void UTPSocket::send_rst(utp_context *ctx,
		const PackedSockAddr &addr, uint32 conn_id_send, uint16 ack_nr, uint16 seq_nr)
{
	PacketFormatV1 pkt;
	zeromem(&pkt);
	pkt.set_version(1);
	pkt.set_type(ST_RESET);
	pkt.ext = 0;
	pkt.connid = conn_id_send;
	pkt.ack_nr = ack_nr;
	pkt.seq_nr = seq_nr;
	pkt.windowsize = 0;
	const size_t len = sizeof(PacketFormatV1);
	// no socket is associated with a raw RST
	send_to_addr(ctx, NULL, (const byte*)&pkt, len, addr);
}
// Transmit one queued packet: charge it against the in-flight window on
// first send (or resend), refresh its ack_nr and timestamp, and opportunistically
// use it as an MTU probe (sent with DONTFRAG) when its size falls inside
// the current MTU binary-search range.
void UTPSocket::send_packet(OutgoingPacket *pkt)
{
// only count against the quota the first time we
// send the packet. Don't enforce quota when closing
// a socket. Only enforce the quota when we're sending
// at slow rates (max window < packet size)
//size_t max_send = min(max_window, opt_sndbuf, max_window_user);
time_t cur_time = utp_call_get_milliseconds(this->ctx, this);
if (pkt->transmissions == 0 || pkt->need_resend) {
cur_window += pkt->payload;
}
pkt->need_resend = false;
// refresh the ack number in the serialized header to the latest value
PacketFormatV1* p1 = (PacketFormatV1*)pkt->data;
p1->ack_nr = ack_nr;
pkt->time_sent = utp_call_get_microseconds(this->ctx, this);
//socklen_t salen;
//SOCKADDR_STORAGE sa = addr.get_sockaddr_storage(&salen);
bool use_as_mtu_probe = false;
// TODO: this is subject to nasty wrapping issues! Below as well
if (mtu_discover_time < (uint64)cur_time) {
// it's time to reset our MTU assupmtions
// and trigger a new search
mtu_reset();
}
// don't use packets that are larger then mtu_ceiling
// as probes, since they were probably used as probes
// already and failed, now we need it to fragment
// just to get it through
// if seq_nr == 1, the probe would end up being 0
// which is a magic number representing no-probe
// that why we don't send a probe for a packet with
// sequence number 0
if (mtu_floor < mtu_ceiling
&& pkt->length > mtu_floor
&& pkt->length <= mtu_ceiling
&& mtu_probe_seq == 0
&& seq_nr != 1
&& pkt->transmissions == 0) {
// we've already incremented seq_nr
// for this packet
mtu_probe_seq = (seq_nr - 1) & ACK_NR_MASK;
mtu_probe_size = pkt->length;
assert(pkt->length >= mtu_floor);
assert(pkt->length <= mtu_ceiling);
use_as_mtu_probe = true;
log(UTP_LOG_MTU, "MTU [PROBE] floor:%d ceiling:%d current:%d"
, mtu_floor, mtu_ceiling, mtu_probe_size);
}
pkt->transmissions++;
// classify the bandwidth: connect traffic while the SYN is outstanding,
// payload on first transmission, retransmit overhead afterwards
send_data((byte*)pkt->data, pkt->length,
(state == CS_SYN_SENT) ? connect_overhead
: (pkt->transmissions == 1) ? payload_bandwidth
: retransmit_overhead, use_as_mtu_probe ? UTP_UDP_DONTFRAG : 0);
}
// Returns true when the socket cannot currently accept `bytes` more
// payload bytes: either the send queue holds its maximum number of
// packets, or adding `bytes` would exceed the effective send window
// (min of congestion window, UTP_SNDBUF and the peer's receive window).
// bytes < 0 means "a full packet"; values above the packet size are
// clamped. Stamps last_maxed_out_window whenever it answers true, so
// the congestion controller knows we were limited by the window.
bool UTPSocket::is_full(int bytes)
{
	size_t packet_size = get_packet_size();
	if (bytes < 0) bytes = packet_size;
	else if (bytes > (int)packet_size) bytes = (int)packet_size;
	size_t max_send = min(max_window, opt_sndbuf, max_window_user);
	// subtract one to save space for the FIN packet
	if (cur_window_packets >= OUTGOING_BUFFER_MAX_SIZE - 1) {
#if UTP_DEBUG_LOGGING
		// BUG FIX: this message used to say "is_full:false" even though
		// this branch returns true.
		log(UTP_LOG_DEBUG, "is_full:true cur_window_packets:%d MAX:%d", cur_window_packets, OUTGOING_BUFFER_MAX_SIZE - 1);
#endif
		last_maxed_out_window = ctx->current_ms;
		return true;
	}
#if UTP_DEBUG_LOGGING
	log(UTP_LOG_DEBUG, "is_full:%s. cur_window:%u pkt:%u max:%u cur_window_packets:%u max_window:%u"
		, (cur_window + bytes > max_send) ? "true" : "false"
		, cur_window, bytes, max_send, cur_window_packets
		, max_window);
#endif
	if (cur_window + bytes > max_send) {
		last_maxed_out_window = ctx->current_ms;
		return true;
	}
	return false;
}
// Walk the send queue from the oldest un-acked packet up to seq_nr and
// transmit everything that is unsent or marked for resend, until the
// window fills. Returns true if it stopped because the window was full.
bool UTPSocket::flush_packets()
{
size_t packet_size = get_packet_size();
// send packets that are waiting on the pacer to be sent
// i has to be an unsigned 16 bit counter to wrap correctly
// signed types are not guaranteed to wrap the way you expect
for (uint16 i = seq_nr - cur_window_packets; i != seq_nr; ++i) {
OutgoingPacket *pkt = (OutgoingPacket*)outbuf.get(i);
// skip holes and packets already in flight that don't need a resend
if (pkt == 0 || (pkt->transmissions > 0 && pkt->need_resend == false)) continue;
// have we run out of quota?
if (is_full()) return true;
// Nagle check
// don't send the last packet if we have one packet in-flight
// and the current packet is still smaller than packet_size.
if (i != ((seq_nr - 1) & ACK_NR_MASK) ||
cur_window_packets == 1 ||
pkt->payload >= packet_size) {
send_packet(pkt);
}
}
return false;
}
// @payload: number of bytes to send
// @flags: either ST_DATA, or ST_FIN
// @iovec: base address of iovec array
// @num_iovecs: number of iovecs in array
void UTPSocket::write_outgoing_packet(size_t payload, uint flags, struct utp_iovec *iovec, size_t num_iovecs)
{
// Setup initial timeout timer
if (cur_window_packets == 0) {
retransmit_timeout = rto;
rto_timeout = ctx->current_ms + retransmit_timeout;
assert(cur_window == 0);
}