 
 #include "mstats.h"
 
+#include <sys/select.h>
+#include <sys/time.h>
+
 counter_type link_read_bytes_global;  /* GLOBAL */
 counter_type link_write_bytes_global; /* GLOBAL */
 
@@ -78,6 +81,15 @@ show_wait_status(struct context *c)
 
 #endif /* ifdef ENABLE_DEBUG */
 
+bool check_bulk_mode(struct context *c)
+{
+    if ((c->c2.frame.bulk_size > 0) && (c->c1.tuntap != NULL) && (c->c2.buffers != NULL))
+    {
+        return true;
+    }
+    return false;
+}
+
 static void
 check_tls_errors_co(struct context *c)
 {
@@ -605,6 +617,21 @@ buffer_turnover(const uint8_t *orig_buf, struct buffer *dest_stub, struct buffer
     }
 }
 
+uint8_t *buff_prepsize(uint8_t *buff, int *size)
+{
+    buff[0] = ((*size >> 8) & 0xff);
+    buff[1] = ((*size >> 0) & 0xff);
+    buff += 2;
+    return buff;
+}
+
+uint8_t *buff_postsize(uint8_t *buff, int *size)
+{
+    *size = ((buff[0] << 8) + (buff[1] << 0));
+    buff += 2;
+    return buff;
+}
+
 /*
  * Compress, fragment, encrypt and HMAC-sign an outgoing packet.
  * Input: c->c2.buf
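
(Note: buff_prepsize() and buff_postsize() implement a 2-byte big-endian length prefix in front of each packet that gets packed into a bulk buffer. A minimal standalone sketch of the same framing round trip, in plain C with illustrative names only — pack_record()/unpack_record() are not part of this patch:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Write a 16-bit big-endian length header, then the payload. */
static uint8_t *pack_record(uint8_t *out, const uint8_t *pkt, int len)
{
    out[0] = (len >> 8) & 0xff;
    out[1] = len & 0xff;
    memcpy(out + 2, pkt, len);
    return out + 2 + len;
}

/* Read the header back and return a pointer to the payload. */
static const uint8_t *unpack_record(const uint8_t *in, int *len)
{
    *len = (in[0] << 8) | in[1];
    return in + 2;
}

int main(void)
{
    uint8_t buf[64];
    const uint8_t pkt[] = { 0xde, 0xad, 0xbe, 0xef };
    pack_record(buf, pkt, sizeof(pkt));

    int len = 0;
    const uint8_t *payload = unpack_record(buf, &len);
    assert(len == (int)sizeof(pkt));
    assert(memcmp(payload, pkt, len) == 0);
    return 0;
}

Keeping the header at a fixed 2 bytes is what lets the write path later advance through the aggregate by plen + 2 per record.)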
@@ -1031,6 +1058,7 @@ process_incoming_link_part1(struct context *c, struct link_socket_info *lsi, boo
         fprintf(stderr, "R");
     }
 #endif
+
     msg(D_LINK_RW, "%s READ [%d] from %s: %s", proto2ascii(lsi->proto, lsi->af, true),
         BLEN(&c->c2.buf), print_link_socket_actual(&c->c2.from, &gc), PROTO_DUMP(&c->c2.buf, &gc));
 
@@ -1211,6 +1239,26 @@ process_incoming_link_part2(struct context *c, struct link_socket_info *lsi,
     }
 }
 
+void process_incoming_link_part3(struct context *c)
+{
+    int leng = BLEN(&c->c2.buf);
+    if (leng > 0)
+    {
+        if (check_bulk_mode(c))
+        {
+            c->c2.buffers->send_tun_max.offset = TUN_BAT_OFF;
+            c->c2.buffers->send_tun_max.len = leng;
+            bcopy(BPTR(&c->c2.buf), BPTR(&c->c2.buffers->send_tun_max), leng);
+            c->c2.to_tun.offset += 2;
+            c->c2.buf.offset += 2;
+        }
+    }
+    else
+    {
+        buf_reset(&c->c2.to_tun);
+    }
+}
+
 static void
 process_incoming_link(struct context *c, struct link_socket *sock)
 {
@@ -1221,6 +1269,7 @@ process_incoming_link(struct context *c, struct link_socket *sock)
 
     process_incoming_link_part1(c, lsi, false);
     process_incoming_link_part2(c, lsi, orig_buf);
+    process_incoming_link_part3(c);
 
     perf_pop();
 }
@@ -1321,7 +1370,7 @@ process_incoming_dco(struct context *c)
  */
 
 void
-read_incoming_tun(struct context *c)
+read_incoming_tun_part2(struct context *c)
 {
     /*
      * Setup for read() call on TUN/TAP device.
@@ -1382,6 +1431,54 @@ read_incoming_tun(struct context *c)
     perf_pop();
 }
 
+void read_incoming_tun_part3(struct context *c)
+{
+    fd_set rfds;
+    struct timeval timo;
+    if (check_bulk_mode(c))
+    {
+        int plen = 0, pidx = -1;
+        int fdno = c->c1.tuntap->fd;
+        for (int x = 0; x < TUN_BAT_MAX; ++x)
+        {
+            int leng = plen, indx = (pidx + 1);
+            if (indx >= TUN_BAT_MIN) { break; }
+            if (leng < 1)
+            {
+                FD_ZERO(&rfds);
+                FD_SET(fdno, &rfds);
+                timo.tv_sec = 0;
+                timo.tv_usec = 0;
+                select(fdno + 1, &rfds, NULL, NULL, &timo);
+                if (FD_ISSET(fdno, &rfds))
+                {
+                    read_incoming_tun_part2(c);
+                    plen = BLEN(&c->c2.buf);
+                } else { break; }
+            }
+            leng = plen;
+            if (leng > 0)
+            {
+                c->c2.buffers->read_tun_bufs[indx].offset = TUN_BAT_OFF;
+                c->c2.buffers->read_tun_bufs[indx].len = leng;
+                bcopy(BPTR(&c->c2.buf), BPTR(&c->c2.buffers->read_tun_bufs[indx]), leng);
+                c->c2.bufs[indx] = c->c2.buffers->read_tun_bufs[indx];
+                pidx = indx;
+            } else { break; }
+            plen = 0;
+        }
+        c->c2.buffers->bulk_indx = pidx;
+    }
+}
+
+void read_incoming_tun(struct context *c)
+{
+    if (c->c2.frame.bulk_size <= 0) {
+        read_incoming_tun_part2(c);
+    }
+    read_incoming_tun_part3(c);
+}
+
 /**
  * Drops UDP packets which OS decided to route via tun.
  *
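
(Note: read_incoming_tun_part3() only keeps reading while a zero-timeout select() reports the TUN fd readable, so a batch is bounded by TUN_BAT_MIN entries and never blocks waiting for more traffic. A standalone sketch of that poll-then-read pattern, reading from stdin instead of a TUN device; fd_readable_now() is an illustrative helper, not part of the patch:

#include <stdio.h>
#include <unistd.h>
#include <sys/select.h>
#include <sys/time.h>

/* Return 1 if fd has data available right now, 0 otherwise (no blocking). */
static int fd_readable_now(int fd)
{
    fd_set rfds;
    struct timeval timo = { 0, 0 };   /* zero timeout: poll, don't wait */
    FD_ZERO(&rfds);
    FD_SET(fd, &rfds);
    return select(fd + 1, &rfds, NULL, NULL, &timo) > 0 && FD_ISSET(fd, &rfds);
}

int main(void)
{
    char buf[2048];
    int batched = 0;

    /* Drain at most 4 reads per pass, stopping as soon as nothing is pending. */
    while (batched < 4 && fd_readable_now(STDIN_FILENO))
    {
        ssize_t n = read(STDIN_FILENO, buf, sizeof(buf));
        if (n <= 0)
        {
            break;
        }
        batched++;
    }
    printf("drained %d reads in this batch\n", batched);
    return 0;
})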
@@ -1469,7 +1566,7 @@ drop_if_recursive_routing(struct context *c, struct buffer *buf)
  */
 
 void
-process_incoming_tun(struct context *c, struct link_socket *out_sock)
+process_incoming_tun_part2(struct context *c, struct link_socket *out_sock)
 {
     struct gc_arena gc = gc_new();
 
@@ -1488,7 +1585,7 @@ process_incoming_tun(struct context *c, struct link_socket *out_sock)
 #endif
 
     /* Show packet content */
-    dmsg(D_TUN_RW, "TUN READ [%d]", BLEN(&c->c2.buf));
+    dmsg(D_TUN_RW, "TUN READ [%d] [%d]", BLEN(&c->c2.buf), c->c2.frame.buf.payload_size);
 
     if (c->c2.buf.len > 0)
     {
@@ -1512,7 +1609,9 @@ process_incoming_tun(struct context *c, struct link_socket *out_sock)
     }
     if (c->c2.buf.len > 0)
     {
+        if ((c->c2.buffers == NULL) || (c->c2.buffers->bulk_flag != -2)) {
         encrypt_sign(c, true);
+        }
     }
     else
     {
@@ -1522,6 +1621,65 @@ process_incoming_tun(struct context *c, struct link_socket *out_sock)
     gc_free(&gc);
 }
 
+void process_incoming_tun_part3(struct context *c, struct link_socket *out_sock)
+{
+    if (c->c2.buf.len > 0)
+    {
+        if (check_bulk_mode(c))
+        {
+            c->c2.buffers->bulk_flag = -2;
+            c->c2.buffers->read_tun_max.offset = TUN_BAT_OFF;
+            c->c2.buffers->read_tun_max.len = 0;
+            uint8_t *temp = BPTR(&c->c2.buffers->read_tun_max);
+            int plen = 0, fdno = c->c1.tuntap->fd;
+            int maxl = 0, leng = (c->c2.buffers->bulk_indx + 1);
+            if ((fdno > 0) && (leng > 0))
+            {
+                for (int x = 0; x < leng; ++x)
+                {
+                    c->c2.buf = c->c2.bufs[x];
+                    process_incoming_tun_part2(c, out_sock);
+                    if (BLEN(&c->c2.buf) < 1)
+                    {
+                        c->c2.bufs[x].len = 0;
+                    }
+                }
+                for (int x = 0; x < leng; ++x)
+                {
+                    plen = c->c2.bufs[x].len;
+                    if (plen > 0)
+                    {
+                        temp = buff_prepsize(temp, &plen);
+                        bcopy(BPTR(&c->c2.bufs[x]), temp, plen);
+                        temp += plen; maxl += (plen + 2);
+                    }
+                }
+                if (maxl > 0)
+                {
+                    c->c2.buffers->read_tun_max.offset = TUN_BAT_OFF;
+                    c->c2.buffers->read_tun_max.len = maxl;
+                    c->c2.buf = c->c2.buffers->read_tun_max;
+                    encrypt_sign(c, true);
+                }
+            }
+            c->c2.buffers->bulk_indx = -1;
+            c->c2.buffers->bulk_flag = -1;
+        }
+    }
+    else
+    {
+        buf_reset(&c->c2.to_link);
+    }
+}
+
+void process_incoming_tun(struct context *c, struct link_socket *out_sock)
+{
+    if (c->c2.frame.bulk_size <= 0) {
+        process_incoming_tun_part2(c, out_sock);
+    }
+    process_incoming_tun_part3(c, out_sock);
+}
+
 /**
  * Forges a IPv6 ICMP packet with a no route to host error code from the
  * IPv6 packet in buf and sends it directly back to the client via the tun
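
(Note: the incoming-TUN path above packs every batched packet behind a 2-byte length header into read_tun_max and encrypts the aggregate once; process_outgoing_tun_part3() further down walks the same framing in reverse. A self-contained sketch of that pack-then-split round trip, with a hypothetical AGG_MAX buffer and agg_append() helper that are not part of the patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AGG_MAX 4096

/* Append one packet as: 2-byte big-endian length, then payload. */
static int agg_append(uint8_t *agg, int used, const uint8_t *pkt, int len)
{
    if (used + 2 + len > AGG_MAX)
    {
        return used;              /* no room: leave the aggregate unchanged */
    }
    agg[used] = (len >> 8) & 0xff;
    agg[used + 1] = len & 0xff;
    memcpy(agg + used + 2, pkt, len);
    return used + 2 + len;
}

int main(void)
{
    uint8_t agg[AGG_MAX];
    int used = 0;

    const uint8_t a[] = "first";
    const uint8_t b[] = "second packet";
    used = agg_append(agg, used, a, sizeof(a));
    used = agg_append(agg, used, b, sizeof(b));

    /* Split the aggregate back into individual packets. */
    int off = 0, count = 0;
    while (off + 2 <= used)
    {
        int len = (agg[off] << 8) | agg[off + 1];
        off += 2;
        if (len <= 0 || off + len > used)
        {
            break;
        }
        printf("packet %d: %d bytes\n", ++count, len);
        off += len;
    }
    assert(count == 2);
    return 0;
})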
@@ -1748,7 +1906,7 @@ process_outgoing_link(struct context *c, struct link_socket *sock)
 
     perf_push(PERF_PROC_OUT_LINK);
 
-    if (c->c2.to_link.len > 0 && c->c2.to_link.len <= c->c2.frame.buf.payload_size)
+    if (c->c2.to_link.len > 0 && (c->c2.to_link.len <= c->c2.frame.buf.payload_size || c->c2.frame.bulk_size > 0))
     {
         /*
          * Setup for call to send/sendto which will send
@@ -1793,6 +1951,7 @@ process_outgoing_link(struct context *c, struct link_socket *sock)
             fprintf(stderr, "W");
         }
 #endif
+
         msg(D_LINK_RW, "%s WRITE [%d] to %s: %s",
             proto2ascii(sock->info.proto, sock->info.af, true), BLEN(&c->c2.to_link),
             print_link_socket_actual(c->c2.to_link_addr, &gc), PROTO_DUMP(&c->c2.to_link, &gc));
@@ -1892,7 +2051,7 @@ process_outgoing_link(struct context *c, struct link_socket *sock)
  */
 
 void
-process_outgoing_tun(struct context *c, struct link_socket *in_sock)
+process_outgoing_tun_part2(struct context *c, struct link_socket *in_sock)
 {
     /*
      * Set up for write() call to TUN/TAP
@@ -1925,7 +2084,8 @@ process_outgoing_tun(struct context *c, struct link_socket *in_sock)
         fprintf(stderr, "w");
     }
 #endif
-    dmsg(D_TUN_RW, "TUN WRITE [%d]", BLEN(&c->c2.to_tun));
+
+    dmsg(D_TUN_RW, "TUN WRITE [%d] [%d]", BLEN(&c->c2.to_tun), c->c2.frame.buf.payload_size);
 
 #ifdef PACKET_TRUNCATION_CHECK
     ipv4_packet_size_verify(BPTR(&c->c2.to_tun), BLEN(&c->c2.to_tun), TUNNEL_TYPE(c->c1.tuntap),
@@ -1981,6 +2141,38 @@ process_outgoing_tun(struct context *c, struct link_socket *in_sock)
     perf_pop();
 }
 
+void process_outgoing_tun_part3(struct context *c, struct link_socket *in_sock)
+{
+    if (check_bulk_mode(c))
+    {
+        int maxl = 0, plen = 0;
+        int leng = BLEN(&c->c2.buffers->send_tun_max);
+        uint8_t *temp = BPTR(&c->c2.buffers->send_tun_max);
+        for (int x = 0; x < TUN_BAT_MAX; ++x)
+        {
+            temp = buff_postsize(temp, &plen);
+            if ((leng > 0) && (plen > 0) && ((maxl + plen) < leng))
+            {
+                c->c2.to_tun = c->c2.buffers->to_tun_max;
+                c->c2.to_tun.offset = TUN_BAT_OFF;
+                c->c2.to_tun.len = plen;
+                bcopy(temp, BPTR(&c->c2.to_tun), plen);
+                temp += plen; maxl += (plen + 2);
+                process_outgoing_tun_part2(c, in_sock);
+            } else { break; }
+        }
+    }
+    buf_reset(&c->c2.to_tun);
+}
+
+void process_outgoing_tun(struct context *c, struct link_socket *in_sock)
+{
+    if (c->c2.frame.bulk_size <= 0) {
+        process_outgoing_tun_part2(c, in_sock);
+    }
+    process_outgoing_tun_part3(c, in_sock);
+}
+
 void
 pre_select(struct context *c)
 {