/*
    net_packet.c -- Handles in- and outgoing VPN packets
    Copyright (C) 1998-2005 Ivo Timmermans,
                  2000-2014 Guus Sliepen <guus@tinc-vpn.org>
                  2010      Timothy Redaelli <timothy@redaelli.eu>
                  2010      Brandon Black <blblack@gmail.com>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "system.h"

#ifdef HAVE_ZLIB
#include <zlib.h>
#endif

#ifdef HAVE_LZO
#include LZO1X_H
#endif

#include "cipher.h"
#include "conf.h"
#include "connection.h"
#include "crypto.h"
#include "digest.h"
#include "device.h"
#include "ethernet.h"
#include "graph.h"
#include "logger.h"
#include "net.h"
#include "netutl.h"
#include "protocol.h"
#include "route.h"
#include "utils.h"
#include "xalloc.h"

#ifndef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif

int keylifetime = 0;

#ifdef HAVE_LZO
static char lzo_wrkmem[LZO1X_999_MEM_COMPRESS > LZO1X_1_MEM_COMPRESS ? LZO1X_999_MEM_COMPRESS : LZO1X_1_MEM_COMPRESS];
#endif

static void send_udppacket(node_t *, vpn_packet_t *);

unsigned replaywin = 16;
bool localdiscovery = true;
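
/* UDP discovery: determine whether a node is reachable over UDP by sending
   minmtu-sized probes and watching for replies; UDP is assumed broken when
   replies stop arriving.  Probes are only triggered from the packet TX
   path, so they are never sent more often than real traffic and they stop
   automatically once traffic stops flowing.  Nodes that exchange only
   occasional packets simply stay on TCP. */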
bool udp_discovery = true;
int udp_discovery_interval = 9;
int udp_discovery_timeout = 30;

#define MAX_SEQNO 1073741824
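
/* Once a probe run is complete (30 probes sent, or minmtu has caught up
   with maxmtu), pin the path MTU to the highest confirmed value and stop
   probing by setting mtuprobes past the end of the run. */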
static void try_fix_mtu(node_t *n) {
	if(n->mtuprobes > 30)
		return;

	if(n->mtuprobes == 30 || n->minmtu >= n->maxmtu) {
		if(n->minmtu > n->maxmtu)
			n->minmtu = n->maxmtu;
		else
			n->maxmtu = n->minmtu;
		n->mtu = n->minmtu;
		logger(DEBUG_TRAFFIC, LOG_INFO, "Fixing MTU of %s (%s) to %d after %d probes", n->name, n->hostname, n->mtu, n->mtuprobes);
		n->mtuprobes = 31;
	}
}
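
/* Fires when no UDP probe reply has arrived for udp_discovery_timeout
   seconds: stop using UDP and restart MTU discovery from scratch. */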
static void udp_probe_timeout_handler(void *data) {
	node_t *n = data;

	if(!n->status.udp_confirmed)
		return;

	logger(DEBUG_TRAFFIC, LOG_INFO, "Too much time has elapsed since last UDP ping response from %s (%s), stopping UDP communication", n->name, n->hostname);
	n->status.udp_confirmed = false;
	n->mtuprobes = 0;
	n->minmtu = 0;
	n->maxmtu = MTU;
}

static void udp_probe_h(node_t *n, vpn_packet_t *packet, length_t len) {
	if(!DATA(packet)[0]) {
		logger(DEBUG_TRAFFIC, LOG_INFO, "Got UDP probe request %d from %s (%s)", packet->len, n->name, n->hostname);

		/* It's a probe request, send back a reply */

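		/* A type 2 reply encodes, after the type byte, the length of the
		   probe being answered (2 bytes, network byte order) followed by
		   the time the reply was generated (4 + 4 bytes, seconds and
		   microseconds, network byte order). */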
		/* Type 2 probe replies were introduced in protocol 17.3 */
		if((n->options >> 24) >= 3) {
			uint8_t *data = DATA(packet);
			*data++ = 2;
			uint16_t len16 = htons(len); memcpy(data, &len16, 2); data += 2;
			struct timeval now;
			gettimeofday(&now, NULL);
			uint32_t sec = htonl(now.tv_sec); memcpy(data, &sec, 4); data += 4;
			uint32_t usec = htonl(now.tv_usec); memcpy(data, &usec, 4); data += 4;
			packet->len -= 10;
		} else {
			/* Legacy protocol: n won't understand type 2 probe replies. */
			DATA(packet)[0] = 1;
		}

		/* Temporarily set udp_confirmed, so that the reply is sent
		   back exactly the way it came in. */

		bool udp_confirmed = n->status.udp_confirmed;
		n->status.udp_confirmed = true;
		send_udppacket(n, packet);
		n->status.udp_confirmed = udp_confirmed;
	} else {
		length_t probelen = len;

		if(DATA(packet)[0] == 2) {
			if(len < 3)
				logger(DEBUG_TRAFFIC, LOG_WARNING, "Received invalid (too short) UDP probe reply from %s (%s)", n->name, n->hostname);
			else {
				uint16_t probelen16; memcpy(&probelen16, DATA(packet) + 1, 2); probelen = ntohs(probelen16);
			}
		}

		logger(DEBUG_TRAFFIC, LOG_INFO, "Got type %d UDP probe reply %d from %s (%s)", DATA(packet)[0], probelen, n->name, n->hostname);

		/* It's a valid reply: now we know bidirectional communication
		   is possible using the address and socket that the reply
		   packet used. */

		n->status.udp_confirmed = true;
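
		/* Every reply re-arms the expiry timer; if no reply arrives for
		   udp_discovery_timeout seconds, udp_probe_timeout_handler tears
		   the UDP state down again. */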
		if(udp_discovery) {
			timeout_del(&n->udp_ping_timeout);
			timeout_add(&n->udp_ping_timeout, &udp_probe_timeout_handler, n, &(struct timeval){udp_discovery_timeout, 0});
		}

		if(probelen >= n->maxmtu + 8) {
			logger(DEBUG_TRAFFIC, LOG_INFO, "Increase in PMTU to %s (%s) detected, restarting PMTU discovery", n->name, n->hostname);
			n->maxmtu = MTU;
			n->mtuprobes = 10;
			return;
		}

		/* If applicable, raise the minimum supported MTU */

		if(probelen > n->maxmtu)
			probelen = n->maxmtu;

		if(n->minmtu < probelen) {
			n->minmtu = probelen;
			try_fix_mtu(n);
		}

		/* Calculate RTT and bandwidth.
		   The RTT is the time between the MTU probe burst being sent and the
		   first reply being received. The bandwidth is measured using the time
		   between the arrival of the first and third probe reply (or type 2
		   probe requests).
		 */

		struct timeval now, diff;
		gettimeofday(&now, NULL);
		timersub(&now, &n->probe_time, &diff);

		struct timeval probe_timestamp = now;

		if(DATA(packet)[0] == 2 && packet->len >= 11) {
			uint32_t sec; memcpy(&sec, DATA(packet) + 3, 4);
			uint32_t usec; memcpy(&usec, DATA(packet) + 7, 4);
			probe_timestamp.tv_sec = ntohl(sec);
			probe_timestamp.tv_usec = ntohl(usec);
		}

		n->probe_counter++;

		if(n->probe_counter == 1) {
			n->rtt = diff.tv_sec + diff.tv_usec * 1e-6;
			n->probe_time = probe_timestamp;
		} else if(n->probe_counter == 3) {
			/* TODO: this will never fire after initial MTU discovery. */
			struct timeval probe_timestamp_diff;
			timersub(&probe_timestamp, &n->probe_time, &probe_timestamp_diff);
			n->bandwidth = 2.0 * probelen / (probe_timestamp_diff.tv_sec + probe_timestamp_diff.tv_usec * 1e-6);
			logger(DEBUG_TRAFFIC, LOG_DEBUG, "%s (%s) RTT %.2f ms, burst bandwidth %.3f Mbit/s, rx packet loss %.2f %%", n->name, n->hostname, n->rtt * 1e3, n->bandwidth * 8e-6, n->packetloss * 1e2);
		}
	}
}
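
/* Compress <len> bytes from <source> into <dest>.  Level 0 copies the data
   verbatim, levels 1-9 use zlib at that level, level 10 uses the fast
   LZO1X-1 algorithm and anything higher uses LZO1X-999.  Returns the
   compressed length, or -1 on failure or when support was compiled out. */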
static length_t compress_packet(uint8_t *dest, const uint8_t *source, length_t len, int level) {
	if(level == 0) {
		memcpy(dest, source, len);
		return len;
	} else if(level == 10) {
#ifdef HAVE_LZO
		lzo_uint lzolen = MAXSIZE;
		lzo1x_1_compress(source, len, dest, &lzolen, lzo_wrkmem);
		return lzolen;
#else
		return -1;
#endif
	} else if(level < 10) {
#ifdef HAVE_ZLIB
		unsigned long destlen = MAXSIZE;
		if(compress2(dest, &destlen, source, len, level) == Z_OK)
			return destlen;
		else
#endif
			return -1;
	} else {
#ifdef HAVE_LZO
		lzo_uint lzolen = MAXSIZE;
		lzo1x_999_compress(source, len, dest, &lzolen, lzo_wrkmem);
		return lzolen;
#else
		return -1;
#endif
	}

	return -1;
}

static length_t uncompress_packet(uint8_t *dest, const uint8_t *source, length_t len, int level) {
	if(level == 0) {
		memcpy(dest, source, len);
		return len;
	} else if(level > 9) {
#ifdef HAVE_LZO
		lzo_uint lzolen = MAXSIZE;
		if(lzo1x_decompress_safe(source, len, dest, &lzolen, NULL) == LZO_E_OK)
			return lzolen;
		else
#endif
			return -1;
	}
#ifdef HAVE_ZLIB
	else {
		unsigned long destlen = MAXSIZE;
		if(uncompress(dest, &destlen, source, len) == Z_OK)
			return destlen;
		else
			return -1;
	}
#endif

	return -1;
}

/* VPN packet I/O */

static void receive_packet(node_t *n, vpn_packet_t *packet) {
	logger(DEBUG_TRAFFIC, LOG_DEBUG, "Received packet of %d bytes from %s (%s)",
		   packet->len, n->name, n->hostname);

	n->in_packets++;
	n->in_bytes += packet->len;

	route(n, packet);
}
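
/* Check whether a packet authenticates under n's incoming keys (SPTPS
   datagram verification, or the legacy MAC) without decrypting it. */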
static bool try_mac(node_t *n, const vpn_packet_t *inpkt) {
	if(n->status.sptps)
		return sptps_verify_datagram(&n->sptps, DATA(inpkt), inpkt->len);

#ifdef DISABLE_LEGACY
	return false;
#else
	if(!digest_active(n->indigest) || inpkt->len < sizeof(seqno_t) + digest_length(n->indigest))
		return false;

	return digest_verify(n->indigest, SEQNO(inpkt), inpkt->len - digest_length(n->indigest), DATA(inpkt) + inpkt->len - digest_length(n->indigest));
#endif
}
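
/* Authenticate, decrypt, replay-check and decompress an incoming UDP data
   packet from n, then hand the payload to route().  pkt1/pkt2 are scratch
   buffers that the packet ping-pongs between as each transform is applied.
   Returns false if the packet is rejected at any stage. */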
static bool receive_udppacket(node_t *n, vpn_packet_t *inpkt) {
	vpn_packet_t pkt1, pkt2;
	vpn_packet_t *pkt[] = { &pkt1, &pkt2, &pkt1, &pkt2 };
	int nextpkt = 0;
	size_t outlen;

	pkt1.offset = DEFAULT_PACKET_OFFSET;
	pkt2.offset = DEFAULT_PACKET_OFFSET;

	if(n->status.sptps) {
		if(!n->sptps.state) {
			if(!n->status.waitingforkey) {
				logger(DEBUG_TRAFFIC, LOG_DEBUG, "Got packet from %s (%s) but we haven't exchanged keys yet", n->name, n->hostname);
				send_req_key(n);
			} else {
				logger(DEBUG_TRAFFIC, LOG_DEBUG, "Got packet from %s (%s) but he hasn't got our key yet", n->name, n->hostname);
			}
			return false;
		}
		inpkt->offset += 2 * sizeof(node_id_t);
		if(!sptps_receive_data(&n->sptps, DATA(inpkt), inpkt->len - 2 * sizeof(node_id_t))) {
			logger(DEBUG_TRAFFIC, LOG_ERR, "Got bad packet from %s (%s)", n->name, n->hostname);
			return false;
		}
		return true;
	}

#ifdef DISABLE_LEGACY
	return false;
#else
	if(!n->status.validkey) {
		logger(DEBUG_TRAFFIC, LOG_DEBUG, "Got packet from %s (%s) but he hasn't got our key yet", n->name, n->hostname);
		return false;
	}

	/* Check packet length */

	if(inpkt->len < sizeof(seqno_t) + digest_length(n->indigest)) {
		logger(DEBUG_TRAFFIC, LOG_DEBUG, "Got too short packet from %s (%s)",
			   n->name, n->hostname);
		return false;
	}

	/* It's a legacy UDP packet, the data starts after the seqno */

	inpkt->offset += sizeof(seqno_t);

	/* Check the message authentication code */

	if(digest_active(n->indigest)) {
		inpkt->len -= digest_length(n->indigest);
		if(!digest_verify(n->indigest, SEQNO(inpkt), inpkt->len, SEQNO(inpkt) + inpkt->len)) {
			logger(DEBUG_TRAFFIC, LOG_DEBUG, "Got unauthenticated packet from %s (%s)", n->name, n->hostname);
			return false;
		}
	}

	/* Decrypt the packet */

	if(cipher_active(n->incipher)) {
		vpn_packet_t *outpkt = pkt[nextpkt++];
		outlen = MAXSIZE;

		if(!cipher_decrypt(n->incipher, SEQNO(inpkt), inpkt->len, SEQNO(outpkt), &outlen, true)) {
			logger(DEBUG_TRAFFIC, LOG_DEBUG, "Error decrypting packet from %s (%s)", n->name, n->hostname);
			return false;
		}

		outpkt->len = outlen;
		inpkt = outpkt;
	}

	/* Check the sequence number */

	seqno_t seqno;
	memcpy(&seqno, SEQNO(inpkt), sizeof seqno);
	seqno = ntohl(seqno);
	inpkt->len -= sizeof seqno;
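
	/* late[] is a replaywin-byte bitmap covering the replaywin * 8 sequence
	   numbers below received_seqno; a set bit marks a seqno that was skipped
	   and may still arrive late.  Packets too far in the future or past, or
	   whose bit was already cleared, are dropped as replays. */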
	if(replaywin) {
		if(seqno != n->received_seqno + 1) {
			if(seqno >= n->received_seqno + replaywin * 8) {
				if(n->farfuture++ < replaywin >> 2) {
					logger(DEBUG_ALWAYS, LOG_WARNING, "Packet from %s (%s) is %d seqs in the future, dropped (%u)",
						   n->name, n->hostname, seqno - n->received_seqno - 1, n->farfuture);
					return false;
				}
				logger(DEBUG_ALWAYS, LOG_WARNING, "Lost %d packets from %s (%s)",
					   seqno - n->received_seqno - 1, n->name, n->hostname);
				memset(n->late, 0, replaywin);
			} else if(seqno <= n->received_seqno) {
				if((n->received_seqno >= replaywin * 8 && seqno <= n->received_seqno - replaywin * 8) || !(n->late[(seqno / 8) % replaywin] & (1 << seqno % 8))) {
					logger(DEBUG_ALWAYS, LOG_WARNING, "Got late or replayed packet from %s (%s), seqno %d, last received %d",
						   n->name, n->hostname, seqno, n->received_seqno);
					return false;
				}
			} else {
				for(int i = n->received_seqno + 1; i < seqno; i++)
					n->late[(i / 8) % replaywin] |= 1 << i % 8;
			}
		}

		n->farfuture = 0;
		n->late[(seqno / 8) % replaywin] &= ~(1 << seqno % 8);
	}

	if(seqno > n->received_seqno)
		n->received_seqno = seqno;

	n->received++;
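
	/* Ask for fresh symmetric keys long before the 32-bit sequence number
	   can wrap around. */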
	if(n->received_seqno > MAX_SEQNO)
		regenerate_key();

	/* Decompress the packet */

	length_t origlen = inpkt->len;

	if(n->incompression) {
		vpn_packet_t *outpkt = pkt[nextpkt++];

		if((outpkt->len = uncompress_packet(DATA(outpkt), DATA(inpkt), inpkt->len, n->incompression)) < 0) {
			logger(DEBUG_TRAFFIC, LOG_ERR, "Error while uncompressing packet from %s (%s)",
				   n->name, n->hostname);
			return false;
		}

		inpkt = outpkt;

		origlen -= MTU / 64 + 20;
	}

	inpkt->priority = 0;
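
	/* tinc marks its own UDP probe packets by zeroing bytes 12 and 13,
	   which in a real Ethernet frame hold the EtherType/length field, so
	   this test separates probes from user traffic. */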
	if(!DATA(inpkt)[12] && !DATA(inpkt)[13])
		udp_probe_h(n, inpkt, origlen);
	else
		receive_packet(n, inpkt);

	return true;
#endif
}

void receive_tcppacket(connection_t *c, const char *buffer, int len) {
	vpn_packet_t outpkt;
	outpkt.offset = DEFAULT_PACKET_OFFSET;

	if(len > sizeof outpkt.data - outpkt.offset)
		return;

	outpkt.len = len;
	if(c->options & OPTION_TCPONLY)
		outpkt.priority = 0;
	else
		outpkt.priority = -1;
	memcpy(DATA(&outpkt), buffer, len);

	receive_packet(c->node, &outpkt);
}

static void send_sptps_packet(node_t *n, vpn_packet_t *origpkt) {
	if(!n->status.validkey && !n->connection)
		return;

	uint8_t type = 0;
	int offset = 0;

	if(!(DATA(origpkt)[12] | DATA(origpkt)[13])) {
		sptps_send_record(&n->sptps, PKT_PROBE, (char *)DATA(origpkt), origpkt->len);
		return;
	}
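
	/* In router mode the 14-byte Ethernet header is redundant and is
	   stripped before encryption; in switch/hub mode the full frame is
	   kept and the record is flagged PKT_MAC. */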
	if(routing_mode == RMODE_ROUTER)
		offset = 14;
	else
		type = PKT_MAC;

	if(origpkt->len < offset)
		return;

	vpn_packet_t outpkt;

	if(n->outcompression) {
		outpkt.offset = 0;
		int len = compress_packet(DATA(&outpkt) + offset, DATA(origpkt) + offset, origpkt->len - offset, n->outcompression);
		if(len < 0) {
			logger(DEBUG_TRAFFIC, LOG_ERR, "Error while compressing packet to %s (%s)", n->name, n->hostname);
		} else if(len < origpkt->len - offset) {
			outpkt.len = len + offset;
			origpkt = &outpkt;
			type |= PKT_COMPRESSED;
		}
	}

	/* If we have a direct metaconnection to n, and we can't use UDP, then
	   don't bother with SPTPS and just use a "plaintext" PACKET message.
	   We don't really care about end-to-end security since we're not
	   sending the message through any intermediate nodes. */

	if(n->connection && origpkt->len > n->minmtu)
		send_tcppacket(n->connection, origpkt);
	else
		sptps_send_record(&n->sptps, type, DATA(origpkt) + offset, origpkt->len - offset);

	return;
}

static void adapt_socket(const sockaddr_t *sa, int *sock) {
	/* Make sure we have a suitable socket for the chosen address */
	if(listen_socket[*sock].sa.sa.sa_family != sa->sa.sa_family) {
		for(int i = 0; i < listen_sockets; i++) {
			if(listen_socket[i].sa.sa.sa_family == sa->sa.sa_family) {
				*sock = i;
				break;
			}
		}
	}
}

static void choose_udp_address(const node_t *n, const sockaddr_t **sa, int *sock) {
	/* Latest guess */
	*sa = &n->address;
	*sock = n->sock;

	/* If the UDP address is confirmed, use it. */
	if(n->status.udp_confirmed)
		return;

	/* Send every third packet to n->address; that could be set
	   to the node's reflexive UDP address discovered during key
	   exchange. */

	static int x = 0;
	if(++x >= 3) {
		x = 0;
		return;
	}

	/* Otherwise, addresses are found in edges to this node.
	   So we pick a random edge and a random socket. */

	int i = 0;
	int j = rand() % n->edge_tree->count;
	edge_t *candidate = NULL;

	for splay_each(edge_t, e, n->edge_tree) {
		if(i++ == j) {
			candidate = e->reverse;
			break;
		}
	}

	if(candidate) {
		*sa = &candidate->address;
		*sock = rand() % listen_sockets;
	}

	adapt_socket(*sa, sock);
}
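
/* Local discovery: also try the locally advertised address of one of n's
   edges, which may work when both nodes are behind the same NAT. */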
static void choose_local_address(const node_t *n, const sockaddr_t **sa, int *sock) {
	*sa = NULL;

	/* Pick one of the edges from this node at random, then use its local address. */

	int i = 0;
	int j = rand() % n->edge_tree->count;
	edge_t *candidate = NULL;

	for splay_each(edge_t, e, n->edge_tree) {
		if(i++ == j) {
			candidate = e;
			break;
		}
	}

	if(candidate && candidate->local_address.sa.sa_family) {
		*sa = &candidate->local_address;
		*sock = rand() % listen_sockets;
		adapt_socket(*sa, sock);
	}
}
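
/* Legacy (non-SPTPS) UDP send path: optionally compress the payload,
   append the 32-bit sequence number, encrypt, and add the MAC.  Packets
   that exceed the discovered path MTU are diverted to the next hop or to
   TCP instead. */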
static void send_udppacket(node_t *n, vpn_packet_t *origpkt) {
	vpn_packet_t pkt1, pkt2;
	vpn_packet_t *pkt[] = { &pkt1, &pkt2, &pkt1, &pkt2 };
	vpn_packet_t *inpkt = origpkt;
	int nextpkt = 0;
	vpn_packet_t *outpkt;
	int origlen = origpkt->len;
	size_t outlen;
#if defined(SOL_IP) && defined(IP_TOS)
	static int priority = 0;
	int origpriority = origpkt->priority;
#endif

	pkt1.offset = DEFAULT_PACKET_OFFSET;
	pkt2.offset = DEFAULT_PACKET_OFFSET;

	if(!n->status.reachable) {
		logger(DEBUG_TRAFFIC, LOG_INFO, "Trying to send UDP packet to unreachable node %s (%s)", n->name, n->hostname);
		return;
	}
    if(n->status.sptps)
        return send_sptps_packet(n, origpkt);

#ifdef DISABLE_LEGACY
    return;
#else
    /* Make sure we have a valid key */

    if(!n->status.validkey) {
        logger(DEBUG_TRAFFIC, LOG_INFO,
               "No valid key known yet for %s (%s), forwarding via TCP",
               n->name, n->hostname);
        send_tcppacket(n->nexthop->connection, origpkt);
        return;
    }
    if(n->options & OPTION_PMTU_DISCOVERY && inpkt->len > n->minmtu && (DATA(inpkt)[12] | DATA(inpkt)[13])) {
        logger(DEBUG_TRAFFIC, LOG_INFO,
               "Packet for %s (%s) larger than minimum MTU, forwarding via %s",
               n->name, n->hostname, n != n->nexthop ? n->nexthop->name : "TCP");

        if(n != n->nexthop)
            send_packet(n->nexthop, origpkt);
        else
            send_tcppacket(n->nexthop->connection, origpkt);

        return;
    }
    /* Compress the packet */

    if(n->outcompression) {
        outpkt = pkt[nextpkt++];

        if((outpkt->len = compress_packet(DATA(outpkt), DATA(inpkt), inpkt->len, n->outcompression)) < 0) {
            logger(DEBUG_TRAFFIC, LOG_ERR, "Error while compressing packet to %s (%s)",
                   n->name, n->hostname);
            return;
        }

        inpkt = outpkt;
    }
    /* Add sequence number */

    seqno_t seqno = htonl(++(n->sent_seqno));
    memcpy(SEQNO(inpkt), &seqno, sizeof seqno);
    inpkt->len += sizeof seqno;
    /* Encrypt the packet */

    if(cipher_active(n->outcipher)) {
        outpkt = pkt[nextpkt++];
        outlen = MAXSIZE;

        if(!cipher_encrypt(n->outcipher, SEQNO(inpkt), inpkt->len, SEQNO(outpkt), &outlen, true)) {
            logger(DEBUG_TRAFFIC, LOG_ERR, "Error while encrypting packet to %s (%s)", n->name, n->hostname);
            goto end;
        }

        outpkt->len = outlen;
        inpkt = outpkt;
    }
    /* Add the message authentication code */

    if(digest_active(n->outdigest)) {
        if(!digest_create(n->outdigest, SEQNO(inpkt), inpkt->len, SEQNO(inpkt) + inpkt->len)) {
            logger(DEBUG_TRAFFIC, LOG_ERR, "Error while authenticating packet to %s (%s)", n->name, n->hostname);
            goto end;
        }

        inpkt->len += digest_length(n->outdigest);
    }
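    /* At this point the outgoing legacy datagram is, in effect (a sketch
       inferred from the code above, not a normative wire spec):

           [ seqno (4, network order) | payload ]  <- encrypted as one unit
           [ MAC over the ciphertext ]             <- appended afterwards

       with everything starting at SEQNO(inpkt). */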
    /* Send the packet */

    const sockaddr_t *sa = NULL;
    int sock;

    if(n->status.send_locally)
        choose_local_address(n, &sa, &sock);
    if(!sa)
        choose_udp_address(n, &sa, &sock);
#if defined(SOL_IP) && defined(IP_TOS)
    if(priorityinheritance && origpriority != priority
       && listen_socket[n->sock].sa.sa.sa_family == AF_INET) {
        priority = origpriority;
        logger(DEBUG_TRAFFIC, LOG_DEBUG, "Setting outgoing packet priority to %d", priority);
        if(setsockopt(listen_socket[n->sock].udp.fd, SOL_IP, IP_TOS, &priority, sizeof(priority))) /* SO_PRIORITY doesn't seem to work */
            logger(DEBUG_ALWAYS, LOG_ERR, "System call `%s' failed: %s", "setsockopt", sockstrerror(sockerrno));
    }
#endif
    if(sendto(listen_socket[sock].udp.fd, SEQNO(inpkt), inpkt->len, 0, &sa->sa, SALEN(sa->sa)) < 0 && !sockwouldblock(sockerrno)) {
        if(sockmsgsize(sockerrno)) {
            if(n->maxmtu >= origlen)
                n->maxmtu = origlen - 1;
            if(n->mtu >= origlen)
                n->mtu = origlen - 1;

            try_fix_mtu(n);
        } else
            logger(DEBUG_TRAFFIC, LOG_WARNING, "Error sending packet to %s (%s): %s", n->name, n->hostname, sockstrerror(sockerrno));
    }
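    /* A send failure with a message-too-big error is itself a PMTU signal:
       clamping mtu/maxmtu just below the offending length feeds straight
       back into the probing logic in try_mtu() further down. */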
end:
    origpkt->len = origlen;
#endif
}
/* Add UDP datagram relay support to SPTPS.

   This commit changes the layout of UDP datagrams to include a 6-byte
   destination node ID at the very beginning of the datagram (i.e. before the
   source node ID and the seqno). Note that this only applies to SPTPS.
   Thanks to this new field, it is now possible to send SPTPS datagrams to
   nodes that are not the final recipient of the packets, thereby using these
   nodes as relay nodes. Previously SPTPS was unable to relay packets using
   UDP, and required a fallback to TCP if the final recipient could not be
   contacted directly using UDP. In that sense it fixes a regression that
   SPTPS introduced with regard to the legacy protocol.

   This change also updates tinc's low-level routing logic (i.e.
   send_sptps_data()) to automatically use this relaying facility if at all
   possible. Specifically, it will relay packets if we don't have a confirmed
   UDP link to the final recipient (but we have one with the next hop node),
   or if IndirectData is specified. This is similar to how the legacy
   protocol forwards packets.

   When sending packets directly without any relaying, the sender node uses a
   special value for the destination node ID: instead of setting the field to
   the ID of the recipient node, it writes a zero ID instead. This allows the
   recipient node to distinguish between a relayed packet and a direct
   packet, which is important when determining the UDP address of the sending
   node.

   On the relay side, relay nodes will happily relay packets that have a
   destination ID which is non-zero *and* is different from their own,
   provided that the source IP address of the packet is known. This is to
   prevent abuse by random strangers, since a node can't authenticate the
   packets that are being relayed through it.

   This change keeps the protocol number from the previous datagram format
   change (source IDs), 17.4. Compatibility is still preserved with 1.0 and
   with pre-1.1 releases. Note, however, that nodes running this code won't
   understand datagrams sent from nodes that only use source IDs and
   vice-versa (not that we really care).

   There is one caveat: in the current state, there is no way for the
   original sender to know what the PMTU is beyond the first hop, and
   contrary to the legacy protocol, relay nodes can't apply MSS clamping
   because they can't decrypt the relayed packets. This leads to inefficient
   scenarios where a reduced PMTU over some link that's part of the relay
   path will result in relays falling back to TCP to send packets to their
   final destinations. Another caveat is that once a packet gets sent over
   TCP, it will use TCP over the entire path, even if it is technically
   possible to use UDP beyond the TCP-only link(s). Arguably, these two
   caveats can be fixed by improving the metaconnection protocol, but that's
   out of scope for this change; TODOs are added instead. In any case, this
   is no worse than before.

   In addition, this change increases SPTPS datagram overhead by another
   6 bytes for the destination ID, on top of the existing 6-byte overhead
   from the source ID. */

static bool send_sptps_data_priv(node_t *to, node_t *from, int type, const void *data, size_t len) {
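    /* Sketch of the datagram this function builds when the relay-capable
       path is taken (an inference from the code below, not a normative
       wire spec):

           [ dst node ID (6) ][ src node ID (6) ][ SPTPS record ... ]

       A zero dst ID marks a packet that is sent directly to its final
       recipient; peers that do not support relaying get the bare SPTPS
       record without the ID header. */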
    node_t *relay = (to->via != myself && (type == PKT_PROBE || (len - SPTPS_DATAGRAM_OVERHEAD) <= to->via->minmtu)) ? to->via : to->nexthop;
    bool direct = from == myself && to == relay;
    bool relay_supported = (relay->options >> 24) >= 4;
    bool tcponly = (myself->options | relay->options) & OPTION_TCPONLY;
    /* Send it via TCP if it is a handshake packet, TCPOnly is in use, this is a relay packet that the other node cannot understand, or this packet is larger than the MTU.
       TODO: When relaying, the original sender does not know the end-to-end PMTU (it only knows the PMTU of the first hop).
       This can lead to scenarios where large packets are sent over UDP to the relay, but then the relay has no choice but to fall back to TCP. */

    if(type == SPTPS_HANDSHAKE || tcponly || (!direct && !relay_supported) || (type != PKT_PROBE && (len - SPTPS_DATAGRAM_OVERHEAD) > relay->minmtu)) {
        char buf[len * 4 / 3 + 5];
        b64encode(data, buf, len);
        /* If no valid key is known yet, send the packets using ANS_KEY requests,
           to ensure we get to learn the reflexive UDP address. */
        if(from == myself && !to->status.validkey) {
            to->incompression = myself->incompression;
            return send_request(to->nexthop->connection, "%d %s %s %s -1 -1 -1 %d", ANS_KEY, from->name, to->name, buf, to->incompression);
        } else {
            return send_request(to->nexthop->connection, "%d %s %s %d %s", REQ_KEY, from->name, to->name, REQ_SPTPS, buf);
        }
    }
    size_t overhead = 0;
    if(relay_supported)
        overhead += sizeof to->id + sizeof from->id;

    char buf[len + overhead];
    char *buf_ptr = buf;
    if(relay_supported) {
        if(direct) {
            /* Inform the recipient that this packet was sent directly. */
            node_id_t nullid = {};
            memcpy(buf_ptr, &nullid, sizeof nullid);
            buf_ptr += sizeof nullid;
        } else {
            memcpy(buf_ptr, &to->id, sizeof to->id);
            buf_ptr += sizeof to->id;
        }

        memcpy(buf_ptr, &from->id, sizeof from->id);
        buf_ptr += sizeof from->id;
    }

    /* TODO: if this copy turns out to be a performance concern, change sptps_send_record() to add some "pre-padding" to the buffer and use that instead */
    memcpy(buf_ptr, data, len);
    buf_ptr += len;
    const sockaddr_t *sa = NULL;
    int sock;

    if(relay->status.send_locally)
        choose_local_address(relay, &sa, &sock);
    if(!sa)
        choose_udp_address(relay, &sa, &sock);

    logger(DEBUG_TRAFFIC, LOG_INFO, "Sending packet from %s (%s) to %s (%s) via %s (%s)", from->name, from->hostname, to->name, to->hostname, relay->name, relay->hostname);

    if(sendto(listen_socket[sock].udp.fd, buf, buf_ptr - buf, 0, &sa->sa, SALEN(sa->sa)) < 0 && !sockwouldblock(sockerrno)) {
        if(sockmsgsize(sockerrno)) {
            // Compensate for SPTPS overhead
            len -= SPTPS_DATAGRAM_OVERHEAD;
            if(relay->maxmtu >= len)
                relay->maxmtu = len - 1;
            if(relay->mtu >= len)
                relay->mtu = len - 1;

            try_fix_mtu(relay);
        } else {
            logger(DEBUG_TRAFFIC, LOG_WARNING, "Error sending UDP SPTPS packet to %s (%s): %s", relay->name, relay->hostname, sockstrerror(sockerrno));
            return false;
        }
    }

    return true;
}
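/* SPTPS transmit callback: records generated by our own SPTPS state machine
   always originate from myself, hence the fixed second argument below. */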
bool send_sptps_data(void *handle, uint8_t type, const void *data, size_t len) {
    return send_sptps_data_priv(handle, myself, type, data, len);
}
bool receive_sptps_record(void *handle, uint8_t type, const void *data, uint16_t len) {
    node_t *from = handle;

    if(type == SPTPS_HANDSHAKE) {
        if(!from->status.validkey) {
            from->status.validkey = true;
            from->status.waitingforkey = false;
            logger(DEBUG_META, LOG_INFO, "SPTPS key exchange with %s (%s) successful", from->name, from->hostname);
        }
        return true;
    }

    if(len > MTU) {
        logger(DEBUG_ALWAYS, LOG_ERR, "Packet from %s (%s) larger than maximum supported size (%d > %d)", from->name, from->hostname, len, MTU);
        return false;
    }
    vpn_packet_t inpkt;
    inpkt.offset = DEFAULT_PACKET_OFFSET;

    if(type == PKT_PROBE) {
        inpkt.len = len;
        memcpy(DATA(&inpkt), data, len);
        udp_probe_h(from, &inpkt, len);
        return true;
    }
    if(type & ~(PKT_COMPRESSED | PKT_MAC)) {
        logger(DEBUG_ALWAYS, LOG_ERR, "Unexpected SPTPS record type %d len %d from %s (%s)", type, len, from->name, from->hostname);
        return false;
    }
    /* Check if we have the headers we need */

    if(routing_mode != RMODE_ROUTER && !(type & PKT_MAC)) {
        logger(DEBUG_TRAFFIC, LOG_ERR, "Received packet from %s (%s) without MAC header (maybe Mode is not set correctly)", from->name, from->hostname);
        return false;
    } else if(routing_mode == RMODE_ROUTER && (type & PKT_MAC)) {
        logger(DEBUG_TRAFFIC, LOG_WARNING, "Received packet from %s (%s) with MAC header (maybe Mode is not set correctly)", from->name, from->hostname);
    }
    int offset = (type & PKT_MAC) ? 0 : 14; /* reserve room for a synthesized Ethernet header */

    if(type & PKT_COMPRESSED) {
        int ulen = uncompress_packet(DATA(&inpkt) + offset, (const uint8_t *)data, len, from->incompression);
        if(ulen < 0) {
            return false;
        } else {
            inpkt.len = ulen + offset;
        }
        if(inpkt.len > MAXSIZE)
            abort();
    } else {
        memcpy(DATA(&inpkt) + offset, data, len);
        inpkt.len = len + offset;
    }
    /* Generate the Ethernet packet type if necessary */

    if(offset) {
        switch(DATA(&inpkt)[14] >> 4) {
        case 4: /* EtherType 0x0800: IPv4 */
            DATA(&inpkt)[12] = 0x08;
            DATA(&inpkt)[13] = 0x00;
            break;
        case 6: /* EtherType 0x86DD: IPv6 */
            DATA(&inpkt)[12] = 0x86;
            DATA(&inpkt)[13] = 0xDD;
            break;
        default:
            logger(DEBUG_TRAFFIC, LOG_ERR,
                   "Unknown IP version %d while reading packet from %s (%s)",
                   DATA(&inpkt)[14] >> 4, from->name, from->hostname);
            return false;
        }
    }
    receive_packet(from, &inpkt);
    return true;
}
// This function tries to get SPTPS keys, if they aren't already known.
// This function makes no guarantees - it is up to the caller to check the node's state to figure out if the keys are available.
static void try_sptps(node_t *n) {
    if(n->status.validkey)
        return;

    logger(DEBUG_TRAFFIC, LOG_INFO, "No valid key known yet for %s (%s)", n->name, n->hostname);

    if(!n->status.waitingforkey)
        send_req_key(n);
    else if(n->last_req_key + 10 < now.tv_sec) {
        logger(DEBUG_ALWAYS, LOG_DEBUG, "No key from %s after 10 seconds, restarting SPTPS", n->name);
        sptps_stop(&n->sptps);
        n->status.waitingforkey = false;
        send_req_key(n);
    }

    return;
}
static void send_udp_probe_packet(node_t *n, int len) {
    vpn_packet_t packet;
    packet.offset = DEFAULT_PACKET_OFFSET;
    memset(DATA(&packet), 0, 14);
    randomize(DATA(&packet) + 14, len - 14);
    packet.len = len;
    packet.priority = 0;

    logger(DEBUG_TRAFFIC, LOG_INFO, "Sending UDP probe length %d to %s (%s)", len, n->name, n->hostname);

    send_udppacket(n, &packet);
}
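/* Probes look like Ethernet frames with a zeroed-out header followed by
   random padding. One consequence (an inference, not stated in the source):
   the zero EtherType at bytes 12-13 makes the (DATA(inpkt)[12] |
   DATA(inpkt)[13]) test in send_udppacket() false, so oversized probes are
   not diverted to the "forward via TCP/nexthop" path and actually exercise
   the UDP link at full size. */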
// This function tries to establish a UDP tunnel to a node so that packets can be sent.
// If a tunnel is already established, it makes sure it stays up.
// This function makes no guarantees - it is up to the caller to check the node's state to figure out if UDP is usable.
static void try_udp(node_t *n) {
    if(!udp_discovery)
        return;

    struct timeval now;
    gettimeofday(&now, NULL);
    struct timeval ping_tx_elapsed;
    timersub(&now, &n->udp_ping_sent, &ping_tx_elapsed);

    if(ping_tx_elapsed.tv_sec >= udp_discovery_interval) {
        send_udp_probe_packet(n, MAX(n->minmtu, 16));
        n->udp_ping_sent = now;
        if(localdiscovery && !n->status.udp_confirmed && n->prevedge) {
            n->status.send_locally = true;
            send_udp_probe_packet(n, 16);
            n->status.send_locally = false;
        }
    }
}
// This function tries to determine the MTU of a node.
// By calling this function repeatedly, n->minmtu will be progressively increased, and at some point, n->mtu will be fixed to n->minmtu.
// If the MTU is already fixed, this function checks if it can be increased.
static void try_mtu(node_t *n) {
    if(!(n->options & OPTION_PMTU_DISCOVERY))
        return;

    if(udp_discovery && !n->status.udp_confirmed) {
        n->mtuprobes = 0;
        n->minmtu = 0;
        n->maxmtu = MTU;
        return;
    }

    /* mtuprobes == 0..29: initial discovery, send bursts with 1 second interval, mtuprobes++
       mtuprobes == 30: fix MTU, and go to 31
       mtuprobes == 31: send one >maxmtu probe every pingtimeout */

    struct timeval now;
    gettimeofday(&now, NULL);
    struct timeval elapsed;
    timersub(&now, &n->probe_sent_time, &elapsed);

    if(n->mtuprobes < 31) {
        if(n->mtuprobes != 0 && elapsed.tv_sec < 1)
            return;
    } else {
        if(elapsed.tv_sec < pingtimeout)
            return;
    }
2015-01-01 10:32:14 +00:00
	try_fix_mtu(n);
2014-12-29 17:05:19 +00:00
	if(n->mtuprobes == 31) {
		/* After the initial discovery, we only send one >maxmtu probe
		   to detect PMTU increases. */
		if(n->maxmtu + 8 < MTU)
			send_udp_probe_packet(n, n->maxmtu + 8);
	} else {
		/* Probes are sent in batches of three, with random sizes between the
		   lower and upper boundaries for the MTU thus far discovered. */
		for(int i = 0; i < 3; i++) {
			int len = n->maxmtu;
			if(n->minmtu < n->maxmtu)
				len = n->minmtu + 1 + rand() % (n->maxmtu - n->minmtu);

			send_udp_probe_packet(n, MAX(len, 64));
		}
2015-01-01 10:32:14 +00:00
		if(n->mtuprobes >= 0)
			n->mtuprobes++;
2014-12-29 17:05:19 +00:00
	}

	n->probe_counter = 0;
	n->probe_sent_time = now;
	n->probe_time = now;

	/* Calculate the packet loss of incoming traffic by comparing the rate of
	   packets received to the rate with which the sequence number has increased.
	   TODO: this is unrelated to PMTU discovery - it should be moved elsewhere. */
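	/* Worked example (added for illustration): if 90 packets arrived while
	   the peer's sequence number advanced by 100, then
	   packetloss = 1.0 - 90/100 = 0.10, i.e. 10% of packets were lost. */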
	if(n->received > n->prev_received)
		n->packetloss = 1.0 - (n->received - n->prev_received) / (float)(n->received_seqno - n->prev_received_seqno);
	else
		n->packetloss = n->received_seqno <= n->prev_received_seqno;

	n->prev_received_seqno = n->received_seqno;
	n->prev_received = n->received;
}
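try_fix_mtu() itself is not shown in this excerpt. Going by the state machine
described in the comments above, a minimal sketch (an assumption, not the
actual implementation) could look like this:

// Hedged sketch: once the 30 initial discovery rounds are done, pin the
// path MTU to the largest size that probes have confirmed (n->minmtu).
static void try_fix_mtu(node_t *n) {
	if(n->mtuprobes == 30) {
		n->mtu = n->minmtu;
		n->mtuprobes = 31;
	}
}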
2014-12-28 17:16:27 +00:00
// This function tries to establish a tunnel to a node (or its relay) so that packets can be sent (e.g. get SPTPS keys).
// If a tunnel is already established, it tries to improve it (e.g. by trying to establish a UDP tunnel instead of TCP).
// This function makes no guarantees - it is up to the caller to check the node's state to figure out if TCP and/or UDP is usable.
// By calling this function repeatedly, the tunnel is gradually improved until we hit the wall imposed by the underlying network environment.
// It is recommended to call this function every time a packet is sent (or intended to be sent) to a node,
// so that the tunnel keeps improving as packets flow, and then gracefully downgrades itself as it goes idle.
static void try_tx(node_t *n) {
	/* If n is a TCP-only neighbor, we'll only use "cleartext" PACKET
	   messages anyway, so there's no need for SPTPS at all. Otherwise, get the keys. */
	if(n->status.sptps && !(n->connection && ((myself->options | n->options) & OPTION_TCPONLY))) {
		try_sptps(n);
		if(!n->status.validkey)
			return;
	}

	node_t *via = (n->via == myself) ? n->nexthop : n->via;
	if((myself->options | via->options) & OPTION_TCPONLY)
		return;

	if(!n->status.sptps && !via->status.validkey && via->last_req_key + 10 <= now.tv_sec) {
		send_req_key(via);
		via->last_req_key = now.tv_sec;
Move PMTU discovery code into the TX path.
Currently, the PMTU discovery code is run by a timeout callback,
independently of tunnel activity. This commit moves it into the TX
path, meaning that send_mtu_probe_handler() is only called if a
packet is about to be sent. Consequently, it has been renamed to
try_mtu() for consistency with try_tx(), try_udp() and try_sptps().
Running PMTU discovery code only as part of the TX path prevents
PMTU discovery from generating unreasonable amounts of traffic when
the "real" traffic is negligible. One extreme example is sending one
real packet and then going silent: in the current code this one little
packet will result in the entire PMTU discovery algorithm being run
from start to finish, resulting in absurd write traffic amplification.
With this patch, PMTU discovery stops as soon as "real" packets stop
flowing, and will be no more aggressive than the underlying traffic.
Furthermore, try_mtu() only runs if there is confirmed UDP
connectivity as per the UDP discovery mechanism. This prevents
unnecessary network chatter - previously, the PMTU discovery code
would send bursts of (potentially large) probe packets every second
even if there was nothing on the other side. With this patch, the
PMTU code only does that if something replied to the lightweight UDP
discovery pings.
These inefficiencies were made even worse when the node is not a
direct neighbour, as tinc will use PMTU discovery both on the
destination node *and* the relay. UDP discovery is more lightweight for
this purpose.
As a bonus, this change simplifies the code somewhat - state is
easier to manage when code is run in predictable contexts as opposed
to "surprise callbacks". In addition, there is no need to call PMTU
discovery code outside of net_packet.c anymore, thereby simplifying
module boundaries.
2014-12-29 16:47:49 +00:00
	} else if(via == n || !n->status.sptps || (via->options >> 24) >= 4) {
2014-12-29 10:34:39 +00:00
		try_udp(via);
2014-12-29 16:47:49 +00:00
		try_mtu(via);
	}
2014-12-28 17:16:27 +00:00
	/* If we don't know how to reach "via" yet, then try to reach it through a relay. */
	if(n->status.sptps && !via->status.udp_confirmed && via->nexthop != via && (via->nexthop->options >> 24) >= 4)
		try_tx(via->nexthop);
}
2002-02-18 16:25:19 +00:00
/*
  send a packet to the given vpn ip.
*/
2011-05-14 22:42:29 +00:00
void send_packet(node_t *n, vpn_packet_t *packet) {
2002-09-09 21:25:28 +00:00
	node_t *via;

	if(n == myself) {
2003-12-27 16:32:52 +00:00
		if(overwrite_mac)
2014-12-24 21:23:24 +00:00
			memcpy(DATA(packet), mymac.x, ETH_ALEN);
2011-05-14 22:42:29 +00:00
		n->out_packets++;
		n->out_bytes += packet->len;
2011-12-04 00:20:59 +00:00
		devops.write(packet);
2002-09-09 21:25:28 +00:00
		return;
	}
2012-02-26 17:37:36 +00:00
	logger(DEBUG_TRAFFIC, LOG_ERR, "Sending packet of %d bytes to %s (%s)",
2003-12-12 19:52:25 +00:00
	       packet->len, n->name, n->hostname);
2002-09-09 21:25:28 +00:00
	if(!n->status.reachable) {
2012-02-26 17:37:36 +00:00
		logger(DEBUG_TRAFFIC, LOG_INFO, "Node %s (%s) is not reachable",
2002-09-09 21:25:28 +00:00
		       n->name, n->hostname);
		return;
	}
2011-05-14 22:42:29 +00:00
	n->out_packets++;
	n->out_bytes += packet->len;
2012-08-02 15:44:59 +00:00
	if(n->status.sptps) {
		send_sptps_packet(n, packet);
2014-12-28 17:16:27 +00:00
		goto end;
2012-08-02 15:44:59 +00:00
	}
2009-01-03 21:33:55 +00:00
	via = (packet->priority == -1 || n->via == myself) ? n->nexthop : n->via;
2002-09-09 21:25:28 +00:00
2003-07-06 22:11:37 +00:00
	if(via != n)
2012-02-26 17:37:36 +00:00
		logger(DEBUG_TRAFFIC, LOG_INFO, "Sending packet to %s via %s (%s)",
2002-09-09 21:25:28 +00:00
		       n->name, via->name, n->via->hostname);
2009-01-03 21:33:55 +00:00
	if(packet->priority == -1 || ((myself->options | via->options) & OPTION_TCPONLY)) {
2003-07-22 20:55:21 +00:00
		if(!send_tcppacket(via->connection, packet))
			terminate_connection(via->connection, true);
2002-09-09 21:25:28 +00:00
	} else
		send_udppacket(via, packet);
2014-12-28 17:16:27 +00:00
end:
	/* Try to improve the tunnel.
	   Note that we do this *after* we send the packet because sending actual packets takes priority
	   with regard to send buffer space and latency. */
	try_tx(n);
2002-02-18 16:25:19 +00:00
}
/* Broadcast a packet using the minimum spanning tree */
2007-05-18 10:00:00 +00:00
void broadcast_packet(const node_t *from, vpn_packet_t *packet) {
2012-04-15 23:57:25 +00:00
	// Always give ourself a copy of the packet.
	if(from != myself)
		send_packet(myself, packet);

	// In TunnelServer mode, do not forward broadcast packets.
2012-10-10 15:17:49 +00:00
	// The MST might not be valid and create loops.
2012-04-15 23:57:25 +00:00
	if(tunnelserver || broadcast_mode == BMODE_NONE)
		return;
2002-09-09 21:25:28 +00:00
2012-02-26 17:37:36 +00:00
	logger(DEBUG_TRAFFIC, LOG_INFO, "Broadcasting packet of %d bytes from %s (%s)",
2002-09-09 21:25:28 +00:00
	       packet->len, from->name, from->hostname);
2012-04-15 23:57:25 +00:00
	switch(broadcast_mode) {
		// In MST mode, broadcast packets travel via the Minimum Spanning Tree.
		// This guarantees all nodes receive the broadcast packet, and
		// usually distributes the sending of broadcast packets over all nodes.
		case BMODE_MST:
2012-10-07 22:35:38 +00:00
			for list_each(connection_t, c, connection_list)
2014-07-12 10:57:03 +00:00
				if(c->edge && c->status.mst && c != from->nexthop->connection)
2012-04-15 23:57:25 +00:00
					send_packet(c->node, packet);
			break;

		// In direct mode, we send copies to each node we know of.
2012-10-10 15:17:49 +00:00
		// However, this only reaches nodes that can be reached in a single hop.
2012-04-15 23:57:25 +00:00
		// We don't have enough information to forward broadcast packets in this case.
		case BMODE_DIRECT:
			if(from != myself)
				break;
2012-10-07 22:35:38 +00:00
			for splay_each(node_t, n, node_tree)
2013-08-08 15:40:15 +00:00
				if(n->status.reachable && n != myself && ((n->via == myself && n->nexthop == n) || n->via == n))
2012-06-25 17:03:54 +00:00
					send_packet(n, packet);
2012-04-15 23:57:25 +00:00
			break;
2002-09-09 21:25:28 +00:00
2012-04-15 23:57:25 +00:00
		default:
			break;
2002-09-09 21:25:28 +00:00
	}
2002-02-18 16:25:19 +00:00
}
2009-04-02 23:05:23 +00:00
static node_t *try_harder(const sockaddr_t *from, const vpn_packet_t *pkt) {
	node_t *n = NULL;
2011-02-18 22:02:11 +00:00
	bool hard = false;
2009-10-24 18:54:44 +00:00
	static time_t last_hard_try = 0;
2009-04-02 23:05:23 +00:00
2012-10-07 22:35:38 +00:00
	for splay_each(edge_t, e, edge_weight_tree) {
2012-09-26 20:20:43 +00:00
		if(!e->to->status.reachable || e->to == myself)
2009-04-02 23:05:23 +00:00
			continue;
2009-10-24 18:54:44 +00:00
		if(sockaddrcmp_noport(from, &e->address)) {
2012-11-29 11:28:23 +00:00
			if(last_hard_try == now.tv_sec)
2009-10-24 18:54:44 +00:00
				continue;
2011-02-18 22:02:11 +00:00
			hard = true;
2009-12-18 00:15:25 +00:00
		}
2009-04-02 23:05:23 +00:00
		if(!try_mac(e->to, pkt))
			continue;

		n = e->to;
		break;
	}
2011-02-18 22:02:11 +00:00
	if(hard)
2012-11-29 11:28:23 +00:00
		last_hard_try = now.tv_sec;
2009-04-02 23:05:23 +00:00
	return n;
}
2012-11-29 11:28:23 +00:00
void handle_incoming_vpn_data(void *data, int flags) {
	listen_socket_t *ls = data;
2002-09-09 21:25:28 +00:00
	vpn_packet_t pkt;
	char *hostname;
2014-12-07 23:58:09 +00:00
	node_id_t nullid = {};
	sockaddr_t addr = {};
	socklen_t addrlen = sizeof addr;
	node_t *from, *to;
	bool direct = false;
2002-09-09 21:25:28 +00:00
2014-12-24 21:23:24 +00:00
	pkt.offset = 0;
	int len = recvfrom(ls->udp.fd, DATA(&pkt), MAXSIZE, 0, &addr.sa, &addrlen);
2002-09-09 21:25:28 +00:00
2009-12-19 19:52:19 +00:00
	if(len <= 0 || len > MAXSIZE) {
2009-10-24 23:40:07 +00:00
		if(!sockwouldblock(sockerrno))
2012-02-26 17:37:36 +00:00
			logger(DEBUG_ALWAYS, LOG_ERR, "Receiving packet failed: %s", sockstrerror(sockerrno));
2002-09-09 21:25:28 +00:00
		return;
	}
2009-12-19 19:52:19 +00:00
	pkt.len = len;
2014-12-07 23:58:09 +00:00
	sockaddrunmap(&addr); /* Some braindead IPv6 implementations do stupid things. */
2002-09-09 21:25:28 +00:00
2014-12-07 23:58:09 +00:00
	// Try to figure out who sent this packet.
Add UDP datagram relay support to SPTPS.
This commit changes the layout of UDP datagrams to include a 6-byte
destination node ID at the very beginning of the datagram (i.e. before
the source node ID and the seqno). Note that this only applies to SPTPS.
Thanks to this new field, it is now possible to send SPTPS datagrams to
nodes that are not the final recipient of the packets, thereby using
these nodes as relay nodes. Previously SPTPS was unable to relay packets
using UDP, and required a fallback to TCP if the final recipient could
not be contacted directly using UDP. In that sense it fixes a regression
that SPTPS introduced with regard to the legacy protocol.
This change also updates tinc's low-level routing logic (i.e.
send_sptps_data()) to automatically use this relaying facility if at all
possible. Specifically, it will relay packets if we don't have a
confirmed UDP link to the final recipient (but we have one with the next
hop node), or if IndirectData is specified. This is similar to how the
legacy protocol forwards packets.
When sending packets directly without any relaying, the sender node uses
a special value for the destination node ID: instead of setting the
field to the ID of the recipient node, it writes a zero ID instead. This
allows the recipient node to distinguish between a relayed packet and a
direct packet, which is important when determining the UDP address of
the sending node.
On the relay side, relay nodes will happily relay packets that have a
destination ID which is non-zero *and* is different from their own,
provided that the source IP address of the packet is known. This is to
prevent abuse by random strangers, since a node can't authenticate the
packets that are being relayed through it.
This change keeps the protocol number from the previous datagram format
change (source IDs), 17.4. Compatibility is still preserved with 1.0 and
with pre-1.1 releases. Note, however, that nodes running this code won't
understand datagrams sent from nodes that only use source IDs and
vice-versa (not that we really care).
There is one caveat: in the current state, there is no way for the
original sender to know what the PMTU is beyond the first hop, and
contrary to the legacy protocol, relay nodes can't apply MSS clamping
because they can't decrypt the relayed packets. This leads to
inefficient scenarios where a reduced PMTU over some link that's part of
the relay path will result in relays falling back to TCP to send packets
to their final destinations.
Another caveat is that once a packet gets sent over TCP, it will use
TCP over the entire path, even if it is technically possible to use UDP
beyond the TCP-only link(s).
Arguably, these two caveats can be fixed by improving the
metaconnection protocol, but that's out of scope for this change. TODOs
are added instead. In any case, this is no worse than before.
In addition, this change increases SPTPS datagram overhead by another
6 bytes for the destination ID, on top of the existing 6-byte overhead
from the source ID.
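To visualize the resulting datagram layout, a sketch follows; this struct is
illustrative only - the actual code addresses these fields through offset
macros such as DSTID() and SRCID() rather than through a struct.

// Illustrative layout of an SPTPS UDP datagram under this format.
// An all-zero dst_id marks a direct packet; relays forward packets whose
// dst_id is non-zero and not their own.
typedef struct sptps_datagram {
	node_id_t dst_id;  // 6 bytes: final recipient, or all-zero if direct
	node_id_t src_id;  // 6 bytes: original sender
	uint8_t payload[]; // encrypted SPTPS record (seqno, data, MAC)
} sptps_datagram_t;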
2014-09-28 11:38:06 +00:00
2014-12-07 23:58:09 +00:00
	node_t *n = lookup_node_udp(&addr);
2014-09-28 11:38:06 +00:00
	if(!n) {
2014-12-07 23:58:09 +00:00
		// It might be from a 1.1 node, which might have a source ID in the packet.
2014-12-24 21:23:24 +00:00
		pkt.offset = 2 * sizeof(node_id_t);
		from = lookup_node_id(SRCID(&pkt));

		if(from && !memcmp(DSTID(&pkt), &nullid, sizeof nullid) && from->status.sptps) {
			if(sptps_verify_datagram(&from->sptps, DATA(&pkt), pkt.len - 2 * sizeof(node_id_t)))
2014-12-07 23:58:09 +00:00
				n = from;
			else
				goto skip_harder;
		}
2014-09-27 17:13:33 +00:00
	}
2002-09-09 21:25:28 +00:00
2014-12-24 21:23:24 +00:00
	if(!n) {
		pkt.offset = 0;
2014-12-07 23:58:09 +00:00
		n = try_harder(&addr, &pkt);
2014-12-24 21:23:24 +00:00
	}
2014-09-27 17:13:33 +00:00
2014-12-07 23:58:09 +00:00
skip_harder:
2014-09-27 17:13:33 +00:00
	if(!n) {
		if(debug_level >= DEBUG_PROTOCOL) {
2014-12-07 23:58:09 +00:00
			hostname = sockaddr2hostname(&addr);
2012-02-26 17:37:36 +00:00
			logger(DEBUG_PROTOCOL, LOG_WARNING, "Received UDP packet from unknown source %s", hostname);
2009-04-02 23:05:23 +00:00
			free(hostname);
		}
2014-09-27 17:13:33 +00:00
		return;
2002-09-09 21:25:28 +00:00
	}
2014-12-07 23:58:09 +00:00
	if(n->status.sptps) {
2014-12-24 21:23:24 +00:00
		pkt.offset = 2 * sizeof(node_id_t);

		if(!memcmp(DSTID(&pkt), &nullid, sizeof nullid)) {
2014-12-07 23:58:09 +00:00
			direct = true;
			from = n;
			to = myself;
		} else {
2014-12-24 21:23:24 +00:00
			from = lookup_node_id(SRCID(&pkt));
			to = lookup_node_id(DSTID(&pkt));
2014-12-07 23:58:09 +00:00
		}

		if(!from || !to) {
			logger(DEBUG_PROTOCOL, LOG_WARNING, "Received UDP packet from %s (%s) with unknown source and/or destination ID", n->name, n->hostname);
			return;
		}
2014-12-08 07:43:15 +00:00
2014-12-07 23:58:09 +00:00
		if(to != myself) {
2014-12-24 21:23:24 +00:00
			send_sptps_data_priv(to, n, 0, DATA(&pkt), pkt.len - 2 * sizeof(node_id_t));
2014-12-07 23:58:09 +00:00
			return;
		}
	} else {
		direct = true;
		from = n;
	}
2014-12-24 21:23:24 +00:00
	pkt.offset = 0;
2014-12-07 23:58:09 +00:00
	if(!receive_udppacket(from, &pkt))
2014-09-27 17:13:33 +00:00
		return;
2012-02-18 10:48:21 +00:00
2014-09-27 17:13:33 +00:00
	n->sock = ls - listen_socket;
2014-12-07 23:58:09 +00:00
	if(direct && sockaddrcmp(&addr, &n->address))
		update_node_udp(n, &addr);
2002-02-18 16:25:19 +00:00
}
2007-02-27 01:57:01 +00:00
2012-11-29 11:28:23 +00:00
void handle_device_data(void *data, int flags) {
2007-02-27 01:57:01 +00:00
	vpn_packet_t packet;
2014-12-24 21:23:24 +00:00
	packet.offset = DEFAULT_PACKET_OFFSET;
2011-05-29 20:14:35 +00:00
	packet.priority = 0;
2012-02-22 13:23:59 +00:00
	if(devops.read(&packet)) {
2011-05-14 22:42:29 +00:00
		myself->in_packets++;
		myself->in_bytes += packet.len;
2007-02-27 01:57:01 +00:00
		route(myself, &packet);
2011-05-14 22:42:29 +00:00
	}
2007-02-27 01:57:01 +00:00
}