lwip: rework the tcp ooseq handling.
It now accounts for the number of rx pool buffers used and the available memory when deciding the number of ooseq buffers to retain. Enable TCP Selective ACK (SACK) support, which appears to help a lot on lossy wifi when using the OOSEQ option.
parent 1bfa6c4364, commit f5817aef01
4 changed files with 135 additions and 13 deletions
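Roughly, the new policy gives each TCP pcb an out-of-sequence budget derived from the free heap and from the number of SDK rx pool buffers currently held inside lwip. The following is a self-contained sketch of that arithmetic with made-up inputs; the 30000-byte heap reserve and the pool allowance of 3 are the constants used in the diff below, but the free-heap and usage figures here are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Illustrative inputs only. On the device these come from
 * xPortGetFreeHeapSize() and the pp_rx_pool_usage counter. */
static int32_t example_free_heap = 42000;
static int32_t example_rx_pool_usage = 4;

int main(void)
{
    int32_t queued_bytes = 3000;   /* bytes already on this pcb's ooseq queue */
    int32_t queued_pbufs = 2;      /* pbufs already on this pcb's ooseq queue */

    /* Byte budget: whatever keeps roughly 30000 bytes of heap free, clamped at 0. */
    int32_t max_bytes = (example_free_heap - 30000) + queued_bytes;
    if (max_bytes < 0) max_bytes = 0;

    /* Pbuf budget: let ooseq grow while total rx pool usage stays around 3, clamped at 0. */
    int32_t max_pbufs = 3 - (example_rx_pool_usage - queued_pbufs);
    if (max_pbufs < 0) max_pbufs = 0;

    /* With the numbers above this prints: 15000 bytes, 1 pbuf. */
    printf("ooseq budget: %d bytes, %d pbufs\n", (int)max_bytes, (int)max_pbufs);
    return 0;
}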
@@ -50,6 +50,8 @@
 #include "netif/etharp.h"
 #include "sysparam.h"
 #include "netif/ppp/pppoe.h"
+#include "FreeRTOS.h"
+#include "task.h"
 
 /* declared in libnet80211.a */
 int8_t sdk_ieee80211_output_pbuf(struct netif *ifp, struct pbuf* pb);
@@ -137,6 +139,51 @@ low_level_output(struct netif *netif, struct pbuf *p)
 }
 
+/**
+ * Keep account of the number of PP RX pool buffers being used in lwip,
+ * to help make decisions about the number of OOSEQ buffers to maintain etc.
+ */
+uint32_t pp_rx_pool_usage;
+
+/* Support for recycling a pbuf from the sdk rx pool, and accounting for the
+ * number of these used in lwip. */
+void pp_recycle_rx_pbuf(struct pbuf *p)
+{
+    LWIP_ASSERT("expected esf_buf", p->esf_buf);
+    sdk_system_pp_recycle_rx_pkt(p->esf_buf);
+    taskENTER_CRITICAL();
+    pp_rx_pool_usage--;
+    taskEXIT_CRITICAL();
+}
+
+/* Return the number of ooseq bytes that can be retained given the current
+ * size 'n'. Uses a signed intermediate so the budget clamps to zero rather
+ * than wrapping when free heap drops below the reserve. */
+size_t ooseq_max_bytes(size_t n)
+{
+    int32_t free = xPortGetFreeHeapSize();
+    int32_t target = (free - 30000) + (int32_t)n;
+
+    if (target < 0) {
+        target = 0;
+    }
+
+    return (size_t)target;
+}
+
+/* Return the number of ooseq pbufs that can be retained given the current
+ * size 'n', aiming to keep the total number of rx pool buffers held in
+ * lwip to about 3. */
+size_t ooseq_max_pbufs(size_t n)
+{
+    int32_t usage = (int32_t)pp_rx_pool_usage;
+    int32_t target = 3 - (usage - (int32_t)n);
+
+    if (target < 0) {
+        target = 0;
+    }
+
+    return (size_t)target;
+}
+
 /**
  * This function should be called when a packet is ready to be read
  * from the interface. It uses the function low_level_input() that
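pp_recycle_rx_pbuf() is only the release half of the accounting; its caller is not part of this diff and presumably lives in the patched lwip submodule bumped at the bottom of this commit. A hypothetical hook illustrating how such a call site could pair the decrement with the increment done in ethernetif_input in the next hunk; the function name and placement are assumptions, only p->esf_buf and pp_recycle_rx_pbuf() come from this commit.

#include "lwip/pbuf.h"   /* the port's patched pbuf.h, which adds the esf_buf field */

void pp_recycle_rx_pbuf(struct pbuf *p);

/* Hypothetical free-path hook, not the submodule's actual code. */
static void rx_pool_pbuf_release(struct pbuf *p)
{
    if (p->esf_buf != NULL) {
        /* Hands the SDK buffer back via sdk_system_pp_recycle_rx_pkt() and
         * decrements pp_rx_pool_usage inside a critical section. */
        pp_recycle_rx_pbuf(p);
    }
}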
@@ -165,24 +212,52 @@ void ethernetif_input(struct netif *netif, struct pbuf *p)
 
     ethhdr = p->payload;
 
+    /* Account for the number of rx pool buffers being used. */
+    taskENTER_CRITICAL();
+    uint32_t usage = pp_rx_pool_usage + 1;
+    pp_rx_pool_usage = usage;
+    taskEXIT_CRITICAL();
+
     switch(htons(ethhdr->type)) {
         /* IP or ARP packet? */
         case ETHTYPE_IP:
         case ETHTYPE_IPV6:
+#if 0
+            /* Simulate IP packet loss. */
+            if ((random() & 0xff) < 0x10) {
+                pbuf_free(p);
+                return;
+            }
+#endif
+
         case ETHTYPE_ARP:
 #if PPPOE_SUPPORT
         /* PPPoE packet? */
         case ETHTYPE_PPPOEDISC:
         case ETHTYPE_PPPOE:
 #endif /* PPPOE_SUPPORT */
-            /* full packet send to tcpip_thread to process */
-            if (netif->input(p, netif) != ERR_OK)
-            {
+        {
+            /* full packet send to tcpip_thread to process */
+
+#if 0
+            /* Optionally copy the rx pool buffer and free it immediately. This
+             * helps avoid exhausting the limited rx buffer pool but uses more
+             * memory. */
+            struct pbuf *q = pbuf_clone(PBUF_RAW, PBUF_RAM, p);
+            pbuf_free(p);
+            if (q == NULL) {
+                return;
+            }
+            p = q;
+#endif
+
+            if (netif->input(p, netif) != ERR_OK) {
                 LWIP_DEBUGF(NETIF_DEBUG, ("ethernetif_input: IP input error\n"));
                 pbuf_free(p);
                 p = NULL;
             }
             break;
+        }
 
         default:
             pbuf_free(p);
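The disabled #if 0 branch above documents a trade-off: copying the frame into heap RAM releases the scarce SDK rx pool buffer immediately at the cost of extra memory. A minimal sketch of that pattern as a standalone helper, assuming the lwip 2.x pbuf_clone() API used in the diff; the helper name is illustrative.

#include "lwip/pbuf.h"

/* Copy a driver rx-pool pbuf into PBUF_RAM and release the pool buffer at
 * once. Returns NULL (and the frame is effectively dropped) if no heap is
 * available; the original pbuf is freed in either case. */
static struct pbuf *rx_pool_detach(struct pbuf *p)
{
    struct pbuf *q = pbuf_clone(PBUF_RAW, PBUF_RAM, p);
    pbuf_free(p);   /* returns the buffer to the SDK rx pool */
    return q;
}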
@@ -45,6 +45,12 @@ struct esf_buf;
 void sdk_system_station_got_ip_set(struct ip4_addr *, struct ip4_addr *, struct ip4_addr *);
 void sdk_system_pp_recycle_rx_pkt(struct esf_buf *);
 
+struct pbuf;
+void pp_recycle_rx_pbuf(struct pbuf *);
+
+size_t ooseq_max_bytes(size_t n);
+size_t ooseq_max_pbufs(size_t n);
+
 /* Define generic types used in lwIP */
 typedef uint8_t u8_t;
 typedef int8_t s8_t;
@@ -307,6 +307,13 @@
 #define TCP_QUEUE_OOSEQ 1
 #endif
 
+/**
+ * LWIP_TCP_SACK_OUT==1: TCP will support sending selective acknowledgements (SACKs).
+ */
+#ifndef LWIP_TCP_SACK_OUT
+#define LWIP_TCP_SACK_OUT 1
+#endif
+
 /**
  * TCP_MSS: TCP Maximum segment size. (default is 536, a conservative default,
  * you might want to increase this.)
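Because LWIP_TCP_SACK_OUT is wrapped in #ifndef above, a project that does not want SACK can still opt out before this header is processed. A hypothetical per-program override, purely as an example of using the guard:

/* Hypothetical project override, defined before this lwipopts.h is seen. */
#define LWIP_TCP_SACK_OUT 0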
@@ -319,23 +326,21 @@
 #endif
 
 /**
- * TCP_OOSEQ_MAX_BYTES: The maximum number of bytes queued on ooseq per pcb.
- * Default is 0 (no limit). Only valid for TCP_QUEUE_OOSEQ==1.
+ * TCP_OOSEQ_MAX_BYTES(n):
+ * Return the maximum number of bytes to be queued on ooseq per pcb, given the
+ * current number queued on a pcb. Only valid for TCP_QUEUE_OOSEQ==1.
  */
 #ifndef TCP_OOSEQ_MAX_BYTES
-#if TCP_OOSEQ_MAX_BYTES
-#define TCP_OOSEQ_MAX_BYTES (2 * TCP_MSS)
-#endif
+#define TCP_OOSEQ_MAX_BYTES(n) ooseq_max_bytes(n)
 #endif
 
 /**
- * TCP_OOSEQ_MAX_PBUFS: The maximum number of pbufs queued on ooseq per pcb.
- * Default is 0 (no limit). Only valid for TCP_QUEUE_OOSEQ==1.
+ * TCP_OOSEQ_MAX_PBUFS(n):
+ * Return the maximum number of pbufs to be queued on ooseq per pcb, given the
+ * current number queued on a pcb. Only valid for TCP_QUEUE_OOSEQ==1.
  */
 #ifndef TCP_OOSEQ_MAX_PBUFS
-#if TCP_OOSEQ_MAX_PBUFS
-#define TCP_OOSEQ_MAX_PBUFS 2
-#endif
+#define TCP_OOSEQ_MAX_PBUFS(n) ooseq_max_pbufs(n)
 #endif
 
 /**
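With TCP_OOSEQ_MAX_BYTES and TCP_OOSEQ_MAX_PBUFS turned into function-like macros above, the limit is re-evaluated against live memory state each time lwip asks, rather than being a compile-time constant. The consuming code lives in the patched lwip submodule and is not shown in this commit; the following is a rough, hypothetical illustration of how such a macro can be consulted when deciding whether to queue another out-of-sequence segment. The helper and its caller are assumptions; only the macro shape comes from this diff.

#include <stddef.h>

/* Stand-ins so this sketch compiles on its own; on the device these come
 * from lwipopts.h and the port code in this commit. */
size_t ooseq_max_pbufs(size_t n);
#define TCP_OOSEQ_MAX_PBUFS(n) ooseq_max_pbufs(n)

/* Hypothetical consumer, not lwip's actual tcp_in.c. */
static int ooseq_may_queue_another(size_t ooseq_queued_pbufs)
{
    /* The macro receives the pcb's current queue length, so the port can
     * weigh global rx pool pressure against what this pcb already holds. */
    return ooseq_queued_pbufs < TCP_OOSEQ_MAX_PBUFS(ooseq_queued_pbufs);
}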
@@ -354,6 +359,42 @@
 #define TCP_DEFAULT_LISTEN_BACKLOG 2
 #endif
 
+/**
+ * TCP_OVERSIZE: The maximum number of bytes that tcp_write may
+ * allocate ahead of time in an attempt to create shorter pbuf chains
+ * for transmission. The meaningful range is 0 to TCP_MSS. Some
+ * suggested values are:
+ *
+ * 0:         Disable oversized allocation. Each tcp_write() allocates a new
+              pbuf (old behaviour).
+ * 1:         Allocate size-aligned pbufs with minimal excess. Use this if your
+ *            scatter-gather DMA requires aligned fragments.
+ * 128:       Limit the pbuf/memory overhead to 20%.
+ * TCP_MSS:   Try to create unfragmented TCP packets.
+ * TCP_MSS/4: Try to create 4 fragments or less per TCP packet.
+ */
+#ifndef TCP_OVERSIZE
+#define TCP_OVERSIZE TCP_MSS
+#endif
+
+/**
+ * LWIP_TCP_TIMESTAMPS==1: support the TCP timestamp option.
+ * The timestamp option is currently only used to help remote hosts, it is not
+ * really used locally. Therefore, it is only enabled when a TS option is
+ * received in the initial SYN packet from a remote host.
+ */
+#ifndef LWIP_TCP_TIMESTAMPS
+#define LWIP_TCP_TIMESTAMPS 1
+#endif
+
+/**
+ * TCP_WND_UPDATE_THRESHOLD: difference in window to trigger an
+ * explicit window update
+ */
+#ifndef TCP_WND_UPDATE_THRESHOLD
+#define TCP_WND_UPDATE_THRESHOLD LWIP_MIN((TCP_WND / 4), (TCP_MSS * 4))
+#endif
+
 /*
 ----------------------------------
 ---------- Pbuf options ----------
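As a worked example of the window-update threshold formula above: the TCP_MSS and TCP_WND values below are assumed for illustration, not taken from this file.

#include <stdio.h>

#define LWIP_MIN(a, b) (((a) < (b)) ? (a) : (b))

int main(void)
{
    const int tcp_mss = 1460;          /* assumed */
    const int tcp_wnd = 4 * tcp_mss;   /* assumed: 5840 */

    /* LWIP_MIN(TCP_WND / 4, TCP_MSS * 4) = LWIP_MIN(1460, 5840) = 1460 bytes */
    printf("TCP_WND_UPDATE_THRESHOLD = %d bytes\n",
           LWIP_MIN(tcp_wnd / 4, tcp_mss * 4));
    return 0;
}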
@@ -1 +1 @@
-Subproject commit 0983b8838ea6f1d8340e3f42ec5fb5cf5f05bd4c
+Subproject commit 4caae2e4af10c7fbebf26d05c8263856dbc07ac7