Import Upstream version 1.1~pre17

Guus Sliepen 2019-08-26 13:44:53 +02:00
parent bc8ca65653
commit b511a112e6
216 changed files with 43313 additions and 18448 deletions

@@ -41,25 +41,47 @@ static int io_compare(const io_t *a, const io_t *b) {
#ifndef HAVE_MINGW
return a->fd - b->fd;
#else
return a->event - b->event;
if(a->event < b->event) {
return -1;
}
if(a->event > b->event) {
return 1;
}
return 0;
#endif
}
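
A standalone illustration of the comparison change above: on Windows, WSAEVENT is a HANDLE (a pointer-sized value), so narrowing the difference of two handles to int can overflow or flip its sign, while an explicit three-way comparison never does. The helper below is hypothetical and not part of the diff.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper, not part of the diff: three-way comparison of two
   opaque handles via uintptr_t. Subtracting the handles and truncating the
   result to int could overflow or lose the sign on a 64-bit platform. */
static int handle_compare(const void *a, const void *b) {
    uintptr_t x = (uintptr_t)a;
    uintptr_t y = (uintptr_t)b;

    if(x < y) {
        return -1;
    }

    if(x > y) {
        return 1;
    }

    return 0;
}

int main(void) {
    int objects[2];
    printf("%d\n", handle_compare(&objects[0], &objects[1])); /* -1: lower address first */
    return 0;
}
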
static int timeout_compare(const timeout_t *a, const timeout_t *b) {
struct timeval diff;
timersub(&a->tv, &b->tv, &diff);
if(diff.tv_sec < 0)
if(diff.tv_sec < 0) {
return -1;
if(diff.tv_sec > 0)
}
if(diff.tv_sec > 0) {
return 1;
if(diff.tv_usec < 0)
}
if(diff.tv_usec < 0) {
return -1;
if(diff.tv_usec > 0)
}
if(diff.tv_usec > 0) {
return 1;
if(a < b)
}
if(a < b) {
return -1;
if(a > b)
}
if(a > b) {
return 1;
}
return 0;
}
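
Note that the comparator above falls back to comparing the objects' addresses, so two timeouts with exactly the same expiry still get distinct positions in the splay tree. The standalone mirror below (a minimal stand-in type, not tinc's timeout_t) shows that it never reports equality for two different objects.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/time.h>

/* Minimal stand-in for timeout_t, for illustration only. */
struct fake_timeout {
    struct timeval tv;
};

/* Same ordering rule as timeout_compare above: expiry time first, then the
   object's address as a tie-breaker. */
static int fake_compare(const struct fake_timeout *a, const struct fake_timeout *b) {
    struct timeval diff;
    timersub(&a->tv, &b->tv, &diff);

    if(diff.tv_sec < 0) {
        return -1;
    }

    if(diff.tv_sec > 0) {
        return 1;
    }

    if(diff.tv_usec < 0) {
        return -1;
    }

    if(diff.tv_usec > 0) {
        return 1;
    }

    if(a < b) {
        return -1;
    }

    if(a > b) {
        return 1;
    }

    return 0;
}

int main(void) {
    struct fake_timeout t[2] = {{{100, 0}}, {{100, 0}}};   /* identical expiry */
    printf("%d %d\n", fake_compare(&t[0], &t[1]), fake_compare(&t[1], &t[0])); /* -1 1: never equal */
    return 0;
}
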
@@ -67,16 +89,21 @@ static splay_tree_t io_tree = {.compare = (splay_compare_t)io_compare};
static splay_tree_t timeout_tree = {.compare = (splay_compare_t)timeout_compare};
void io_add(io_t *io, io_cb_t cb, void *data, int fd, int flags) {
if(io->cb)
if(io->cb) {
return;
}
io->fd = fd;
#ifdef HAVE_MINGW
if (io->fd != -1) {
if(io->fd != -1) {
io->event = WSACreateEvent();
if (io->event == WSA_INVALID_EVENT)
if(io->event == WSA_INVALID_EVENT) {
abort();
}
}
event_count++;
#endif
io->cb = cb;
@@ -85,8 +112,9 @@ void io_add(io_t *io, io_cb_t cb, void *data, int fd, int flags) {
io_set(io, flags);
if(!splay_insert_node(&io_tree, &io->node))
if(!splay_insert_node(&io_tree, &io->node)) {
abort();
}
}
#ifdef HAVE_MINGW
@@ -97,41 +125,60 @@ void io_add_event(io_t *io, io_cb_t cb, void *data, WSAEVENT event) {
#endif
void io_set(io_t *io, int flags) {
if (flags == io->flags)
if(flags == io->flags) {
return;
}
io->flags = flags;
if (io->fd == -1)
if(io->fd == -1) {
return;
}
#ifndef HAVE_MINGW
if(flags & IO_READ)
FD_SET(io->fd, &readfds);
else
FD_CLR(io->fd, &readfds);
if(flags & IO_WRITE)
if(flags & IO_READ) {
FD_SET(io->fd, &readfds);
} else {
FD_CLR(io->fd, &readfds);
}
if(flags & IO_WRITE) {
FD_SET(io->fd, &writefds);
else
} else {
FD_CLR(io->fd, &writefds);
}
#else
long events = 0;
if (flags & IO_WRITE)
if(flags & IO_WRITE) {
events |= WRITE_EVENTS;
if (flags & IO_READ)
}
if(flags & IO_READ) {
events |= READ_EVENTS;
if (WSAEventSelect(io->fd, io->event, events) != 0)
}
if(WSAEventSelect(io->fd, io->event, events) != 0) {
abort();
}
#endif
}
void io_del(io_t *io) {
if(!io->cb)
if(!io->cb) {
return;
}
io_set(io, 0);
#ifdef HAVE_MINGW
if (io->fd != -1 && WSACloseEvent(io->event) == FALSE)
if(io->fd != -1 && WSACloseEvent(io->event) == FALSE) {
abort();
}
event_count--;
#endif
@@ -148,25 +195,31 @@ void timeout_add(timeout_t *timeout, timeout_cb_t cb, void *data, struct timeval
}
void timeout_set(timeout_t *timeout, struct timeval *tv) {
if(timerisset(&timeout->tv))
if(timerisset(&timeout->tv)) {
splay_unlink_node(&timeout_tree, &timeout->node);
}
if(!now.tv_sec)
if(!now.tv_sec) {
gettimeofday(&now, NULL);
}
timeradd(&now, tv, &timeout->tv);
if(!splay_insert_node(&timeout_tree, &timeout->node))
if(!splay_insert_node(&timeout_tree, &timeout->node)) {
abort();
}
}
void timeout_del(timeout_t *timeout) {
if(!timeout->cb)
if(!timeout->cb) {
return;
}
splay_unlink_node(&timeout_tree, &timeout->node);
timeout->cb = 0;
timeout->tv = (struct timeval){0, 0};
timeout->tv = (struct timeval) {
0, 0
};
}
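
For reference, the arithmetic in timeout_set turns a relative interval into the absolute expiry the tree is keyed on. A standalone example of that step (not tinc code):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/time.h>

/* Standalone example: convert a relative 5-second interval into an absolute
   deadline by adding it to the current time, as timeout_set does above. */
int main(void) {
    struct timeval now, interval = {5, 0}, expires;

    gettimeofday(&now, NULL);
    timeradd(&now, &interval, &expires);   /* expires = now + 5s */

    printf("now:     %lld.%06ld\n", (long long)now.tv_sec, (long)now.tv_usec);
    printf("expires: %lld.%06ld\n", (long long)expires.tv_sec, (long)expires.tv_usec);
    return 0;
}
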
#ifndef HAVE_MINGW
@@ -184,41 +237,54 @@ static void signal_handler(int signum) {
}
static void signalio_handler(void *data, int flags) {
(void)data;
(void)flags;
unsigned char signum;
if(read(pipefd[0], &signum, 1) != 1)
return;
signal_t *sig = splay_search(&signal_tree, &((signal_t){.signum = signum}));
if(sig)
if(read(pipefd[0], &signum, 1) != 1) {
return;
}
signal_t *sig = splay_search(&signal_tree, &((signal_t) {
.signum = signum
}));
if(sig) {
sig->cb(sig->data);
}
}
static void pipe_init(void) {
if(!pipe(pipefd))
if(!pipe(pipefd)) {
io_add(&signalio, signalio_handler, NULL, pipefd[0], IO_READ);
}
}
void signal_add(signal_t *sig, signal_cb_t cb, void *data, int signum) {
if(sig->cb)
if(sig->cb) {
return;
}
sig->cb = cb;
sig->data = data;
sig->signum = signum;
sig->node.data = sig;
if(pipefd[0] == -1)
if(pipefd[0] == -1) {
pipe_init();
}
signal(sig->signum, signal_handler);
if(!splay_insert_node(&signal_tree, &sig->node))
if(!splay_insert_node(&signal_tree, &sig->node)) {
abort();
}
}
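
The signal path here is the classic self-pipe trick: the asynchronous handler only writes the signal number into a pipe, and signalio_handler dispatches it from the event loop, where arbitrary code is safe to run. A standalone, runnable sketch of the same idea (illustrative names, not tinc code):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative self-pipe sketch, not tinc code: the handler only performs an
   async-signal-safe write(); the real work happens when the pipe's read end
   becomes readable in the normal event loop. */
static int sigpipe[2];

static void async_handler(int signum) {
    unsigned char byte = signum;
    ssize_t ignored = write(sigpipe[1], &byte, 1);
    (void)ignored;
}

int main(void) {
    if(pipe(sigpipe) != 0) {
        return 1;
    }

    signal(SIGUSR1, async_handler);
    raise(SIGUSR1);                        /* pretend a signal arrived */

    unsigned char signum;

    if(read(sigpipe[0], &signum, 1) == 1) {
        printf("dispatching signal %d outside the handler\n", signum);
    }

    return 0;
}
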
void signal_del(signal_t *sig) {
if(!sig->cb)
if(!sig->cb) {
return;
}
signal(sig->signum, SIG_DFL);
@@ -227,7 +293,7 @@ void signal_del(signal_t *sig) {
}
#endif
static struct timeval * get_time_remaining(struct timeval *diff) {
static struct timeval *get_time_remaining(struct timeval *diff) {
gettimeofday(&now, NULL);
struct timeval *tv = NULL;
@@ -237,8 +303,10 @@ static struct timeval * get_time_remaining(struct timeval *diff) {
if(diff->tv_sec < 0) {
timeout->cb(timeout->data);
if(timercmp(&timeout->tv, &now, <))
if(timercmp(&timeout->tv, &now, <)) {
timeout_del(timeout);
}
} else {
tv = diff;
break;
@@ -258,8 +326,8 @@ bool event_loop(void) {
while(running) {
struct timeval diff;
struct timeval *tv = get_time_remaining(&diff);
memcpy(&readable, &readfds, sizeof readable);
memcpy(&writable, &writefds, sizeof writable);
memcpy(&readable, &readfds, sizeof(readable));
memcpy(&writable, &writefds, sizeof(writable));
int fds = 0;
@@ -271,39 +339,48 @@ bool event_loop(void) {
int n = select(fds, &readable, &writable, NULL, tv);
if(n < 0) {
if(sockwouldblock(sockerrno))
if(sockwouldblock(sockerrno)) {
continue;
else
} else {
return false;
}
}
if(!n)
if(!n) {
continue;
}
unsigned int curgen = io_tree.generation;
for splay_each(io_t, io, &io_tree) {
if(FD_ISSET(io->fd, &writable))
if(FD_ISSET(io->fd, &writable)) {
io->cb(io->data, IO_WRITE);
else if(FD_ISSET(io->fd, &readable))
} else if(FD_ISSET(io->fd, &readable)) {
io->cb(io->data, IO_READ);
else
} else {
continue;
}
/*
There are scenarios in which the callback will remove another io_t from the tree
(e.g. closing a double connection). Since splay_each does not support that, we
need to exit the loop now. That's okay, since any remaining events will get picked
up by the next select() call.
need to exit the loop if that happens. That's okay, since any remaining events will
get picked up by the next select() call.
*/
break;
if(curgen != io_tree.generation) {
break;
}
}
}
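
The generation check introduced above is a general pattern for iterating a container whose callbacks may insert or remove members: the container bumps a counter on every structural change, and the loop bails out as soon as the counter moves, leaving the rest for the next pass (here, the next select() round). A standalone sketch with a plain linked list (illustrative names, not tinc's splay tree):

#include <stdio.h>

/* Standalone sketch, not tinc code: a list that bumps `generation` on every
   structural change, and a dispatch loop that stops iterating as soon as a
   callback has modified the list. */
typedef struct node {
    struct node *next;
    void (*cb)(struct node *self);
} node_t;

static struct {
    node_t *head;
    unsigned int generation;
} list;

static void list_remove(node_t *n) {
    for(node_t **p = &list.head; *p; p = &(*p)->next) {
        if(*p == n) {
            *p = n->next;
            list.generation++;             /* invalidates any running iteration */
            return;
        }
    }
}

static void noop(node_t *self) {
    (void)self;
    printf("callback ran\n");
}

static node_t third = {NULL, noop};

static void remove_third(node_t *self) {
    (void)self;
    printf("callback removes another node\n");
    list_remove(&third);
}

static node_t second = {&third, remove_third};
static node_t first = {&second, noop};

static void dispatch(void) {
    unsigned int curgen = list.generation;

    for(node_t *n = list.head; n; n = n->next) {
        n->cb(n);

        if(curgen != list.generation) {
            printf("list changed: stop, retry on the next pass\n");
            break;
        }
    }
}

int main(void) {
    list.head = &first;
    dispatch();
    return 0;
}
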
#else
while (running) {
while(running) {
struct timeval diff;
struct timeval *tv = get_time_remaining(&diff);
DWORD timeout_ms = tv ? (tv->tv_sec * 1000 + tv->tv_usec / 1000 + 1) : WSA_INFINITE;
if (!event_count) {
if(!event_count) {
Sleep(timeout_ms);
continue;
}
@@ -318,54 +395,90 @@ bool event_loop(void) {
Note that technically FD_CLOSE has the same problem, but it's okay because user code does not rely on
this event being fired again if ignored.
*/
io_t* writeable_io = NULL;
for splay_each(io_t, io, &io_tree)
if (io->flags & IO_WRITE && send(io->fd, NULL, 0, 0) == 0) {
writeable_io = io;
break;
unsigned int curgen = io_tree.generation;
for splay_each(io_t, io, &io_tree) {
if(io->flags & IO_WRITE && send(io->fd, NULL, 0, 0) == 0) {
io->cb(io->data, IO_WRITE);
if(curgen != io_tree.generation) {
break;
}
}
if (writeable_io) {
writeable_io->cb(writeable_io->data, IO_WRITE);
continue;
}
WSAEVENT* events = xmalloc(event_count * sizeof(*events));
if(event_count > WSA_MAXIMUM_WAIT_EVENTS) {
WSASetLastError(WSA_INVALID_PARAMETER);
return(false);
}
WSAEVENT events[WSA_MAXIMUM_WAIT_EVENTS];
io_t *io_map[WSA_MAXIMUM_WAIT_EVENTS];
DWORD event_index = 0;
for splay_each(io_t, io, &io_tree) {
events[event_index] = io->event;
io_map[event_index] = io;
event_index++;
}
DWORD result = WSAWaitForMultipleEvents(event_count, events, FALSE, timeout_ms, FALSE);
/*
* If the generation number changes due to event addition
* or removal by a callback we restart the loop.
*/
curgen = io_tree.generation;
WSAEVENT event;
if (result >= WSA_WAIT_EVENT_0 && result < WSA_WAIT_EVENT_0 + event_count)
event = events[result - WSA_WAIT_EVENT_0];
free(events);
if (result == WSA_WAIT_TIMEOUT)
continue;
if (result < WSA_WAIT_EVENT_0 || result >= WSA_WAIT_EVENT_0 + event_count)
return false;
for(DWORD event_offset = 0; event_offset < event_count;) {
DWORD result = WSAWaitForMultipleEvents(event_count - event_offset, &events[event_offset], FALSE, timeout_ms, FALSE);
io_t *io = splay_search(&io_tree, &((io_t){.event = event}));
if (!io)
abort();
if(result == WSA_WAIT_TIMEOUT) {
break;
}
if (io->fd == -1) {
io->cb(io->data, 0);
} else {
WSANETWORKEVENTS network_events;
if (WSAEnumNetworkEvents(io->fd, io->event, &network_events) != 0)
return false;
if (network_events.lNetworkEvents & READ_EVENTS)
io->cb(io->data, IO_READ);
/*
The fd might be available for write too. However, if we already fired the read callback, that
callback might have deleted the io (e.g. through terminate_connection()), so we can't fire the
write callback here. Instead, we loop back and let the writable io loop above handle it.
*/
if(result < WSA_WAIT_EVENT_0 || result >= WSA_WAIT_EVENT_0 + event_count - event_offset) {
return(false);
}
/* Look up io in the map by index. */
event_index = result - WSA_WAIT_EVENT_0 + event_offset;
io_t *io = io_map[event_index];
if(io->fd == -1) {
io->cb(io->data, 0);
if(curgen != io_tree.generation) {
break;
}
} else {
WSANETWORKEVENTS network_events;
if(WSAEnumNetworkEvents(io->fd, io->event, &network_events) != 0) {
return(false);
}
if(network_events.lNetworkEvents & READ_EVENTS) {
io->cb(io->data, IO_READ);
if(curgen != io_tree.generation) {
break;
}
}
/*
The fd might be available for write too. However, if we already fired the read callback, that
callback might have deleted the io (e.g. through terminate_connection()), so we can't fire the
write callback here. Instead, we loop back and let the writable io loop above handle it.
*/
}
/* Continue checking the rest of the events. */
event_offset = event_index + 1;
/* Just poll the next time through. */
timeout_ms = 0;
}
}
#endif
return true;
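
The rewritten Windows branch keeps waiting after it has serviced an event: it re-calls WSAWaitForMultipleEvents on the remaining tail of the events array with a zero timeout, so one pass through the outer loop can drain every event that is already signalled, while any skipped write callbacks are picked up by the send() probe at the top of the next iteration. The standalone sketch below models only the index-advancing part; wait_any() is an illustrative stand-in for WSAWaitForMultipleEvents(), which reports the first signalled slot of the window it is given.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Standalone sketch, not Windows code: wait_any() stands in for
   WSAWaitForMultipleEvents() and reports the first ready slot of the window
   it is given, or -1 when nothing is pending (the "timeout" case). After a
   slot is serviced, the scan resumes just past it, so one pass drains every
   slot that is already ready. */
#define SLOTS 6

static int wait_any(const bool *ready, size_t count) {
    for(size_t i = 0; i < count; i++) {
        if(ready[i]) {
            return (int)i;                 /* index relative to the window */
        }
    }

    return -1;
}

static void service_all(bool ready[SLOTS]) {
    for(size_t offset = 0; offset < SLOTS;) {
        int hit = wait_any(ready + offset, SLOTS - offset);

        if(hit < 0) {
            break;                         /* nothing else is pending */
        }

        size_t index = offset + (size_t)hit;
        printf("servicing slot %zu\n", index);
        ready[index] = false;

        offset = index + 1;                /* continue with the remaining slots */
    }
}

int main(void) {
    bool ready[SLOTS] = {false, true, false, true, true, false};
    service_all(ready);                    /* handles slots 1, 3 and 4 in one pass */
    return 0;
}
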