libuv 2022-01-13 (2fbfa035)

Code extracted from:

    https://github.com/libuv/libuv.git

at commit 2fbfa0358b3bb920ddd1c2747d4887b35b9ac161 (v1.x).
libuv upstream authored on 2022-01-13 06:20:14 +01:00; committed by Brad King
parent 27e34e6190, commit 09ec116d91
21 changed files with 169 additions and 914 deletions

View File

@@ -64,7 +64,3 @@ The externally maintained libraries used by libuv are:
- pthread-fixes.c, copyright Google Inc. and Sony Mobile Communications AB.
Three clause BSD license.
- android-ifaddrs.h, android-ifaddrs.c, copyright Berkeley Software Design
Inc, Kenneth MacKay and Emergya (Cloud4all, FP7/2007-2013, grant agreement
n° 289016). Three clause BSD license.

View File

@@ -45,6 +45,8 @@ extern "C" {
# endif
#elif __GNUC__ >= 4
# define UV_EXTERN __attribute__((visibility("default")))
#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x550) /* Sun Studio >= 8 */
# define UV_EXTERN __global
#else
# define UV_EXTERN /* nothing */
#endif
@@ -1658,6 +1660,7 @@ UV_EXTERN int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr);
UV_EXTERN int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size);
UV_EXTERN int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size);
UV_EXTERN int uv_ip_name(const struct sockaddr* src, char* dst, size_t size);
UV_EXTERN int uv_inet_ntop(int af, const void* src, char* dst, size_t size);
UV_EXTERN int uv_inet_pton(int af, const char* src, void* dst);
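
The uv_ip_name declaration added above is a family-agnostic counterpart to uv_ip4_name/uv_ip6_name. A minimal usage sketch (address and port are placeholders, error handling elided):

#include <uv.h>
#include <stdio.h>

int main(void) {
  struct sockaddr_in sa;
  char name[64];

  /* Build an IPv4 sockaddr, then format it back to a string without
   * switching on the address family ourselves. */
  uv_ip4_addr("127.0.0.1", 80, &sa);
  if (uv_ip_name((const struct sockaddr*) &sa, name, sizeof(name)) == 0)
    printf("%s\n", name);   /* prints 127.0.0.1 */

  return 0;
}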

View File

@@ -1,54 +0,0 @@
/*
* Copyright (c) 1995, 1999
* Berkeley Software Design, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* THIS SOFTWARE IS PROVIDED BY Berkeley Software Design, Inc. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design, Inc. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* BSDI ifaddrs.h,v 2.5 2000/02/23 14:51:59 dab Exp
*/
#ifndef _IFADDRS_H_
#define _IFADDRS_H_
struct ifaddrs {
struct ifaddrs *ifa_next;
char *ifa_name;
unsigned int ifa_flags;
struct sockaddr *ifa_addr;
struct sockaddr *ifa_netmask;
struct sockaddr *ifa_dstaddr;
void *ifa_data;
};
/*
* This may have been defined in <net/if.h>. Note that if <net/if.h> is
* to be included it must be included before this header file.
*/
#ifndef ifa_broadaddr
#define ifa_broadaddr ifa_dstaddr /* broadcast address interface */
#endif
#include <sys/cdefs.h>
__BEGIN_DECLS
extern int getifaddrs(struct ifaddrs **ifap);
extern void freeifaddrs(struct ifaddrs *ifa);
__END_DECLS
#endif

View File

@@ -31,7 +31,7 @@
*/
#define UV_VERSION_MAJOR 1
#define UV_VERSION_MINOR 42
#define UV_VERSION_MINOR 43
#define UV_VERSION_PATCH 1
#define UV_VERSION_IS_RELEASE 0
#define UV_VERSION_SUFFIX "dev"

View File

@@ -263,21 +263,14 @@ typedef union {
} unused_; /* TODO: retained for ABI compatibility; remove me in v2.x. */
} uv_cond_t;
typedef union {
struct {
unsigned int num_readers_;
CRITICAL_SECTION num_readers_lock_;
HANDLE write_semaphore_;
} state_;
/* TODO: remove me in v2.x. */
struct {
SRWLOCK unused_;
} unused1_;
/* TODO: remove me in v2.x. */
struct {
uv_mutex_t unused1_;
uv_mutex_t unused2_;
} unused2_;
typedef struct {
SRWLOCK read_write_lock_;
/* TODO: retained for ABI compatibility; remove me in v2.x */
#ifdef _WIN64
unsigned char padding_[72];
#else
unsigned char padding_[44];
#endif
} uv_rwlock_t;
typedef struct {

View File

@@ -160,13 +160,20 @@ static void post(QUEUE* q, enum uv__work_kind kind) {
}
#ifdef __MVS__
/* TODO(itodorov) - zos: revisit when Woz compiler is available. */
__attribute__((destructor))
#endif
void uv__threadpool_cleanup(void) {
unsigned int i;
if (nthreads == 0)
return;
#ifndef __MVS__
/* TODO(gabylb) - zos: revisit when Woz compiler is available. */
post(&exit_message, UV__WORK_CPU);
#endif
for (i = 0; i < nthreads; i++)
if (uv_thread_join(threads + i))

View File

@@ -1,713 +0,0 @@
/*
Copyright (c) 2013, Kenneth MacKay
Copyright (c) 2014, Emergya (Cloud4all, FP7/2007-2013 grant agreement #289016)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "uv/android-ifaddrs.h"
#include "uv-common.h"
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>
#include <net/if_arp.h>
#include <netinet/in.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_packet.h>
typedef struct NetlinkList
{
struct NetlinkList *m_next;
struct nlmsghdr *m_data;
unsigned int m_size;
} NetlinkList;
static int netlink_socket(pid_t *p_pid)
{
struct sockaddr_nl l_addr;
socklen_t l_len;
int l_socket = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if(l_socket < 0)
{
return -1;
}
memset(&l_addr, 0, sizeof(l_addr));
l_addr.nl_family = AF_NETLINK;
if(bind(l_socket, (struct sockaddr *)&l_addr, sizeof(l_addr)) < 0)
{
close(l_socket);
return -1;
}
l_len = sizeof(l_addr);
if(getsockname(l_socket, (struct sockaddr *)&l_addr, &l_len) < 0)
{
close(l_socket);
return -1;
}
*p_pid = l_addr.nl_pid;
return l_socket;
}
static int netlink_send(int p_socket, int p_request)
{
char l_buffer[NLMSG_ALIGN(sizeof(struct nlmsghdr)) + NLMSG_ALIGN(sizeof(struct rtgenmsg))];
struct nlmsghdr *l_hdr;
struct rtgenmsg *l_msg;
struct sockaddr_nl l_addr;
memset(l_buffer, 0, sizeof(l_buffer));
l_hdr = (struct nlmsghdr *)l_buffer;
l_msg = (struct rtgenmsg *)NLMSG_DATA(l_hdr);
l_hdr->nlmsg_len = NLMSG_LENGTH(sizeof(*l_msg));
l_hdr->nlmsg_type = p_request;
l_hdr->nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST;
l_hdr->nlmsg_pid = 0;
l_hdr->nlmsg_seq = p_socket;
l_msg->rtgen_family = AF_UNSPEC;
memset(&l_addr, 0, sizeof(l_addr));
l_addr.nl_family = AF_NETLINK;
return (sendto(p_socket, l_hdr, l_hdr->nlmsg_len, 0, (struct sockaddr *)&l_addr, sizeof(l_addr)));
}
static int netlink_recv(int p_socket, void *p_buffer, size_t p_len)
{
struct sockaddr_nl l_addr;
struct msghdr l_msg;
struct iovec l_iov;
l_iov.iov_base = p_buffer;
l_iov.iov_len = p_len;
for(;;)
{
int l_result;
l_msg.msg_name = (void *)&l_addr;
l_msg.msg_namelen = sizeof(l_addr);
l_msg.msg_iov = &l_iov;
l_msg.msg_iovlen = 1;
l_msg.msg_control = NULL;
l_msg.msg_controllen = 0;
l_msg.msg_flags = 0;
l_result = recvmsg(p_socket, &l_msg, 0);
if(l_result < 0)
{
if(errno == EINTR)
{
continue;
}
return -2;
}
/* Buffer was too small */
if(l_msg.msg_flags & MSG_TRUNC)
{
return -1;
}
return l_result;
}
}
static struct nlmsghdr *getNetlinkResponse(int p_socket, pid_t p_pid, int *p_size, int *p_done)
{
size_t l_size = 4096;
void *l_buffer = NULL;
for(;;)
{
int l_read;
uv__free(l_buffer);
l_buffer = uv__malloc(l_size);
if (l_buffer == NULL)
{
return NULL;
}
l_read = netlink_recv(p_socket, l_buffer, l_size);
*p_size = l_read;
if(l_read == -2)
{
uv__free(l_buffer);
return NULL;
}
if(l_read >= 0)
{
struct nlmsghdr *l_hdr;
for(l_hdr = (struct nlmsghdr *)l_buffer; NLMSG_OK(l_hdr, (unsigned int)l_read); l_hdr = (struct nlmsghdr *)NLMSG_NEXT(l_hdr, l_read))
{
if((pid_t)l_hdr->nlmsg_pid != p_pid || (int)l_hdr->nlmsg_seq != p_socket)
{
continue;
}
if(l_hdr->nlmsg_type == NLMSG_DONE)
{
*p_done = 1;
break;
}
if(l_hdr->nlmsg_type == NLMSG_ERROR)
{
uv__free(l_buffer);
return NULL;
}
}
return l_buffer;
}
l_size *= 2;
}
}
static NetlinkList *newListItem(struct nlmsghdr *p_data, unsigned int p_size)
{
NetlinkList *l_item = uv__malloc(sizeof(NetlinkList));
if (l_item == NULL)
{
return NULL;
}
l_item->m_next = NULL;
l_item->m_data = p_data;
l_item->m_size = p_size;
return l_item;
}
static void freeResultList(NetlinkList *p_list)
{
NetlinkList *l_cur;
while(p_list)
{
l_cur = p_list;
p_list = p_list->m_next;
uv__free(l_cur->m_data);
uv__free(l_cur);
}
}
static NetlinkList *getResultList(int p_socket, int p_request, pid_t p_pid)
{
int l_size;
int l_done;
NetlinkList *l_list;
NetlinkList *l_end;
if(netlink_send(p_socket, p_request) < 0)
{
return NULL;
}
l_list = NULL;
l_end = NULL;
l_done = 0;
while(!l_done)
{
NetlinkList *l_item;
struct nlmsghdr *l_hdr = getNetlinkResponse(p_socket, p_pid, &l_size, &l_done);
/* Error */
if(!l_hdr)
{
freeResultList(l_list);
return NULL;
}
l_item = newListItem(l_hdr, l_size);
if (!l_item)
{
freeResultList(l_list);
return NULL;
}
if(!l_list)
{
l_list = l_item;
}
else
{
l_end->m_next = l_item;
}
l_end = l_item;
}
return l_list;
}
static size_t maxSize(size_t a, size_t b)
{
return (a > b ? a : b);
}
static size_t calcAddrLen(sa_family_t p_family, int p_dataSize)
{
switch(p_family)
{
case AF_INET:
return sizeof(struct sockaddr_in);
case AF_INET6:
return sizeof(struct sockaddr_in6);
case AF_PACKET:
return maxSize(sizeof(struct sockaddr_ll), offsetof(struct sockaddr_ll, sll_addr) + p_dataSize);
default:
return maxSize(sizeof(struct sockaddr), offsetof(struct sockaddr, sa_data) + p_dataSize);
}
}
static void makeSockaddr(sa_family_t p_family, struct sockaddr *p_dest, void *p_data, size_t p_size)
{
switch(p_family)
{
case AF_INET:
memcpy(&((struct sockaddr_in*)p_dest)->sin_addr, p_data, p_size);
break;
case AF_INET6:
memcpy(&((struct sockaddr_in6*)p_dest)->sin6_addr, p_data, p_size);
break;
case AF_PACKET:
memcpy(((struct sockaddr_ll*)p_dest)->sll_addr, p_data, p_size);
((struct sockaddr_ll*)p_dest)->sll_halen = p_size;
break;
default:
memcpy(p_dest->sa_data, p_data, p_size);
break;
}
p_dest->sa_family = p_family;
}
static void addToEnd(struct ifaddrs **p_resultList, struct ifaddrs *p_entry)
{
if(!*p_resultList)
{
*p_resultList = p_entry;
}
else
{
struct ifaddrs *l_cur = *p_resultList;
while(l_cur->ifa_next)
{
l_cur = l_cur->ifa_next;
}
l_cur->ifa_next = p_entry;
}
}
static int interpretLink(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList)
{
struct ifaddrs *l_entry;
char *l_index;
char *l_name;
char *l_addr;
char *l_data;
struct ifinfomsg *l_info = (struct ifinfomsg *)NLMSG_DATA(p_hdr);
size_t l_nameSize = 0;
size_t l_addrSize = 0;
size_t l_dataSize = 0;
size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg));
struct rtattr *l_rta;
for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
switch(l_rta->rta_type)
{
case IFLA_ADDRESS:
case IFLA_BROADCAST:
l_addrSize += NLMSG_ALIGN(calcAddrLen(AF_PACKET, l_rtaDataSize));
break;
case IFLA_IFNAME:
l_nameSize += NLMSG_ALIGN(l_rtaSize + 1);
break;
case IFLA_STATS:
l_dataSize += NLMSG_ALIGN(l_rtaSize);
break;
default:
break;
}
}
l_entry = uv__malloc(sizeof(struct ifaddrs) + sizeof(int) + l_nameSize + l_addrSize + l_dataSize);
if (l_entry == NULL)
{
return -1;
}
memset(l_entry, 0, sizeof(struct ifaddrs));
l_entry->ifa_name = "";
l_index = ((char *)l_entry) + sizeof(struct ifaddrs);
l_name = l_index + sizeof(int);
l_addr = l_name + l_nameSize;
l_data = l_addr + l_addrSize;
/* Save the interface index so we can look it up when handling the
* addresses.
*/
memcpy(l_index, &l_info->ifi_index, sizeof(int));
l_entry->ifa_flags = l_info->ifi_flags;
l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg));
for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
void *l_rtaData = RTA_DATA(l_rta);
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
switch(l_rta->rta_type)
{
case IFLA_ADDRESS:
case IFLA_BROADCAST:
{
size_t l_addrLen = calcAddrLen(AF_PACKET, l_rtaDataSize);
makeSockaddr(AF_PACKET, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize);
((struct sockaddr_ll *)l_addr)->sll_ifindex = l_info->ifi_index;
((struct sockaddr_ll *)l_addr)->sll_hatype = l_info->ifi_type;
if(l_rta->rta_type == IFLA_ADDRESS)
{
l_entry->ifa_addr = (struct sockaddr *)l_addr;
}
else
{
l_entry->ifa_broadaddr = (struct sockaddr *)l_addr;
}
l_addr += NLMSG_ALIGN(l_addrLen);
break;
}
case IFLA_IFNAME:
strncpy(l_name, l_rtaData, l_rtaDataSize);
l_name[l_rtaDataSize] = '\0';
l_entry->ifa_name = l_name;
break;
case IFLA_STATS:
memcpy(l_data, l_rtaData, l_rtaDataSize);
l_entry->ifa_data = l_data;
break;
default:
break;
}
}
addToEnd(p_resultList, l_entry);
return 0;
}
static struct ifaddrs *findInterface(int p_index, struct ifaddrs **p_links, int p_numLinks)
{
int l_num = 0;
struct ifaddrs *l_cur = *p_links;
while(l_cur && l_num < p_numLinks)
{
char *l_indexPtr = ((char *)l_cur) + sizeof(struct ifaddrs);
int l_index;
memcpy(&l_index, l_indexPtr, sizeof(int));
if(l_index == p_index)
{
return l_cur;
}
l_cur = l_cur->ifa_next;
++l_num;
}
return NULL;
}
static int interpretAddr(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList, int p_numLinks)
{
struct ifaddrmsg *l_info = (struct ifaddrmsg *)NLMSG_DATA(p_hdr);
struct ifaddrs *l_interface = findInterface(l_info->ifa_index, p_resultList, p_numLinks);
size_t l_nameSize = 0;
size_t l_addrSize = 0;
int l_addedNetmask = 0;
size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg));
struct rtattr *l_rta;
struct ifaddrs *l_entry;
char *l_name;
char *l_addr;
for(l_rta = IFA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
if(l_info->ifa_family == AF_PACKET)
{
continue;
}
switch(l_rta->rta_type)
{
case IFA_ADDRESS:
case IFA_LOCAL:
l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
if((l_info->ifa_family == AF_INET || l_info->ifa_family == AF_INET6) && !l_addedNetmask)
{
/* Make room for netmask */
l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
l_addedNetmask = 1;
}
break;
case IFA_BROADCAST:
l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
break;
case IFA_LABEL:
l_nameSize += NLMSG_ALIGN(l_rtaDataSize + 1);
break;
default:
break;
}
}
l_entry = uv__malloc(sizeof(struct ifaddrs) + l_nameSize + l_addrSize);
if (l_entry == NULL)
{
return -1;
}
memset(l_entry, 0, sizeof(struct ifaddrs));
l_entry->ifa_name = (l_interface ? l_interface->ifa_name : "");
l_name = ((char *)l_entry) + sizeof(struct ifaddrs);
l_addr = l_name + l_nameSize;
l_entry->ifa_flags = l_info->ifa_flags;
if(l_interface)
{
l_entry->ifa_flags |= l_interface->ifa_flags;
}
l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg));
for(l_rta = IFA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
void *l_rtaData = RTA_DATA(l_rta);
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
switch(l_rta->rta_type)
{
case IFA_ADDRESS:
case IFA_BROADCAST:
case IFA_LOCAL:
{
size_t l_addrLen = calcAddrLen(l_info->ifa_family, l_rtaDataSize);
makeSockaddr(l_info->ifa_family, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize);
if(l_info->ifa_family == AF_INET6)
{
if(IN6_IS_ADDR_LINKLOCAL((struct in6_addr *)l_rtaData) || IN6_IS_ADDR_MC_LINKLOCAL((struct in6_addr *)l_rtaData))
{
((struct sockaddr_in6 *)l_addr)->sin6_scope_id = l_info->ifa_index;
}
}
/* Apparently in a point-to-point network IFA_ADDRESS contains
* the dest address and IFA_LOCAL contains the local address
*/
if(l_rta->rta_type == IFA_ADDRESS)
{
if(l_entry->ifa_addr)
{
l_entry->ifa_dstaddr = (struct sockaddr *)l_addr;
}
else
{
l_entry->ifa_addr = (struct sockaddr *)l_addr;
}
}
else if(l_rta->rta_type == IFA_LOCAL)
{
if(l_entry->ifa_addr)
{
l_entry->ifa_dstaddr = l_entry->ifa_addr;
}
l_entry->ifa_addr = (struct sockaddr *)l_addr;
}
else
{
l_entry->ifa_broadaddr = (struct sockaddr *)l_addr;
}
l_addr += NLMSG_ALIGN(l_addrLen);
break;
}
case IFA_LABEL:
strncpy(l_name, l_rtaData, l_rtaDataSize);
l_name[l_rtaDataSize] = '\0';
l_entry->ifa_name = l_name;
break;
default:
break;
}
}
if(l_entry->ifa_addr && (l_entry->ifa_addr->sa_family == AF_INET || l_entry->ifa_addr->sa_family == AF_INET6))
{
unsigned l_maxPrefix = (l_entry->ifa_addr->sa_family == AF_INET ? 32 : 128);
unsigned l_prefix = (l_info->ifa_prefixlen > l_maxPrefix ? l_maxPrefix : l_info->ifa_prefixlen);
unsigned char l_mask[16] = {0};
unsigned i;
for(i=0; i<(l_prefix/8); ++i)
{
l_mask[i] = 0xff;
}
if(l_prefix % 8)
{
l_mask[i] = 0xff << (8 - (l_prefix % 8));
}
makeSockaddr(l_entry->ifa_addr->sa_family, (struct sockaddr *)l_addr, l_mask, l_maxPrefix / 8);
l_entry->ifa_netmask = (struct sockaddr *)l_addr;
}
addToEnd(p_resultList, l_entry);
return 0;
}
static int interpretLinks(int p_socket, pid_t p_pid, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList)
{
int l_numLinks = 0;
for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
{
unsigned int l_nlsize = p_netlinkList->m_size;
struct nlmsghdr *l_hdr;
for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
{
if((pid_t)l_hdr->nlmsg_pid != p_pid || (int)l_hdr->nlmsg_seq != p_socket)
{
continue;
}
if(l_hdr->nlmsg_type == NLMSG_DONE)
{
break;
}
if(l_hdr->nlmsg_type == RTM_NEWLINK)
{
if(interpretLink(l_hdr, p_resultList) == -1)
{
return -1;
}
++l_numLinks;
}
}
}
return l_numLinks;
}
static int interpretAddrs(int p_socket, pid_t p_pid, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList, int p_numLinks)
{
for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
{
unsigned int l_nlsize = p_netlinkList->m_size;
struct nlmsghdr *l_hdr;
for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
{
if((pid_t)l_hdr->nlmsg_pid != p_pid || (int)l_hdr->nlmsg_seq != p_socket)
{
continue;
}
if(l_hdr->nlmsg_type == NLMSG_DONE)
{
break;
}
if(l_hdr->nlmsg_type == RTM_NEWADDR)
{
if (interpretAddr(l_hdr, p_resultList, p_numLinks) == -1)
{
return -1;
}
}
}
}
return 0;
}
int getifaddrs(struct ifaddrs **ifap)
{
int l_socket;
int l_result;
int l_numLinks;
pid_t l_pid;
NetlinkList *l_linkResults;
NetlinkList *l_addrResults;
if(!ifap)
{
return -1;
}
*ifap = NULL;
l_socket = netlink_socket(&l_pid);
if(l_socket < 0)
{
return -1;
}
l_linkResults = getResultList(l_socket, RTM_GETLINK, l_pid);
if(!l_linkResults)
{
close(l_socket);
return -1;
}
l_addrResults = getResultList(l_socket, RTM_GETADDR, l_pid);
if(!l_addrResults)
{
close(l_socket);
freeResultList(l_linkResults);
return -1;
}
l_result = 0;
l_numLinks = interpretLinks(l_socket, l_pid, l_linkResults, ifap);
if(l_numLinks == -1 || interpretAddrs(l_socket, l_pid, l_addrResults, ifap, l_numLinks) == -1)
{
l_result = -1;
}
freeResultList(l_linkResults);
freeResultList(l_addrResults);
close(l_socket);
return l_result;
}
void freeifaddrs(struct ifaddrs *ifa)
{
struct ifaddrs *l_cur;
while(ifa)
{
l_cur = ifa;
ifa = ifa->ifa_next;
uv__free(l_cur);
}
}
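
For reference, the interface this removed file provided is the ordinary getifaddrs/freeifaddrs API now supplied by Android itself (API level 24 and later). A minimal, platform-generic consumer, not libuv code, walks the singly linked list like this:

#include <ifaddrs.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void) {
  struct ifaddrs* addrs;
  struct ifaddrs* ent;

  if (getifaddrs(&addrs) != 0)
    return 1;

  /* Each entry describes one (interface, address) pair; ifa_addr may be
   * NULL for interfaces that have no address of a given family. */
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (ent->ifa_addr == NULL)
      continue;
    printf("%s family=%d flags=0x%x\n",
           ent->ifa_name, ent->ifa_addr->sa_family, ent->ifa_flags);
  }

  freeifaddrs(addrs);
  return 0;
}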

View File

@@ -96,9 +96,9 @@ static int uv__run_pending(uv_loop_t* loop);
/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->base) ==
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->base) ==
sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->len) ==
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->len) ==
sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
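
The stray & in the old asserts made them compare the sizes of pointers to the members (always pointer-sized) rather than the members themselves. A standalone toy illustration of the corrected form, using hypothetical struct names rather than libuv's:

#include <stddef.h>

struct buf_like   { char* base;     size_t len;     };
struct iovec_like { void* iov_base; size_t iov_len; };

/* sizeof(&((struct buf_like*) 0)->base) is the size of a char**, which is
 * always pointer-sized, so the old form passed regardless of the member
 * types. Dropping the '&' compares the members themselves, which is the
 * ABI property actually being checked. */
_Static_assert(sizeof(((struct buf_like*) 0)->base) ==
               sizeof(((struct iovec_like*) 0)->iov_base),
               "base members must have identical size");
_Static_assert(sizeof(((struct buf_like*) 0)->len) ==
               sizeof(((struct iovec_like*) 0)->iov_len),
               "len members must have identical size");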

View File

@@ -280,14 +280,18 @@ static int uv__get_cpu_speed(uint64_t* speed) {
NULL,
0);
if (freq_ref) {
uint32_t freq;
const UInt8* freq_ref_ptr = pCFDataGetBytePtr(freq_ref);
CFIndex len = pCFDataGetLength(freq_ref);
CFRange range;
range.location = 0;
range.length = len;
if (len == 8)
memcpy(speed, freq_ref_ptr, 8);
else if (len == 4) {
uint32_t v;
memcpy(&v, freq_ref_ptr, 4);
*speed = v;
} else {
*speed = 0;
}
pCFDataGetBytes(freq_ref, range, (UInt8*)&freq);
*speed = freq;
pCFRelease(freq_ref);
pCFRelease(data);
break;

View File

@@ -53,7 +53,7 @@ void uv_dlclose(uv_lib_t* lib) {
int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) {
dlerror(); /* Reset error status. */
*ptr = dlsym(lib->handle, name);
return uv__dlerror(lib);
return *ptr ? 0 : uv__dlerror(lib);
}
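
With this change uv_dlsym reports failure only when the looked-up symbol is NULL. A minimal sketch of the call sequence; the library and symbol names are placeholders:

#include <uv.h>
#include <stdio.h>

int main(void) {
  uv_lib_t lib;
  void* sym;

  if (uv_dlopen("libexample.so", &lib) != 0) {
    fprintf(stderr, "dlopen: %s\n", uv_dlerror(&lib));
    return 1;
  }

  /* A non-NULL result now returns 0 without consulting dlerror(). */
  if (uv_dlsym(&lib, "example_init", &sym) == 0)
    ((void (*)(void)) sym)();
  else
    fprintf(stderr, "dlsym: %s\n", uv_dlerror(&lib));

  uv_dlclose(&lib);
  return 0;
}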

View File

@@ -326,6 +326,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (errno != ENOENT)
abort();
}
if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
revents |= UV__POLLRDHUP;
}
if (ev->filter == EV_OOBAND) {
@@ -359,9 +361,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (ev->flags & EV_ERROR)
revents |= POLLERR;
if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
revents |= UV__POLLRDHUP;
if (revents == 0)
continue;

View File

@@ -45,6 +45,10 @@
#define HAVE_IFADDRS_H 1
# if defined(__ANDROID_API__) && __ANDROID_API__ < 24
# undef HAVE_IFADDRS_H
#endif
#ifdef __UCLIBC__
# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32
# undef HAVE_IFADDRS_H
@@ -52,11 +56,7 @@
#endif
#ifdef HAVE_IFADDRS_H
# if defined(__ANDROID__)
# include "uv/android-ifaddrs.h"
# else
# include <ifaddrs.h>
# endif
# include <ifaddrs.h>
# include <sys/socket.h>
# include <net/ethernet.h>
# include <netpacket/packet.h>
@@ -788,7 +788,7 @@ uint64_t uv_get_free_memory(void) {
struct sysinfo info;
uint64_t rc;
rc = uv__read_proc_meminfo("MemFree:");
rc = uv__read_proc_meminfo("MemAvailable:");
if (rc != 0)
return rc;
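
The caller-visible effect is that uv_get_free_memory() on Linux now reflects /proc/meminfo's MemAvailable, so the figure includes reclaimable caches. A minimal sketch of the query, values illustrative only:

#include <uv.h>
#include <stdio.h>
#include <inttypes.h>

int main(void) {
  /* On Linux this is now derived from MemAvailable rather than MemFree. */
  uint64_t free_mem = uv_get_free_memory();
  uint64_t total_mem = uv_get_total_memory();

  printf("free:  %" PRIu64 " bytes\n", free_mem);
  printf("total: %" PRIu64 " bytes\n", total_mem);
  return 0;
}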

View File

@@ -136,6 +136,11 @@ static void maybe_resize(uv__os390_epoll* lst, unsigned int len) {
}
void uv__os390_cleanup(void) {
msgctl(uv_backend_fd(uv_default_loop()), IPC_RMID, NULL);
}
static void init_message_queue(uv__os390_epoll* lst) {
struct {
long int header;

View File

@@ -70,5 +70,6 @@ int sem_destroy(UV_PLATFORM_SEM_T* semid);
int sem_post(UV_PLATFORM_SEM_T* semid);
int sem_trywait(UV_PLATFORM_SEM_T* semid);
int sem_wait(UV_PLATFORM_SEM_T* semid);
void uv__os390_cleanup(void);
#endif /* UV_OS390_SYSCALL_H_ */

View File

@@ -58,20 +58,6 @@ struct uv__stream_select_s {
fd_set* swrite;
size_t swrite_sz;
};
/* Due to a possible kernel bug at least in OS X 10.10 "Yosemite",
* EPROTOTYPE can be returned while trying to write to a socket that is
* shutting down. If we retry the write, we should get the expected EPIPE
* instead.
*/
# define RETRY_ON_WRITE_ERROR(errno) (errno == EINTR || errno == EPROTOTYPE)
# define IS_TRANSIENT_WRITE_ERROR(errno, send_handle) \
(errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS || \
(errno == EMSGSIZE && send_handle != NULL))
#else
# define RETRY_ON_WRITE_ERROR(errno) (errno == EINTR)
# define IS_TRANSIENT_WRITE_ERROR(errno, send_handle) \
(errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
#endif /* defined(__APPLE__) */
static void uv__stream_connect(uv_stream_t*);
@@ -866,19 +852,33 @@ static int uv__try_write(uv_stream_t* stream,
do
n = sendmsg(uv__stream_fd(stream), &msg, 0);
while (n == -1 && RETRY_ON_WRITE_ERROR(errno));
while (n == -1 && errno == EINTR);
} else {
do
n = uv__writev(uv__stream_fd(stream), iov, iovcnt);
while (n == -1 && RETRY_ON_WRITE_ERROR(errno));
while (n == -1 && errno == EINTR);
}
if (n >= 0)
return n;
if (IS_TRANSIENT_WRITE_ERROR(errno, send_handle))
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
return UV_EAGAIN;
#ifdef __APPLE__
/* macOS versions 10.10 and 10.15 - and presumably 10.11 to 10.14, too -
* have a bug where a race condition causes the kernel to return EPROTOTYPE
* because the socket isn't fully constructed. It's probably the result of
* the peer closing the connection and that is why libuv translates it to
* ECONNRESET. Previously, libuv retried until the EPROTOTYPE error went
* away but some VPN software causes the same behavior except the error is
* permanent, not transient, turning the retry mechanism into an infinite
* loop. See https://github.com/libuv/libuv/pull/482.
*/
if (errno == EPROTOTYPE)
return UV_ECONNRESET;
#endif /* __APPLE__ */
return UV__ERR(errno);
}
@@ -1010,7 +1010,6 @@ uv_handle_type uv__handle_type(int fd) {
static void uv__stream_eof(uv_stream_t* stream, const uv_buf_t* buf) {
stream->flags |= UV_HANDLE_READ_EOF;
stream->flags &= ~UV_HANDLE_READING;
stream->flags &= ~UV_HANDLE_READABLE;
uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
uv__handle_stop(stream);
uv__stream_osx_interrupt_select(stream);
@@ -1550,15 +1549,12 @@ int uv__read_start(uv_stream_t* stream,
assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE ||
stream->type == UV_TTY);
/* The UV_HANDLE_READING flag is irrelevant of the state of the tcp - it just
* expresses the desired state of the user.
*/
/* The UV_HANDLE_READING flag is irrelevant of the state of the stream - it
* just expresses the desired state of the user. */
stream->flags |= UV_HANDLE_READING;
stream->flags &= ~UV_HANDLE_READ_EOF;
/* TODO: try to do the read inline? */
/* TODO: keep track of tcp state. If we've gotten a EOF then we should
* not start the IO watcher.
*/
assert(uv__stream_fd(stream) >= 0);
assert(alloc_cb);

View File

@@ -654,28 +654,71 @@ int uv__udp_connect(uv_udp_t* handle,
return 0;
}
/* From https://pubs.opengroup.org/onlinepubs/9699919799/functions/connect.html
* The kernels of the UNIX platforms that libuv supports should all follow this
* standard, but their implementations differ, so the pseudocode below explains
* the udp disconnect behaviors:
*
* Predefined stubs for pseudocode:
* 1. sodisconnect: The function to perform the real udp disconnect
* 2. pru_connect: The function to perform the real udp connect
* 3. so: The kernel object match with socket fd
* 4. addr: The sockaddr parameter from user space
*
* BSDs:
* if(sodisconnect(so) == 0) { // udp disconnect succeed
* if (addr->sa_len != so->addr->sa_len) return EINVAL;
* if (addr->sa_family != so->addr->sa_family) return EAFNOSUPPORT;
* pru_connect(so);
* }
* else return EISCONN;
*
* z/OS (same with Windows):
* if(addr->sa_len < so->addr->sa_len) return EINVAL;
* if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
*
* AIX:
* if(addr->sa_len != sizeof(struct sockaddr)) return EINVAL; // ignore ip proto version
* if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
*
* Linux,Others:
* if(addr->sa_len < sizeof(struct sockaddr)) return EINVAL;
* if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
*/
int uv__udp_disconnect(uv_udp_t* handle) {
int r;
#if defined(__MVS__)
struct sockaddr_storage addr;
#else
struct sockaddr addr;
#endif
memset(&addr, 0, sizeof(addr));
#if defined(__MVS__)
addr.ss_family = AF_UNSPEC;
#else
addr.sa_family = AF_UNSPEC;
#endif
do {
errno = 0;
r = connect(handle->io_watcher.fd, &addr, sizeof(addr));
r = connect(handle->io_watcher.fd, (struct sockaddr*) &addr, sizeof(addr));
} while (r == -1 && errno == EINTR);
if (r == -1 && errno != EAFNOSUPPORT)
if (r == -1) {
#if defined(BSD) /* The macro BSD is from sys/param.h */
if (errno != EAFNOSUPPORT && errno != EINVAL)
return UV__ERR(errno);
#else
return UV__ERR(errno);
#endif
}
handle->flags &= ~UV_HANDLE_UDP_CONNECTED;
return 0;
}
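
At the public API level this path is reached by passing NULL to uv_udp_connect, which disconnects a connected handle. A minimal sketch (peer address is a placeholder, error handling elided):

#include <uv.h>

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_udp_t handle;
  struct sockaddr_in dest;

  uv_udp_init(loop, &handle);
  uv_ip4_addr("127.0.0.1", 9999, &dest);

  /* Connect, then disconnect by passing NULL; the disconnect path is the
   * uv__udp_disconnect shown above. */
  uv_udp_connect(&handle, (const struct sockaddr*) &dest);
  uv_udp_connect(&handle, NULL);

  uv_close((uv_handle_t*) &handle, NULL);
  return uv_run(loop, UV_RUN_DEFAULT);
}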
int uv__udp_send(uv_udp_send_t* req,
uv_udp_t* handle,
const uv_buf_t bufs[],

View File

@@ -274,6 +274,20 @@ int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size) {
}
int uv_ip_name(const struct sockaddr *src, char *dst, size_t size) {
switch (src->sa_family) {
case AF_INET:
return uv_inet_ntop(AF_INET, &((struct sockaddr_in *)src)->sin_addr,
dst, size);
case AF_INET6:
return uv_inet_ntop(AF_INET6, &((struct sockaddr_in6 *)src)->sin6_addr,
dst, size);
default:
return UV_EAFNOSUPPORT;
}
}
int uv_tcp_bind(uv_tcp_t* handle,
const struct sockaddr* addr,
unsigned int flags) {
@@ -887,7 +901,12 @@ void uv_library_shutdown(void) {
uv__process_title_cleanup();
uv__signal_cleanup();
#ifdef __MVS__
/* TODO(itodorov) - zos: revisit when Woz compiler is available. */
uv__os390_cleanup();
#else
uv__threadpool_cleanup();
#endif
uv__store_relaxed(&was_shutdown, 1);
}
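
uv_library_shutdown() is the public entry point for this cleanup. A minimal sketch of where it is typically called, assuming all loops and handles are already closed:

#include <uv.h>

int main(void) {
  uv_loop_t loop;

  uv_loop_init(&loop);
  uv_run(&loop, UV_RUN_DEFAULT);
  uv_loop_close(&loop);

  /* Release libuv's global resources once no further libuv calls will be
   * made. On z/OS this now routes through uv__os390_cleanup instead of the
   * threadpool cleanup. */
  uv_library_shutdown();
  return 0;
}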

View File

@@ -1796,7 +1796,6 @@ static void uv_pipe_read_eof(uv_loop_t* loop, uv_pipe_t* handle,
* it. */
eof_timer_destroy(handle);
handle->flags &= ~UV_HANDLE_READABLE;
uv_read_stop((uv_stream_t*) handle);
handle->read_cb((uv_stream_t*) handle, UV_EOF, &buf);

View File

@@ -1044,7 +1044,6 @@ void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
handle->flags &= ~UV_HANDLE_READING;
DECREASE_ACTIVE_COUNT(loop, handle);
}
handle->flags &= ~UV_HANDLE_READABLE;
buf.base = 0;
buf.len = 0;
@@ -1081,7 +1080,7 @@ void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
}
} else {
/* Connection closed */
handle->flags &= ~(UV_HANDLE_READING | UV_HANDLE_READABLE);
handle->flags &= ~UV_HANDLE_READING;
DECREASE_ACTIVE_COUNT(loop, handle);
handle->read_cb((uv_stream_t*)handle, UV_EOF, &buf);
@@ -1651,7 +1650,7 @@ int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int f
err = WSAGetLastError();
if (err == ERROR_IO_PENDING) {
/* Result should complete immediately, since we already called connect,
* but emperically, we sometimes have to poll the kernel a couple times
* but empirically, we sometimes have to poll the kernel a couple times
* until it notices that. */
while (!WSAGetOverlappedResult(client1, &overlap, &bytes, FALSE, &flags)) {
err = WSAGetLastError();

View File

@@ -103,7 +103,7 @@ static UINT __stdcall uv__thread_start(void* arg) {
uv__free(ctx_p);
uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
uv_key_set(&uv__current_thread_key, (void*) ctx.self);
uv_key_set(&uv__current_thread_key, ctx.self);
ctx.entry(ctx.arg);
@@ -183,7 +183,18 @@ int uv_thread_create_ex(uv_thread_t* tid,
uv_thread_t uv_thread_self(void) {
uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
return (uv_thread_t) uv_key_get(&uv__current_thread_key);
uv_thread_t key = uv_key_get(&uv__current_thread_key);
if (key == NULL) {
/* If the thread wasn't started by uv_thread_create (such as the main
* thread), we assign an id to it now. */
if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
GetCurrentProcess(), &key, 0,
FALSE, DUPLICATE_SAME_ACCESS)) {
uv_fatal_error(GetLastError(), "DuplicateHandle");
}
uv_key_set(&uv__current_thread_key, key);
}
return key;
}
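
The effect of the lazy id assignment can be observed by comparing uv_thread_self() in the main thread with the id recorded inside a created thread. A minimal sketch:

#include <uv.h>
#include <stdio.h>

static uv_thread_t worker_id;

static void worker(void* arg) {
  (void) arg;
  worker_id = uv_thread_self();
}

int main(void) {
  uv_thread_t tid;
  uv_thread_t main_id;

  uv_thread_create(&tid, worker, NULL);
  uv_thread_join(&tid);

  /* The main thread was not started by uv_thread_create, so on Windows its
   * id is created on demand by the DuplicateHandle path above. */
  main_id = uv_thread_self();
  printf("same thread? %d\n", uv_thread_equal(&main_id, &worker_id));
  return 0;
}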
@@ -237,113 +248,60 @@ void uv_mutex_unlock(uv_mutex_t* mutex) {
LeaveCriticalSection(mutex);
}
/* Ensure that the ABI for this type remains stable in v1.x */
#ifdef _WIN64
STATIC_ASSERT(sizeof(uv_rwlock_t) == 80);
#else
STATIC_ASSERT(sizeof(uv_rwlock_t) == 48);
#endif
int uv_rwlock_init(uv_rwlock_t* rwlock) {
/* Initialize the semaphore that acts as the write lock. */
HANDLE handle = CreateSemaphoreW(NULL, 1, 1, NULL);
if (handle == NULL)
return uv_translate_sys_error(GetLastError());
rwlock->state_.write_semaphore_ = handle;
/* Initialize the critical section protecting the reader count. */
InitializeCriticalSection(&rwlock->state_.num_readers_lock_);
/* Initialize the reader count. */
rwlock->state_.num_readers_ = 0;
memset(rwlock, 0, sizeof(*rwlock));
InitializeSRWLock(&rwlock->read_write_lock_);
return 0;
}
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
DeleteCriticalSection(&rwlock->state_.num_readers_lock_);
CloseHandle(rwlock->state_.write_semaphore_);
/* SRWLock does not need explicit destruction so long as there are no waiting threads
See: https://docs.microsoft.com/windows/win32/api/synchapi/nf-synchapi-initializesrwlock#remarks */
}
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
/* Acquire the lock that protects the reader count. */
EnterCriticalSection(&rwlock->state_.num_readers_lock_);
/* Increase the reader count, and lock for write if this is the first
* reader.
*/
if (++rwlock->state_.num_readers_ == 1) {
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
if (r != WAIT_OBJECT_0)
uv_fatal_error(GetLastError(), "WaitForSingleObject");
}
/* Release the lock that protects the reader count. */
LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
AcquireSRWLockShared(&rwlock->read_write_lock_);
}
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
int err;
if (!TryEnterCriticalSection(&rwlock->state_.num_readers_lock_))
if (!TryAcquireSRWLockShared(&rwlock->read_write_lock_))
return UV_EBUSY;
err = 0;
if (rwlock->state_.num_readers_ == 0) {
/* Currently there are no other readers, which means that the write lock
* needs to be acquired.
*/
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0);
if (r == WAIT_OBJECT_0)
rwlock->state_.num_readers_++;
else if (r == WAIT_TIMEOUT)
err = UV_EBUSY;
else if (r == WAIT_FAILED)
uv_fatal_error(GetLastError(), "WaitForSingleObject");
} else {
/* The write lock has already been acquired because there are other
* active readers.
*/
rwlock->state_.num_readers_++;
}
LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
return err;
return 0;
}
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
EnterCriticalSection(&rwlock->state_.num_readers_lock_);
if (--rwlock->state_.num_readers_ == 0) {
if (!ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL))
uv_fatal_error(GetLastError(), "ReleaseSemaphore");
}
LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
ReleaseSRWLockShared(&rwlock->read_write_lock_);
}
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
if (r != WAIT_OBJECT_0)
uv_fatal_error(GetLastError(), "WaitForSingleObject");
AcquireSRWLockExclusive(&rwlock->read_write_lock_);
}
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0);
if (r == WAIT_OBJECT_0)
return 0;
else if (r == WAIT_TIMEOUT)
if (!TryAcquireSRWLockExclusive(&rwlock->read_write_lock_))
return UV_EBUSY;
else
uv_fatal_error(GetLastError(), "WaitForSingleObject");
return 0;
}
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
if (!ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL))
uv_fatal_error(GetLastError(), "ReleaseSemaphore");
ReleaseSRWLockExclusive(&rwlock->read_write_lock_);
}
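
The public behavior is unchanged by the SRWLock rewrite. A minimal reader/writer sketch using the same uv_rwlock API, with error handling elided:

#include <uv.h>

static uv_rwlock_t lock;
static int shared_value;

static void reader(void* arg) {
  (void) arg;
  uv_rwlock_rdlock(&lock);
  /* Multiple readers may hold the lock concurrently. */
  (void) shared_value;
  uv_rwlock_rdunlock(&lock);
}

int main(void) {
  uv_thread_t tid;

  uv_rwlock_init(&lock);

  uv_rwlock_wrlock(&lock);      /* exclusive: a writer blocks all readers */
  shared_value = 42;
  uv_rwlock_wrunlock(&lock);

  uv_thread_create(&tid, reader, NULL);
  uv_thread_join(&tid);

  uv_rwlock_destroy(&lock);
  return 0;
}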

View File

@@ -1083,7 +1083,7 @@ int uv__udp_connect(uv_udp_t* handle,
int uv__udp_disconnect(uv_udp_t* handle) {
int err;
struct sockaddr addr;
struct sockaddr_storage addr;
memset(&addr, 0, sizeof(addr));