Mirror of https://github.com/Kitware/CMake.git (synced 2026-01-09 15:20:56 -06:00)
Merge branch 'upstream-libuv' into update-libuv
* upstream-libuv: libuv 2020-09-22 (ed5b42d5)
@@ -251,7 +251,8 @@ typedef struct uv_utsname_s uv_utsname_t;
typedef struct uv_statfs_s uv_statfs_t;

typedef enum {
UV_LOOP_BLOCK_SIGNAL
UV_LOOP_BLOCK_SIGNAL = 0,
UV_METRICS_IDLE_TIME
} uv_loop_option;

typedef enum {
@@ -617,6 +618,12 @@ enum uv_udp_flags {
* must not be freed by the recv_cb callback.
*/
UV_UDP_MMSG_CHUNK = 8,
/*
* Indicates that the buffer provided has been fully utilized by recvmmsg and
* that it should now be freed by the recv_cb callback. When this flag is set
* in uv_udp_recv_cb, nread will always be 0 and addr will always be NULL.
*/
UV_UDP_MMSG_FREE = 16,

/*
* Indicates that recvmmsg should be used, if available.
@@ -697,6 +704,7 @@ UV_EXTERN int uv_udp_try_send(uv_udp_t* handle,
UV_EXTERN int uv_udp_recv_start(uv_udp_t* handle,
uv_alloc_cb alloc_cb,
uv_udp_recv_cb recv_cb);
UV_EXTERN int uv_udp_using_recvmmsg(const uv_udp_t* handle);
UV_EXTERN int uv_udp_recv_stop(uv_udp_t* handle);
UV_EXTERN size_t uv_udp_get_send_queue_size(const uv_udp_t* handle);
UV_EXTERN size_t uv_udp_get_send_queue_count(const uv_udp_t* handle);
@@ -867,6 +875,7 @@ UV_EXTERN int uv_timer_stop(uv_timer_t* handle);
UV_EXTERN int uv_timer_again(uv_timer_t* handle);
UV_EXTERN void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat);
UV_EXTERN uint64_t uv_timer_get_repeat(const uv_timer_t* handle);
UV_EXTERN uint64_t uv_timer_get_due_in(const uv_timer_t* handle);

/*
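The hunks above expose the new metrics API in uv.h: the UV_METRICS_IDLE_TIME loop option and the uv_metrics_idle_time() accessor. A minimal usage sketch follows; this is hypothetical application code written for illustration, not part of this commit.

#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_loop_t* loop = uv_default_loop();

  /* Opt in to idle-time accounting before running the loop. */
  uv_loop_configure(loop, UV_METRICS_IDLE_TIME);

  uv_run(loop, UV_RUN_DEFAULT);

  /* Nanoseconds the event provider (poll) spent blocked waiting for events. */
  printf("loop idle time: %llu ns\n",
         (unsigned long long) uv_metrics_idle_time(loop));
  return 0;
}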
@@ -1208,12 +1217,12 @@ UV_EXTERN uv_pid_t uv_os_getppid(void);

#if defined(__PASE__)
/* On IBM i PASE, the highest process priority is -10 */
# define UV_PRIORITY_LOW 39 // RUNPTY(99)
# define UV_PRIORITY_BELOW_NORMAL 15 // RUNPTY(50)
# define UV_PRIORITY_NORMAL 0 // RUNPTY(20)
# define UV_PRIORITY_ABOVE_NORMAL -4 // RUNTY(12)
# define UV_PRIORITY_HIGH -7 // RUNPTY(6)
# define UV_PRIORITY_HIGHEST -10 // RUNPTY(1)
# define UV_PRIORITY_LOW 39 /* RUNPTY(99) */
# define UV_PRIORITY_BELOW_NORMAL 15 /* RUNPTY(50) */
# define UV_PRIORITY_NORMAL 0 /* RUNPTY(20) */
# define UV_PRIORITY_ABOVE_NORMAL -4 /* RUNTY(12) */
# define UV_PRIORITY_HIGH -7 /* RUNPTY(6) */
# define UV_PRIORITY_HIGHEST -10 /* RUNPTY(1) */
#else
# define UV_PRIORITY_LOW 19
# define UV_PRIORITY_BELOW_NORMAL 10
@@ -1261,6 +1270,7 @@ UV_EXTERN int uv_os_gethostname(char* buffer, size_t* size);

UV_EXTERN int uv_os_uname(uv_utsname_t* buffer);

UV_EXTERN uint64_t uv_metrics_idle_time(uv_loop_t* loop);

typedef enum {
UV_FS_UNKNOWN = -1,
@@ -1792,9 +1802,11 @@ struct uv_loop_s {
unsigned int active_handles;
void* handle_queue[2];
union {
void* unused[2];
void* unused;
unsigned int count;
} active_reqs;
/* Internal storage for future extensions. */
void* internal_fields;
/* Internal flag to signal loop stop. */
unsigned int stop_flag;
UV_LOOP_PRIVATE_FIELDS

@@ -317,7 +317,7 @@
#if defined(EPROTO) && !defined(_WIN32)
# define UV__EPROTO UV__ERR(EPROTO)
#else
# define UV__EPROTO UV__ERR(4046)
# define UV__EPROTO UV__ERR(-4046)
#endif

#if defined(EPROTONOSUPPORT) && !defined(_WIN32)

@@ -76,6 +76,8 @@
# include "posix.h"
#elif defined(__HAIKU__)
# include "posix.h"
#elif defined(__QNX__)
# include "posix.h"
#endif

#ifndef NI_MAXHOST

@@ -31,7 +31,7 @@
*/

#define UV_VERSION_MAJOR 1
#define UV_VERSION_MINOR 37
#define UV_VERSION_MINOR 39
#define UV_VERSION_PATCH 1
#define UV_VERSION_IS_RELEASE 0
#define UV_VERSION_SUFFIX "dev"

@@ -33,7 +33,7 @@ static int uv__random(void* buf, size_t buflen) {

#if defined(__PASE__)
rc = uv__random_readpath("/dev/urandom", buf, buflen);
#elif defined(_AIX)
#elif defined(_AIX) || defined(__QNX__)
rc = uv__random_readpath("/dev/random", buf, buflen);
#elif defined(__APPLE__) || defined(__OpenBSD__) || \
(defined(__ANDROID_API__) && __ANDROID_API__ >= 28)

@@ -1,3 +1,24 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/

#include "strscpy.h"
#include <limits.h> /* SSIZE_MAX */

@@ -1,3 +1,24 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/

#ifndef UV_STRSCPY_H_
#define UV_STRSCPY_H_

@@ -130,6 +130,14 @@ uint64_t uv_timer_get_repeat(const uv_timer_t* handle) {
}

uint64_t uv_timer_get_due_in(const uv_timer_t* handle) {
if (handle->loop->time >= handle->timeout)
return 0;

return handle->timeout - handle->loop->time;
}

int uv__next_timeout(const uv_loop_t* loop) {
const struct heap_node* heap_node;
const uv_timer_t* handle;
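uv_timer_get_due_in(), whose implementation appears in the timer.c hunk above, reports the milliseconds remaining until a timer fires (0 once it is due), measured against the loop's cached time. A small illustrative caller, assuming <uv.h> and <stdio.h>; not part of this commit:

#include <stdio.h>
#include <uv.h>

static void log_due_in(const uv_timer_t* timer) {
  /* 0 means the timer is already due; otherwise milliseconds remaining. */
  printf("timer due in %llu ms\n",
         (unsigned long long) uv_timer_get_due_in(timer));
}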
@@ -22,42 +22,23 @@
#include "uv.h"
#include "internal.h"

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in6_var.h>
#include <arpa/inet.h>

#include <sys/time.h>
#include <unistd.h>
#include <fcntl.h>
#include <utmp.h>
#include <libgen.h>

#include <sys/protosw.h>
#include <procinfo.h>
#include <sys/proc.h>
#include <sys/procfs.h>

#include <sys/poll.h>

#include <sys/pollset.h>
#include <ctype.h>

#include <sys/mntctl.h>
#include <sys/vmount.h>
#include <limits.h>
#include <strings.h>
#include <sys/vnode.h>
extern char* original_exepath;
extern uv_mutex_t process_title_mutex;
extern uv_once_t process_title_mutex_once;
extern void init_process_title_mutex_once(void);

uint64_t uv__hrtime(uv_clocktype_t type) {
uint64_t G = 1000000000;
@@ -78,81 +59,32 @@ uint64_t uv__hrtime(uv_clocktype_t type) {
*/
int uv_exepath(char* buffer, size_t* size) {
int res;
char args[PATH_MAX];
char abspath[PATH_MAX];
size_t abspath_size;
char args[UV__PATH_MAX];
size_t cached_len;
struct procsinfo pi;

if (buffer == NULL || size == NULL || *size == 0)
return UV_EINVAL;

uv_once(&process_title_mutex_once, init_process_title_mutex_once);
uv_mutex_lock(&process_title_mutex);
if (original_exepath != NULL) {
cached_len = strlen(original_exepath);
*size -= 1;
if (*size > cached_len)
*size = cached_len;
memcpy(buffer, original_exepath, *size);
buffer[*size] = '\0';
uv_mutex_unlock(&process_title_mutex);
return 0;
}
uv_mutex_unlock(&process_title_mutex);
pi.pi_pid = getpid();
res = getargs(&pi, sizeof(pi), args, sizeof(args));

if (res < 0)
return UV_EINVAL;

/*
* Possibilities for args:
* i) an absolute path such as: /home/user/myprojects/nodejs/node
* ii) a relative path such as: ./node or ../myprojects/nodejs/node
* iii) a bare filename such as "node", after exporting PATH variable
* to its location.
*/

/* Case i) and ii) absolute or relative paths */
if (strchr(args, '/') != NULL) {
if (realpath(args, abspath) != abspath)
return UV__ERR(errno);

abspath_size = strlen(abspath);

*size -= 1;
if (*size > abspath_size)
*size = abspath_size;

memcpy(buffer, abspath, *size);
buffer[*size] = '\0';

return 0;
} else {
/* Case iii). Search PATH environment variable */
char trypath[PATH_MAX];
char *clonedpath = NULL;
char *token = NULL;
char *path = getenv("PATH");

if (path == NULL)
return UV_EINVAL;

clonedpath = uv__strdup(path);
if (clonedpath == NULL)
return UV_ENOMEM;

token = strtok(clonedpath, ":");
while (token != NULL) {
snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, args);
if (realpath(trypath, abspath) == abspath) {
/* Check the match is executable */
if (access(abspath, X_OK) == 0) {
abspath_size = strlen(abspath);

*size -= 1;
if (*size > abspath_size)
*size = abspath_size;

memcpy(buffer, abspath, *size);
buffer[*size] = '\0';

uv__free(clonedpath);
return 0;
}
}
token = strtok(NULL, ":");
}
uv__free(clonedpath);

/* Out of tokens (path entries), and no match found */
return UV_EINVAL;
}
return uv__search_path(args, buffer, size);
}

@@ -65,14 +65,15 @@
#define RDWR_BUF_SIZE 4096
#define EQ(a,b) (strcmp(a,b) == 0)

static uv_mutex_t process_title_mutex;
static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
char* original_exepath = NULL;
uv_mutex_t process_title_mutex;
uv_once_t process_title_mutex_once = UV_ONCE_INIT;
static void* args_mem = NULL;
static char** process_argv = NULL;
static int process_argc = 0;
static char* process_title_ptr = NULL;

static void init_process_title_mutex_once(void) {
void init_process_title_mutex_once(void) {
uv_mutex_init(&process_title_mutex);
}

@@ -145,6 +146,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int i;
int rc;
int add_failed;
int user_timeout;
int reset_timeout;

if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
@@ -214,7 +217,21 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */

if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
} else {
reset_timeout = 0;
}

for (;;) {
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);

nfds = pollset_poll(loop->backend_fd,
events,
ARRAY_SIZE(events),
@@ -227,6 +244,15 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
SAVE_ERRNO(uv__update_time(loop));

if (nfds == 0) {
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
if (timeout == -1)
continue;
if (timeout > 0)
goto update_timeout;
}

assert(timeout != -1);
return;
}
@@ -236,6 +262,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
abort();
}

if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}

if (timeout == -1)
continue;

@@ -280,16 +311,25 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
/* Run signal watchers last. This also affects child process watchers
* because those are implemented in terms of signal watchers.
*/
if (w == &loop->signal_io_watcher)
if (w == &loop->signal_io_watcher) {
have_signals = 1;
else
} else {
uv__metrics_update_idle_time(loop);
w->cb(loop, w, pe->revents);
}

nevents++;
}

if (have_signals != 0)
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}

if (have_signals != 0) {
uv__metrics_update_idle_time(loop);
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
}

loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
@@ -830,6 +870,7 @@ void uv__fs_event_close(uv_fs_event_t* handle) {

char** uv_setup_args(int argc, char** argv) {
char exepath[UV__PATH_MAX];
char** new_argv;
size_t size;
char* s;
@@ -845,6 +886,15 @@ char** uv_setup_args(int argc, char** argv) {
process_argv = argv;
process_argc = argc;

/* Use argv[0] to determine value for uv_exepath(). */
size = sizeof(exepath);
if (uv__search_path(argv[0], exepath, &size) == 0) {
uv_once(&process_title_mutex_once, init_process_title_mutex_once);
uv_mutex_lock(&process_title_mutex);
original_exepath = uv__strdup(exepath);
uv_mutex_unlock(&process_title_mutex);
}

/* Calculate how much memory we need for the argv strings. */
size = 0;
for (i = 0; i < argc; i++)
@@ -875,6 +925,10 @@ char** uv_setup_args(int argc, char** argv) {
int uv_set_process_title(const char* title) {
char* new_title;

/* If uv_setup_args wasn't called or failed, we can't continue. */
if (process_argv == NULL || args_mem == NULL)
return UV_ENOBUFS;

/* We cannot free this pointer when libuv shuts down,
* the process may still be using it.
*/
@@ -908,6 +962,10 @@ int uv_get_process_title(char* buffer, size_t size) {
if (buffer == NULL || size == 0)
return UV_EINVAL;

/* If uv_setup_args wasn't called, we can't continue. */
if (process_argv == NULL)
return UV_ENOBUFS;

uv_once(&process_title_mutex_once, init_process_title_mutex_once);
uv_mutex_lock(&process_title_mutex);

@@ -113,7 +113,9 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
}

if (ent->ifa_netmask->sa_family == AF_INET6) {
if (ent->ifa_netmask == NULL) {
memset(&address->netmask, 0, sizeof(address->netmask));
} else if (ent->ifa_netmask->sa_family == AF_INET6) {
address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
} else {
address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);

@@ -82,10 +82,6 @@ extern char** environ;
# endif
#endif

#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
# include <dlfcn.h>  /* for dlsym */
#endif

#if defined(__MVS__)
#include <sys/ioctl.h>
#endif
@@ -223,15 +219,23 @@ int uv__getiovmax(void) {
#if defined(IOV_MAX)
return IOV_MAX;
#elif defined(_SC_IOV_MAX)
static int iovmax = -1;
if (iovmax == -1) {
iovmax = sysconf(_SC_IOV_MAX);
/* On some embedded devices (arm-linux-uclibc based ip camera),
* sysconf(_SC_IOV_MAX) can not get the correct value. The return
* value is -1 and the errno is EINPROGRESS. Degrade the value to 1.
*/
if (iovmax == -1) iovmax = 1;
}
static int iovmax_cached = -1;
int iovmax;

iovmax = uv__load_relaxed(&iovmax_cached);
if (iovmax != -1)
return iovmax;

/* On some embedded devices (arm-linux-uclibc based ip camera),
* sysconf(_SC_IOV_MAX) can not get the correct value. The return
* value is -1 and the errno is EINPROGRESS. Degrade the value to 1.
*/
iovmax = sysconf(_SC_IOV_MAX);
if (iovmax == -1)
iovmax = 1;

uv__store_relaxed(&iovmax_cached, iovmax);

return iovmax;
#else
return 1024;
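Several hunks in this commit replace plain reads and writes of one-shot feature flags (iovmax_cached here, and no_msg_cmsg_cloexec, no_preadv, no_statx and the epoll caches later) with uv__load_relaxed()/uv__store_relaxed(). Those macros are libuv-internal and their definitions are not part of this diff; the sketch below shows the same compute-once, publish-with-relaxed-atomics pattern using standard C11 atomics, as an assumption about the intent rather than the actual macro implementation.

#include <stdatomic.h>
#include <unistd.h>

/* Compute a value once, publish it with relaxed atomics, reuse it afterwards. */
static _Atomic int iovmax_cached = -1;

int example_getiovmax(void) {  /* hypothetical stand-in for uv__getiovmax() */
  int iovmax = atomic_load_explicit(&iovmax_cached, memory_order_relaxed);
  if (iovmax != -1)
    return iovmax;

  iovmax = (int) sysconf(_SC_IOV_MAX);
  if (iovmax == -1)
    iovmax = 1;  /* degrade gracefully when sysconf() cannot answer */

  atomic_store_explicit(&iovmax_cached, iovmax, memory_order_relaxed);
  return iovmax;
}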
@@ -382,6 +386,14 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
timeout = uv_backend_timeout(loop);

uv__io_poll(loop, timeout);

/* Run one final update on the provider_idle_time in case uv__io_poll
* returned because the timeout expired, but no events were received. This
* call will be ignored if the provider_entry_time was either never set (if
* the timeout == 0) or was already updated b/c an event was received.
*/
uv__metrics_update_idle_time(loop);

uv__run_check(loop);
uv__run_closing_handles(loop);

@@ -665,7 +677,7 @@ ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
int* end;
#if defined(__linux__)
static int no_msg_cmsg_cloexec;
if (no_msg_cmsg_cloexec == 0) {
if (0 == uv__load_relaxed(&no_msg_cmsg_cloexec)) {
rc = recvmsg(fd, msg, flags | 0x40000000); /* MSG_CMSG_CLOEXEC */
if (rc != -1)
return rc;
@@ -674,7 +686,7 @@ ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
rc = recvmsg(fd, msg, flags);
if (rc == -1)
return UV__ERR(errno);
no_msg_cmsg_cloexec = 1;
uv__store_relaxed(&no_msg_cmsg_cloexec, 1);
} else {
rc = recvmsg(fd, msg, flags);
}
@@ -1145,13 +1157,6 @@ int uv__getpwuid_r(uv_passwd_t* pwd) {
size_t shell_size;
long initsize;
int r;
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
int (*getpwuid_r)(uid_t, struct passwd*, char*, size_t, struct passwd**);

getpwuid_r = dlsym(RTLD_DEFAULT, "getpwuid_r");
if (getpwuid_r == NULL)
return UV_ENOSYS;
#endif

if (pwd == NULL)
return UV_EINVAL;
@@ -1543,3 +1548,78 @@ void uv_sleep(unsigned int msec) {

assert(rc == 0);
}

int uv__search_path(const char* prog, char* buf, size_t* buflen) {
char abspath[UV__PATH_MAX];
size_t abspath_size;
char trypath[UV__PATH_MAX];
char* cloned_path;
char* path_env;
char* token;

if (buf == NULL || buflen == NULL || *buflen == 0)
return UV_EINVAL;

/*
* Possibilities for prog:
* i) an absolute path such as: /home/user/myprojects/nodejs/node
* ii) a relative path such as: ./node or ../myprojects/nodejs/node
* iii) a bare filename such as "node", after exporting PATH variable
* to its location.
*/

/* Case i) and ii) absolute or relative paths */
if (strchr(prog, '/') != NULL) {
if (realpath(prog, abspath) != abspath)
return UV__ERR(errno);

abspath_size = strlen(abspath);

*buflen -= 1;
if (*buflen > abspath_size)
*buflen = abspath_size;

memcpy(buf, abspath, *buflen);
buf[*buflen] = '\0';

return 0;
}

/* Case iii). Search PATH environment variable */
cloned_path = NULL;
token = NULL;
path_env = getenv("PATH");

if (path_env == NULL)
return UV_EINVAL;

cloned_path = uv__strdup(path_env);
if (cloned_path == NULL)
return UV_ENOMEM;

token = strtok(cloned_path, ":");
while (token != NULL) {
snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, prog);
if (realpath(trypath, abspath) == abspath) {
/* Check the match is executable */
if (access(abspath, X_OK) == 0) {
abspath_size = strlen(abspath);

*buflen -= 1;
if (*buflen > abspath_size)
*buflen = abspath_size;

memcpy(buf, abspath, *buflen);
buf[*buflen] = '\0';

uv__free(cloned_path);
return 0;
}
}
token = strtok(NULL, ":");
}
uv__free(cloned_path);

/* Out of tokens (path entries), and no match found */
return UV_EINVAL;
}
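uv__search_path(), added above and declared in internal.h further down in this diff, factors out the PATH-resolution logic that uv_exepath() on AIX and uv_setup_args() on IBM i now share: absolute and relative names go through realpath(), bare names are searched along PATH. A hypothetical call site, assuming <stdio.h> and libuv's internal headers; the buffer size is an assumption since UV__PATH_MAX is internal:

static void print_resolved(const char* argv0) {
  char buf[4096];  /* stand-in for UV__PATH_MAX */
  size_t len = sizeof(buf);

  if (uv__search_path(argv0, buf, &len) == 0)
    printf("resolved executable: %s (%zu bytes)\n", buf, len);
}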
@@ -27,6 +27,7 @@
struct CFArrayCallBacks;
struct CFRunLoopSourceContext;
struct FSEventStreamContext;
struct CFRange;

typedef double CFAbsoluteTime;
typedef double CFTimeInterval;
@@ -42,13 +43,23 @@ typedef unsigned CFStringEncoding;
typedef void* CFAllocatorRef;
typedef void* CFArrayRef;
typedef void* CFBundleRef;
typedef void* CFDataRef;
typedef void* CFDictionaryRef;
typedef void* CFMutableDictionaryRef;
typedef struct CFRange CFRange;
typedef void* CFRunLoopRef;
typedef void* CFRunLoopSourceRef;
typedef void* CFStringRef;
typedef void* CFTypeRef;
typedef void* FSEventStreamRef;

typedef uint32_t IOOptionBits;
typedef unsigned int io_iterator_t;
typedef unsigned int io_object_t;
typedef unsigned int io_service_t;
typedef unsigned int io_registry_entry_t;

typedef void (*FSEventStreamCallback)(const FSEventStreamRef,
void*,
size_t,
@@ -69,6 +80,11 @@ struct FSEventStreamContext {
void* pad[3];
};

struct CFRange {
CFIndex location;
CFIndex length;
};

static const CFStringEncoding kCFStringEncodingUTF8 = 0x8000100;
static const OSStatus noErr = 0;

@@ -25,6 +25,7 @@
#include <stdint.h>
#include <errno.h>

#include <dlfcn.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <mach-o/dyld.h> /* _NSGetExecutablePath */
@@ -32,6 +33,15 @@
#include <sys/sysctl.h>
#include <unistd.h>  /* sysconf */

#if !TARGET_OS_IPHONE
#include "darwin-stub.h"
#endif

static uv_once_t once = UV_ONCE_INIT;
static uint64_t (*time_func)(void);
static mach_timebase_info_data_t timebase;

typedef unsigned char UInt8;

int uv__platform_loop_init(uv_loop_t* loop) {
loop->cf_state = NULL;
@@ -48,15 +58,19 @@ void uv__platform_loop_delete(uv_loop_t* loop) {
}

uint64_t uv__hrtime(uv_clocktype_t type) {
static mach_timebase_info_data_t info;

if ((ACCESS_ONCE(uint32_t, info.numer) == 0 ||
ACCESS_ONCE(uint32_t, info.denom) == 0) &&
mach_timebase_info(&info) != KERN_SUCCESS)
static void uv__hrtime_init_once(void) {
if (KERN_SUCCESS != mach_timebase_info(&timebase))
abort();

return mach_absolute_time() * info.numer / info.denom;
time_func = (uint64_t (*)(void)) dlsym(RTLD_DEFAULT, "mach_continuous_time");
if (time_func == NULL)
time_func = mach_absolute_time;
}

uint64_t uv__hrtime(uv_clocktype_t type) {
uv_once(&once, uv__hrtime_init_once);
return time_func() * timebase.numer / timebase.denom;
}

@@ -171,17 +185,149 @@ int uv_uptime(double* uptime) {
return 0;
}

static int uv__get_cpu_speed(uint64_t* speed) {
/* IOKit */
void (*pIOObjectRelease)(io_object_t);
kern_return_t (*pIOMasterPort)(mach_port_t, mach_port_t*);
CFMutableDictionaryRef (*pIOServiceMatching)(const char*);
kern_return_t (*pIOServiceGetMatchingServices)(mach_port_t,
CFMutableDictionaryRef,
io_iterator_t*);
io_service_t (*pIOIteratorNext)(io_iterator_t);
CFTypeRef (*pIORegistryEntryCreateCFProperty)(io_registry_entry_t,
CFStringRef,
CFAllocatorRef,
IOOptionBits);

/* CoreFoundation */
CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef,
const char*,
CFStringEncoding);
CFStringEncoding (*pCFStringGetSystemEncoding)(void);
UInt8 *(*pCFDataGetBytePtr)(CFDataRef);
CFIndex (*pCFDataGetLength)(CFDataRef);
void (*pCFDataGetBytes)(CFDataRef, CFRange, UInt8*);
void (*pCFRelease)(CFTypeRef);

void* core_foundation_handle;
void* iokit_handle;
int err;

kern_return_t kr;
mach_port_t mach_port;
io_iterator_t it;
io_object_t service;

mach_port = 0;

err = UV_ENOENT;
core_foundation_handle = dlopen("/System/Library/Frameworks/"
"CoreFoundation.framework/"
"Versions/A/CoreFoundation",
RTLD_LAZY | RTLD_LOCAL);
iokit_handle = dlopen("/System/Library/Frameworks/IOKit.framework/"
"Versions/A/IOKit",
RTLD_LAZY | RTLD_LOCAL);

if (core_foundation_handle == NULL || iokit_handle == NULL)
goto out;

#define V(handle, symbol) \
do { \
*(void **)(&p ## symbol) = dlsym((handle), #symbol); \
if (p ## symbol == NULL) \
goto out; \
} \
while (0)
V(iokit_handle, IOMasterPort);
V(iokit_handle, IOServiceMatching);
V(iokit_handle, IOServiceGetMatchingServices);
V(iokit_handle, IOIteratorNext);
V(iokit_handle, IOObjectRelease);
V(iokit_handle, IORegistryEntryCreateCFProperty);
V(core_foundation_handle, CFStringCreateWithCString);
V(core_foundation_handle, CFStringGetSystemEncoding);
V(core_foundation_handle, CFDataGetBytePtr);
V(core_foundation_handle, CFDataGetLength);
V(core_foundation_handle, CFDataGetBytes);
V(core_foundation_handle, CFRelease);
#undef V

#define S(s) pCFStringCreateWithCString(NULL, (s), kCFStringEncodingUTF8)

kr = pIOMasterPort(MACH_PORT_NULL, &mach_port);
assert(kr == KERN_SUCCESS);
CFMutableDictionaryRef classes_to_match
= pIOServiceMatching("IOPlatformDevice");
kr = pIOServiceGetMatchingServices(mach_port, classes_to_match, &it);
assert(kr == KERN_SUCCESS);
service = pIOIteratorNext(it);

CFStringRef device_type_str = S("device_type");
CFStringRef clock_frequency_str = S("clock-frequency");

while (service != 0) {
CFDataRef data;
data = pIORegistryEntryCreateCFProperty(service,
device_type_str,
NULL,
0);
if (data) {
const UInt8* raw = pCFDataGetBytePtr(data);
if (strncmp((char*)raw, "cpu", 3) == 0 ||
strncmp((char*)raw, "processor", 9) == 0) {
CFDataRef freq_ref;
freq_ref = pIORegistryEntryCreateCFProperty(service,
clock_frequency_str,
NULL,
0);
if (freq_ref) {
uint32_t freq;
CFIndex len = pCFDataGetLength(freq_ref);
CFRange range;
range.location = 0;
range.length = len;

pCFDataGetBytes(freq_ref, range, (UInt8*)&freq);
*speed = freq;
pCFRelease(freq_ref);
pCFRelease(data);
break;
}
}
pCFRelease(data);
}

service = pIOIteratorNext(it);
}

pIOObjectRelease(it);

err = 0;
out:
if (core_foundation_handle != NULL)
dlclose(core_foundation_handle);

if (iokit_handle != NULL)
dlclose(iokit_handle);

mach_port_deallocate(mach_task_self(), mach_port);

return err;
}

int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
multiplier = ((uint64_t)1000L / ticks);
char model[512];
uint64_t cpuspeed;
size_t size;
unsigned int i;
natural_t numcpus;
mach_msg_type_number_t msg_type;
processor_cpu_load_info_data_t *info;
uv_cpu_info_t* cpu_info;
uint64_t cpuspeed;
int err;

size = sizeof(model);
if (sysctlbyname("machdep.cpu.brand_string", &model, &size, NULL, 0) &&
@@ -189,9 +335,9 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
return UV__ERR(errno);
}

size = sizeof(cpuspeed);
if (sysctlbyname("hw.cpufrequency", &cpuspeed, &size, NULL, 0))
return UV__ERR(errno);
err = uv__get_cpu_speed(&cpuspeed);
if (err < 0)
return err;

if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &numcpus,
(processor_info_array_t*)&info,

@@ -56,31 +56,6 @@ int uv__platform_loop_init(uv_loop_t* loop) {
void uv__platform_loop_delete(uv_loop_t* loop) {
}

#ifdef __DragonFly__
int uv_exepath(char* buffer, size_t* size) {
char abspath[PATH_MAX * 2 + 1];
ssize_t abspath_size;

if (buffer == NULL || size == NULL || *size == 0)
return UV_EINVAL;

abspath_size = readlink("/proc/curproc/file", abspath, sizeof(abspath));
if (abspath_size < 0)
return UV__ERR(errno);

assert(abspath_size > 0);
*size -= 1;

if (*size > abspath_size)
*size = abspath_size;

memcpy(buffer, abspath, *size);
buffer[*size] = '\0';

return 0;
}
#else
int uv_exepath(char* buffer, size_t* size) {
char abspath[PATH_MAX * 2 + 1];
int mib[4];
@@ -110,7 +85,6 @@ int uv_exepath(char* buffer, size_t* size) {

return 0;
}
#endif

uint64_t uv_get_free_memory(void) {
int freecount;
@@ -290,25 +264,18 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
}

int uv__sendmmsg(int fd,
struct uv__mmsghdr* mmsg,
unsigned int vlen,
unsigned int flags) {
int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if __FreeBSD__ >= 11
return sendmmsg(fd, mmsg, vlen, flags);
return sendmmsg(fd, mmsg, vlen, /* flags */ 0);
#else
return errno = ENOSYS, -1;
#endif
}

int uv__recvmmsg(int fd,
struct uv__mmsghdr* mmsg,
unsigned int vlen,
unsigned int flags,
struct timespec* timeout) {
int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if __FreeBSD__ >= 11
return recvmmsg(fd, mmsg, vlen, flags, timeout);
return recvmmsg(fd, mmsg, vlen, 0 /* flags */, NULL /* timeout */);
#else
return errno = ENOSYS, -1;
#endif

@@ -79,7 +79,11 @@
defined(__NetBSD__)
# include <sys/param.h>
# include <sys/mount.h>
#elif defined(__sun) || defined(__MVS__) || defined(__NetBSD__) || defined(__HAIKU__)
#elif defined(__sun) || \
defined(__MVS__) || \
defined(__NetBSD__) || \
defined(__HAIKU__) || \
defined(__QNX__)
# include <sys/statvfs.h>
#else
# include <sys/statfs.h>
@@ -229,11 +233,7 @@ static ssize_t uv__fs_futime(uv_fs_t* req) {
struct timespec ts[2];
ts[0] = uv__fs_to_timespec(req->atime);
ts[1] = uv__fs_to_timespec(req->mtime);
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
return utimensat(req->file, NULL, ts, 0);
#else
return futimens(req->file, ts);
#endif
#elif defined(__APPLE__) \
|| defined(__DragonFly__) \
|| defined(__FreeBSD__) \
@@ -320,13 +320,14 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
if (path_length < pattern_size ||
strcmp(path + path_length - pattern_size, pattern)) {
errno = EINVAL;
return -1;
r = -1;
goto clobber;
}

uv_once(&once, uv__mkostemp_initonce);

#ifdef O_CLOEXEC
if (no_cloexec_support == 0 && uv__mkostemp != NULL) {
if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) {
r = uv__mkostemp(path, O_CLOEXEC);

if (r >= 0)
@@ -335,11 +336,11 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
/* If mkostemp() returns EINVAL, it means the kernel doesn't
support O_CLOEXEC, so we just fallback to mkstemp() below. */
if (errno != EINVAL)
return r;
goto clobber;

/* We set the static variable so that next calls don't even
try to use mkostemp. */
no_cloexec_support = 1;
uv__store_relaxed(&no_cloexec_support, 1);
}
#endif /* O_CLOEXEC */

@@ -361,6 +362,9 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
if (req->cb != NULL)
uv_rwlock_rdunlock(&req->loop->cloexec_lock);

clobber:
if (r < 0)
path[0] = '\0';
return r;
}

@@ -470,7 +474,7 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
# if defined(__linux__)
if (no_preadv) retry:
if (uv__load_relaxed(&no_preadv)) retry:
# endif
{
result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
@@ -482,7 +486,7 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
req->nbufs,
req->off);
if (result == -1 && errno == ENOSYS) {
no_preadv = 1;
uv__store_relaxed(&no_preadv, 1);
goto retry;
}
}
@@ -639,7 +643,11 @@ static int uv__fs_closedir(uv_fs_t* req) {

static int uv__fs_statfs(uv_fs_t* req) {
uv_statfs_t* stat_fs;
#if defined(__sun) || defined(__MVS__) || defined(__NetBSD__) || defined(__HAIKU__)
#if defined(__sun) || \
defined(__MVS__) || \
defined(__NetBSD__) || \
defined(__HAIKU__) || \
defined(__QNX__)
struct statvfs buf;

if (0 != statvfs(req->path, &buf))
@@ -656,7 +664,12 @@ static int uv__fs_statfs(uv_fs_t* req) {
return -1;
}

#if defined(__sun) || defined(__MVS__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__HAIKU__)
#if defined(__sun) || \
defined(__MVS__) || \
defined(__OpenBSD__) || \
defined(__NetBSD__) || \
defined(__HAIKU__) || \
defined(__QNX__)
stat_fs->f_type = 0; /* f_type is not supported. */
#else
stat_fs->f_type = buf.f_type;
@@ -897,8 +910,27 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
ssize_t r;

off = req->off;

#ifdef __linux__
{
static int copy_file_range_support = 1;

if (copy_file_range_support) {
r = uv__fs_copy_file_range(in_fd, NULL, out_fd, &off, req->bufsml[0].len, 0);

if (r == -1 && errno == ENOSYS) {
errno = 0;
copy_file_range_support = 0;
} else {
goto ok;
}
}
}
#endif

r = sendfile(out_fd, in_fd, &off, req->bufsml[0].len);

ok:
/* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
* it still writes out data. Fortunately, we can detect it by checking if
* the offset has been updated.
@@ -1141,7 +1173,7 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
goto out;
}

dst_flags = O_WRONLY | O_CREAT | O_TRUNC;
dst_flags = O_WRONLY | O_CREAT;

if (req->flags & UV_FS_COPYFILE_EXCL)
dst_flags |= O_EXCL;
@@ -1160,16 +1192,26 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
goto out;
}

/* Get the destination file's mode. */
if (fstat(dstfd, &dst_statsbuf)) {
err = UV__ERR(errno);
goto out;
}
/* If the file is not being opened exclusively, verify that the source and
destination are not the same file. If they are the same, bail out early. */
if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
/* Get the destination file's mode. */
if (fstat(dstfd, &dst_statsbuf)) {
err = UV__ERR(errno);
goto out;
}

/* Check if srcfd and dstfd refer to the same file */
if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
src_statsbuf.st_ino == dst_statsbuf.st_ino) {
goto out;
/* Check if srcfd and dstfd refer to the same file */
if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
src_statsbuf.st_ino == dst_statsbuf.st_ino) {
goto out;
}

/* Truncate the file in case the destination already existed. */
if (ftruncate(dstfd, 0) != 0) {
err = UV__ERR(errno);
goto out;
}
}

if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
@@ -1365,7 +1407,7 @@ static int uv__fs_statx(int fd,
int mode;
int rc;

if (no_statx)
if (uv__load_relaxed(&no_statx))
return UV_ENOSYS;

dirfd = AT_FDCWD;
@@ -1398,7 +1440,7 @@ static int uv__fs_statx(int fd,
* implemented, rc might return 1 with 0 set as the error code in which
* case we return ENOSYS.
*/
no_statx = 1;
uv__store_relaxed(&no_statx, 1);
return UV_ENOSYS;
}

@@ -2041,7 +2083,7 @@ void uv_fs_req_cleanup(uv_fs_t* req) {

/* Only necessary for asychronous requests, i.e., requests with a callback.
* Synchronous ones don't copy their arguments and have req->path and
* req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
* req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
* UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.
*/
if (req->path != NULL &&

@@ -58,6 +58,9 @@
#include <as400_protos.h>
#include <as400_types.h>

char* original_exepath = NULL;
uv_mutex_t process_title_mutex;
uv_once_t process_title_mutex_once = UV_ONCE_INIT;

typedef struct {
int bytes_available;
@@ -171,6 +174,9 @@ static void iconv_a2e(const char* src, unsigned char dst[], size_t length) {
dst[i] = a2e[' '];
}

void init_process_title_mutex_once(void) {
uv_mutex_init(&process_title_mutex);
}

static int get_ibmi_system_status(SSTS0200* rcvr) {
/* rcvrlen is input parameter 2 to QWCRSSTS */
@@ -460,3 +466,36 @@ void uv_free_interface_addresses(uv_interface_address_t* addresses, int count) {
uv__free(addresses);
}

char** uv_setup_args(int argc, char** argv) {
char exepath[UV__PATH_MAX];
char* s;
size_t size;

if (argc > 0) {
/* Use argv[0] to determine value for uv_exepath(). */
size = sizeof(exepath);
if (uv__search_path(argv[0], exepath, &size) == 0) {
uv_once(&process_title_mutex_once, init_process_title_mutex_once);
uv_mutex_lock(&process_title_mutex);
original_exepath = uv__strdup(exepath);
uv_mutex_unlock(&process_title_mutex);
}
}

return argv;
}

int uv_set_process_title(const char* title) {
return 0;
}

int uv_get_process_title(char* buffer, size_t size) {
if (buffer == NULL || size == 0)
return UV_EINVAL;

buffer[0] = '\0';
return 0;
}

void uv__process_title_cleanup(void) {
}

@@ -62,9 +62,7 @@
# include <AvailabilityMacros.h>
#endif

#if defined(_POSIX_PATH_MAX)
# define UV__PATH_MAX _POSIX_PATH_MAX
#elif defined(PATH_MAX)
#if defined(PATH_MAX)
# define UV__PATH_MAX PATH_MAX
#else
# define UV__PATH_MAX 8192
@@ -278,6 +276,7 @@ void uv__udp_finish_close(uv_udp_t* handle);
uv_handle_type uv__handle_type(int fd);
FILE* uv__open_file(const char* path);
int uv__getpwuid_r(uv_passwd_t* pwd);
int uv__search_path(const char* prog, char* buf, size_t* buflen);

/* random */
int uv__random_devurandom(void* buf, size_t buflen);
@@ -345,15 +344,8 @@ struct uv__mmsghdr {
unsigned int msg_len;
};

int uv__recvmmsg(int fd,
struct uv__mmsghdr* mmsg,
unsigned int vlen,
unsigned int flags,
struct timespec* timeout);
int uv__sendmmsg(int fd,
struct uv__mmsghdr* mmsg,
unsigned int vlen,
unsigned int flags);
int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
#else
#define HAVE_MMSG 0
#endif

@@ -82,7 +82,7 @@ int uv__io_fork(uv_loop_t* loop) {
process. So we sidestep the issue by pretending like we never
started it in the first place.
*/
uv__has_forked_with_cfrunloop = 1;
uv__store_relaxed(&uv__has_forked_with_cfrunloop, 1);
uv__free(loop->cf_state);
loop->cf_state = NULL;
}
@@ -129,6 +129,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int fd;
int op;
int i;
int user_timeout;
int reset_timeout;

if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
@@ -202,7 +204,21 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */

if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
} else {
reset_timeout = 0;
}

for (;; nevents = 0) {
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);

if (timeout != -1) {
spec.tv_sec = timeout / 1000;
spec.tv_nsec = (timeout % 1000) * 1000000;
@@ -228,6 +244,15 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
SAVE_ERRNO(uv__update_time(loop));

if (nfds == 0) {
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
if (timeout == -1)
continue;
if (timeout > 0)
goto update_timeout;
}

assert(timeout != -1);
return;
}
@@ -236,6 +261,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (errno != EINTR)
abort();

if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}

if (timeout == 0)
return;

@@ -276,6 +306,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (ev->filter == EVFILT_VNODE) {
assert(w->events == POLLIN);
assert(w->pevents == POLLIN);
uv__metrics_update_idle_time(loop);
w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
nevents++;
continue;
@@ -337,16 +368,25 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
/* Run signal watchers last. This also affects child process watchers
* because those are implemented in terms of signal watchers.
*/
if (w == &loop->signal_io_watcher)
if (w == &loop->signal_io_watcher) {
have_signals = 1;
else
} else {
uv__metrics_update_idle_time(loop);
w->cb(loop, w, revents);
}

nevents++;
}

if (have_signals != 0)
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}

if (have_signals != 0) {
uv__metrics_update_idle_time(loop);
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
}

loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
@@ -487,7 +527,7 @@ int uv_fs_event_start(uv_fs_event_t* handle,
if (!(statbuf.st_mode & S_IFDIR))
goto fallback;

if (!uv__has_forked_with_cfrunloop) {
if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) {
int r;
/* The fallback fd is no longer needed */
uv__close_nocheckstdio(fd);
@@ -522,8 +562,9 @@ int uv_fs_event_stop(uv_fs_event_t* handle) {
uv__handle_stop(handle);

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
if (!uv__has_forked_with_cfrunloop && handle->cf_cb != NULL)
r = uv__fsevents_close(handle);
if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop))
if (handle->cf_cb != NULL)
r = uv__fsevents_close(handle);
#endif

if (handle->event_watcher.fd != -1) {

@@ -85,17 +85,7 @@ static uint64_t read_cpufreq(unsigned int cpunum);

int uv__platform_loop_init(uv_loop_t* loop) {
int fd;

/* It was reported that EPOLL_CLOEXEC is not defined on Android API < 21,
* a.k.a. Lollipop. Since EPOLL_CLOEXEC is an alias for O_CLOEXEC on all
* architectures, we just use that instead.
*/
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
fd = -1;
errno = ENOSYS;
#else
fd = epoll_create1(O_CLOEXEC);
#endif

/* epoll_create1() can fail either because it's not implemented (old kernel)
* or because it doesn't understand the O_CLOEXEC flag.
@@ -208,8 +198,10 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
* that being the largest value I have seen in the wild (and only once.)
*/
static const int max_safe_timeout = 1789569;
static int no_epoll_pwait;
static int no_epoll_wait;
static int no_epoll_pwait_cached;
static int no_epoll_wait_cached;
int no_epoll_pwait;
int no_epoll_wait;
struct epoll_event events[1024];
struct epoll_event* pe;
struct epoll_event e;
@@ -226,6 +218,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int fd;
int op;
int i;
int user_timeout;
int reset_timeout;

if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
@@ -281,7 +275,31 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
count = 48; /* Benchmarks suggest this gives the best throughput. */
real_timeout = timeout;

if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
} else {
reset_timeout = 0;
user_timeout = 0;
}

/* You could argue there is a dependency between these two but
* ultimately we don't care about their ordering with respect
* to one another. Worst case, we make a few system calls that
* could have been avoided because another thread already knows
* they fail with ENOSYS. Hardly the end of the world.
*/
no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached);
no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);

for (;;) {
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);

/* See the comment for max_safe_timeout for an explanation of why
* this is necessary. Executive summary: kernel bug workaround.
*/
@@ -293,25 +311,24 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
abort();

if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
nfds = -1;
errno = ENOSYS;
#else
nfds = epoll_pwait(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout,
&sigset);
#endif
if (nfds == -1 && errno == ENOSYS)
if (nfds == -1 && errno == ENOSYS) {
uv__store_relaxed(&no_epoll_pwait_cached, 1);
no_epoll_pwait = 1;
}
} else {
nfds = epoll_wait(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout);
if (nfds == -1 && errno == ENOSYS)
if (nfds == -1 && errno == ENOSYS) {
uv__store_relaxed(&no_epoll_wait_cached, 1);
no_epoll_wait = 1;
}
}

if (sigmask != 0 && no_epoll_pwait != 0)
@@ -327,6 +344,14 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (nfds == 0) {
assert(timeout != -1);

if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}

if (timeout == -1)
continue;

if (timeout == 0)
return;

@@ -346,6 +371,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (errno != EINTR)
abort();

if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}

if (timeout == -1)
continue;

@@ -425,17 +455,26 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
/* Run signal watchers last. This also affects child process watchers
* because those are implemented in terms of signal watchers.
*/
if (w == &loop->signal_io_watcher)
if (w == &loop->signal_io_watcher) {
have_signals = 1;
else
} else {
uv__metrics_update_idle_time(loop);
w->cb(loop, w, pe->events);
}

nevents++;
}
}

if (have_signals != 0)
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}

if (have_signals != 0) {
uv__metrics_update_idle_time(loop);
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
}

loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
@@ -483,18 +522,22 @@ uint64_t uv__hrtime(uv_clocktype_t type) {
/* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE
* when it has microsecond granularity or better (unlikely).
*/
if (type == UV_CLOCK_FAST && fast_clock_id == -1) {
if (clock_getres(CLOCK_MONOTONIC_COARSE, &t) == 0 &&
t.tv_nsec <= 1 * 1000 * 1000) {
fast_clock_id = CLOCK_MONOTONIC_COARSE;
} else {
fast_clock_id = CLOCK_MONOTONIC;
}
}
clock_id = CLOCK_MONOTONIC;
if (type != UV_CLOCK_FAST)
goto done;

clock_id = uv__load_relaxed(&fast_clock_id);
if (clock_id != -1)
goto done;

clock_id = CLOCK_MONOTONIC;
if (type == UV_CLOCK_FAST)
clock_id = fast_clock_id;
if (0 == clock_getres(CLOCK_MONOTONIC_COARSE, &t))
if (t.tv_nsec <= 1 * 1000 * 1000)
clock_id = CLOCK_MONOTONIC_COARSE;

uv__store_relaxed(&fast_clock_id, clock_id);

done:

if (clock_gettime(clock_id, &t))
return 0; /* Not really possible. */
@@ -982,43 +1025,51 @@ void uv__set_process_title(const char* title) {
}

static uint64_t uv__read_proc_meminfo(const char* what) {
uint64_t rc;
static int uv__slurp(const char* filename, char* buf, size_t len) {
ssize_t n;
char* p;
int fd;
char buf[4096]; /* Large enough to hold all of /proc/meminfo. */

rc = 0;
fd = uv__open_cloexec("/proc/meminfo", O_RDONLY);
assert(len > 0);

fd = uv__open_cloexec(filename, O_RDONLY);
if (fd < 0)
return 0;
return fd;

n = read(fd, buf, sizeof(buf) - 1);

if (n <= 0)
goto out;

buf[n] = '\0';
p = strstr(buf, what);

if (p == NULL)
goto out;

p += strlen(what);

if (1 != sscanf(p, "%" PRIu64 " kB", &rc))
goto out;

rc *= 1024;

out:
do
n = read(fd, buf, len - 1);
while (n == -1 && errno == EINTR);

if (uv__close_nocheckstdio(fd))
abort();

return rc;
if (n < 0)
return UV__ERR(errno);

buf[n] = '\0';

return 0;
}

static uint64_t uv__read_proc_meminfo(const char* what) {
uint64_t rc;
char* p;
char buf[4096]; /* Large enough to hold all of /proc/meminfo. */

if (uv__slurp("/proc/meminfo", buf, sizeof(buf)))
return 0;

p = strstr(buf, what);

if (p == NULL)
return 0;

p += strlen(what);

rc = 0;
sscanf(p, "%" PRIu64 " kB", &rc);

return rc * 1024;
}
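uv__slurp(), introduced in the hunk above, centralizes the open/read/close handling for small pseudo-files and is then reused by uv__read_proc_meminfo(), uv__read_cgroups_uint64() and uv_loadavg() below. A hedged sketch of the call pattern, written as a hypothetical helper inside the same file (uv__slurp() is static there); it assumes <stdio.h> is available:

static long example_read_uptime(void) {
  char buf[128];
  double up;

  if (uv__slurp("/proc/uptime", buf, sizeof(buf)))
    return -1;                       /* open or read failed */

  if (1 != sscanf(buf, "%lf", &up))  /* buf is NUL-terminated by uv__slurp() */
    return -1;

  return (long) up;
}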
@@ -1056,28 +1107,13 @@ uint64_t uv_get_total_memory(void) {

static uint64_t uv__read_cgroups_uint64(const char* cgroup, const char* param) {
char filename[256];
uint64_t rc;
int fd;
ssize_t n;
char buf[32]; /* Large enough to hold an encoded uint64_t. */

snprintf(filename, 256, "/sys/fs/cgroup/%s/%s", cgroup, param);
uint64_t rc;

rc = 0;
fd = uv__open_cloexec(filename, O_RDONLY);

if (fd < 0)
return 0;

n = read(fd, buf, sizeof(buf) - 1);

if (n > 0) {
buf[n] = '\0';
snprintf(filename, sizeof(filename), "/sys/fs/cgroup/%s/%s", cgroup, param);
if (0 == uv__slurp(filename, buf, sizeof(buf)))
sscanf(buf, "%" PRIu64, &rc);
}

if (uv__close_nocheckstdio(fd))
abort();

return rc;
}
@@ -1091,3 +1127,20 @@ uint64_t uv_get_constrained_memory(void) {
*/
return uv__read_cgroups_uint64("memory", "memory.limit_in_bytes");
}


void uv_loadavg(double avg[3]) {
struct sysinfo info;
char buf[128]; /* Large enough to hold all of /proc/loadavg. */

if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf)))
if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]))
return;

if (sysinfo(&info) < 0)
return;

avg[0] = (double) info.loads[0] / 65536.0;
avg[1] = (double) info.loads[1] / 65536.0;
avg[2] = (double) info.loads[2] / 65536.0;
}

@@ -37,8 +37,6 @@
#ifndef __NR_recvmmsg
# if defined(__x86_64__)
# define __NR_recvmmsg 299
# elif defined(__i386__)
# define __NR_recvmmsg 337
# elif defined(__arm__)
# define __NR_recvmmsg (UV_SYSCALL_BASE + 365)
# endif
@@ -47,8 +45,6 @@
#ifndef __NR_sendmmsg
# if defined(__x86_64__)
# define __NR_sendmmsg 307
# elif defined(__i386__)
# define __NR_sendmmsg 345
# elif defined(__arm__)
# define __NR_sendmmsg (UV_SYSCALL_BASE + 374)
# endif
@@ -94,6 +90,24 @@
# endif
#endif /* __NR_pwritev */

#ifndef __NR_copy_file_range
# if defined(__x86_64__)
# define __NR_copy_file_range 326
# elif defined(__i386__)
# define __NR_copy_file_range 377
# elif defined(__s390__)
# define __NR_copy_file_range 375
# elif defined(__arm__)
# define __NR_copy_file_range (UV_SYSCALL_BASE + 391)
# elif defined(__aarch64__)
# define __NR_copy_file_range 285
# elif defined(__powerpc__)
# define __NR_copy_file_range 379
# elif defined(__arc__)
# define __NR_copy_file_range 285
# endif
#endif /* __NR_copy_file_range */

#ifndef __NR_statx
# if defined(__x86_64__)
# define __NR_statx 332
@@ -128,25 +142,51 @@

struct uv__mmsghdr;

int uv__sendmmsg(int fd,
struct uv__mmsghdr* mmsg,
unsigned int vlen,
unsigned int flags) {
#if defined(__NR_sendmmsg)
return syscall(__NR_sendmmsg, fd, mmsg, vlen, flags);
int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if defined(__i386__)
unsigned long args[4];
int rc;

args[0] = (unsigned long) fd;
args[1] = (unsigned long) mmsg;
args[2] = (unsigned long) vlen;
args[3] = /* flags */ 0;

/* socketcall() raises EINVAL when SYS_SENDMMSG is not supported. */
rc = syscall(/* __NR_socketcall */ 102, 20 /* SYS_SENDMMSG */, args);
if (rc == -1)
if (errno == EINVAL)
errno = ENOSYS;

return rc;
#elif defined(__NR_sendmmsg)
return syscall(__NR_sendmmsg, fd, mmsg, vlen, /* flags */ 0);
#else
return errno = ENOSYS, -1;
#endif
}


int uv__recvmmsg(int fd,
struct uv__mmsghdr* mmsg,
unsigned int vlen,
unsigned int flags,
struct timespec* timeout) {
#if defined(__NR_recvmmsg)
return syscall(__NR_recvmmsg, fd, mmsg, vlen, flags, timeout);
int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if defined(__i386__)
unsigned long args[5];
int rc;

args[0] = (unsigned long) fd;
args[1] = (unsigned long) mmsg;
args[2] = (unsigned long) vlen;
args[3] = /* flags */ 0;
args[4] = /* timeout */ 0;

/* socketcall() raises EINVAL when SYS_RECVMMSG is not supported. */
rc = syscall(/* __NR_socketcall */ 102, 19 /* SYS_RECVMMSG */, args);
if (rc == -1)
if (errno == EINVAL)
errno = ENOSYS;

return rc;
#elif defined(__NR_recvmmsg)
return syscall(__NR_recvmmsg, fd, mmsg, vlen, /* flags */ 0, /* timeout */ 0);
#else
return errno = ENOSYS, -1;
#endif
@@ -180,6 +220,28 @@ int uv__dup3(int oldfd, int newfd, int flags) {
}


ssize_t
uv__fs_copy_file_range(int fd_in,
ssize_t* off_in,
int fd_out,
ssize_t* off_out,
size_t len,
unsigned int flags)
{
#ifdef __NR_copy_file_range
return syscall(__NR_copy_file_range,
fd_in,
off_in,
fd_out,
off_out,
len,
flags);
#else
return errno = ENOSYS, -1;
#endif
}


int uv__statx(int dirfd,
const char* path,
int flags,

@@ -64,6 +64,13 @@ struct uv__statx {
ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
int uv__dup3(int oldfd, int newfd, int flags);
ssize_t
uv__fs_copy_file_range(int fd_in,
ssize_t* off_in,
int fd_out,
ssize_t* off_out,
size_t len,
unsigned int flags);
int uv__statx(int dirfd,
const char* path,
int flags,

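uv__fs_copy_file_range() above is a thin wrapper over the copy_file_range(2) syscall and returns -1 with errno == ENOSYS where no syscall number is known. A hedged sketch of how a caller could drive it to copy a whole range, looping on short copies; copy_range_all() is illustrative only, not part of this commit, and assumes <errno.h> plus the declaration above:

/* Illustrative caller, not libuv code: copy `len` bytes between two
 * descriptors with the wrapper declared above. Returns 0 on success or -1
 * with errno set; errno == ENOSYS means the kernel lacks copy_file_range(2)
 * and the caller should fall back to a read/write loop. */
static int copy_range_all(int fd_in, int fd_out, size_t len) {
  ssize_t off_in = 0;
  ssize_t off_out = 0;
  ssize_t n;

  while (len > 0) {
    n = uv__fs_copy_file_range(fd_in, &off_in, fd_out, &off_out, len, 0);
    if (n == -1)
      return -1;          /* inspect errno; ENOSYS -> use a fallback path */
    if (n == 0)
      break;              /* EOF on fd_in before `len` bytes were copied */
    len -= (size_t) n;    /* the kernel advanced off_in / off_out for us */
  }
  return 0;
}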
@@ -28,6 +28,7 @@
#include <unistd.h>

int uv_loop_init(uv_loop_t* loop) {
uv__loop_internal_fields_t* lfields;
void* saved_data;
int err;

@@ -36,6 +37,15 @@ int uv_loop_init(uv_loop_t* loop) {
memset(loop, 0, sizeof(*loop));
loop->data = saved_data;

lfields = (uv__loop_internal_fields_t*) uv__calloc(1, sizeof(*lfields));
if (lfields == NULL)
return UV_ENOMEM;
loop->internal_fields = lfields;

err = uv_mutex_init(&lfields->loop_metrics.lock);
if (err)
goto fail_metrics_mutex_init;

heap_init((struct heap*) &loop->timer_heap);
QUEUE_INIT(&loop->wq);
QUEUE_INIT(&loop->idle_handles);
@@ -66,7 +76,7 @@ int uv_loop_init(uv_loop_t* loop) {

err = uv__platform_loop_init(loop);
if (err)
return err;
goto fail_platform_init;

uv__signal_global_once_init();
err = uv_signal_init(loop, &loop->child_watcher);
@@ -106,6 +116,15 @@ fail_rwlock_init:
fail_signal_init:
uv__platform_loop_delete(loop);

fail_platform_init:
uv_mutex_destroy(&lfields->loop_metrics.lock);

fail_metrics_mutex_init:
uv__free(lfields);
loop->internal_fields = NULL;

uv__free(loop->watchers);
loop->nwatchers = 0;
return err;
}

@@ -144,6 +163,8 @@ int uv_loop_fork(uv_loop_t* loop) {


void uv__loop_close(uv_loop_t* loop) {
uv__loop_internal_fields_t* lfields;

uv__signal_loop_cleanup(loop);
uv__platform_loop_delete(loop);
uv__async_stop(loop);
@@ -179,10 +200,23 @@ void uv__loop_close(uv_loop_t* loop) {
uv__free(loop->watchers);
loop->watchers = NULL;
loop->nwatchers = 0;

lfields = uv__get_internal_fields(loop);
uv_mutex_destroy(&lfields->loop_metrics.lock);
uv__free(lfields);
loop->internal_fields = NULL;
}


int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) {
uv__loop_internal_fields_t* lfields;

lfields = uv__get_internal_fields(loop);
if (option == UV_METRICS_IDLE_TIME) {
lfields->flags |= UV_METRICS_IDLE_TIME;
return 0;
}

if (option != UV_LOOP_BLOCK_SIGNAL)
return UV_ENOSYS;

@@ -33,7 +33,6 @@
|
||||
#pragma linkage(BPX4CTW, OS)
|
||||
#pragma linkage(BPX1CTW, OS)
|
||||
|
||||
static int number_of_epolls;
|
||||
static QUEUE global_epoll_queue;
|
||||
static uv_mutex_t global_epoll_lock;
|
||||
static uv_once_t once = UV_ONCE_INIT;
|
||||
|
||||
@@ -254,8 +254,6 @@ static int getexe(const int pid, char* buf, size_t len) {
|
||||
int uv_exepath(char* buffer, size_t* size) {
|
||||
int res;
|
||||
char args[PATH_MAX];
|
||||
char abspath[PATH_MAX];
|
||||
size_t abspath_size;
|
||||
int pid;
|
||||
|
||||
if (buffer == NULL || size == NULL || *size == 0)
|
||||
@@ -266,69 +264,7 @@ int uv_exepath(char* buffer, size_t* size) {
|
||||
if (res < 0)
|
||||
return UV_EINVAL;
|
||||
|
||||
/*
|
||||
* Possibilities for args:
|
||||
* i) an absolute path such as: /home/user/myprojects/nodejs/node
|
||||
* ii) a relative path such as: ./node or ../myprojects/nodejs/node
|
||||
* iii) a bare filename such as "node", after exporting PATH variable
|
||||
* to its location.
|
||||
*/
|
||||
|
||||
/* Case i) and ii) absolute or relative paths */
|
||||
if (strchr(args, '/') != NULL) {
|
||||
if (realpath(args, abspath) != abspath)
|
||||
return UV__ERR(errno);
|
||||
|
||||
abspath_size = strlen(abspath);
|
||||
|
||||
*size -= 1;
|
||||
if (*size > abspath_size)
|
||||
*size = abspath_size;
|
||||
|
||||
memcpy(buffer, abspath, *size);
|
||||
buffer[*size] = '\0';
|
||||
|
||||
return 0;
|
||||
} else {
|
||||
/* Case iii). Search PATH environment variable */
|
||||
char trypath[PATH_MAX];
|
||||
char* clonedpath = NULL;
|
||||
char* token = NULL;
|
||||
char* path = getenv("PATH");
|
||||
|
||||
if (path == NULL)
|
||||
return UV_EINVAL;
|
||||
|
||||
clonedpath = uv__strdup(path);
|
||||
if (clonedpath == NULL)
|
||||
return UV_ENOMEM;
|
||||
|
||||
token = strtok(clonedpath, ":");
|
||||
while (token != NULL) {
|
||||
snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, args);
|
||||
if (realpath(trypath, abspath) == abspath) {
|
||||
/* Check the match is executable */
|
||||
if (access(abspath, X_OK) == 0) {
|
||||
abspath_size = strlen(abspath);
|
||||
|
||||
*size -= 1;
|
||||
if (*size > abspath_size)
|
||||
*size = abspath_size;
|
||||
|
||||
memcpy(buffer, abspath, *size);
|
||||
buffer[*size] = '\0';
|
||||
|
||||
uv__free(clonedpath);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
token = strtok(NULL, ":");
|
||||
}
|
||||
uv__free(clonedpath);
|
||||
|
||||
/* Out of tokens (path entries), and no match found */
|
||||
return UV_EINVAL;
|
||||
}
|
||||
return uv__search_path(args, buffer, size);
|
||||
}
|
||||
|
||||
|
||||
@@ -818,6 +754,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
int fd;
|
||||
int op;
|
||||
int i;
|
||||
int user_timeout;
|
||||
int reset_timeout;
|
||||
|
||||
if (loop->nfds == 0) {
|
||||
assert(QUEUE_EMPTY(&loop->watcher_queue));
|
||||
@@ -870,8 +808,22 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
real_timeout = timeout;
|
||||
int nevents = 0;
|
||||
|
||||
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
|
||||
reset_timeout = 1;
|
||||
user_timeout = timeout;
|
||||
timeout = 0;
|
||||
} else {
|
||||
reset_timeout = 0;
|
||||
}
|
||||
|
||||
nfds = 0;
|
||||
for (;;) {
|
||||
/* Only need to set the provider_entry_time if timeout != 0. The function
|
||||
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
|
||||
*/
|
||||
if (timeout != 0)
|
||||
uv__metrics_set_provider_entry_time(loop);
|
||||
|
||||
if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
|
||||
timeout = max_safe_timeout;
|
||||
|
||||
@@ -887,12 +839,21 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
if (nfds == 0) {
|
||||
assert(timeout != -1);
|
||||
|
||||
if (timeout > 0) {
|
||||
timeout = real_timeout - timeout;
|
||||
continue;
|
||||
if (reset_timeout != 0) {
|
||||
timeout = user_timeout;
|
||||
reset_timeout = 0;
|
||||
}
|
||||
|
||||
return;
|
||||
if (timeout == -1)
|
||||
continue;
|
||||
|
||||
if (timeout == 0)
|
||||
return;
|
||||
|
||||
/* We may have been inside the system call for longer than |timeout|
|
||||
* milliseconds so we need to update the timestamp to avoid drift.
|
||||
*/
|
||||
goto update_timeout;
|
||||
}
|
||||
|
||||
if (nfds == -1) {
|
||||
@@ -900,6 +861,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
if (errno != EINTR)
|
||||
abort();
|
||||
|
||||
if (reset_timeout != 0) {
|
||||
timeout = user_timeout;
|
||||
reset_timeout = 0;
|
||||
}
|
||||
|
||||
if (timeout == -1)
|
||||
continue;
|
||||
|
||||
@@ -954,6 +920,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
pe->events |= w->pevents & (POLLIN | POLLOUT);
|
||||
|
||||
if (pe->events != 0) {
|
||||
uv__metrics_update_idle_time(loop);
|
||||
w->cb(loop, w, pe->events);
|
||||
nevents++;
|
||||
}
|
||||
@@ -961,6 +928,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
loop->watchers[loop->nwatchers] = NULL;
|
||||
loop->watchers[loop->nwatchers + 1] = NULL;
|
||||
|
||||
if (reset_timeout != 0) {
|
||||
timeout = user_timeout;
|
||||
reset_timeout = 0;
|
||||
}
|
||||
|
||||
if (nevents != 0) {
|
||||
if (nfds == ARRAY_SIZE(events) && --count != 0) {
|
||||
/* Poll for more events but don't block this time. */
|
||||
|
||||
@@ -144,6 +144,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
int have_signals;
|
||||
struct pollfd* pe;
|
||||
int fd;
|
||||
int user_timeout;
|
||||
int reset_timeout;
|
||||
|
||||
if (loop->nfds == 0) {
|
||||
assert(QUEUE_EMPTY(&loop->watcher_queue));
|
||||
@@ -177,11 +179,25 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
assert(timeout >= -1);
|
||||
time_base = loop->time;
|
||||
|
||||
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
|
||||
reset_timeout = 1;
|
||||
user_timeout = timeout;
|
||||
timeout = 0;
|
||||
} else {
|
||||
reset_timeout = 0;
|
||||
}
|
||||
|
||||
/* Loop calls to poll() and processing of results. If we get some
|
||||
* results from poll() but they turn out not to be interesting to
|
||||
* our caller then we need to loop around and poll() again.
|
||||
*/
|
||||
for (;;) {
|
||||
/* Only need to set the provider_entry_time if timeout != 0. The function
|
||||
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
|
||||
*/
|
||||
if (timeout != 0)
|
||||
uv__metrics_set_provider_entry_time(loop);
|
||||
|
||||
if (pset != NULL)
|
||||
if (pthread_sigmask(SIG_BLOCK, pset, NULL))
|
||||
abort();
|
||||
@@ -197,6 +213,15 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
SAVE_ERRNO(uv__update_time(loop));
|
||||
|
||||
if (nfds == 0) {
|
||||
if (reset_timeout != 0) {
|
||||
timeout = user_timeout;
|
||||
reset_timeout = 0;
|
||||
if (timeout == -1)
|
||||
continue;
|
||||
if (timeout > 0)
|
||||
goto update_timeout;
|
||||
}
|
||||
|
||||
assert(timeout != -1);
|
||||
return;
|
||||
}
|
||||
@@ -205,6 +230,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
if (errno != EINTR)
|
||||
abort();
|
||||
|
||||
if (reset_timeout != 0) {
|
||||
timeout = user_timeout;
|
||||
reset_timeout = 0;
|
||||
}
|
||||
|
||||
if (timeout == -1)
|
||||
continue;
|
||||
|
||||
@@ -254,6 +284,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
if (w == &loop->signal_io_watcher) {
|
||||
have_signals = 1;
|
||||
} else {
|
||||
uv__metrics_update_idle_time(loop);
|
||||
w->cb(loop, w, pe->revents);
|
||||
}
|
||||
|
||||
@@ -261,8 +292,15 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
}
|
||||
}
|
||||
|
||||
if (have_signals != 0)
|
||||
if (reset_timeout != 0) {
|
||||
timeout = user_timeout;
|
||||
reset_timeout = 0;
|
||||
}
|
||||
|
||||
if (have_signals != 0) {
|
||||
uv__metrics_update_idle_time(loop);
|
||||
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
|
||||
}
|
||||
|
||||
loop->poll_fds_iterating = 0;
|
||||
|
||||
|
||||
@@ -100,6 +100,10 @@ int uv_set_process_title(const char* title) {
|
||||
struct uv__process_title* pt;
|
||||
size_t len;
|
||||
|
||||
/* If uv_setup_args wasn't called or failed, we can't continue. */
|
||||
if (args_mem == NULL)
|
||||
return UV_ENOBUFS;
|
||||
|
||||
pt = &process_title;
|
||||
len = strlen(title);
|
||||
|
||||
@@ -126,6 +130,10 @@ int uv_get_process_title(char* buffer, size_t size) {
|
||||
if (buffer == NULL || size == 0)
|
||||
return UV_EINVAL;
|
||||
|
||||
/* If uv_setup_args wasn't called or failed, we can't continue. */
|
||||
if (args_mem == NULL)
|
||||
return UV_ENOBUFS;
|
||||
|
||||
uv_once(&process_title_mutex_once, init_process_title_mutex_once);
|
||||
uv_mutex_lock(&process_title_mutex);
|
||||
|
||||
|
||||
@@ -30,6 +30,8 @@
|
||||
*/
|
||||
|
||||
/* Android versions < 4.1 have a broken pthread_sigmask. */
|
||||
#include "uv-common.h"
|
||||
|
||||
#include <errno.h>
|
||||
#include <pthread.h>
|
||||
#include <signal.h>
|
||||
@@ -38,13 +40,13 @@ int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset) {
|
||||
static int workaround;
|
||||
int err;
|
||||
|
||||
if (workaround) {
|
||||
if (uv__load_relaxed(&workaround)) {
|
||||
return sigprocmask(how, set, oset);
|
||||
} else {
|
||||
err = pthread_sigmask(how, set, oset);
|
||||
if (err) {
|
||||
if (err == EINVAL && sigprocmask(how, set, oset) == 0) {
|
||||
workaround = 1;
|
||||
uv__store_relaxed(&workaround, 1);
|
||||
return 0;
|
||||
} else {
|
||||
return -1;
|
||||
|
||||
Utilities/cmlibuv/src/unix/qnx.c (new file, 137 lines)
@@ -0,0 +1,137 @@
|
||||
/* Copyright libuv contributors. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to
|
||||
* deal in the Software without restriction, including without limitation the
|
||||
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
* sell copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "uv.h"
|
||||
#include "internal.h"
|
||||
|
||||
#include <string.h>
|
||||
#include <sys/process.h>
|
||||
#include <sys/neutrino.h>
|
||||
#include <sys/memmsg.h>
|
||||
#include <sys/syspage.h>
|
||||
#include <sys/procfs.h>
|
||||
|
||||
static void
|
||||
get_mem_info(uint64_t* totalmem, uint64_t* freemem) {
|
||||
mem_info_t msg;
|
||||
|
||||
memset(&msg, 0, sizeof(msg));
|
||||
msg.i.type = _MEM_INFO;
|
||||
msg.i.fd = -1;
|
||||
|
||||
if (MsgSend(MEMMGR_COID, &msg.i, sizeof(msg.i), &msg.o, sizeof(msg.o))
|
||||
!= -1) {
|
||||
*totalmem = msg.o.info.__posix_tmi_total;
|
||||
*freemem = msg.o.info.posix_tmi_length;
|
||||
} else {
|
||||
*totalmem = 0;
|
||||
*freemem = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void uv_loadavg(double avg[3]) {
|
||||
avg[0] = 0.0;
|
||||
avg[1] = 0.0;
|
||||
avg[2] = 0.0;
|
||||
}
|
||||
|
||||
|
||||
int uv_exepath(char* buffer, size_t* size) {
|
||||
char path[PATH_MAX];
|
||||
if (buffer == NULL || size == NULL || *size == 0)
|
||||
return UV_EINVAL;
|
||||
|
||||
realpath(_cmdname(NULL), path);
|
||||
strlcpy(buffer, path, *size);
|
||||
*size = strlen(buffer);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
uint64_t uv_get_free_memory(void) {
|
||||
uint64_t totalmem;
|
||||
uint64_t freemem;
|
||||
get_mem_info(&totalmem, &freemem);
|
||||
return freemem;
|
||||
}
|
||||
|
||||
|
||||
uint64_t uv_get_total_memory(void) {
|
||||
uint64_t totalmem;
|
||||
uint64_t freemem;
|
||||
get_mem_info(&totalmem, &freemem);
|
||||
return totalmem;
|
||||
}
|
||||
|
||||
|
||||
uint64_t uv_get_constrained_memory(void) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int uv_resident_set_memory(size_t* rss) {
|
||||
int fd;
|
||||
procfs_asinfo asinfo;
|
||||
|
||||
fd = uv__open_cloexec("/proc/self/ctl", O_RDONLY);
|
||||
if (fd == -1)
|
||||
return UV__ERR(errno);
|
||||
|
||||
if (devctl(fd, DCMD_PROC_ASINFO, &asinfo, sizeof(asinfo), 0) == -1) {
|
||||
uv__close(fd);
|
||||
return UV__ERR(errno);
|
||||
}
|
||||
|
||||
uv__close(fd);
|
||||
*rss = asinfo.rss;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int uv_uptime(double* uptime) {
|
||||
struct qtime_entry* qtime = _SYSPAGE_ENTRY(_syspage_ptr, qtime);
|
||||
*uptime = (qtime->nsec / 1000000000.0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
|
||||
struct cpuinfo_entry* cpuinfo =
|
||||
(struct cpuinfo_entry*)_SYSPAGE_ENTRY(_syspage_ptr, new_cpuinfo);
|
||||
size_t cpuinfo_size = _SYSPAGE_ELEMENT_SIZE(_syspage_ptr, cpuinfo);
|
||||
struct strings_entry* strings = _SYSPAGE_ENTRY(_syspage_ptr, strings);
|
||||
int num_cpus = _syspage_ptr->num_cpu;
|
||||
int i;
|
||||
|
||||
*count = num_cpus;
|
||||
*cpu_infos = uv__malloc(num_cpus * sizeof(**cpu_infos));
|
||||
if (*cpu_infos == NULL)
|
||||
return UV_ENOMEM;
|
||||
|
||||
for (i = 0; i < num_cpus; i++) {
|
||||
(*cpu_infos)[i].model = strdup(&strings->data[cpuinfo->name]);
|
||||
(*cpu_infos)[i].speed = cpuinfo->speed;
|
||||
SYSPAGE_ARRAY_ADJ_OFFSET(cpuinfo, cpuinfo, cpuinfo_size);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -143,6 +143,8 @@ static void uv__signal_block_and_lock(sigset_t* saved_sigmask) {
|
||||
if (sigfillset(&new_mask))
|
||||
abort();
|
||||
|
||||
/* to shut up valgrind */
|
||||
sigemptyset(saved_sigmask);
|
||||
if (pthread_sigmask(SIG_SETMASK, &new_mask, saved_sigmask))
|
||||
abort();
|
||||
|
||||
|
||||
@@ -158,6 +158,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
sigset_t set;
|
||||
uint64_t base;
|
||||
uint64_t diff;
|
||||
uint64_t idle_poll;
|
||||
unsigned int nfds;
|
||||
unsigned int i;
|
||||
int saved_errno;
|
||||
@@ -166,6 +167,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
int count;
|
||||
int err;
|
||||
int fd;
|
||||
int user_timeout;
|
||||
int reset_timeout;
|
||||
|
||||
if (loop->nfds == 0) {
|
||||
assert(QUEUE_EMPTY(&loop->watcher_queue));
|
||||
@@ -203,7 +206,21 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
base = loop->time;
|
||||
count = 48; /* Benchmarks suggest this gives the best throughput. */
|
||||
|
||||
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
|
||||
reset_timeout = 1;
|
||||
user_timeout = timeout;
|
||||
timeout = 0;
|
||||
} else {
|
||||
reset_timeout = 0;
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
/* Only need to set the provider_entry_time if timeout != 0. The function
|
||||
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
|
||||
*/
|
||||
if (timeout != 0)
|
||||
uv__metrics_set_provider_entry_time(loop);
|
||||
|
||||
if (timeout != -1) {
|
||||
spec.tv_sec = timeout / 1000;
|
||||
spec.tv_nsec = (timeout % 1000) * 1000000;
|
||||
@@ -246,6 +263,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
SAVE_ERRNO(uv__update_time(loop));
|
||||
|
||||
if (events[0].portev_source == 0) {
|
||||
if (reset_timeout != 0) {
|
||||
timeout = user_timeout;
|
||||
reset_timeout = 0;
|
||||
}
|
||||
|
||||
if (timeout == 0)
|
||||
return;
|
||||
|
||||
@@ -286,10 +308,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
/* Run signal watchers last. This also affects child process watchers
|
||||
* because those are implemented in terms of signal watchers.
|
||||
*/
|
||||
if (w == &loop->signal_io_watcher)
|
||||
if (w == &loop->signal_io_watcher) {
|
||||
have_signals = 1;
|
||||
else
|
||||
} else {
|
||||
uv__metrics_update_idle_time(loop);
|
||||
w->cb(loop, w, pe->portev_events);
|
||||
}
|
||||
|
||||
nevents++;
|
||||
|
||||
@@ -301,8 +325,15 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
|
||||
}
|
||||
|
||||
if (have_signals != 0)
|
||||
if (reset_timeout != 0) {
|
||||
timeout = user_timeout;
|
||||
reset_timeout = 0;
|
||||
}
|
||||
|
||||
if (have_signals != 0) {
|
||||
uv__metrics_update_idle_time(loop);
|
||||
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
|
||||
}
|
||||
|
||||
loop->watchers[loop->nwatchers] = NULL;
|
||||
loop->watchers[loop->nwatchers + 1] = NULL;
|
||||
|
||||
@@ -326,16 +326,19 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
|
||||
|
||||
|
||||
int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
|
||||
static int single_accept = -1;
|
||||
static int single_accept_cached = -1;
|
||||
unsigned long flags;
|
||||
int single_accept;
|
||||
int err;
|
||||
|
||||
if (tcp->delayed_error)
|
||||
return tcp->delayed_error;
|
||||
|
||||
single_accept = uv__load_relaxed(&single_accept_cached);
|
||||
if (single_accept == -1) {
|
||||
const char* val = getenv("UV_TCP_SINGLE_ACCEPT");
|
||||
single_accept = (val != NULL && atoi(val) != 0); /* Off by default. */
|
||||
uv__store_relaxed(&single_accept_cached, single_accept);
|
||||
}
|
||||
|
||||
if (single_accept)
|
||||
|
||||
@@ -172,10 +172,11 @@ static size_t thread_stack_size(void) {
#if defined(__APPLE__) || defined(__linux__)
struct rlimit lim;

if (getrlimit(RLIMIT_STACK, &lim))
abort();

if (lim.rlim_cur != RLIM_INFINITY) {
/* getrlimit() can fail on some aarch64 systems due to a glibc bug where
* the system call wrapper invokes the wrong system call. Don't treat
* that as fatal, just use the default stack size instead.
*/
if (0 == getrlimit(RLIMIT_STACK, &lim) && lim.rlim_cur != RLIM_INFINITY) {
/* pthread_attr_setstacksize() expects page-aligned values. */
lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();

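For reference, the page rounding that comment refers to, as a standalone sketch (not libuv code): pthread_attr_setstacksize() rejects sizes that are not page-aligned or fall below PTHREAD_STACK_MIN, so a limit read from RLIMIT_STACK is rounded down and clamped first.

#include <limits.h>
#include <unistd.h>

static size_t page_align_stack(size_t requested) {
  size_t page = (size_t) getpagesize();

  requested -= requested % page;      /* round down to a page boundary */
#ifdef PTHREAD_STACK_MIN
  if (requested < PTHREAD_STACK_MIN)  /* enforce the platform minimum */
    requested = PTHREAD_STACK_MIN;
#endif
  return requested;
}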
@@ -708,7 +709,7 @@ int uv_cond_init(uv_cond_t* cond) {
|
||||
if (err)
|
||||
return UV__ERR(err);
|
||||
|
||||
#if !(defined(__ANDROID_API__) && __ANDROID_API__ < 21) && !defined(__hpux)
|
||||
#if !defined(__hpux)
|
||||
err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
|
||||
if (err)
|
||||
goto error2;
|
||||
@@ -804,16 +805,7 @@ int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
|
||||
#endif
|
||||
ts.tv_sec = timeout / NANOSEC;
|
||||
ts.tv_nsec = timeout % NANOSEC;
|
||||
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
|
||||
|
||||
/*
|
||||
* The bionic pthread implementation doesn't support CLOCK_MONOTONIC,
|
||||
* but has this alternative function instead.
|
||||
*/
|
||||
r = pthread_cond_timedwait_monotonic_np(cond, mutex, &ts);
|
||||
#else
|
||||
r = pthread_cond_timedwait(cond, mutex, &ts);
|
||||
#endif /* __ANDROID_API__ */
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
@@ -42,6 +42,11 @@
|
||||
# define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
|
||||
#endif
|
||||
|
||||
union uv__sockaddr {
|
||||
struct sockaddr_in6 in6;
|
||||
struct sockaddr_in in;
|
||||
struct sockaddr addr;
|
||||
};
|
||||
|
||||
static void uv__udp_run_completed(uv_udp_t* handle);
|
||||
static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
|
||||
@@ -68,12 +73,12 @@ static void uv__udp_mmsg_init(void) {
|
||||
s = uv__socket(AF_INET, SOCK_DGRAM, 0);
|
||||
if (s < 0)
|
||||
return;
|
||||
ret = uv__sendmmsg(s, NULL, 0, 0);
|
||||
ret = uv__sendmmsg(s, NULL, 0);
|
||||
if (ret == 0 || errno != ENOSYS) {
|
||||
uv__sendmmsg_avail = 1;
|
||||
uv__recvmmsg_avail = 1;
|
||||
} else {
|
||||
ret = uv__recvmmsg(s, NULL, 0, 0, NULL);
|
||||
ret = uv__recvmmsg(s, NULL, 0);
|
||||
if (ret == 0 || errno != ENOSYS)
|
||||
uv__recvmmsg_avail = 1;
|
||||
}
|
||||
@@ -208,7 +213,7 @@ static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
|
||||
}
|
||||
|
||||
do
|
||||
nread = uv__recvmmsg(handle->io_watcher.fd, msgs, chunks, 0, NULL);
|
||||
nread = uv__recvmmsg(handle->io_watcher.fd, msgs, chunks);
|
||||
while (nread == -1 && errno == EINTR);
|
||||
|
||||
if (nread < 1) {
|
||||
@@ -233,7 +238,7 @@ static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {

/* one last callback so the original buffer is freed */
if (handle->recv_cb != NULL)
handle->recv_cb(handle, 0, buf, NULL, 0);
handle->recv_cb(handle, 0, buf, NULL, UV_UDP_MMSG_FREE);
}
return nread;
}
@@ -265,14 +270,11 @@ static void uv__udp_recvmsg(uv_udp_t* handle) {
|
||||
assert(buf.base != NULL);
|
||||
|
||||
#if HAVE_MMSG
|
||||
if (handle->flags & UV_HANDLE_UDP_RECVMMSG) {
|
||||
uv_once(&once, uv__udp_mmsg_init);
|
||||
if (uv__recvmmsg_avail) {
|
||||
nread = uv__udp_recvmmsg(handle, &buf);
|
||||
if (nread > 0)
|
||||
count -= nread;
|
||||
continue;
|
||||
}
|
||||
if (uv_udp_using_recvmmsg(handle)) {
|
||||
nread = uv__udp_recvmmsg(handle, &buf);
|
||||
if (nread > 0)
|
||||
count -= nread;
|
||||
continue;
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -354,7 +356,7 @@ write_queue_drain:
|
||||
}
|
||||
|
||||
do
|
||||
npkts = uv__sendmmsg(handle->io_watcher.fd, h, pkts, 0);
|
||||
npkts = uv__sendmmsg(handle->io_watcher.fd, h, pkts);
|
||||
while (npkts == -1 && errno == EINTR);
|
||||
|
||||
if (npkts < 1) {
|
||||
@@ -362,7 +364,7 @@ write_queue_drain:
|
||||
return;
|
||||
for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
|
||||
i < pkts && q != &handle->write_queue;
|
||||
++i, q = QUEUE_HEAD(q)) {
|
||||
++i, q = QUEUE_HEAD(&handle->write_queue)) {
|
||||
assert(q != NULL);
|
||||
req = QUEUE_DATA(q, uv_udp_send_t, queue);
|
||||
assert(req != NULL);
|
||||
@@ -567,11 +569,7 @@ int uv__udp_bind(uv_udp_t* handle,
|
||||
static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
|
||||
int domain,
|
||||
unsigned int flags) {
|
||||
union {
|
||||
struct sockaddr_in6 in6;
|
||||
struct sockaddr_in in;
|
||||
struct sockaddr addr;
|
||||
} taddr;
|
||||
union uv__sockaddr taddr;
|
||||
socklen_t addrlen;
|
||||
|
||||
if (handle->io_watcher.fd != -1)
|
||||
@@ -853,7 +851,11 @@ static int uv__udp_set_membership6(uv_udp_t* handle,
|
||||
}
|
||||
|
||||
|
||||
#if !defined(__OpenBSD__) && !defined(__NetBSD__) && !defined(__ANDROID__)
|
||||
#if !defined(__OpenBSD__) && \
|
||||
!defined(__NetBSD__) && \
|
||||
!defined(__ANDROID__) && \
|
||||
!defined(__DragonFly__) & \
|
||||
!defined(__QNX__)
|
||||
static int uv__udp_set_source_membership4(uv_udp_t* handle,
|
||||
const struct sockaddr_in* multicast_addr,
|
||||
const char* interface_addr,
|
||||
@@ -924,8 +926,10 @@ static int uv__udp_set_source_membership6(uv_udp_t* handle,
|
||||
mreq.gsr_interface = 0;
|
||||
}
|
||||
|
||||
memcpy(&mreq.gsr_group, multicast_addr, sizeof(mreq.gsr_group));
|
||||
memcpy(&mreq.gsr_source, source_addr, sizeof(mreq.gsr_source));
|
||||
STATIC_ASSERT(sizeof(mreq.gsr_group) >= sizeof(*multicast_addr));
|
||||
STATIC_ASSERT(sizeof(mreq.gsr_source) >= sizeof(*source_addr));
|
||||
memcpy(&mreq.gsr_group, multicast_addr, sizeof(*multicast_addr));
|
||||
memcpy(&mreq.gsr_source, source_addr, sizeof(*source_addr));
|
||||
|
||||
if (membership == UV_JOIN_GROUP)
|
||||
optname = MCAST_JOIN_SOURCE_GROUP;
|
||||
@@ -973,6 +977,17 @@ int uv__udp_init_ex(uv_loop_t* loop,
}


int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
#if HAVE_MMSG
if (handle->flags & UV_HANDLE_UDP_RECVMMSG) {
uv_once(&once, uv__udp_mmsg_init);
return uv__recvmmsg_avail;
}
#endif
return 0;
}


int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
int err;

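uv_udp_using_recvmmsg() lets callers detect the batched receive path at runtime. A hedged sketch of alloc/recv callbacks that cooperate with it; the 64 KB buffer size and the callback names are assumptions for illustration, not requirements from this commit:

#include <stdlib.h>
#include <uv.h>

/* Sketch only: callbacks that tolerate the recvmmsg-related flags. */
static void on_alloc(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
  /* With recvmmsg, libuv slices one large buffer into per-datagram chunks,
   * so hand out something bigger than a single datagram (64 KB is an
   * arbitrary illustrative choice). */
  if (uv_udp_using_recvmmsg((uv_udp_t*) handle))
    suggested = 64 * 1024;
  buf->base = malloc(suggested);
  buf->len = suggested;
}

static void on_recv(uv_udp_t* handle,
                    ssize_t nread,
                    const uv_buf_t* buf,
                    const struct sockaddr* addr,
                    unsigned flags) {
  if (nread == 0 && addr == NULL) {
    /* "Nothing left to read" callback; with recvmmsg this is also where the
     * original large buffer is handed back to be released (UV_UDP_MMSG_FREE). */
    free(buf->base);
    return;
  }

  if (nread > 0) {
    /* ... consume nread bytes starting at buf->base ... */
  }

  /* Chunk callbacks alias the large allocation and must not be freed here. */
  if (!(flags & UV_UDP_MMSG_CHUNK))
    free(buf->base);
}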
@@ -1028,42 +1043,37 @@ int uv_udp_set_source_membership(uv_udp_t* handle,
|
||||
const char* interface_addr,
|
||||
const char* source_addr,
|
||||
uv_membership membership) {
|
||||
#if !defined(__OpenBSD__) && !defined(__NetBSD__) && !defined(__ANDROID__)
|
||||
#if !defined(__OpenBSD__) && \
|
||||
!defined(__NetBSD__) && \
|
||||
!defined(__ANDROID__) && \
|
||||
!defined(__DragonFly__) && \
|
||||
!defined(__QNX__)
|
||||
int err;
|
||||
struct sockaddr_storage mcast_addr;
|
||||
struct sockaddr_in* mcast_addr4;
|
||||
struct sockaddr_in6* mcast_addr6;
|
||||
struct sockaddr_storage src_addr;
|
||||
struct sockaddr_in* src_addr4;
|
||||
struct sockaddr_in6* src_addr6;
|
||||
union uv__sockaddr mcast_addr;
|
||||
union uv__sockaddr src_addr;
|
||||
|
||||
mcast_addr4 = (struct sockaddr_in*)&mcast_addr;
|
||||
mcast_addr6 = (struct sockaddr_in6*)&mcast_addr;
|
||||
src_addr4 = (struct sockaddr_in*)&src_addr;
|
||||
src_addr6 = (struct sockaddr_in6*)&src_addr;
|
||||
|
||||
err = uv_ip4_addr(multicast_addr, 0, mcast_addr4);
|
||||
err = uv_ip4_addr(multicast_addr, 0, &mcast_addr.in);
|
||||
if (err) {
|
||||
err = uv_ip6_addr(multicast_addr, 0, mcast_addr6);
|
||||
err = uv_ip6_addr(multicast_addr, 0, &mcast_addr.in6);
|
||||
if (err)
|
||||
return err;
|
||||
err = uv_ip6_addr(source_addr, 0, src_addr6);
|
||||
err = uv_ip6_addr(source_addr, 0, &src_addr.in6);
|
||||
if (err)
|
||||
return err;
|
||||
return uv__udp_set_source_membership6(handle,
|
||||
mcast_addr6,
|
||||
&mcast_addr.in6,
|
||||
interface_addr,
|
||||
src_addr6,
|
||||
&src_addr.in6,
|
||||
membership);
|
||||
}
|
||||
|
||||
err = uv_ip4_addr(source_addr, 0, src_addr4);
|
||||
err = uv_ip4_addr(source_addr, 0, &src_addr.in);
|
||||
if (err)
|
||||
return err;
|
||||
return uv__udp_set_source_membership4(handle,
|
||||
mcast_addr4,
|
||||
&mcast_addr.in,
|
||||
interface_addr,
|
||||
src_addr4,
|
||||
&src_addr.in,
|
||||
membership);
|
||||
#else
|
||||
return UV_ENOSYS;
|
||||
@@ -1144,7 +1154,7 @@ int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
|
||||
* and use the general uv__setsockopt_maybe_char call on other platforms.
|
||||
*/
|
||||
#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
|
||||
defined(__MVS__)
|
||||
defined(__MVS__) || defined(__QNX__)
|
||||
|
||||
return uv__setsockopt(handle,
|
||||
IP_TTL,
|
||||
@@ -1153,7 +1163,7 @@ int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
|
||||
sizeof(ttl));
|
||||
|
||||
#else /* !(defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
|
||||
defined(__MVS__)) */
|
||||
defined(__MVS__) || defined(__QNX__)) */
|
||||
|
||||
return uv__setsockopt_maybe_char(handle,
|
||||
IP_TTL,
|
||||
@@ -1161,7 +1171,7 @@ int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
|
||||
ttl);
|
||||
|
||||
#endif /* defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
|
||||
defined(__MVS__) */
|
||||
defined(__MVS__) || defined(__QNX__) */
|
||||
}
|
||||
|
||||
|
||||
@@ -1173,7 +1183,7 @@ int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) {
|
||||
* and use the general uv__setsockopt_maybe_char call otherwise.
|
||||
*/
|
||||
#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
|
||||
defined(__MVS__)
|
||||
defined(__MVS__) || defined(__QNX__)
|
||||
if (handle->flags & UV_HANDLE_IPV6)
|
||||
return uv__setsockopt(handle,
|
||||
IP_MULTICAST_TTL,
|
||||
@@ -1181,7 +1191,7 @@ int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) {
|
||||
&ttl,
|
||||
sizeof(ttl));
|
||||
#endif /* defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
|
||||
defined(__MVS__) */
|
||||
defined(__MVS__) || defined(__QNX__) */
|
||||
|
||||
return uv__setsockopt_maybe_char(handle,
|
||||
IP_MULTICAST_TTL,
|
||||
@@ -1198,7 +1208,7 @@ int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) {
|
||||
* and use the general uv__setsockopt_maybe_char call otherwise.
|
||||
*/
|
||||
#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
|
||||
defined(__MVS__)
|
||||
defined(__MVS__) || defined(__QNX__)
|
||||
if (handle->flags & UV_HANDLE_IPV6)
|
||||
return uv__setsockopt(handle,
|
||||
IP_MULTICAST_LOOP,
|
||||
@@ -1206,7 +1216,7 @@ int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) {
|
||||
&on,
|
||||
sizeof(on));
|
||||
#endif /* defined(__sun) || defined(_AIX) ||defined(__OpenBSD__) ||
|
||||
defined(__MVS__) */
|
||||
defined(__MVS__) || defined(__QNX__) */
|
||||
|
||||
return uv__setsockopt_maybe_char(handle,
|
||||
IP_MULTICAST_LOOP,
|
||||
|
||||
@@ -861,11 +861,70 @@ __attribute__((destructor))
void uv_library_shutdown(void) {
static int was_shutdown;

if (was_shutdown)
if (uv__load_relaxed(&was_shutdown))
return;

uv__process_title_cleanup();
uv__signal_cleanup();
uv__threadpool_cleanup();
was_shutdown = 1;
uv__store_relaxed(&was_shutdown, 1);
}


void uv__metrics_update_idle_time(uv_loop_t* loop) {
uv__loop_metrics_t* loop_metrics;
uint64_t entry_time;
uint64_t exit_time;

if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
return;

loop_metrics = uv__get_loop_metrics(loop);

/* The thread running uv__metrics_update_idle_time() is always the same
* thread that sets provider_entry_time. So it's unnecessary to lock before
* retrieving this value.
*/
if (loop_metrics->provider_entry_time == 0)
return;

exit_time = uv_hrtime();

uv_mutex_lock(&loop_metrics->lock);
entry_time = loop_metrics->provider_entry_time;
loop_metrics->provider_entry_time = 0;
loop_metrics->provider_idle_time += exit_time - entry_time;
uv_mutex_unlock(&loop_metrics->lock);
}


void uv__metrics_set_provider_entry_time(uv_loop_t* loop) {
uv__loop_metrics_t* loop_metrics;
uint64_t now;

if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
return;

now = uv_hrtime();
loop_metrics = uv__get_loop_metrics(loop);
uv_mutex_lock(&loop_metrics->lock);
loop_metrics->provider_entry_time = now;
uv_mutex_unlock(&loop_metrics->lock);
}


uint64_t uv_metrics_idle_time(uv_loop_t* loop) {
uv__loop_metrics_t* loop_metrics;
uint64_t entry_time;
uint64_t idle_time;

loop_metrics = uv__get_loop_metrics(loop);
uv_mutex_lock(&loop_metrics->lock);
idle_time = loop_metrics->provider_idle_time;
entry_time = loop_metrics->provider_entry_time;
uv_mutex_unlock(&loop_metrics->lock);

if (entry_time > 0)
idle_time += uv_hrtime() - entry_time;
return idle_time;
}

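The three functions above are the whole of the new idle-time metric: the loop records when it enters its I/O provider (poll/epoll/kqueue/IOCP) and how long it sat there, and uv_metrics_idle_time() reports the accumulated total. A small usage sketch from the application side (not part of the commit); uv_loop_configure() and uv_metrics_idle_time() are the public entry points this update adds:

#include <stdio.h>
#include <uv.h>

static void on_timer(uv_timer_t* handle) {
  uint64_t idle_ns = uv_metrics_idle_time(handle->loop);
  printf("loop idle so far: %llu ms\n",
         (unsigned long long) (idle_ns / 1000000));
}

int main(void) {
  uv_loop_t loop;
  uv_timer_t timer;

  uv_loop_init(&loop);
  /* Opt this loop into idle-time accounting. */
  uv_loop_configure(&loop, UV_METRICS_IDLE_TIME);

  uv_timer_init(&loop, &timer);
  uv_timer_start(&timer, on_timer, 1000, 1000);  /* report once per second */

  uv_run(&loop, UV_RUN_DEFAULT);  /* runs until the timer is stopped */
  return 0;
}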
@@ -60,6 +60,14 @@ extern int snprintf(char*, size_t, const char*, ...);
#define STATIC_ASSERT(expr) \
void uv__static_assert(int static_assert_failed[1 - 2 * !(expr)])

#if defined(__GNUC__) && (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 7)
#define uv__load_relaxed(p) __atomic_load_n(p, __ATOMIC_RELAXED)
#define uv__store_relaxed(p, v) __atomic_store_n(p, v, __ATOMIC_RELAXED)
#else
#define uv__load_relaxed(p) (*p)
#define uv__store_relaxed(p, v) do *p = v; while (0)
#endif

/* Handle flags. Some flags are specific to Windows or UNIX. */
enum {
/* Used by all handles. */
@@ -325,6 +333,12 @@ void uv__threadpool_cleanup(void);
} \
while (0)

#define uv__get_internal_fields(loop) \
((uv__loop_internal_fields_t*) loop->internal_fields)

#define uv__get_loop_metrics(loop) \
(&uv__get_internal_fields(loop)->loop_metrics)

/* Allocator prototypes */
void *uv__calloc(size_t count, size_t size);
char *uv__strdup(const char* s);
@@ -334,4 +348,21 @@ void uv__free(void* ptr);
void* uv__realloc(void* ptr, size_t size);
void* uv__reallocf(void* ptr, size_t size);

typedef struct uv__loop_metrics_s uv__loop_metrics_t;
typedef struct uv__loop_internal_fields_s uv__loop_internal_fields_t;

struct uv__loop_metrics_s {
uint64_t provider_entry_time;
uint64_t provider_idle_time;
uv_mutex_t lock;
};

void uv__metrics_update_idle_time(uv_loop_t* loop);
void uv__metrics_set_provider_entry_time(uv_loop_t* loop);

struct uv__loop_internal_fields_s {
unsigned int flags;
uv__loop_metrics_t loop_metrics;
};

#endif /* UV_COMMON_H_ */

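The uv__load_relaxed()/uv__store_relaxed() macros added to this header back the one-time caches used elsewhere in this commit (the coarse clock id, the UV_TCP_SINGLE_ACCEPT switch, the library shutdown flag). A minimal sketch of that pattern; expensive_lookup() is a placeholder, not a libuv function:

/* Sketch only: cache the result of an idempotent lookup so it runs at most
 * a handful of times, without needing a mutex. */
int expensive_lookup(void);        /* placeholder for getenv(), clock_getres(), ... */

static int cached_value = -1;      /* -1 means "not computed yet" */

int get_value_cached(void) {
  int v;

  v = uv__load_relaxed(&cached_value);
  if (v != -1)
    return v;

  v = expensive_lookup();          /* idempotent, so a racing recompute is benign */
  uv__store_relaxed(&cached_value, v);
  return v;
}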
@@ -1,3 +1,24 @@
|
||||
/* Copyright libuv project contributors. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to
|
||||
* deal in the Software without restriction, including without limitation the
|
||||
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
* sell copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "uv.h"
|
||||
|
||||
const char* uv_handle_type_name(uv_handle_type type) {
|
||||
|
||||
@@ -222,6 +222,7 @@ static void uv_init(void) {
|
||||
|
||||
|
||||
int uv_loop_init(uv_loop_t* loop) {
|
||||
uv__loop_internal_fields_t* lfields;
|
||||
struct heap* timer_heap;
|
||||
int err;
|
||||
|
||||
@@ -233,6 +234,15 @@ int uv_loop_init(uv_loop_t* loop) {
|
||||
if (loop->iocp == NULL)
|
||||
return uv_translate_sys_error(GetLastError());
|
||||
|
||||
lfields = (uv__loop_internal_fields_t*) uv__calloc(1, sizeof(*lfields));
|
||||
if (lfields == NULL)
|
||||
return UV_ENOMEM;
|
||||
loop->internal_fields = lfields;
|
||||
|
||||
err = uv_mutex_init(&lfields->loop_metrics.lock);
|
||||
if (err)
|
||||
goto fail_metrics_mutex_init;
|
||||
|
||||
/* To prevent uninitialized memory access, loop->time must be initialized
|
||||
* to zero before calling uv_update_time for the first time.
|
||||
*/
|
||||
@@ -297,6 +307,11 @@ fail_mutex_init:
|
||||
loop->timer_heap = NULL;
|
||||
|
||||
fail_timers_alloc:
|
||||
uv_mutex_destroy(&lfields->loop_metrics.lock);
|
||||
|
||||
fail_metrics_mutex_init:
|
||||
uv__free(lfields);
|
||||
loop->internal_fields = NULL;
|
||||
CloseHandle(loop->iocp);
|
||||
loop->iocp = INVALID_HANDLE_VALUE;
|
||||
|
||||
@@ -317,6 +332,7 @@ void uv__once_init(void) {
|
||||
|
||||
|
||||
void uv__loop_close(uv_loop_t* loop) {
|
||||
uv__loop_internal_fields_t* lfields;
|
||||
size_t i;
|
||||
|
||||
uv__loops_remove(loop);
|
||||
@@ -347,11 +363,24 @@ void uv__loop_close(uv_loop_t* loop) {
|
||||
uv__free(loop->timer_heap);
|
||||
loop->timer_heap = NULL;
|
||||
|
||||
lfields = uv__get_internal_fields(loop);
|
||||
uv_mutex_destroy(&lfields->loop_metrics.lock);
|
||||
uv__free(lfields);
|
||||
loop->internal_fields = NULL;
|
||||
|
||||
CloseHandle(loop->iocp);
|
||||
}
|
||||
|
||||
|
||||
int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) {
|
||||
uv__loop_internal_fields_t* lfields;
|
||||
|
||||
lfields = uv__get_internal_fields(loop);
|
||||
if (option == UV_METRICS_IDLE_TIME) {
|
||||
lfields->flags |= UV_METRICS_IDLE_TIME;
|
||||
return 0;
|
||||
}
|
||||
|
||||
return UV_ENOSYS;
|
||||
}
|
||||
|
||||
@@ -393,16 +422,44 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
|
||||
uv_req_t* req;
|
||||
int repeat;
|
||||
uint64_t timeout_time;
|
||||
uint64_t user_timeout;
|
||||
int reset_timeout;
|
||||
|
||||
timeout_time = loop->time + timeout;
|
||||
|
||||
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
|
||||
reset_timeout = 1;
|
||||
user_timeout = timeout;
|
||||
timeout = 0;
|
||||
} else {
|
||||
reset_timeout = 0;
|
||||
}
|
||||
|
||||
for (repeat = 0; ; repeat++) {
|
||||
/* Only need to set the provider_entry_time if timeout != 0. The function
|
||||
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
|
||||
*/
|
||||
if (timeout != 0)
|
||||
uv__metrics_set_provider_entry_time(loop);
|
||||
|
||||
GetQueuedCompletionStatus(loop->iocp,
|
||||
&bytes,
|
||||
&key,
|
||||
&overlapped,
|
||||
timeout);
|
||||
|
||||
if (reset_timeout != 0) {
|
||||
timeout = user_timeout;
|
||||
reset_timeout = 0;
|
||||
}
|
||||
|
||||
/* Placed here because on success the loop will break whether there is an
|
||||
* empty package or not, or if GetQueuedCompletionStatus returned early then
|
||||
* the timeout will be updated and the loop will run again. In either case
|
||||
* the idle time will need to be updated.
|
||||
*/
|
||||
uv__metrics_update_idle_time(loop);
|
||||
|
||||
if (overlapped) {
|
||||
/* Package was dequeued */
|
||||
req = uv_overlapped_to_req(overlapped);
|
||||
@@ -445,10 +502,26 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
|
||||
ULONG i;
|
||||
int repeat;
|
||||
uint64_t timeout_time;
|
||||
uint64_t user_timeout;
|
||||
int reset_timeout;
|
||||
|
||||
timeout_time = loop->time + timeout;
|
||||
|
||||
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
|
||||
reset_timeout = 1;
|
||||
user_timeout = timeout;
|
||||
timeout = 0;
|
||||
} else {
|
||||
reset_timeout = 0;
|
||||
}
|
||||
|
||||
for (repeat = 0; ; repeat++) {
|
||||
/* Only need to set the provider_entry_time if timeout != 0. The function
|
||||
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
|
||||
*/
|
||||
if (timeout != 0)
|
||||
uv__metrics_set_provider_entry_time(loop);
|
||||
|
||||
success = pGetQueuedCompletionStatusEx(loop->iocp,
|
||||
overlappeds,
|
||||
ARRAY_SIZE(overlappeds),
|
||||
@@ -456,6 +529,18 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
|
||||
timeout,
|
||||
FALSE);
|
||||
|
||||
if (reset_timeout != 0) {
|
||||
timeout = user_timeout;
|
||||
reset_timeout = 0;
|
||||
}
|
||||
|
||||
/* Placed here because on success the loop will break whether there is an
|
||||
* empty package or not, or if GetQueuedCompletionStatus returned early then
|
||||
* the timeout will be updated and the loop will run again. In either case
|
||||
* the idle time will need to be updated.
|
||||
*/
|
||||
uv__metrics_update_idle_time(loop);
|
||||
|
||||
if (success) {
|
||||
for (i = 0; i < count; i++) {
|
||||
/* Package was dequeued, but see if it is not an empty package
|
||||
@@ -534,6 +619,12 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
|
||||
else
|
||||
uv__poll_wine(loop, timeout);
|
||||
|
||||
/* Run one final update on the provider_idle_time in case uv__poll*
|
||||
* returned because the timeout expired, but no events were received. This
|
||||
* call will be ignored if the provider_entry_time was either never set (if
|
||||
* the timeout == 0) or was already updated b/c an event was received.
|
||||
*/
|
||||
uv__metrics_update_idle_time(loop);
|
||||
|
||||
uv_check_invoke(loop);
|
||||
uv_process_endgames(loop);
|
||||
|
||||
@@ -1,3 +1,24 @@
|
||||
/* Copyright libuv project contributors. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to
|
||||
* deal in the Software without restriction, including without limitation the
|
||||
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
* sell copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "uv.h"
|
||||
#include "internal.h"
|
||||
#include "winapi.h"
|
||||
|
||||
@@ -1,3 +1,24 @@
|
||||
/* Copyright libuv project contributors. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to
|
||||
* deal in the Software without restriction, including without limitation the
|
||||
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
* sell copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef UV_WIN_FS_FD_HASH_INL_H_
|
||||
#define UV_WIN_FS_FD_HASH_INL_H_
|
||||
|
||||
@@ -53,7 +74,8 @@ static struct uv__fd_hash_bucket_s uv__fd_hash[UV__FD_HASH_SIZE];
|
||||
|
||||
|
||||
INLINE static void uv__fd_hash_init(void) {
|
||||
int i, err;
|
||||
size_t i;
|
||||
int err;
|
||||
|
||||
err = uv_mutex_init(&uv__fd_hash_mutex);
|
||||
if (err) {
|
||||
|
||||
@@ -70,10 +70,7 @@
|
||||
#define SET_REQ_RESULT(req, result_value) \
|
||||
do { \
|
||||
req->result = (result_value); \
|
||||
if (req->result == -1) { \
|
||||
req->sys_errno_ = _doserrno; \
|
||||
req->result = uv_translate_sys_error(req->sys_errno_); \
|
||||
} \
|
||||
assert(req->result != -1); \
|
||||
} while (0)
|
||||
|
||||
#define SET_REQ_WIN32_ERROR(req, sys_errno) \
|
||||
@@ -730,14 +727,14 @@ void fs__close(uv_fs_t* req) {
|
||||
assert(errno == EBADF);
|
||||
SET_REQ_UV_ERROR(req, UV_EBADF, ERROR_INVALID_HANDLE);
|
||||
} else {
|
||||
req->result = 0;
|
||||
SET_REQ_RESULT(req, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
LONG fs__filemap_ex_filter(LONG excode, PEXCEPTION_POINTERS pep,
|
||||
int* perror) {
|
||||
if (excode != EXCEPTION_IN_PAGE_ERROR) {
|
||||
if (excode != (LONG)EXCEPTION_IN_PAGE_ERROR) {
|
||||
return EXCEPTION_CONTINUE_SEARCH;
|
||||
}
|
||||
|
||||
@@ -816,10 +813,10 @@ void fs__read_filemap(uv_fs_t* req, struct uv__fd_info_s* fd_info) {
|
||||
for (index = 0;
|
||||
index < req->fs.info.nbufs && done_read < read_size;
|
||||
++index) {
|
||||
int err = 0;
|
||||
size_t this_read_size = MIN(req->fs.info.bufs[index].len,
|
||||
read_size - done_read);
|
||||
#ifdef _MSC_VER
|
||||
int err = 0;
|
||||
__try {
|
||||
#endif
|
||||
memcpy(req->fs.info.bufs[index].base,
|
||||
@@ -938,7 +935,7 @@ void fs__write_filemap(uv_fs_t* req, HANDLE file,
|
||||
(UV_FS_O_RDONLY | UV_FS_O_WRONLY | UV_FS_O_RDWR);
|
||||
size_t write_size, done_write;
|
||||
unsigned int index;
|
||||
LARGE_INTEGER zero, pos, end_pos;
|
||||
LARGE_INTEGER pos, end_pos;
|
||||
size_t view_offset;
|
||||
LARGE_INTEGER view_base;
|
||||
void* view;
|
||||
@@ -963,7 +960,6 @@ void fs__write_filemap(uv_fs_t* req, HANDLE file,
|
||||
return;
|
||||
}
|
||||
|
||||
zero.QuadPart = 0;
|
||||
if (force_append) {
|
||||
pos = fd_info->size;
|
||||
} else if (req->fs.info.offset == -1) {
|
||||
@@ -1014,8 +1010,8 @@ void fs__write_filemap(uv_fs_t* req, HANDLE file,
|
||||
|
||||
done_write = 0;
|
||||
for (index = 0; index < req->fs.info.nbufs; ++index) {
|
||||
int err = 0;
|
||||
#ifdef _MSC_VER
|
||||
int err = 0;
|
||||
__try {
|
||||
#endif
|
||||
memcpy((char*)view + view_offset + done_write,
|
||||
@@ -1128,7 +1124,10 @@ void fs__write(uv_fs_t* req) {
|
||||
|
||||
void fs__rmdir(uv_fs_t* req) {
|
||||
int result = _wrmdir(req->file.pathw);
|
||||
SET_REQ_RESULT(req, result);
|
||||
if (result == -1)
|
||||
SET_REQ_WIN32_ERROR(req, _doserrno);
|
||||
else
|
||||
SET_REQ_RESULT(req, 0);
|
||||
}
|
||||
|
||||
|
||||
@@ -1221,12 +1220,12 @@ void fs__unlink(uv_fs_t* req) {
|
||||
|
||||
void fs__mkdir(uv_fs_t* req) {
|
||||
/* TODO: use req->mode. */
|
||||
req->result = _wmkdir(req->file.pathw);
|
||||
if (req->result == -1) {
|
||||
req->sys_errno_ = _doserrno;
|
||||
req->result = req->sys_errno_ == ERROR_INVALID_NAME
|
||||
? UV_EINVAL
|
||||
: uv_translate_sys_error(req->sys_errno_);
|
||||
if (CreateDirectoryW(req->file.pathw, NULL)) {
|
||||
SET_REQ_RESULT(req, 0);
|
||||
} else {
|
||||
SET_REQ_WIN32_ERROR(req, GetLastError());
|
||||
if (req->sys_errno_ == ERROR_INVALID_NAME)
|
||||
req->result = UV_EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1242,19 +1241,21 @@ void fs__mktemp(uv_fs_t* req, uv__fs_mktemp_func func) {
|
||||
unsigned int tries, i;
|
||||
size_t len;
|
||||
uint64_t v;
|
||||
|
||||
char* path;
|
||||
|
||||
path = req->path;
|
||||
len = wcslen(req->file.pathw);
|
||||
ep = req->file.pathw + len;
|
||||
if (len < num_x || wcsncmp(ep - num_x, L"XXXXXX", num_x)) {
|
||||
SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER);
|
||||
return;
|
||||
goto clobber;
|
||||
}
|
||||
|
||||
tries = TMP_MAX;
|
||||
do {
|
||||
if (uv__random_rtlgenrandom((void *)&v, sizeof(v)) < 0) {
|
||||
SET_REQ_UV_ERROR(req, UV_EIO, ERROR_IO_DEVICE);
|
||||
break;
|
||||
goto clobber;
|
||||
}
|
||||
|
||||
cp = ep - num_x;
|
||||
@@ -1265,25 +1266,29 @@ void fs__mktemp(uv_fs_t* req, uv__fs_mktemp_func func) {
|
||||
|
||||
if (func(req)) {
|
||||
if (req->result >= 0) {
|
||||
len = strlen(req->path);
|
||||
wcstombs((char*) req->path + len - num_x, ep - num_x, num_x);
|
||||
len = strlen(path);
|
||||
wcstombs(path + len - num_x, ep - num_x, num_x);
|
||||
}
|
||||
break;
|
||||
return;
|
||||
}
|
||||
} while (--tries);
|
||||
|
||||
if (tries == 0) {
|
||||
SET_REQ_RESULT(req, -1);
|
||||
}
|
||||
SET_REQ_WIN32_ERROR(req, GetLastError());
|
||||
|
||||
clobber:
|
||||
path[0] = '\0';
|
||||
}
|
||||
|
||||
|
||||
static int fs__mkdtemp_func(uv_fs_t* req) {
|
||||
if (_wmkdir(req->file.pathw) == 0) {
|
||||
DWORD error;
|
||||
if (CreateDirectoryW(req->file.pathw, NULL)) {
|
||||
SET_REQ_RESULT(req, 0);
|
||||
return 1;
|
||||
} else if (errno != EEXIST) {
|
||||
SET_REQ_RESULT(req, -1);
|
||||
}
|
||||
error = GetLastError();
|
||||
if (error != ERROR_ALREADY_EXISTS) {
|
||||
SET_REQ_WIN32_ERROR(req, error);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -1404,7 +1409,7 @@ void fs__scandir(uv_fs_t* req) {
/* If the handle is not a directory, we'll get STATUS_INVALID_PARAMETER.
* This should be reported back as UV_ENOTDIR.
*/
if (status == STATUS_INVALID_PARAMETER)
if (status == (NTSTATUS)STATUS_INVALID_PARAMETER)
goto not_a_directory_error;

while (NT_SUCCESS(status)) {
@@ -1895,7 +1900,7 @@ INLINE static void fs__stat_impl(uv_fs_t* req, int do_lstat) {
}

req->ptr = &req->statbuf;
req->result = 0;
SET_REQ_RESULT(req, 0);
}


@@ -1930,7 +1935,7 @@ static void fs__fstat(uv_fs_t* req) {
}

req->ptr = &req->statbuf;
req->result = 0;
SET_REQ_RESULT(req, 0);
}


@@ -2157,7 +2162,10 @@ static void fs__access(uv_fs_t* req) {

static void fs__chmod(uv_fs_t* req) {
int result = _wchmod(req->file.pathw, req->fs.info.mode);
SET_REQ_RESULT(req, result);
if (result == -1)
SET_REQ_WIN32_ERROR(req, _doserrno);
else
SET_REQ_RESULT(req, 0);
}


@@ -2315,7 +2323,7 @@ INLINE static void fs__utime_impl(uv_fs_t* req, int do_lutime) {
return;
}

req->result = 0;
SET_REQ_RESULT(req, 0);
}

static void fs__utime(uv_fs_t* req) {
@@ -2340,7 +2348,7 @@ static void fs__futime(uv_fs_t* req) {
return;
}

req->result = 0;
SET_REQ_RESULT(req, 0);
}

static void fs__lutime(uv_fs_t* req) {
@@ -2350,11 +2358,10 @@ static void fs__lutime(uv_fs_t* req) {

static void fs__link(uv_fs_t* req) {
DWORD r = CreateHardLinkW(req->fs.info.new_pathw, req->file.pathw, NULL);
if (r == 0) {
if (r == 0)
SET_REQ_WIN32_ERROR(req, GetLastError());
} else {
req->result = 0;
}
else
SET_REQ_RESULT(req, 0);
}


@@ -2674,17 +2681,17 @@ static void fs__realpath(uv_fs_t* req) {


static void fs__chown(uv_fs_t* req) {
req->result = 0;
SET_REQ_RESULT(req, 0);
}


static void fs__fchown(uv_fs_t* req) {
req->result = 0;
SET_REQ_RESULT(req, 0);
}


static void fs__lchown(uv_fs_t* req) {
req->result = 0;
SET_REQ_RESULT(req, 0);
}


@@ -2829,7 +2836,7 @@ static void uv__fs_done(struct uv__work* w, int status) {

if (status == UV_ECANCELED) {
assert(req->result == 0);
req->result = UV_ECANCELED;
SET_REQ_UV_ERROR(req, UV_ECANCELED, 0);
}

req->cb(req);

@@ -270,7 +270,7 @@ void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle);
*/
void uv__util_init(void);

uint64_t uv__hrtime(double scale);
uint64_t uv__hrtime(unsigned int scale);
__declspec(noreturn) void uv_fatal_error(const int errorno, const char* syscall);
int uv__getpwuid_r(uv_passwd_t* pwd);
int uv__convert_utf16_to_utf8(const WCHAR* utf16, int utf16len, char** utf8);

@@ -244,9 +244,8 @@ int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
return 0;

error:
if (pipeHandle != INVALID_HANDLE_VALUE) {
if (pipeHandle != INVALID_HANDLE_VALUE)
CloseHandle(pipeHandle);
}

return err;
}
@@ -554,7 +553,7 @@ int uv_pipe_bind(uv_pipe_t* handle, const char* name) {

/* Convert name to UTF16. */
nameSize = MultiByteToWideChar(CP_UTF8, 0, name, -1, NULL, 0) * sizeof(WCHAR);
handle->name = (WCHAR*)uv__malloc(nameSize);
handle->name = uv__malloc(nameSize);
if (!handle->name) {
uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
}
@@ -621,9 +620,8 @@ static DWORD WINAPI pipe_connect_thread_proc(void* parameter) {
while (WaitNamedPipeW(handle->name, 30000)) {
/* The pipe is now available, try to connect. */
pipeHandle = open_named_pipe(handle->name, &duplex_flags);
if (pipeHandle != INVALID_HANDLE_VALUE) {
if (pipeHandle != INVALID_HANDLE_VALUE)
break;
}

SwitchToThread();
}
@@ -655,7 +653,7 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,

/* Convert name to UTF16. */
nameSize = MultiByteToWideChar(CP_UTF8, 0, name, -1, NULL, 0) * sizeof(WCHAR);
handle->name = (WCHAR*)uv__malloc(nameSize);
handle->name = uv__malloc(nameSize);
if (!handle->name) {
uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
}
@@ -2147,7 +2145,7 @@ int uv_pipe_open(uv_pipe_t* pipe, uv_file file) {
if (pipe->ipc) {
assert(!(pipe->flags & UV_HANDLE_NON_OVERLAPPED_PIPE));
pipe->pipe.conn.ipc_remote_pid = uv_os_getppid();
assert(pipe->pipe.conn.ipc_remote_pid != (DWORD) -1);
assert(pipe->pipe.conn.ipc_remote_pid != (DWORD)(uv_pid_t) -1);
}
return 0;
}

@@ -523,16 +523,15 @@ static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
&req->u.io.overlapped,
NULL);

handle->flags |= UV_HANDLE_READ_PENDING;
handle->reqs_pending++;

if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
/* Process the req without IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
req->u.io.overlapped.InternalHigh = bytes;
handle->reqs_pending++;
uv_insert_pending_req(loop, (uv_req_t*)req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* The req will be processed with IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
handle->reqs_pending++;
if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
req->wait_handle == INVALID_HANDLE_VALUE &&
!RegisterWaitForSingleObject(&req->wait_handle,
@@ -545,7 +544,6 @@ static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
handle->reqs_pending++;
}
}

@@ -750,6 +748,40 @@ int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
return 0;
}

static int uv__is_loopback(const struct sockaddr_storage* storage) {
const struct sockaddr_in* in4;
const struct sockaddr_in6* in6;
int i;

if (storage->ss_family == AF_INET) {
in4 = (const struct sockaddr_in*) storage;
return in4->sin_addr.S_un.S_un_b.s_b1 == 127;
}
if (storage->ss_family == AF_INET6) {
in6 = (const struct sockaddr_in6*) storage;
for (i = 0; i < 7; ++i) {
if (in6->sin6_addr.u.Word[i] != 0)
return 0;
}
return in6->sin6_addr.u.Word[7] == htons(1);
}
return 0;
}

// Check if Windows version is 10.0.16299 or later
static int uv__is_fast_loopback_fail_supported() {
OSVERSIONINFOW os_info;
if (!pRtlGetVersion)
return 0;
pRtlGetVersion(&os_info);
if (os_info.dwMajorVersion < 10)
return 0;
if (os_info.dwMajorVersion > 10)
return 1;
if (os_info.dwMinorVersion > 0)
return 1;
return os_info.dwBuildNumber >= 16299;
}

static int uv_tcp_try_connect(uv_connect_t* req,
uv_tcp_t* handle,
@@ -757,6 +789,7 @@ static int uv_tcp_try_connect(uv_connect_t* req,
unsigned int addrlen,
uv_connect_cb cb) {
uv_loop_t* loop = handle->loop;
TCP_INITIAL_RTO_PARAMETERS retransmit_ioctl;
const struct sockaddr* bind_addr;
struct sockaddr_storage converted;
BOOL success;
@@ -792,6 +825,25 @@ static int uv_tcp_try_connect(uv_connect_t* req,
}
}

/* This makes connect() fail instantly if the target port on the localhost
* is not reachable, instead of waiting for 2s. We do not care if this fails.
* This only works on Windows version 10.0.16299 and later.
*/
if (uv__is_fast_loopback_fail_supported() && uv__is_loopback(&converted)) {
memset(&retransmit_ioctl, 0, sizeof(retransmit_ioctl));
retransmit_ioctl.Rtt = TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS;
retransmit_ioctl.MaxSynRetransmissions = TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS;
WSAIoctl(handle->socket,
SIO_TCP_INITIAL_RTO,
&retransmit_ioctl,
sizeof(retransmit_ioctl),
NULL,
0,
&bytes,
NULL,
NULL);
}

UV_REQ_INIT(req, UV_CONNECT);
req->handle = (uv_stream_t*) handle;
req->cb = cb;

@@ -517,6 +517,7 @@ static DWORD CALLBACK uv_tty_line_read_thread(void* data) {
status = InterlockedExchange(&uv__read_console_status, IN_PROGRESS);
if (status == TRAP_REQUESTED) {
SET_REQ_SUCCESS(req);
InterlockedExchange(&uv__read_console_status, COMPLETED);
req->u.io.overlapped.InternalHigh = 0;
POST_COMPLETION_FOR_REQ(loop, req);
return 0;
@@ -2121,13 +2122,6 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
abort();
}

/* We wouldn't mind emitting utf-16 surrogate pairs. Too bad, the windows
* console doesn't really support UTF-16, so just emit the replacement
* character. */
if (utf8_codepoint > 0xffff) {
utf8_codepoint = UNICODE_REPLACEMENT_CHARACTER;
}

if (utf8_codepoint == 0x0a || utf8_codepoint == 0x0d) {
/* EOL conversion - emit \r\n when we see \n. */

@@ -2154,6 +2148,12 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
ENSURE_BUFFER_SPACE(1);
utf16_buf[utf16_buf_used++] = (WCHAR) utf8_codepoint;
previous_eol = 0;
} else {
ENSURE_BUFFER_SPACE(2);
utf8_codepoint -= 0x10000;
utf16_buf[utf16_buf_used++] = (WCHAR) (utf8_codepoint / 0x400 + 0xD800);
utf16_buf[utf16_buf_used++] = (WCHAR) (utf8_codepoint % 0x400 + 0xDC00);
previous_eol = 0;
}
}
}
@@ -2412,6 +2412,7 @@ static DWORD WINAPI uv__tty_console_resize_watcher_thread(void* param) {
uv__tty_console_signal_resize();
ResetEvent(uv__tty_console_resized);
}
return 0;
}

static void uv__tty_console_signal_resize(void) {

@@ -189,6 +189,11 @@ void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle) {
}


int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
return 0;
}


static int uv_udp_maybe_bind(uv_udp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen,
@@ -752,6 +757,9 @@ int uv__udp_set_source_membership6(uv_udp_t* handle,
int optname;
int err;

STATIC_ASSERT(sizeof(mreq.gsr_group) >= sizeof(*multicast_addr));
STATIC_ASSERT(sizeof(mreq.gsr_source) >= sizeof(*source_addr));

if ((handle->flags & UV_HANDLE_BOUND) && !(handle->flags & UV_HANDLE_IPV6))
return UV_EINVAL;

@@ -774,8 +782,8 @@ int uv__udp_set_source_membership6(uv_udp_t* handle,
mreq.gsr_interface = 0;
}

memcpy(&mreq.gsr_group, multicast_addr, sizeof(mreq.gsr_group));
memcpy(&mreq.gsr_source, source_addr, sizeof(mreq.gsr_source));
memcpy(&mreq.gsr_group, multicast_addr, sizeof(*multicast_addr));
memcpy(&mreq.gsr_source, source_addr, sizeof(*source_addr));

if (membership == UV_JOIN_GROUP)
optname = MCAST_JOIN_SOURCE_GROUP;
@@ -1065,7 +1073,7 @@ int uv__udp_connect(uv_udp_t* handle,

err = connect(handle->socket, addr, addrlen);
if (err)
return uv_translate_sys_error(err);
return uv_translate_sys_error(WSAGetLastError());

handle->flags |= UV_HANDLE_UDP_CONNECTED;

@@ -1081,7 +1089,7 @@ int uv__udp_disconnect(uv_udp_t* handle) {

err = connect(handle->socket, &addr, sizeof(addr));
if (err)
return uv_translate_sys_error(err);
return uv_translate_sys_error(WSAGetLastError());

handle->flags &= ~UV_HANDLE_UDP_CONNECTED;
return 0;

@@ -30,12 +30,14 @@
#include "uv.h"
#include "internal.h"

/* clang-format off */
#include <winsock2.h>
#include <winperf.h>
#include <iphlpapi.h>
#include <psapi.h>
#include <tlhelp32.h>
#include <windows.h>
/* clang-format on */
#include <userenv.h>
#include <math.h>

@@ -67,8 +69,8 @@ extern BOOLEAN NTAPI SystemFunction036(PVOID Buffer, ULONG BufferLength);
static char *process_title;
static CRITICAL_SECTION process_title_lock;

/* Interval (in seconds) of the high-resolution clock. */
static double hrtime_interval_ = 0;
/* Frequency of the high-resolution clock. */
static uint64_t hrtime_frequency_ = 0;


/*
@@ -84,9 +86,9 @@ void uv__util_init(void) {
* and precompute its reciprocal.
*/
if (QueryPerformanceFrequency(&perf_frequency)) {
hrtime_interval_ = 1.0 / perf_frequency.QuadPart;
hrtime_frequency_ = perf_frequency.QuadPart;
} else {
hrtime_interval_= 0;
uv_fatal_error(GetLastError(), "QueryPerformanceFrequency");
}
}

@@ -490,23 +492,25 @@ uint64_t uv_hrtime(void) {
return uv__hrtime(UV__NANOSEC);
}

uint64_t uv__hrtime(double scale) {
uint64_t uv__hrtime(unsigned int scale) {
LARGE_INTEGER counter;
double scaled_freq;
double result;

/* If the performance interval is zero, there's no support. */
if (hrtime_interval_ == 0) {
return 0;
}

assert(hrtime_frequency_ != 0);
assert(scale != 0);
if (!QueryPerformanceCounter(&counter)) {
return 0;
uv_fatal_error(GetLastError(), "QueryPerformanceCounter");
}
assert(counter.QuadPart != 0);

/* Because we have no guarantee about the order of magnitude of the
* performance counter interval, integer math could cause this computation
* to overflow. Therefore we resort to floating point math.
*/
return (uint64_t) ((double) counter.QuadPart * hrtime_interval_ * scale);
scaled_freq = (double) hrtime_frequency_ / scale;
result = (double) counter.QuadPart / scaled_freq;
return (uint64_t) result;
}


@@ -1804,7 +1808,9 @@ int uv_os_uname(uv_utsname_t* buffer) {
pRtlGetVersion(&os_info);
} else {
/* Silence GetVersionEx() deprecation warning. */
#ifdef _MSC_VER
#pragma warning(suppress : 4996)
#endif
if (GetVersionExW(&os_info) == 0) {
r = uv_translate_sys_error(GetLastError());
goto error;
@@ -1871,7 +1877,7 @@ int uv_os_uname(uv_utsname_t* buffer) {
"MINGW32_NT-%u.%u",
(unsigned int) os_info.dwMajorVersion,
(unsigned int) os_info.dwMinorVersion);
assert(r < sizeof(buffer->sysname));
assert((size_t)r < sizeof(buffer->sysname));
#else
uv__strscpy(buffer->sysname, "Windows_NT", sizeof(buffer->sysname));
#endif
@@ -1883,7 +1889,7 @@ int uv_os_uname(uv_utsname_t* buffer) {
(unsigned int) os_info.dwMajorVersion,
(unsigned int) os_info.dwMinorVersion,
(unsigned int) os_info.dwBuildNumber);
assert(r < sizeof(buffer->release));
assert((size_t)r < sizeof(buffer->release));

/* Populate the machine field. */
GetSystemInfo(&system_info);

@@ -4735,6 +4735,18 @@ typedef HWINEVENTHOOK (WINAPI *sSetWinEventHook)
DWORD idThread,
UINT dwflags);

/* From mstcpip.h */
typedef struct _TCP_INITIAL_RTO_PARAMETERS {
USHORT Rtt;
UCHAR MaxSynRetransmissions;
} TCP_INITIAL_RTO_PARAMETERS, *PTCP_INITIAL_RTO_PARAMETERS;

#ifndef TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS
# define TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS ((UCHAR) -2)
#endif
#ifndef SIO_TCP_INITIAL_RTO
# define SIO_TCP_INITIAL_RTO _WSAIOW(IOC_VENDOR,17)
#endif

/* Ntdll function pointers */
extern sRtlGetVersion pRtlGetVersion;