8293064: Remove unused NET_xxx functions

Reviewed-by: chegar, djelinski, aefimov, vtewari
Darragh Clarke authored 2022-09-27 15:01:03 +00:00, committed by Aleksei Efimov
parent 3419363e89
commit 99017b06bf
8 changed files with 6 additions and 873 deletions


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019 SAP SE and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -295,96 +295,6 @@ static inline void endOp
errno = orig_errno;
}
/*
* Close or dup2 a file descriptor ensuring that all threads blocked on
* the file descriptor are notified via a wakeup signal.
*
* fd1 < 0 => close(fd2)
* fd1 >= 0 => dup2(fd1, fd2)
*
* Returns -1 with errno set if operation fails.
*/
static int closefd(int fd1, int fd2) {
int rv, orig_errno;
fdEntry_t *fdEntry = getFdEntry(fd2);
if (fdEntry == NULL) {
errno = EBADF;
return -1;
}
/*
* Lock the fd to hold-off additional I/O on this fd.
*/
pthread_mutex_lock(&(fdEntry->lock));
{
/* On fast machines we see that we enter dup2 before the
* accepting thread had a chance to get and process the signal.
* So in case we woke a thread up, give it some time to cope.
* Also see https://bugs.openjdk.java.net/browse/JDK-8006395 */
int num_woken = 0;
/*
* Send a wakeup signal to all threads blocked on this
* file descriptor.
*/
threadEntry_t *curr = fdEntry->threads;
while (curr != NULL) {
curr->intr = 1;
pthread_kill( curr->thr, sigWakeup );
num_woken ++;
curr = curr->next;
}
if (num_woken > 0) {
usleep(num_woken * 50);
}
/*
* And close/dup the file descriptor
* (restart if interrupted by signal)
*/
do {
if (fd1 < 0) {
rv = close(fd2);
} else {
rv = dup2(fd1, fd2);
}
} while (rv == -1 && errno == EINTR);
}
/*
* Unlock without destroying errno
*/
orig_errno = errno;
pthread_mutex_unlock(&(fdEntry->lock));
errno = orig_errno;
return rv;
}
/*
* Wrapper for dup2 - same semantics as dup2 system call except
* that any threads blocked in an I/O system call on fd2 will be
* preempted and return -1/EBADF;
*/
int NET_Dup2(int fd, int fd2) {
if (fd < 0) {
errno = EBADF;
return -1;
}
return closefd(fd, fd2);
}
/*
* Wrapper for close - same semantics as close system call
* except that any threads blocked in an I/O on fd will be
* preempted and the I/O system call will return -1/EBADF.
*/
int NET_SocketClose(int fd) {
return closefd(-1, fd);
}
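/*
 * Illustrative sketch, not part of this change: how the removed wrappers
 * were meant to interact. A thread blocked in NET_Read() on a socket is
 * interrupted by the wakeup signal when another thread calls
 * NET_SocketClose() on the same fd, and the read returns -1 with EBADF.
 * The reader/closer split below is hypothetical.
 */
static void *reader_thread(void *arg) {
    int fd = *(int *)arg;
    char buf[128];
    int n = NET_Read(fd, buf, sizeof(buf));   /* blocks until data or close */
    /* after NET_SocketClose(fd) in another thread: n == -1, errno == EBADF */
    return NULL;
}
static void close_from_other_thread(int fd) {
    NET_SocketClose(fd);   /* signals blocked threads, then closes the fd */
}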
/************** Basic I/O operations here ***************/
/*
@@ -408,31 +318,6 @@ int NET_SocketClose(int fd) {
return ret; \
}
int NET_Read(int s, void* buf, size_t len) {
BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, 0) );
}
int NET_NonBlockingRead(int s, void* buf, size_t len) {
BLOCKING_IO_RETURN_INT(s, recv(s, buf, len, MSG_NONBLOCK));
}
int NET_RecvFrom(int s, void *buf, int len, unsigned int flags,
struct sockaddr *from, socklen_t *fromlen) {
BLOCKING_IO_RETURN_INT( s, recvfrom(s, buf, len, flags, from, fromlen) );
}
int NET_Send(int s, void *msg, int len, unsigned int flags) {
BLOCKING_IO_RETURN_INT( s, send(s, msg, len, flags) );
}
int NET_SendTo(int s, const void *msg, int len, unsigned int
flags, const struct sockaddr *to, int tolen) {
BLOCKING_IO_RETURN_INT( s, sendto(s, msg, len, flags, to, tolen) );
}
int NET_Accept(int s, struct sockaddr *addr, socklen_t *addrlen) {
BLOCKING_IO_RETURN_INT( s, accept(s, addr, addrlen) );
}
int NET_Connect(int s, struct sockaddr *addr, int addrlen) {
int crc = -1, prc = -1;
@@ -491,58 +376,3 @@ int NET_Connect(int s, struct sockaddr *addr, int addrlen) {
int NET_Poll(struct pollfd *ufds, unsigned int nfds, int timeout) {
BLOCKING_IO_RETURN_INT( ufds[0].fd, poll(ufds, nfds, timeout) );
}
/*
* Wrapper for poll(s, timeout).
* Auto restarts with adjusted timeout if interrupted by
* signal other than our wakeup signal.
*/
int NET_Timeout(JNIEnv *env, int s, long timeout, jlong nanoTimeStamp) {
jlong prevNanoTime = nanoTimeStamp;
jlong nanoTimeout = (jlong) timeout * NET_NSEC_PER_MSEC;
fdEntry_t *fdEntry = getFdEntry(s);
/*
* Check that fd hasn't been closed.
*/
if (fdEntry == NULL) {
errno = EBADF;
return -1;
}
for(;;) {
struct pollfd pfd;
int rv;
threadEntry_t self;
/*
* Poll the fd. If interrupted by our wakeup signal
* errno will be set to EBADF.
*/
pfd.fd = s;
pfd.events = POLLIN | POLLERR;
startOp(fdEntry, &self);
rv = poll(&pfd, 1, nanoTimeout / NET_NSEC_PER_MSEC);
endOp(fdEntry, &self);
/*
* If interrupted then adjust timeout. If timeout
* has expired return 0 (indicating timeout expired).
*/
if (rv < 0 && errno == EINTR) {
if (timeout > 0) {
jlong newNanoTime = JVM_NanoTime(env, 0);
nanoTimeout -= newNanoTime - prevNanoTime;
if (nanoTimeout < NET_NSEC_PER_MSEC) {
return 0;
}
prevNanoTime = newNanoTime;
} else {
continue; // timeout is -1, so loop again.
}
} else {
return rv;
}
}
}
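/*
 * Illustrative sketch, not part of this change: the typical calling pattern
 * for NET_Timeout() before a blocking accept(), assuming a positive
 * millisecond timeout. The helper name is hypothetical. A return of 0 means
 * the timeout elapsed, -1 means an error (errno set), and a positive value
 * means the fd is readable.
 */
static int accept_with_timeout(JNIEnv *env, int listenfd, long millis,
                               struct sockaddr *addr, socklen_t *addrlen) {
    int rv = NET_Timeout(env, listenfd, millis, JVM_NanoTime(env, 0));
    if (rv <= 0) {
        return rv;                       /* 0: timed out, -1: error */
    }
    return accept(listenfd, addr, addrlen);
}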


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -263,85 +263,6 @@ static inline void endOp
errno = orig_errno;
}
/*
* Close or dup2 a file descriptor ensuring that all threads blocked on
* the file descriptor are notified via a wakeup signal.
*
* fd1 < 0 => close(fd2)
* fd1 >= 0 => dup2(fd1, fd2)
*
* Returns -1 with errno set if operation fails.
*/
static int closefd(int fd1, int fd2) {
int rv, orig_errno;
fdEntry_t *fdEntry = getFdEntry(fd2);
if (fdEntry == NULL) {
errno = EBADF;
return -1;
}
/*
* Lock the fd to hold-off additional I/O on this fd.
*/
pthread_mutex_lock(&(fdEntry->lock));
{
/*
* And close/dup the file descriptor
* (restart if interrupted by signal)
*/
if (fd1 < 0) {
rv = close(fd2);
} else {
do {
rv = dup2(fd1, fd2);
} while (rv == -1 && errno == EINTR);
}
/*
* Send a wakeup signal to all threads blocked on this
* file descriptor.
*/
threadEntry_t *curr = fdEntry->threads;
while (curr != NULL) {
curr->intr = 1;
pthread_kill( curr->thr, WAKEUP_SIGNAL);
curr = curr->next;
}
}
/*
* Unlock without destroying errno
*/
orig_errno = errno;
pthread_mutex_unlock(&(fdEntry->lock));
errno = orig_errno;
return rv;
}
/*
* Wrapper for dup2 - same semantics as dup2 system call except
* that any threads blocked in an I/O system call on fd2 will be
* preempted and return -1/EBADF;
*/
int NET_Dup2(int fd, int fd2) {
if (fd < 0) {
errno = EBADF;
return -1;
}
return closefd(fd, fd2);
}
/*
* Wrapper for close - same semantics as close system call
* except that any threads blocked in an I/O on fd will be
* preempted and the I/O system call will return -1/EBADF.
*/
int NET_SocketClose(int fd) {
return closefd(-1, fd);
}
/************** Basic I/O operations here ***************/
/*
@@ -365,32 +286,6 @@ int NET_SocketClose(int fd) {
return ret; \
}
int NET_Read(int s, void* buf, size_t len) {
BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, 0) );
}
int NET_NonBlockingRead(int s, void* buf, size_t len) {
BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, MSG_DONTWAIT) );
}
int NET_RecvFrom(int s, void *buf, int len, unsigned int flags,
struct sockaddr *from, socklen_t *fromlen) {
BLOCKING_IO_RETURN_INT( s, recvfrom(s, buf, len, flags, from, fromlen) );
}
int NET_Send(int s, void *msg, int len, unsigned int flags) {
BLOCKING_IO_RETURN_INT( s, send(s, msg, len, flags) );
}
int NET_SendTo(int s, const void *msg, int len, unsigned int
flags, const struct sockaddr *to, int tolen) {
BLOCKING_IO_RETURN_INT( s, sendto(s, msg, len, flags, to, tolen) );
}
int NET_Accept(int s, struct sockaddr *addr, socklen_t *addrlen) {
BLOCKING_IO_RETURN_INT( s, accept(s, addr, addrlen) );
}
int NET_Connect(int s, struct sockaddr *addr, int addrlen) {
BLOCKING_IO_RETURN_INT( s, connect(s, addr, addrlen) );
}
@@ -398,57 +293,3 @@ int NET_Connect(int s, struct sockaddr *addr, int addrlen) {
int NET_Poll(struct pollfd *ufds, unsigned int nfds, int timeout) {
BLOCKING_IO_RETURN_INT( ufds[0].fd, poll(ufds, nfds, timeout) );
}
/*
* Wrapper for poll(s, timeout).
* Auto restarts with adjusted timeout if interrupted by
* signal other than our wakeup signal.
*/
int NET_Timeout(JNIEnv *env, int s, long timeout, jlong nanoTimeStamp) {
jlong prevNanoTime = nanoTimeStamp;
jlong nanoTimeout = (jlong)timeout * NET_NSEC_PER_MSEC;
fdEntry_t *fdEntry = getFdEntry(s);
/*
* Check that fd hasn't been closed.
*/
if (fdEntry == NULL) {
errno = EBADF;
return -1;
}
for(;;) {
struct pollfd pfd;
int rv;
threadEntry_t self;
/*
* Poll the fd. If interrupted by our wakeup signal
* errno will be set to EBADF.
*/
pfd.fd = s;
pfd.events = POLLIN | POLLERR;
startOp(fdEntry, &self);
rv = poll(&pfd, 1, nanoTimeout / NET_NSEC_PER_MSEC);
endOp(fdEntry, &self);
/*
* If interrupted then adjust timeout. If timeout
* has expired return 0 (indicating timeout expired).
*/
if (rv < 0 && errno == EINTR) {
if (timeout > 0) {
jlong newNanoTime = JVM_NanoTime(env, 0);
nanoTimeout -= newNanoTime - prevNanoTime;
if (nanoTimeout < NET_NSEC_PER_MSEC) {
return 0;
}
prevNanoTime = newNanoTime;
} else {
continue; // timeout is -1, so loop again.
}
} else {
return rv;
}
}
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -266,86 +266,6 @@ static inline void endOp
errno = orig_errno;
}
/*
* Close or dup2 a file descriptor ensuring that all threads blocked on
* the file descriptor are notified via a wakeup signal.
*
* fd1 < 0 => close(fd2)
* fd1 >= 0 => dup2(fd1, fd2)
*
* Returns -1 with errno set if operation fails.
*/
static int closefd(int fd1, int fd2) {
int rv, orig_errno;
fdEntry_t *fdEntry = getFdEntry(fd2);
if (fdEntry == NULL) {
errno = EBADF;
return -1;
}
/*
* Lock the fd to hold-off additional I/O on this fd.
*/
pthread_mutex_lock(&(fdEntry->lock));
{
/*
* Send a wakeup signal to all threads blocked on this
* file descriptor.
*/
threadEntry_t *curr = fdEntry->threads;
while (curr != NULL) {
curr->intr = 1;
pthread_kill( curr->thr, sigWakeup );
curr = curr->next;
}
/*
* And close/dup the file descriptor
* (restart if interrupted by signal)
*/
do {
if (fd1 < 0) {
rv = close(fd2);
} else {
rv = dup2(fd1, fd2);
}
} while (rv == -1 && errno == EINTR);
}
/*
* Unlock without destroying errno
*/
orig_errno = errno;
pthread_mutex_unlock(&(fdEntry->lock));
errno = orig_errno;
return rv;
}
/*
* Wrapper for dup2 - same semantics as dup2 system call except
* that any threads blocked in an I/O system call on fd2 will be
* preempted and return -1/EBADF;
*/
int NET_Dup2(int fd, int fd2) {
if (fd < 0) {
errno = EBADF;
return -1;
}
return closefd(fd, fd2);
}
/*
* Wrapper for close - same semantics as close system call
* except that any threads blocked in an I/O on fd will be
* preempted and the I/O system call will return -1/EBADF.
*/
int NET_SocketClose(int fd) {
return closefd(-1, fd);
}
/************** Basic I/O operations here ***************/
/*
@@ -369,32 +289,6 @@ int NET_SocketClose(int fd) {
return ret; \
}
int NET_Read(int s, void* buf, size_t len) {
BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, 0) );
}
int NET_NonBlockingRead(int s, void* buf, size_t len) {
BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, MSG_DONTWAIT));
}
int NET_RecvFrom(int s, void *buf, int len, unsigned int flags,
struct sockaddr *from, socklen_t *fromlen) {
BLOCKING_IO_RETURN_INT( s, recvfrom(s, buf, len, flags, from, fromlen) );
}
int NET_Send(int s, void *msg, int len, unsigned int flags) {
BLOCKING_IO_RETURN_INT( s, send(s, msg, len, flags) );
}
int NET_SendTo(int s, const void *msg, int len, unsigned int
flags, const struct sockaddr *to, int tolen) {
BLOCKING_IO_RETURN_INT( s, sendto(s, msg, len, flags, to, tolen) );
}
int NET_Accept(int s, struct sockaddr *addr, socklen_t *addrlen) {
BLOCKING_IO_RETURN_INT( s, accept(s, addr, addrlen) );
}
int NET_Connect(int s, struct sockaddr *addr, int addrlen) {
BLOCKING_IO_RETURN_INT( s, connect(s, addr, addrlen) );
}
@@ -402,94 +296,3 @@ int NET_Connect(int s, struct sockaddr *addr, int addrlen) {
int NET_Poll(struct pollfd *ufds, unsigned int nfds, int timeout) {
BLOCKING_IO_RETURN_INT( ufds[0].fd, poll(ufds, nfds, timeout) );
}
/*
* Wrapper for select(s, timeout). We are using select() on Mac OS due to Bug 7131399.
* Auto restarts with adjusted timeout if interrupted by
* signal other than our wakeup signal.
*/
int NET_Timeout(JNIEnv *env, int s, long timeout, jlong nanoTimeStamp) {
struct timeval t, *tp = &t;
fd_set fds;
fd_set* fdsp = NULL;
int allocated = 0;
threadEntry_t self;
fdEntry_t *fdEntry = getFdEntry(s);
/*
* Check that fd hasn't been closed.
*/
if (fdEntry == NULL) {
errno = EBADF;
return -1;
}
/*
* Pick up current time as may need to adjust timeout
*/
if (timeout > 0) {
/* Timed */
t.tv_sec = timeout / 1000;
t.tv_usec = (timeout % 1000) * 1000;
} else if (timeout < 0) {
/* Blocking */
tp = 0;
} else {
/* Poll */
t.tv_sec = 0;
t.tv_usec = 0;
}
if (s < FD_SETSIZE) {
fdsp = &fds;
FD_ZERO(fdsp);
} else {
int length = (howmany(s+1, NFDBITS)) * sizeof(int);
fdsp = (fd_set *) calloc(1, length);
if (fdsp == NULL) {
return -1; // errno will be set to ENOMEM
}
allocated = 1;
}
FD_SET(s, fdsp);
jlong prevNanoTime = nanoTimeStamp;
jlong nanoTimeout = (jlong) timeout * NET_NSEC_PER_MSEC;
for(;;) {
int rv;
/*
* call select on the fd. If interrupted by our wakeup signal
* errno will be set to EBADF.
*/
startOp(fdEntry, &self);
rv = select(s+1, fdsp, 0, 0, tp);
endOp(fdEntry, &self);
/*
* If interrupted then adjust timeout. If timeout
* has expired return 0 (indicating timeout expired).
*/
if (rv < 0 && errno == EINTR) {
if (timeout > 0) {
jlong newNanoTime = JVM_NanoTime(env, 0);
nanoTimeout -= newNanoTime - prevNanoTime;
if (nanoTimeout < NET_NSEC_PER_MSEC) {
if (allocated != 0)
free(fdsp);
return 0;
}
prevNanoTime = newNanoTime;
t.tv_sec = nanoTimeout / NET_NSEC_PER_SEC;
t.tv_usec = (nanoTimeout % NET_NSEC_PER_SEC) / NET_NSEC_PER_USEC;
} else {
continue; // timeout is -1, so loop again.
}
} else {
if (allocated != 0)
free(fdsp);
return rv;
}
}
}
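/*
 * Illustrative arithmetic, not part of this change: how a millisecond
 * timeout maps onto the struct timeval used by select() above. The helper
 * name is hypothetical.
 */
static struct timeval millis_to_timeval(long millis) {
    struct timeval t;
    t.tv_sec = millis / 1000;            /* whole seconds, e.g. 2500 ms -> 2 */
    t.tv_usec = (millis % 1000) * 1000;  /* remainder as microseconds -> 500000 */
    return t;
}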


@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -109,9 +109,6 @@ JNIEXPORT void JNICALL Java_java_net_NetworkInterface_init(JNIEnv *env, jclass c
JNIEXPORT void JNICALL NET_ThrowNew(JNIEnv *env, int errorNum, char *msg);
void NET_ThrowCurrent(JNIEnv *env, char *msg);
jfieldID NET_GetFileDescriptorID(JNIEnv *env);
JNIEXPORT jint JNICALL ipv4_available();
JNIEXPORT jint JNICALL ipv6_available();


@@ -69,11 +69,6 @@ NET_ThrowByNameWithLastError(JNIEnv *env, const char *name,
JNU_ThrowByNameWithMessageAndLastError(env, name, defaultDetail);
}
void
NET_ThrowCurrent(JNIEnv *env, char *msg) {
NET_ThrowNew(env, errno, msg);
}
void
NET_ThrowNew(JNIEnv *env, int errorNumber, char *msg) {
char fullMsg[512];
@@ -95,15 +90,6 @@ NET_ThrowNew(JNIEnv *env, int errorNumber, char *msg) {
}
}
jfieldID
NET_GetFileDescriptorID(JNIEnv *env)
{
jclass cls = (*env)->FindClass(env, "java/io/FileDescriptor");
CHECK_NULL_RETURN(cls, NULL);
return (*env)->GetFieldID(env, cls, "fd", "I");
}
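/*
 * Illustrative sketch, not part of this change: a caller would cache the
 * jfieldID returned above and use it to read the native fd out of a
 * java.io.FileDescriptor object. The helper name and fdObj parameter are
 * hypothetical.
 */
static int get_native_fd(JNIEnv *env, jobject fdObj) {
    static jfieldID fd_fid = NULL;
    if (fd_fid == NULL) {
        fd_fid = NET_GetFileDescriptorID(env);
        if (fd_fid == NULL) {
            return -1;   /* exception pending from FindClass/GetFieldID */
        }
    }
    return (*env)->GetIntField(env, fdObj, fd_fid);
}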
jint IPv4_supported()
{
int fd = socket(AF_INET, SOCK_STREAM, 0) ;
@@ -328,13 +314,6 @@ NET_InetAddressToSockaddr(JNIEnv *env, jobject iaObj, int port,
return 0;
}
void
NET_SetTrafficClass(SOCKETADDRESS *sa, int trafficClass) {
if (sa->sa.sa_family == AF_INET6) {
sa->sa6.sin6_flowinfo = htonl((trafficClass & 0xff) << 20);
}
}
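/*
 * Illustrative arithmetic, not part of this change: the 8-bit traffic class
 * sits above the 20-bit flow label, so the shift by 20 above places it in
 * bits 20-27 of the flow-info word. For example, trafficClass = 0x2e yields
 * htonl((0x2e & 0xff) << 20) == htonl(0x02e00000) in sin6_flowinfo.
 */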
int
NET_IsIPv4Mapped(jbyte* caddr) {
int i;


@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -75,18 +75,7 @@ typedef union {
* Functions
*/
int NET_Timeout(JNIEnv *env, int s, long timeout, jlong nanoTimeStamp);
int NET_Read(int s, void* buf, size_t len);
int NET_NonBlockingRead(int s, void* buf, size_t len);
int NET_RecvFrom(int s, void *buf, int len, unsigned int flags,
struct sockaddr *from, socklen_t *fromlen);
int NET_Send(int s, void *msg, int len, unsigned int flags);
int NET_SendTo(int s, const void *msg, int len, unsigned int
flags, const struct sockaddr *to, int tolen);
int NET_Connect(int s, struct sockaddr *addr, int addrlen);
int NET_Accept(int s, struct sockaddr *addr, socklen_t *addrlen);
int NET_SocketClose(int s);
int NET_Dup2(int oldfd, int newfd);
int NET_Poll(struct pollfd *ufds, unsigned int nfds, int timeout);
void NET_ThrowUnknownHostExceptionWithGaiError(JNIEnv *env,
@@ -94,6 +83,5 @@ void NET_ThrowUnknownHostExceptionWithGaiError(JNIEnv *env,
int gai_error);
void NET_ThrowByNameWithLastError(JNIEnv *env, const char *name,
const char *defaultDetail);
void NET_SetTrafficClass(SOCKETADDRESS *sa, int trafficClass);
#endif /* NET_UTILS_MD_H */


@@ -193,26 +193,12 @@ NET_ThrowNew(JNIEnv *env, int errorNum, char *msg)
JNU_ThrowByName(env, exc, fullMsg);
}
void
NET_ThrowCurrent(JNIEnv *env, char *msg)
{
NET_ThrowNew(env, WSAGetLastError(), msg);
}
void
NET_ThrowByNameWithLastError(JNIEnv *env, const char *name,
const char *defaultDetail) {
JNU_ThrowByNameWithMessageAndLastError(env, name, defaultDetail);
}
jfieldID
NET_GetFileDescriptorID(JNIEnv *env)
{
jclass cls = (*env)->FindClass(env, "java/io/FileDescriptor");
CHECK_NULL_RETURN(cls, NULL);
return (*env)->GetFieldID(env, cls, "fd", "I");
}
jint IPv4_supported()
{
SOCKET s = socket(AF_INET, SOCK_STREAM, 0);
@@ -472,77 +458,6 @@ NET_WinBind(int s, SOCKETADDRESS *sa, int len, jboolean exclBind)
return NET_Bind(s, sa, len);
}
JNIEXPORT int JNICALL
NET_SocketClose(int fd) {
struct linger l = {0, 0};
int ret = 0;
int len = sizeof (l);
if (getsockopt(fd, SOL_SOCKET, SO_LINGER, (char *)&l, &len) == 0) {
if (l.l_onoff == 0) {
shutdown(fd, SD_SEND);
}
}
ret = closesocket (fd);
return ret;
}
JNIEXPORT int JNICALL
NET_Timeout(int fd, long timeout) {
int ret;
fd_set tbl;
struct timeval t;
t.tv_sec = timeout / 1000;
t.tv_usec = (timeout % 1000) * 1000;
FD_ZERO(&tbl);
FD_SET(fd, &tbl);
ret = select (fd + 1, &tbl, 0, 0, &t);
return ret;
}
/*
* differs from NET_Timeout() as follows:
*
* If timeout = -1, it blocks forever.
*
* returns 1 or 2 depending if only one or both sockets
* fire at same time.
*
* *fdret is (one of) the active fds. If both sockets
* fire at same time, *fdret = fd always.
*/
JNIEXPORT int JNICALL
NET_Timeout2(int fd, int fd1, long timeout, int *fdret) {
int ret;
fd_set tbl;
struct timeval t, *tP = &t;
if (timeout == -1) {
tP = 0;
} else {
t.tv_sec = timeout / 1000;
t.tv_usec = (timeout % 1000) * 1000;
}
FD_ZERO(&tbl);
FD_SET(fd, &tbl);
FD_SET(fd1, &tbl);
ret = select (0, &tbl, 0, 0, tP);
switch (ret) {
case 0:
return 0; /* timeout */
case 1:
if (FD_ISSET (fd, &tbl)) {
*fdret= fd;
} else {
*fdret= fd1;
}
return 1;
case 2:
*fdret= fd;
return 2;
}
return -1;
}
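/*
 * Illustrative sketch, not part of this change: NET_Timeout2() is shaped
 * for the dual-socket (IPv4 + IPv6) case, where a listener waits on two
 * fds at once and then accepts on whichever became readable. The helper
 * name and error handling below are hypothetical.
 */
static SOCKET accept_dual_stack(int fd_v4, int fd_v6, long millis) {
    int readyfd;
    int rv = NET_Timeout2(fd_v4, fd_v6, millis, &readyfd);
    if (rv <= 0) {
        return INVALID_SOCKET;           /* 0: timed out, -1: error */
    }
    return accept(readyfd, NULL, NULL);  /* readyfd is fd_v4 if both fired */
}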
void dumpAddr (char *str, void *addr) {
struct sockaddr_in6 *a = (struct sockaddr_in6 *)addr;
@@ -567,195 +482,6 @@ void dumpAddr (char *str, void *addr) {
}
}
/* Macro, which cleans-up the iv6bind structure,
* closes the two sockets (if open),
* and returns SOCKET_ERROR. Used in NET_BindV6 only.
*/
#define CLOSE_SOCKETS_AND_RETURN do { \
if (fd != -1) { \
closesocket (fd); \
fd = -1; \
} \
if (ofd != -1) { \
closesocket (ofd); \
ofd = -1; \
} \
if (close_fd != -1) { \
closesocket (close_fd); \
close_fd = -1; \
} \
if (close_ofd != -1) { \
closesocket (close_ofd); \
close_ofd = -1; \
} \
b->ipv4_fd = b->ipv6_fd = -1; \
return SOCKET_ERROR; \
} while(0)
/*
* if ipv6 is available, call NET_BindV6 to bind to the required address/port.
* Because the same port number may need to be reserved in both v4 and v6 space,
* this may require socket(s) to be re-opened. Therefore, all of this information
* is passed in and returned through the ipv6bind structure.
*
* If the request is to bind to a specific address, then this (by definition) means
* only bind in either v4 or v6, and this is just the same as normal. ie. a single
* call to bind() will suffice. The other socket is closed in this case.
*
* The more complicated case is when the requested address is ::0 or 0.0.0.0.
*
* Two further cases:
* 2. If the requested port is 0 (ie. any port) then we try to bind in v4 space
* first with a wild-card port argument. We then try to bind in v6 space
* using the returned port number. If this fails, we repeat the process
* until a free port common to both spaces becomes available.
*
* 3. If the requested port is a specific port, then we just try to get that
* port in both spaces, and if it is not free in both, then the bind fails.
*
* On failure, sockets are closed and an error returned with CLOSE_SOCKETS_AND_RETURN
*/
JNIEXPORT int JNICALL
NET_BindV6(struct ipv6bind *b, jboolean exclBind) {
int fd=-1, ofd=-1, rv, len;
/* need to defer close until new sockets created */
int close_fd=-1, close_ofd=-1;
SOCKETADDRESS oaddr; /* other address to bind */
int family = b->addr->sa.sa_family;
int ofamily;
u_short port; /* requested port parameter */
u_short bound_port;
if (family == AF_INET && (b->addr->sa4.sin_addr.s_addr != INADDR_ANY)) {
/* bind to v4 only */
int ret;
ret = NET_WinBind((int)b->ipv4_fd, b->addr,
sizeof(SOCKETADDRESS), exclBind);
if (ret == SOCKET_ERROR) {
CLOSE_SOCKETS_AND_RETURN;
}
closesocket (b->ipv6_fd);
b->ipv6_fd = -1;
return 0;
}
if (family == AF_INET6 && (!IN6_IS_ADDR_ANY(&b->addr->sa6.sin6_addr))) {
/* bind to v6 only */
int ret;
ret = NET_WinBind((int)b->ipv6_fd, b->addr,
sizeof(SOCKETADDRESS), exclBind);
if (ret == SOCKET_ERROR) {
CLOSE_SOCKETS_AND_RETURN;
}
closesocket (b->ipv4_fd);
b->ipv4_fd = -1;
return 0;
}
/* We need to bind on both stacks, with the same port number */
memset (&oaddr, 0, sizeof(oaddr));
if (family == AF_INET) {
ofamily = AF_INET6;
fd = (int)b->ipv4_fd;
ofd = (int)b->ipv6_fd;
port = (u_short)GET_PORT (b->addr);
IN6ADDR_SETANY(&oaddr.sa6);
oaddr.sa6.sin6_port = port;
} else {
ofamily = AF_INET;
ofd = (int)b->ipv4_fd;
fd = (int)b->ipv6_fd;
port = (u_short)GET_PORT (b->addr);
oaddr.sa4.sin_family = AF_INET;
oaddr.sa4.sin_port = port;
oaddr.sa4.sin_addr.s_addr = INADDR_ANY;
}
rv = NET_WinBind(fd, b->addr, sizeof(SOCKETADDRESS), exclBind);
if (rv == SOCKET_ERROR) {
CLOSE_SOCKETS_AND_RETURN;
}
/* get the port and set it in the other address */
len = sizeof(SOCKETADDRESS);
if (getsockname(fd, (struct sockaddr *)b->addr, &len) == -1) {
CLOSE_SOCKETS_AND_RETURN;
}
bound_port = GET_PORT (b->addr);
SET_PORT (&oaddr, bound_port);
if ((rv = NET_WinBind(ofd, &oaddr,
sizeof(SOCKETADDRESS), exclBind)) == SOCKET_ERROR) {
int retries;
int sotype, arglen=sizeof(sotype);
/* no retries unless, the request was for any free port */
if (port != 0) {
CLOSE_SOCKETS_AND_RETURN;
}
getsockopt(fd, SOL_SOCKET, SO_TYPE, (void *)&sotype, &arglen);
#define SOCK_RETRIES 50
/* 50 is an arbitrary limit, just to ensure that this
* cannot be an endless loop. Would expect socket creation to
* succeed sooner.
*/
for (retries = 0; retries < SOCK_RETRIES; retries ++) {
int len;
close_fd = fd; fd = -1;
close_ofd = ofd; ofd = -1;
b->ipv4_fd = SOCKET_ERROR;
b->ipv6_fd = SOCKET_ERROR;
/* create two new sockets */
fd = (int)socket (family, sotype, 0);
if (fd == SOCKET_ERROR) {
CLOSE_SOCKETS_AND_RETURN;
}
ofd = (int)socket (ofamily, sotype, 0);
if (ofd == SOCKET_ERROR) {
CLOSE_SOCKETS_AND_RETURN;
}
/* bind random port on first socket */
SET_PORT (&oaddr, 0);
rv = NET_WinBind(ofd, &oaddr, sizeof(SOCKETADDRESS), exclBind);
if (rv == SOCKET_ERROR) {
CLOSE_SOCKETS_AND_RETURN;
}
/* close the original pair of sockets before continuing */
closesocket (close_fd);
closesocket (close_ofd);
close_fd = close_ofd = -1;
/* bind new port on second socket */
len = sizeof(SOCKETADDRESS);
if (getsockname(ofd, &oaddr.sa, &len) == -1) {
CLOSE_SOCKETS_AND_RETURN;
}
bound_port = GET_PORT (&oaddr);
SET_PORT (b->addr, bound_port);
rv = NET_WinBind(fd, b->addr, sizeof(SOCKETADDRESS), exclBind);
if (rv != SOCKET_ERROR) {
if (family == AF_INET) {
b->ipv4_fd = fd;
b->ipv6_fd = ofd;
} else {
b->ipv4_fd = ofd;
b->ipv6_fd = fd;
}
return 0;
}
}
CLOSE_SOCKETS_AND_RETURN;
}
return 0;
}
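/*
 * Illustrative sketch, not part of this change: a caller of NET_BindV6()
 * supplies two freshly created, unbound sockets plus the requested address,
 * and reads the (possibly re-created) fds back out of the ipv6bind struct.
 * The helper name and socket types below are hypothetical.
 */
static int bind_both_stacks(SOCKETADDRESS *addr, jboolean exclBind) {
    struct ipv6bind b;
    b.addr = addr;                        /* e.g. ::0 with the requested port */
    b.ipv4_fd = socket(AF_INET,  SOCK_STREAM, 0);
    b.ipv6_fd = socket(AF_INET6, SOCK_STREAM, 0);
    if (NET_BindV6(&b, exclBind) == SOCKET_ERROR) {
        return SOCKET_ERROR;              /* sockets already closed on failure */
    }
    /* on success b.ipv4_fd / b.ipv6_fd hold the bound sockets (or -1) */
    return 0;
}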
/**
* Enables SIO_LOOPBACK_FAST_PATH
*/


@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,24 +51,12 @@
/* true if SO_RCVTIMEO is supported by underlying provider */
extern jboolean isRcvTimeoutSupported;
void NET_ThrowCurrent(JNIEnv *env, char *msg);
typedef union {
struct sockaddr sa;
struct sockaddr_in sa4;
struct sockaddr_in6 sa6;
} SOCKETADDRESS;
/*
* passed to NET_BindV6. Both ipv4_fd and ipv6_fd must be created and unbound
* sockets. On return they may refer to different sockets.
*/
struct ipv6bind {
SOCKETADDRESS *addr;
SOCKET ipv4_fd;
SOCKET ipv6_fd;
};
#define SOCKETADDRESS_COPY(DST,SRC) { \
if ((SRC)->sa_family == AF_INET6) { \
memcpy ((DST), (SRC), sizeof (struct sockaddr_in6)); \
@@ -118,30 +106,11 @@ struct ipv6bind {
(IN6_IS_ADDR_V4MAPPED_LOOPBACK(&(x)->sa6.sin6_addr))) \
)
JNIEXPORT int JNICALL NET_SocketClose(int fd);
JNIEXPORT int JNICALL NET_Timeout(int fd, long timeout);
int NET_Socket(int domain, int type, int protocol);
void NET_ThrowByNameWithLastError(JNIEnv *env, const char *name,
const char *defaultDetail);
/*
* differs from NET_Timeout() as follows:
*
* If timeout = -1, it blocks forever.
*
* returns 1 or 2 depending if only one or both sockets
* fire at same time.
*
* *fdret is (one of) the active fds. If both sockets
* fire at same time, *fd == fd always.
*/
JNIEXPORT int JNICALL NET_Timeout2(int fd, int fd1, long timeout, int *fdret);
JNIEXPORT int JNICALL NET_BindV6(struct ipv6bind *b, jboolean exclBind);
JNIEXPORT int JNICALL NET_WinBind(int s, SOCKETADDRESS *sa, int len,
jboolean exclBind);