J. Duke 2017-07-05 20:07:30 +02:00
commit 46d13b99d3
214 changed files with 4358 additions and 2126 deletions

View File

@ -281,3 +281,4 @@ c173ba994245380fb11ef077d1e59823386840eb jdk9-b35
201d4e235d597a25a2d3ee1404394789ba386119 jdk9-b36
723a67b0c442391447b1d8aad8b249d06d1032e8 jdk9-b37
d42c0a90afc3c66ca87543076ec9aafd4b4680de jdk9-b38
512dbbeb1730edcebfec873fc3f1455660b32000 jdk9-b39

View File

@ -441,3 +441,4 @@ af46576a8d7cb4003028b8ee8bf408cfe227315b jdk9-b32
464ab653fbb17eb518d8ef60f8df301de7ef00d0 jdk9-b36
b1c2dd843f247a1db19e1e85eb62ca405f72dc26 jdk9-b37
c363a8b87e477ee45d6d3cb2a36cb365141bc596 jdk9-b38
9cb75e5e394827ccbaf2e15524108a412dc4ddc5 jdk9-b39

View File

@ -0,0 +1,41 @@
/*
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.runtime;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.types.*;
public class CodeCacheSweeperThread extends JavaThread {
public CodeCacheSweeperThread(Address addr) {
super(addr);
}
public boolean isJavaThread() { return false; }
public boolean isHiddenFromExternalView() { return true; }
public boolean isCodeCacheSweeperThread() { return true; }
}

View File

@ -118,9 +118,10 @@ public class JavaThread extends Thread {
return VM.getVM().getThreads().createJavaThreadWrapper(threadAddr);
}
/** NOTE: for convenience, this differs in definition from the
underlying VM. Only "pure" JavaThreads return true;
CompilerThreads and JVMDIDebuggerThreads return false. FIXME:
/** NOTE: for convenience, this differs in definition from the underlying VM.
Only "pure" JavaThreads return true; CompilerThreads, the CodeCacheSweeperThread,
JVMDIDebuggerThreads return false.
FIXME:
consider encapsulating platform-specific functionality in an
object instead of using inheritance (which is the primary reason
we can't traverse CompilerThreads, etc; didn't want to have, for

View File

@ -111,14 +111,15 @@ public class Thread extends VMObject {
return allocatedBytesField.getValue(addr);
}
public boolean isVMThread() { return false; }
public boolean isJavaThread() { return false; }
public boolean isCompilerThread() { return false; }
public boolean isHiddenFromExternalView() { return false; }
public boolean isJvmtiAgentThread() { return false; }
public boolean isWatcherThread() { return false; }
public boolean isVMThread() { return false; }
public boolean isJavaThread() { return false; }
public boolean isCompilerThread() { return false; }
public boolean isCodeCacheSweeperThread() { return false; }
public boolean isHiddenFromExternalView() { return false; }
public boolean isJvmtiAgentThread() { return false; }
public boolean isWatcherThread() { return false; }
public boolean isConcurrentMarkSweepThread() { return false; }
public boolean isServiceThread() { return false; }
public boolean isServiceThread() { return false; }
/** Memory operations */
public void oopsDo(AddressVisitor oopVisitor) {

View File

@ -120,6 +120,7 @@ public class Threads {
virtualConstructor.addMapping("JavaThread", JavaThread.class);
if (!VM.getVM().isCore()) {
virtualConstructor.addMapping("CompilerThread", CompilerThread.class);
virtualConstructor.addMapping("CodeCacheSweeperThread", CodeCacheSweeperThread.class);
}
// for now, use JavaThread itself. fix it later with appropriate class if needed
virtualConstructor.addMapping("SurrogateLockerThread", JavaThread.class);
@ -164,7 +165,7 @@ public class Threads {
return thread;
} catch (Exception e) {
throw new RuntimeException("Unable to deduce type of thread from address " + threadAddr +
" (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread, or SurrogateLockerThread)", e);
" (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread, SurrogateLockerThread, or CodeCacheSweeperThread)", e);
}
}
@ -201,7 +202,7 @@ public class Threads {
public List getPendingThreads(ObjectMonitor monitor) {
List pendingThreads = new ArrayList();
for (JavaThread thread = first(); thread != null; thread = thread.next()) {
if (thread.isCompilerThread()) {
if (thread.isCompilerThread() || thread.isCodeCacheSweeperThread()) {
continue;
}
ObjectMonitor pending = thread.getCurrentPendingMonitor();

View File

@ -836,6 +836,7 @@ vmType2Class["InterpreterCodelet"] = sapkg.interpreter.InterpreterCodelet;
// Java Threads
vmType2Class["JavaThread"] = sapkg.runtime.JavaThread;
vmType2Class["CompilerThread"] = sapkg.runtime.CompilerThread;
vmType2Class["CodeCacheSweeperThread"] = sapkg.runtime.CodeCacheSweeperThread;
vmType2Class["SurrogateLockerThread"] = sapkg.runtime.JavaThread;
vmType2Class["DebuggerThread"] = sapkg.runtime.DebuggerThread;

View File

@ -143,7 +143,7 @@ else
LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc -ldemangle
endif # sparcWorks
LIBS += -lkstat -lpicl
LIBS += -lkstat
# By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM

View File

@ -158,7 +158,7 @@ LD=link.exe
!endif
LD_FLAGS= $(LD_FLAGS) kernel32.lib user32.lib gdi32.lib winspool.lib \
comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib \
uuid.lib Wsock32.lib winmm.lib /nologo /machine:$(MACHINE) /opt:REF \
uuid.lib Wsock32.lib winmm.lib version.lib /nologo /machine:$(MACHINE) /opt:REF \
/opt:ICF,8
!if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
LD_FLAGS= $(LD_FLAGS) /map /debug

View File

@ -237,7 +237,7 @@ IRT_ENTRY(address, InterpreterRuntime::slow_signature_handler(
// handle arguments
// Warning: We use reg arg slot 00 temporarily to return the RegArgSignature
// back to the code that pops the arguments into the CPU registers
SlowSignatureHandler(m, (address)from, m->is_static() ? to+2 : to+1, to).iterate(UCONST64(-1));
SlowSignatureHandler(m, (address)from, m->is_static() ? to+2 : to+1, to).iterate((uint64_t)CONST64(-1));
// return result handler
return Interpreter::result_handler(m->result_type());
IRT_END
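This hunk, and the CONST64/UCONST64 changes in the files that follow (the signmask pools, mov64, the other slow_signature_handler variants, and ALL_64_BITS), all adjust the signedness of 64-bit literal macros: the all-ones fingerprint is now built from a signed -1 and cast, while sign-bit constants are written as unsigned literals and cast to jlong, presumably to avoid compiler warnings about negating unsigned constants and about values that do not fit in a signed 64-bit type. A minimal sketch of the distinction, using stand-in macro definitions that are assumed to match HotSpot's (not copied from the source):

```cpp
#include <cstdint>
#include <cstdio>

// Stand-ins for HotSpot's CONST64/UCONST64 (assumption: they append the
// signed and unsigned 64-bit literal suffixes, respectively).
#define CONST64(x)  (x ## LL)
#define UCONST64(x) (x ## ULL)

int main() {
  // All-ones fingerprint: build it as signed -1 and convert explicitly,
  // instead of negating an unsigned constant (UCONST64(-1) expands to -1ULL,
  // which some compilers flag).
  uint64_t fingerprint = (uint64_t)CONST64(-1);

  // Sign-bit constant: 0x8000000000000000 does not fit in a signed 64-bit
  // type, so spell it as an unsigned literal and cast, rather than relying
  // on an out-of-range signed constant.
  int64_t double_signflip = (int64_t)UCONST64(0x8000000000000000);

  printf("%016llx %016llx\n",
         (unsigned long long)fingerprint,
         (unsigned long long)double_signflip);
  return 0;
}
```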

View File

@ -60,10 +60,10 @@ static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
// Static initialization during VM startup.
static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));

View File

@ -1597,7 +1597,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ movl(rdx, 0x80000000);
__ xorl(rax, rax);
#else
__ mov64(rax, CONST64(0x8000000000000000));
__ mov64(rax, UCONST64(0x8000000000000000));
#endif // _LP64
__ jmp(do_return);

View File

@ -135,7 +135,7 @@ IRT_ENTRY(address, InterpreterRuntime::slow_signature_handler(JavaThread* thread
methodHandle m(thread, (Method*)method);
assert(m->is_native(), "sanity check");
// handle arguments
SlowSignatureHandler(m, (address)from, to + 1).iterate(UCONST64(-1));
SlowSignatureHandler(m, (address)from, to + 1).iterate((uint64_t)CONST64(-1));
// return result handler
return Interpreter::result_handler(m->result_type());
IRT_END

View File

@ -487,7 +487,7 @@ IRT_ENTRY(address,
assert(m->is_native(), "sanity check");
// handle arguments
SlowSignatureHandler(m, (address)from, to + 1).iterate(UCONST64(-1));
SlowSignatureHandler(m, (address)from, to + 1).iterate((uint64_t)CONST64(-1));
// return result handler
return Interpreter::result_handler(m->result_type());

View File

@ -865,14 +865,19 @@ void VM_Version::get_processor_features() {
if (supports_bmi1()) {
// tzcnt does not require VEX prefix
if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
UseCountTrailingZerosInstruction = true;
if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
// Don't use tzcnt if BMI1 is switched off on command line.
UseCountTrailingZerosInstruction = false;
} else {
UseCountTrailingZerosInstruction = true;
}
}
} else if (UseCountTrailingZerosInstruction) {
warning("tzcnt instruction is not available on this CPU");
FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
}
// BMI instructions use an encoding with VEX prefix.
// BMI instructions (except tzcnt) use an encoding with VEX prefix.
// VEX prefix is generated only when AVX > 0.
if (supports_bmi1() && supports_avx()) {
if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
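The new default logic above ties the tzcnt default to the BMI1 command-line setting: UseCountTrailingZerosInstruction defaults on only when the CPU supports BMI1 and the user has not explicitly passed -XX:-UseBMI1Instructions, even though tzcnt itself does not need the VEX prefix. A standalone sketch of that precedence (the helper below is hypothetical, not HotSpot code; "explicit" corresponds to !FLAG_IS_DEFAULT):

```cpp
#include <cstdio>

// Hypothetical, simplified model of the flag resolution shown above.
bool resolve_use_tzcnt(bool cpu_has_bmi1,
                       bool use_bmi1, bool use_bmi1_explicit,
                       bool use_tzcnt, bool use_tzcnt_explicit) {
  if (cpu_has_bmi1) {
    if (!use_tzcnt_explicit) {
      // Default on, unless BMI1 was explicitly switched off on the command
      // line -- in that case keep tzcnt off as well.
      return !(use_bmi1_explicit && !use_bmi1);
    }
    return use_tzcnt;               // an explicit user choice wins
  }
  if (use_tzcnt) {
    fprintf(stderr, "tzcnt instruction is not available on this CPU\n");
  }
  return false;                     // not supported by the CPU
}
```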

View File

@ -155,7 +155,7 @@ IRT_ENTRY(address,
intptr_t *buf = (intptr_t *) stack->alloc(required_words * wordSize);
SlowSignatureHandlerGenerator sshg(methodHandle(thread, method), buf);
sshg.generate(UCONST64(-1));
sshg.generate((uint64_t)CONST64(-1));
SignatureHandler *handler = sshg.handler();
handler->finalize();

View File

@ -1641,7 +1641,8 @@ void os::jvm_path(char *buf, jint buflen) {
char* rp = realpath((char *)dlinfo.dli_fname, buf);
assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
strcpy(saved_jvm_path, buf);
strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
@ -3829,11 +3830,6 @@ jint os::init_2(void) {
return JNI_OK;
}
// this is called at the end of vm_initialization
void os::init_3(void) {
return;
}
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if (!guard_memory((char*)_polling_page, Aix::page_size())) {
@ -4137,15 +4133,6 @@ int os::available(int fd, jlong *bytes) {
return 1;
}
int os::socket_available(int fd, jint *pbytes) {
// Linux doc says EINTR not returned, unlike Solaris
int ret = ::ioctl(fd, FIONREAD, pbytes);
//%% note ioctl can return 0 when successful, JVM_SocketAvailable
// is expected to return 0 on failure and 1 on success to the jdk.
return (ret < 0) ? 0 : 1;
}
// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,

View File

@ -178,92 +178,14 @@ inline int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
return os::send(fd, buf, nBytes, flags);
}
inline int os::timeout(int fd, long timeout) {
julong prevtime,newtime;
struct timeval t;
gettimeofday(&t, NULL);
prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
for(;;) {
struct pollfd pfd;
pfd.fd = fd;
pfd.events = POLLIN | POLLERR;
int res = ::poll(&pfd, 1, timeout);
if (res == OS_ERR && errno == EINTR) {
// On Linux any value < 0 means "forever"
if(timeout >= 0) {
gettimeofday(&t, NULL);
newtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
timeout -= newtime - prevtime;
if(timeout <= 0)
return OS_OK;
prevtime = newtime;
}
} else
return res;
}
}
inline int os::listen(int fd, int count) {
return ::listen(fd, count);
}
inline int os::connect(int fd, struct sockaddr* him, socklen_t len) {
RESTARTABLE_RETURN_INT(::connect(fd, him, len));
}
inline int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
// Linux doc says this can't return EINTR, unlike accept() on Solaris.
// But see attachListener_linux.cpp, LinuxAttachListener::dequeue().
return (int)::accept(fd, him, len);
}
inline int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
sockaddr* from, socklen_t* fromlen) {
RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
}
inline int os::sendto(int fd, char* buf, size_t len, uint flags,
struct sockaddr* to, socklen_t tolen) {
RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
}
inline int os::socket_shutdown(int fd, int howto) {
return ::shutdown(fd, howto);
}
inline int os::bind(int fd, struct sockaddr* him, socklen_t len) {
return ::bind(fd, him, len);
}
inline int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
return ::getsockname(fd, him, len);
}
inline int os::get_host_name(char* name, int namelen) {
return ::gethostname(name, namelen);
}
inline struct hostent* os::get_host_by_name(char* name) {
return ::gethostbyname(name);
}
inline int os::get_sock_opt(int fd, int level, int optname,
char* optval, socklen_t* optlen) {
return ::getsockopt(fd, level, optname, optval, optlen);
}
inline int os::set_sock_opt(int fd, int level, int optname,
const char* optval, socklen_t optlen) {
return ::setsockopt(fd, level, optname, optval, optlen);
}
inline bool os::supports_monotonic_clock() {
// mread_real_time() is monotonic on AIX (see os::javaTimeNanos() comments)
return true;

View File

@ -506,6 +506,7 @@ static void cleanup_sharedmem_resources(const char* dirname) {
if (!is_directory_secure(dirname)) {
// the directory is not a secure directory
os::closedir(dirp);
return;
}
@ -853,6 +854,9 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
//
if (!is_directory_secure(dirname)) {
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
if (luser != user) {
FREE_C_HEAP_ARRAY(char, luser, mtInternal);
}
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Process not found");
}

View File

@ -1875,6 +1875,7 @@ void os::jvm_path(char *buf, jint buflen) {
}
strncpy(saved_jvm_path, buf, MAXPATHLEN);
saved_jvm_path[MAXPATHLEN - 1] = '\0';
}
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
@ -3635,9 +3636,6 @@ jint os::init_2(void) {
return JNI_OK;
}
// this is called at the end of vm_initialization
void os::init_3(void) { }
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if (!guard_memory((char*)_polling_page, Bsd::page_size())) {
@ -3958,21 +3956,6 @@ int os::available(int fd, jlong *bytes) {
return 1;
}
int os::socket_available(int fd, jint *pbytes) {
if (fd < 0) {
return OS_OK;
}
int ret;
RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
//%% note ioctl can return 0 when successful, JVM_SocketAvailable
// is expected to return 0 on failure and 1 on success to the jdk.
return (ret == OS_ERR) ? 0 : 1;
}
// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
@ -4133,7 +4116,18 @@ void os::pause() {
}
// Refer to the comments in os_solaris.cpp park-unpark.
// Refer to the comments in os_solaris.cpp park-unpark. The next two
// comment paragraphs are worth repeating here:
//
// Assumption:
// Only one parker can exist on an event, which is why we allocate
// them per-thread. Multiple unparkers can coexist.
//
// _Event serves as a restricted-range semaphore.
// -1 : thread is blocked, i.e. there is a waiter
// 0 : neutral: thread is running or ready,
// could have been signaled after a wait started
// 1 : signaled - thread is running or ready
//
// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
@ -4218,6 +4212,11 @@ static struct timespec* compute_abstime(struct timespec* abstime,
}
void os::PlatformEvent::park() { // AKA "down()"
// Transitions for _Event:
// -1 => -1 : illegal
// 1 => 0 : pass - return immediately
// 0 => -1 : block; then set _Event to 0 before returning
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
// TODO: assert that _Assoc != NULL or _Assoc == Self
@ -4255,6 +4254,11 @@ void os::PlatformEvent::park() { // AKA "down()"
}
int os::PlatformEvent::park(jlong millis) {
// Transitions for _Event:
// -1 => -1 : illegal
// 1 => 0 : pass - return immediately
// 0 => -1 : block; then set _Event to 0 before returning
guarantee(_nParked == 0, "invariant");
int v;
@ -4318,11 +4322,11 @@ int os::PlatformEvent::park(jlong millis) {
void os::PlatformEvent::unpark() {
// Transitions for _Event:
// 0 :=> 1
// 1 :=> 1
// -1 :=> either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1.
// 0 => 1 : just return
// 1 => 1 : just return
// -1 => either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means
@ -4345,15 +4349,16 @@ void os::PlatformEvent::unpark() {
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
if (AnyWaiters != 0) {
// Note that we signal() *after* dropping the lock for "immortal" Events.
// This is safe and avoids a common class of futile wakeups. In rare
// circumstances this can cause a thread to return prematurely from
// cond_{timed}wait() but the spurious wakeup is benign and the victim
// will simply re-test the condition and re-park itself.
// This provides particular benefit if the underlying platform does not
// provide wait morphing.
status = pthread_cond_signal(_cond);
assert_status(status == 0, status, "cond_signal");
}
// Note that we signal() _after dropping the lock for "immortal" Events.
// This is safe and avoids a common class of futile wakeups. In rare
// circumstances this can cause a thread to return prematurely from
// cond_{timed}wait() but the spurious wakeup is benign and the victim will
// simply re-test the condition and re-park itself.
}
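The comments introduced throughout these park/unpark hunks describe _Event as a restricted-range semaphore (-1: a waiter is blocked, 0: neutral, 1: signaled) and explain why unpark() signals the condition variable only after dropping the mutex. A minimal pthread sketch of that protocol, assuming the single-parker invariant stated above; this illustrates the documented encoding, not HotSpot's actual PlatformEvent:

```cpp
#include <pthread.h>

// Minimal sketch of the _Event encoding described in the comments above:
//  -1 : a thread is blocked on the event (there is a waiter)
//   0 : neutral
//   1 : signaled (one permit stored)
// Only one thread may ever park() on a given event (the stated invariant).
class SimpleEvent {
  int _event;
  pthread_mutex_t _mutex;
  pthread_cond_t  _cond;

 public:
  SimpleEvent() : _event(0) {
    pthread_mutex_init(&_mutex, NULL);
    pthread_cond_init(&_cond, NULL);
  }

  void park() {               // 1 => 0 : return immediately; 0 => -1 : block
    pthread_mutex_lock(&_mutex);
    if (_event == 1) {
      _event = 0;             // consume the permit
      pthread_mutex_unlock(&_mutex);
      return;
    }
    _event = -1;              // announce the waiter
    while (_event < 0) {      // re-test: tolerates spurious wakeups
      pthread_cond_wait(&_cond, &_mutex);
    }
    _event = 0;               // reset to neutral before returning
    pthread_mutex_unlock(&_mutex);
  }

  void unpark() {             // 0/1 => 1 : just return; -1 => signal waiter
    pthread_mutex_lock(&_mutex);
    int old_event = _event;
    _event = 1;               // at most one permit is ever stored
    pthread_mutex_unlock(&_mutex);
    if (old_event < 0) {
      // Signal *after* dropping the lock, as the new comment explains: any
      // spurious wakeup this allows is benign (park() re-tests _event), and
      // on platforms without wait morphing the woken thread is not forced to
      // block immediately on a mutex we still hold.
      pthread_cond_signal(&_cond);
    }
  }
};
```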

View File

@ -181,91 +181,14 @@ inline int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
return os::send(fd, buf, nBytes, flags);
}
inline int os::timeout(int fd, long timeout) {
julong prevtime,newtime;
struct timeval t;
gettimeofday(&t, NULL);
prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
for(;;) {
struct pollfd pfd;
pfd.fd = fd;
pfd.events = POLLIN | POLLERR;
int res = ::poll(&pfd, 1, timeout);
if (res == OS_ERR && errno == EINTR) {
// On Bsd any value < 0 means "forever"
if(timeout >= 0) {
gettimeofday(&t, NULL);
newtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
timeout -= newtime - prevtime;
if(timeout <= 0)
return OS_OK;
prevtime = newtime;
}
} else
return res;
}
}
inline int os::listen(int fd, int count) {
return ::listen(fd, count);
}
inline int os::connect(int fd, struct sockaddr* him, socklen_t len) {
RESTARTABLE_RETURN_INT(::connect(fd, him, len));
}
inline int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
// At least OpenBSD and FreeBSD can return EINTR from accept.
RESTARTABLE_RETURN_INT(::accept(fd, him, len));
}
inline int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
sockaddr* from, socklen_t* fromlen) {
RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
}
inline int os::sendto(int fd, char* buf, size_t len, uint flags,
struct sockaddr *to, socklen_t tolen) {
RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
}
inline int os::socket_shutdown(int fd, int howto) {
return ::shutdown(fd, howto);
}
inline int os::bind(int fd, struct sockaddr* him, socklen_t len) {
return ::bind(fd, him, len);
}
inline int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
return ::getsockname(fd, him, len);
}
inline int os::get_host_name(char* name, int namelen) {
return ::gethostname(name, namelen);
}
inline struct hostent* os::get_host_by_name(char* name) {
return ::gethostbyname(name);
}
inline int os::get_sock_opt(int fd, int level, int optname,
char *optval, socklen_t* optlen) {
return ::getsockopt(fd, level, optname, optval, optlen);
}
inline int os::set_sock_opt(int fd, int level, int optname,
const char* optval, socklen_t optlen) {
return ::setsockopt(fd, level, optname, optval, optlen);
}
inline bool os::supports_monotonic_clock() {
#ifdef __APPLE__
return true;

View File

@ -506,6 +506,7 @@ static void cleanup_sharedmem_resources(const char* dirname) {
if (!is_directory_secure(dirname)) {
// the directory is not a secure directory
os::closedir(dirp);
return;
}
@ -872,6 +873,9 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
//
if (!is_directory_secure(dirname)) {
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
if (luser != user) {
FREE_C_HEAP_ARRAY(char, luser, mtInternal);
}
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Process not found");
}

View File

@ -163,35 +163,6 @@ static pthread_mutex_t dl_mutex;
// Declarations
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
#ifdef JAVASE_EMBEDDED
class MemNotifyThread: public Thread {
friend class VMStructs;
public:
virtual void run();
private:
static MemNotifyThread* _memnotify_thread;
int _fd;
public:
// Constructor
MemNotifyThread(int fd);
// Tester
bool is_memnotify_thread() const { return true; }
// Printing
char* name() const { return (char*)"Linux MemNotify Thread"; }
// Returns the single instance of the MemNotifyThread
static MemNotifyThread* memnotify_thread() { return _memnotify_thread; }
// Create and start the single instance of MemNotifyThread
static void start();
};
#endif // JAVASE_EMBEDDED
// utility functions
static int SR_initialize();
@ -384,7 +355,10 @@ void os::init_system_properties_values() {
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /libjvm.so.
}
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /{client|server|hotspot}.
@ -1223,7 +1197,7 @@ void os::Linux::capture_initial_stack(size_t max_size) {
i = 0;
if (s) {
// Skip blank chars
do s++; while (isspace(*s));
do { s++; } while (s && isspace(*s));
#define _UFM UINTX_FORMAT
#define _DFM INTX_FORMAT
@ -2372,6 +2346,9 @@ void os::jvm_path(char *buf, jint buflen) {
// Check the current module name "libjvm.so".
p = strrchr(buf, '/');
if (p == NULL) {
return;
}
assert(strstr(p, "/libjvm") == p, "invalid library name");
rp = realpath(java_home_var, buf);
@ -2405,6 +2382,7 @@ void os::jvm_path(char *buf, jint buflen) {
}
strncpy(saved_jvm_path, buf, MAXPATHLEN);
saved_jvm_path[MAXPATHLEN - 1] = '\0';
}
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
@ -4866,17 +4844,6 @@ jint os::init_2(void) {
return JNI_OK;
}
// this is called at the end of vm_initialization
void os::init_3(void) {
#ifdef JAVASE_EMBEDDED
// Start the MemNotifyThread
if (LowMemoryProtection) {
MemNotifyThread::start();
}
return;
#endif
}
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if (!guard_memory((char*)_polling_page, Linux::page_size())) {
@ -5103,9 +5070,38 @@ int os::open(const char *path, int oflag, int mode) {
errno = ENAMETOOLONG;
return -1;
}
int fd;
fd = ::open64(path, oflag, mode);
// All file descriptors that are opened in the Java process and not
// specifically destined for a subprocess should have the close-on-exec
// flag set. If we don't set it, then careless 3rd party native code
// might fork and exec without closing all appropriate file descriptors
// (e.g. as we do in closeDescriptors in UNIXProcess.c), and this in
// turn might:
//
// - cause end-of-file to fail to be detected on some file
// descriptors, resulting in mysterious hangs, or
//
// - might cause an fopen in the subprocess to fail on a system
// suffering from bug 1085341.
//
// (Yes, the default setting of the close-on-exec flag is a Unix
// design flaw)
//
// See:
// 1085341: 32-bit stdio routines should support file descriptors >255
// 4843136: (process) pipe file descriptor from Runtime.exec not being closed
// 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
//
// Modern Linux kernels (after 2.6.23 2007) support O_CLOEXEC with open().
// O_CLOEXEC is preferable to using FD_CLOEXEC on an open file descriptor
// because it saves a system call and removes a small window where the flag
// is unset. On ancient Linux kernels the O_CLOEXEC flag will be ignored
// and we fall back to using FD_CLOEXEC (see below).
#ifdef O_CLOEXEC
oflag |= O_CLOEXEC;
#endif
int fd = ::open64(path, oflag, mode);
if (fd == -1) return -1;
//If the open succeeded, the file might still be a directory
@ -5126,32 +5122,17 @@ int os::open(const char *path, int oflag, int mode) {
}
}
// All file descriptors that are opened in the JVM and not
// specifically destined for a subprocess should have the
// close-on-exec flag set. If we don't set it, then careless 3rd
// party native code might fork and exec without closing all
// appropriate file descriptors (e.g. as we do in closeDescriptors in
// UNIXProcess.c), and this in turn might:
//
// - cause end-of-file to fail to be detected on some file
// descriptors, resulting in mysterious hangs, or
//
// - might cause an fopen in the subprocess to fail on a system
// suffering from bug 1085341.
//
// (Yes, the default setting of the close-on-exec flag is a Unix
// design flaw)
//
// See:
// 1085341: 32-bit stdio routines should support file descriptors >255
// 4843136: (process) pipe file descriptor from Runtime.exec not being closed
// 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
//
#ifdef FD_CLOEXEC
{
// Validate that the use of the O_CLOEXEC flag on open above worked.
// With recent kernels, we will perform this check exactly once.
static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;
if (!O_CLOEXEC_is_known_to_work) {
int flags = ::fcntl(fd, F_GETFD);
if (flags != -1) {
::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
if ((flags & FD_CLOEXEC) != 0)
O_CLOEXEC_is_known_to_work = 1;
else
::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
}
}
#endif
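The relocated comment block explains the intent: every descriptor the VM opens should be close-on-exec, the O_CLOEXEC open() flag sets it atomically on kernels newer than 2.6.23, and the FD_CLOEXEC fcntl() path remains as a fallback for old kernels that silently ignore the flag. A small sketch of the same pattern using plain POSIX calls (not the HotSpot function, and without the one-time validation caching shown above):

```cpp
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>

// Open a file with close-on-exec set: prefer the atomic O_CLOEXEC path, then
// verify with fcntl() and fall back to FD_CLOEXEC where O_CLOEXEC was
// ignored. Error handling is reduced to returning -1.
int open_cloexec(const char* path, int oflag, mode_t mode) {
#ifdef O_CLOEXEC
  oflag |= O_CLOEXEC;         // no window where the fd can leak to a child
#endif
  int fd = ::open(path, oflag, mode);
  if (fd == -1) return -1;

#ifdef FD_CLOEXEC
  int flags = ::fcntl(fd, F_GETFD);
  if (flags != -1 && (flags & FD_CLOEXEC) == 0) {
    ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);   // old-kernel fallback
  }
#endif
  return fd;
}
```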
@ -5211,15 +5192,6 @@ int os::available(int fd, jlong *bytes) {
return 1;
}
int os::socket_available(int fd, jint *pbytes) {
// Linux doc says EINTR not returned, unlike Solaris
int ret = ::ioctl(fd, FIONREAD, pbytes);
//%% note ioctl can return 0 when successful, JVM_SocketAvailable
// is expected to return 0 on failure and 1 on success to the jdk.
return (ret < 0) ? 0 : 1;
}
// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
@ -5349,7 +5321,7 @@ static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
if (s == NULL) return -1;
// Skip blank chars
do s++; while (isspace(*s));
do { s++; } while (s && isspace(*s));
count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
&cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
@ -5410,7 +5382,18 @@ void os::pause() {
}
// Refer to the comments in os_solaris.cpp park-unpark.
// Refer to the comments in os_solaris.cpp park-unpark. The next two
// comment paragraphs are worth repeating here:
//
// Assumption:
// Only one parker can exist on an event, which is why we allocate
// them per-thread. Multiple unparkers can coexist.
//
// _Event serves as a restricted-range semaphore.
// -1 : thread is blocked, i.e. there is a waiter
// 0 : neutral: thread is running or ready,
// could have been signaled after a wait started
// 1 : signaled - thread is running or ready
//
// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
@ -5509,6 +5492,11 @@ static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
}
void os::PlatformEvent::park() { // AKA "down()"
// Transitions for _Event:
// -1 => -1 : illegal
// 1 => 0 : pass - return immediately
// 0 => -1 : block; then set _Event to 0 before returning
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
// TODO: assert that _Assoc != NULL or _Assoc == Self
@ -5546,6 +5534,11 @@ void os::PlatformEvent::park() { // AKA "down()"
}
int os::PlatformEvent::park(jlong millis) {
// Transitions for _Event:
// -1 => -1 : illegal
// 1 => 0 : pass - return immediately
// 0 => -1 : block; then set _Event to 0 before returning
guarantee(_nParked == 0, "invariant");
int v;
@ -5609,11 +5602,11 @@ int os::PlatformEvent::park(jlong millis) {
void os::PlatformEvent::unpark() {
// Transitions for _Event:
// 0 :=> 1
// 1 :=> 1
// -1 :=> either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1.
// 0 => 1 : just return
// 1 => 1 : just return
// -1 => either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means
@ -5636,15 +5629,16 @@ void os::PlatformEvent::unpark() {
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
if (AnyWaiters != 0) {
// Note that we signal() *after* dropping the lock for "immortal" Events.
// This is safe and avoids a common class of futile wakeups. In rare
// circumstances this can cause a thread to return prematurely from
// cond_{timed}wait() but the spurious wakeup is benign and the victim
// will simply re-test the condition and re-park itself.
// This provides particular benefit if the underlying platform does not
// provide wait morphing.
status = pthread_cond_signal(_cond);
assert_status(status == 0, status, "cond_signal");
}
// Note that we signal() _after dropping the lock for "immortal" Events.
// This is safe and avoids a common class of futile wakeups. In rare
// circumstances this can cause a thread to return prematurely from
// cond_{timed}wait() but the spurious wakeup is benign and the victim will
// simply re-test the condition and re-park itself.
}
@ -6006,82 +6000,6 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
return strlen(buffer);
}
#ifdef JAVASE_EMBEDDED
//
// A thread to watch the '/dev/mem_notify' device, which will tell us when the OS is running low on memory.
//
MemNotifyThread* MemNotifyThread::_memnotify_thread = NULL;
// ctor
//
MemNotifyThread::MemNotifyThread(int fd): Thread() {
assert(memnotify_thread() == NULL, "we can only allocate one MemNotifyThread");
_fd = fd;
if (os::create_thread(this, os::os_thread)) {
_memnotify_thread = this;
os::set_priority(this, NearMaxPriority);
os::start_thread(this);
}
}
// Where all the work gets done
//
void MemNotifyThread::run() {
assert(this == memnotify_thread(), "expected the singleton MemNotifyThread");
// Set up the select arguments
fd_set rfds;
if (_fd != -1) {
FD_ZERO(&rfds);
FD_SET(_fd, &rfds);
}
// Now wait for the mem_notify device to wake up
while (1) {
// Wait for the mem_notify device to signal us..
int rc = select(_fd+1, _fd != -1 ? &rfds : NULL, NULL, NULL, NULL);
if (rc == -1) {
perror("select!\n");
break;
} else if (rc) {
//ssize_t free_before = os::available_memory();
//tty->print ("Notified: Free: %dK \n",os::available_memory()/1024);
// The kernel is telling us there is not much memory left...
// try to do something about that
// If we are not already in a GC, try one.
if (!Universe::heap()->is_gc_active()) {
Universe::heap()->collect(GCCause::_allocation_failure);
//ssize_t free_after = os::available_memory();
//tty->print ("Post-Notify: Free: %dK\n",free_after/1024);
//tty->print ("GC freed: %dK\n", (free_after - free_before)/1024);
}
// We might want to do something like the following if we find the GC's are not helping...
// Universe::heap()->size_policy()->set_gc_time_limit_exceeded(true);
}
}
}
// See if the /dev/mem_notify device exists, and if so, start a thread to monitor it.
//
void MemNotifyThread::start() {
int fd;
fd = open("/dev/mem_notify", O_RDONLY, 0);
if (fd < 0) {
return;
}
if (memnotify_thread() == NULL) {
new MemNotifyThread(fd);
}
}
#endif // JAVASE_EMBEDDED
/////////////// Unit tests ///////////////
#ifndef PRODUCT

View File

@ -173,92 +173,14 @@ inline int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
return os::send(fd, buf, nBytes, flags);
}
inline int os::timeout(int fd, long timeout) {
julong prevtime,newtime;
struct timeval t;
gettimeofday(&t, NULL);
prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
for(;;) {
struct pollfd pfd;
pfd.fd = fd;
pfd.events = POLLIN | POLLERR;
int res = ::poll(&pfd, 1, timeout);
if (res == OS_ERR && errno == EINTR) {
// On Linux any value < 0 means "forever"
if(timeout >= 0) {
gettimeofday(&t, NULL);
newtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
timeout -= newtime - prevtime;
if(timeout <= 0)
return OS_OK;
prevtime = newtime;
}
} else
return res;
}
}
inline int os::listen(int fd, int count) {
return ::listen(fd, count);
}
inline int os::connect(int fd, struct sockaddr* him, socklen_t len) {
RESTARTABLE_RETURN_INT(::connect(fd, him, len));
}
inline int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
// Linux doc says this can't return EINTR, unlike accept() on Solaris.
// But see attachListener_linux.cpp, LinuxAttachListener::dequeue().
return (int)::accept(fd, him, len);
}
inline int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
sockaddr* from, socklen_t* fromlen) {
RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
}
inline int os::sendto(int fd, char* buf, size_t len, uint flags,
struct sockaddr* to, socklen_t tolen) {
RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
}
inline int os::socket_shutdown(int fd, int howto) {
return ::shutdown(fd, howto);
}
inline int os::bind(int fd, struct sockaddr* him, socklen_t len) {
return ::bind(fd, him, len);
}
inline int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
return ::getsockname(fd, him, len);
}
inline int os::get_host_name(char* name, int namelen) {
return ::gethostname(name, namelen);
}
inline struct hostent* os::get_host_by_name(char* name) {
return ::gethostbyname(name);
}
inline int os::get_sock_opt(int fd, int level, int optname,
char* optval, socklen_t* optlen) {
return ::getsockopt(fd, level, optname, optval, optlen);
}
inline int os::set_sock_opt(int fd, int level, int optname,
const char* optval, socklen_t optlen) {
return ::setsockopt(fd, level, optname, optval, optlen);
}
inline bool os::supports_monotonic_clock() {
return Linux::_clock_gettime != NULL;
}

View File

@ -506,6 +506,7 @@ static void cleanup_sharedmem_resources(const char* dirname) {
if (!is_directory_secure(dirname)) {
// the directory is not a secure directory
os::closedir(dirp);
return;
}
@ -872,6 +873,9 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
//
if (!is_directory_secure(dirname)) {
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
if (luser != user) {
FREE_C_HEAP_ARRAY(char, luser, mtInternal);
}
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Process not found");
}

View File

@ -663,7 +663,10 @@ const char* os::Posix::get_signal_name(int sig, char* out, size_t outlen) {
}
}
jio_snprintf(out, outlen, ret);
if (out && outlen > 0) {
strncpy(out, ret, outlen);
out[outlen - 1] = '\0';
}
return out;
}
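This change and the strcpy-to-strncpy conversions for saved_jvm_path in the os_*.cpp files follow one rule: copies into fixed-size buffers must be bounded and explicitly NUL-terminated, and a string that may contain '%' must not be passed where a format string is expected, as the old jio_snprintf(out, outlen, ret) call did. A small self-contained illustration (the buffer size and input below are made up for the example):

```cpp
#include <cstdio>
#include <cstring>

// Copy an arbitrary signal-name style string into a caller-provided buffer.
// Bounded copy plus explicit terminator: strncpy() does not NUL-terminate
// when the source fills the buffer.
static const char* copy_name(const char* name, char* out, size_t outlen) {
  if (out == NULL || outlen == 0) return out;
  strncpy(out, name, outlen);
  out[outlen - 1] = '\0';
  return out;
}

int main() {
  char buf[8];
  // Safe even though the input contains '%': it is treated as data, unlike
  // snprintf(buf, sizeof(buf), name), where it would act as a format string.
  printf("%s\n", copy_name("SIG%100n", buf, sizeof(buf)));
  return 0;
}
```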

View File

@ -2221,6 +2221,7 @@ void os::jvm_path(char *buf, jint buflen) {
}
strncpy(saved_jvm_path, buf, MAXPATHLEN);
saved_jvm_path[MAXPATHLEN - 1] = '\0';
}
@ -4761,10 +4762,6 @@ jint os::init_2(void) {
return JNI_OK;
}
void os::init_3(void) {
return;
}
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0) {
@ -5372,31 +5369,32 @@ extern "C" {
// to immediately return 0 your code should still work,
// albeit degenerating to a spin loop.
//
// An interesting optimization for park() is to use a trylock()
// to attempt to acquire the mutex. If the trylock() fails
// then we know that a concurrent unpark() operation is in-progress.
// in that case the park() code could simply set _count to 0
// and return immediately. The subsequent park() operation *might*
// return immediately. That's harmless as the caller of park() is
// expected to loop. By using trylock() we will have avoided a
// avoided a context switch caused by contention on the per-thread mutex.
// In a sense, park()-unpark() just provides more polite spinning
// and polling with the key difference over naive spinning being
// that a parked thread needs to be explicitly unparked() in order
// to wake up and to poll the underlying condition.
//
// TODO-FIXME:
// 1. Reconcile Doug's JSR166 j.u.c park-unpark with the
// objectmonitor implementation.
// 2. Collapse the JSR166 parker event, and the
// objectmonitor ParkEvent into a single "Event" construct.
// 3. In park() and unpark() add:
// assert (Thread::current() == AssociatedWith).
// 4. add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
// 1-out-of-N park() operations will return immediately.
// Assumption:
// Only one parker can exist on an event, which is why we allocate
// them per-thread. Multiple unparkers can coexist.
//
// _Event transitions in park()
// -1 => -1 : illegal
// 1 => 0 : pass - return immediately
// 0 => -1 : block
// 0 => -1 : block; then set _Event to 0 before returning
//
// _Event transitions in unpark()
// 0 => 1 : just return
// 1 => 1 : just return
// -1 => either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1.
//
// _Event serves as a restricted-range semaphore.
// -1 : thread is blocked, i.e. there is a waiter
// 0 : neutral: thread is running or ready,
// could have been signaled after a wait started
// 1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
@ -5456,6 +5454,11 @@ static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
}
void os::PlatformEvent::park() { // AKA: down()
// Transitions for _Event:
// -1 => -1 : illegal
// 1 => 0 : pass - return immediately
// 0 => -1 : block; then set _Event to 0 before returning
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
assert(_nParked == 0, "invariant");
@ -5497,6 +5500,11 @@ void os::PlatformEvent::park() { // AKA: down()
}
int os::PlatformEvent::park(jlong millis) {
// Transitions for _Event:
// -1 => -1 : illegal
// 1 => 0 : pass - return immediately
// 0 => -1 : block; then set _Event to 0 before returning
guarantee(_nParked == 0, "invariant");
int v;
for (;;) {
@ -5542,11 +5550,11 @@ int os::PlatformEvent::park(jlong millis) {
void os::PlatformEvent::unpark() {
// Transitions for _Event:
// 0 :=> 1
// 1 :=> 1
// -1 :=> either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1.
// 0 => 1 : just return
// 1 => 1 : just return
// -1 => either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means
@ -5566,8 +5574,13 @@ void os::PlatformEvent::unpark() {
assert_status(status == 0, status, "mutex_unlock");
guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
if (AnyWaiters != 0) {
// We intentional signal *after* dropping the lock
// to avoid a common class of futile wakeups.
// Note that we signal() *after* dropping the lock for "immortal" Events.
// This is safe and avoids a common class of futile wakeups. In rare
// circumstances this can cause a thread to return prematurely from
// cond_{timed}wait() but the spurious wakeup is benign and the victim
// will simply re-test the condition and re-park itself.
// This provides particular benefit if the underlying platform does not
// provide wait morphing.
status = os::Solaris::cond_signal(_cond);
assert_status(status == 0, status, "cond_signal");
}
@ -5912,37 +5925,6 @@ int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
// a poll() is done with timeout == -1, in which case we repeat with this
// "wait forever" value.
int os::timeout(int fd, long timeout) {
int res;
struct timeval t;
julong prevtime, newtime;
static const char* aNull = 0;
struct pollfd pfd;
pfd.fd = fd;
pfd.events = POLLIN;
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
gettimeofday(&t, &aNull);
prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
for (;;) {
res = ::poll(&pfd, 1, timeout);
if (res == OS_ERR && errno == EINTR) {
if (timeout != -1) {
gettimeofday(&t, &aNull);
newtime = ((julong)t.tv_sec * 1000) + t.tv_usec /1000;
timeout -= newtime - prevtime;
if (timeout <= 0) {
return OS_OK;
}
prevtime = newtime;
}
} else return res;
}
}
int os::connect(int fd, struct sockaddr *him, socklen_t len) {
int _result;
_result = ::connect(fd, him, len);
@ -5982,46 +5964,6 @@ int os::connect(int fd, struct sockaddr *him, socklen_t len) {
return _result;
}
int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
if (fd < 0) {
return OS_ERR;
}
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
RESTARTABLE_RETURN_INT((int)::accept(fd, him, len));
}
int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
sockaddr* from, socklen_t* fromlen) {
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
}
int os::sendto(int fd, char* buf, size_t len, uint flags,
struct sockaddr* to, socklen_t tolen) {
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
}
int os::socket_available(int fd, jint *pbytes) {
if (fd < 0) {
return OS_OK;
}
int ret;
RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
// note: ioctl can return 0 when successful, JVM_SocketAvailable
// is expected to return 0 on failure and 1 on success to the jdk.
return (ret == OS_ERR) ? 0 : 1;
}
int os::bind(int fd, struct sockaddr* him, socklen_t len) {
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
return ::bind(fd, him, len);
}
// Get the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {

View File

@ -120,38 +120,10 @@ inline int os::socket(int domain, int type, int protocol) {
return ::socket(domain, type, protocol);
}
inline int os::listen(int fd, int count) {
if (fd < 0) return OS_ERR;
return ::listen(fd, count);
}
inline int os::socket_shutdown(int fd, int howto){
return ::shutdown(fd, howto);
}
inline int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len){
return ::getsockname(fd, him, len);
}
inline int os::get_host_name(char* name, int namelen){
return ::gethostname(name, namelen);
}
inline struct hostent* os::get_host_by_name(char* name) {
return ::gethostbyname(name);
}
inline int os::get_sock_opt(int fd, int level, int optname,
char* optval, socklen_t* optlen) {
return ::getsockopt(fd, level, optname, optval, optlen);
}
inline int os::set_sock_opt(int fd, int level, int optname,
const char *optval, socklen_t optlen) {
return ::setsockopt(fd, level, optname, optval, optlen);
}
inline bool os::supports_monotonic_clock() {
// javaTimeNanos() is monotonic on Solaris, see getTimeNanos() comments
return true;

View File

@ -545,6 +545,7 @@ static void cleanup_sharedmem_resources(const char* dirname) {
if (!is_directory_secure(dirname)) {
// the directory is not a secure directory
os::closedir(dirp);
return;
}
@ -890,6 +891,9 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
//
if (!is_directory_secure(dirname)) {
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
if (luser != user) {
FREE_C_HEAP_ARRAY(char, luser, mtInternal);
}
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Process not found");
}

View File

@ -30,6 +30,7 @@
#include <windows.h>
#include <signal.h> // SIGBREAK
#include <stdio.h>
// The AttachListener thread services a queue of operations. It blocks in the dequeue
// function until an operation is enqueued. A client enqueues an operation by creating
@ -269,6 +270,7 @@ HANDLE Win32AttachOperation::open_pipe() {
if (hPipe != INVALID_HANDLE_VALUE) {
// shouldn't happen as there is a pipe created per operation
if (::GetLastError() == ERROR_PIPE_BUSY) {
::CloseHandle(hPipe);
return INVALID_HANDLE_VALUE;
}
}
@ -313,7 +315,8 @@ void Win32AttachOperation::complete(jint result, bufferedStream* result_stream)
BOOL fSuccess;
char msg[32];
sprintf(msg, "%d\n", result);
_snprintf(msg, sizeof(msg), "%d\n", result);
msg[sizeof(msg) - 1] = '\0';
fSuccess = write_pipe(hPipe, msg, (int)strlen(msg));
if (fSuccess) {

View File

@ -96,7 +96,7 @@
#include <vdmdbg.h>
// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
#define ALL_64_BITS CONST64(-1)
// For DLL loading/load error detection
// Values of PE COFF
@ -211,6 +211,7 @@ void os::init_system_properties_values() {
}
strcpy(home_path, home_dir);
Arguments::set_java_home(home_path);
FREE_C_HEAP_ARRAY(char, home_path, mtInternal);
dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
mtInternal);
@ -220,6 +221,7 @@ void os::init_system_properties_values() {
strcpy(dll_path, home_dir);
strcat(dll_path, bin);
Arguments::set_dll_dir(dll_path);
FREE_C_HEAP_ARRAY(char, dll_path, mtInternal);
if (!set_boot_path('\\', ';')) {
return;
@ -297,6 +299,9 @@ void os::init_system_properties_values() {
char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal);
sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR);
Arguments::set_endorsed_dirs(buf);
// (Arguments::set_endorsed_dirs() calls SystemProperty::set_value(), which
// duplicates the input.)
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
#undef ENDORSED_DIR
}
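The added FREE_C_HEAP_ARRAY calls depend on the convention the new comment spells out: Arguments::set_endorsed_dirs() and the other setters go through SystemProperty::set_value(), which duplicates its input, so the temporary buffers built here remain the caller's to free and were previously leaked. A tiny sketch of that ownership convention with a hypothetical Property type (names are illustrative only):

```cpp
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Hypothetical property holder mirroring the convention relied on above:
// set_value() duplicates its input, so the caller keeps ownership of the
// buffer it built and must free it.
struct Property {
  char* value;
  Property() : value(NULL) {}
  ~Property() { free(value); }
  void set_value(const char* v) {
    free(value);
    value = strdup(v);        // the property owns its own copy
  }
};

int main() {
  Property endorsed_dirs;
  char* buf = (char*)malloc(64);                      // caller-built temporary
  snprintf(buf, 64, "%s%s", "/java/home", "/lib/endorsed");
  endorsed_dirs.set_value(buf);
  free(buf);   // safe, since set_value() took a copy; omitting it would leak
  printf("%s\n", endorsed_dirs.value);
  return 0;
}
```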
@ -436,9 +441,9 @@ static unsigned __stdcall java_start(Thread* thread) {
}
// Diagnostic code to investigate JDK-6573254
int res = 90115; // non-java thread
int res = 50115; // non-java thread
if (thread->is_Java_thread()) {
res = 60115; // java thread
res = 40115; // java thread
}
// Install a win32 structured exception handler around every thread created
@ -1610,96 +1615,123 @@ void os::print_os_info(outputStream* st) {
void os::win32::print_windows_version(outputStream* st) {
OSVERSIONINFOEX osvi;
SYSTEM_INFO si;
VS_FIXEDFILEINFO *file_info;
TCHAR kernel32_path[MAX_PATH];
UINT len, ret;
// Use the GetVersionEx information to see if we're on a server or
// workstation edition of Windows. Starting with Windows 8.1 we can't
// trust the OS version information returned by this API.
ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
st->print_cr("N/A");
st->print_cr("Call to GetVersionEx failed");
return;
}
bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
// Get the full path to \Windows\System32\kernel32.dll and use that for
// determining what version of Windows we're running on.
len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
ret = GetSystemDirectory(kernel32_path, len);
if (ret == 0 || ret > len) {
st->print_cr("Call to GetSystemDirectory failed");
return;
}
strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
if (version_size == 0) {
st->print_cr("Call to GetFileVersionInfoSize failed");
return;
}
int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;
LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
if (version_info == NULL) {
st->print_cr("Failed to allocate version_info");
return;
}
ZeroMemory(&si, sizeof(SYSTEM_INFO));
if (os_vers >= 5002) {
// Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
// find out whether we are running on 64 bit processor or not.
if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) {
os::Kernel32Dll::GetNativeSystemInfo(&si);
if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
os::free(version_info);
st->print_cr("Call to GetFileVersionInfo failed");
return;
}
if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
os::free(version_info);
st->print_cr("Call to VerQueryValue failed");
return;
}
int major_version = HIWORD(file_info->dwProductVersionMS);
int minor_version = LOWORD(file_info->dwProductVersionMS);
int build_number = HIWORD(file_info->dwProductVersionLS);
int build_minor = LOWORD(file_info->dwProductVersionLS);
int os_vers = major_version * 1000 + minor_version;
os::free(version_info);
st->print(" Windows ");
switch (os_vers) {
case 6000:
if (is_workstation) {
st->print("Vista");
} else {
GetSystemInfo(&si);
st->print("Server 2008");
}
break;
case 6001:
if (is_workstation) {
st->print("7");
} else {
st->print("Server 2008 R2");
}
break;
case 6002:
if (is_workstation) {
st->print("8");
} else {
st->print("Server 2012");
}
break;
case 6003:
if (is_workstation) {
st->print("8.1");
} else {
st->print("Server 2012 R2");
}
break;
case 6004:
if (is_workstation) {
st->print("10");
} else {
// The server version name of Windows 10 is not known at this time
st->print("%d.%d", major_version, minor_version);
}
break;
default:
// Unrecognized windows, print out its major and minor versions
st->print("%d.%d", major_version, minor_version);
break;
}
if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
switch (os_vers) {
case 3051: st->print(" Windows NT 3.51"); break;
case 4000: st->print(" Windows NT 4.0"); break;
case 5000: st->print(" Windows 2000"); break;
case 5001: st->print(" Windows XP"); break;
case 5002:
if (osvi.wProductType == VER_NT_WORKSTATION &&
si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
st->print(" Windows XP x64 Edition");
} else {
st->print(" Windows Server 2003 family");
}
break;
case 6000:
if (osvi.wProductType == VER_NT_WORKSTATION) {
st->print(" Windows Vista");
} else {
st->print(" Windows Server 2008");
}
break;
case 6001:
if (osvi.wProductType == VER_NT_WORKSTATION) {
st->print(" Windows 7");
} else {
st->print(" Windows Server 2008 R2");
}
break;
case 6002:
if (osvi.wProductType == VER_NT_WORKSTATION) {
st->print(" Windows 8");
} else {
st->print(" Windows Server 2012");
}
break;
case 6003:
if (osvi.wProductType == VER_NT_WORKSTATION) {
st->print(" Windows 8.1");
} else {
st->print(" Windows Server 2012 R2");
}
break;
default: // future os
// Unrecognized windows, print out its major and minor versions
st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
}
} else {
switch (os_vers) {
case 4000: st->print(" Windows 95"); break;
case 4010: st->print(" Windows 98"); break;
case 4090: st->print(" Windows Me"); break;
default: // future windows, print out its major and minor versions
st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
}
}
if (os_vers >= 6000 && si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
// Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
// find out whether we are running on 64 bit processor or not
SYSTEM_INFO si;
ZeroMemory(&si, sizeof(SYSTEM_INFO));
os::Kernel32Dll::GetNativeSystemInfo(&si);
if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
st->print(" , 64 bit");
}
st->print(" Build %d", osvi.dwBuildNumber);
st->print(" %s", osvi.szCSDVersion); // service pack
st->print(" Build %d", build_number);
st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
st->cr();
}
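As the new comment notes, GetVersionEx() can no longer be trusted starting with Windows 8.1, so the rewritten code derives the OS version from the file version resource of \Windows\System32\kernel32.dll; that is also why the Windows makefile change above adds version.lib. A condensed sketch of just that query (error handling trimmed, return encoding matches the major*1000 + minor scheme used above):

```cpp
#include <windows.h>
#include <cstdlib>
#include <cstring>
#pragma comment(lib, "version.lib")   // same dependency the makefile change adds

// Derive major*1000 + minor from kernel32.dll's product version, as the
// rewritten print_windows_version() does. Returns 0 on failure.
static int windows_version_from_kernel32() {
  char path[MAX_PATH];
  UINT cap = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  UINT len = GetSystemDirectoryA(path, cap);
  if (len == 0 || len >= cap) return 0;
  strcat(path, "\\kernel32.dll");

  DWORD size = GetFileVersionInfoSizeA(path, NULL);
  if (size == 0) return 0;
  void* data = malloc(size);
  if (data == NULL) return 0;

  int version = 0;
  VS_FIXEDFILEINFO* info = NULL;
  UINT info_len = 0;
  if (GetFileVersionInfoA(path, 0, size, data) &&
      VerQueryValueA(data, "\\", (LPVOID*)&info, &info_len)) {
    version = HIWORD(info->dwProductVersionMS) * 1000 +
              LOWORD(info->dwProductVersionMS);       // e.g. 6003 = Windows 8.1
  }
  free(data);
  return version;
}
```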
@ -1807,6 +1839,7 @@ void os::jvm_path(char *buf, jint buflen) {
GetModuleFileName(vm_lib_handle, buf, buflen);
}
strncpy(saved_jvm_path, buf, MAX_PATH);
saved_jvm_path[MAX_PATH - 1] = '\0';
}
@ -3719,8 +3752,12 @@ HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
// search system directory
if ((size = GetSystemDirectory(path, pathLen)) > 0) {
strcat(path, "\\");
strcat(path, name);
if (size >= pathLen) {
return NULL; // truncated
}
if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
return NULL; // truncated
}
if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
return result;
}
@ -3728,8 +3765,12 @@ HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
// try Windows directory
if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
strcat(path, "\\");
strcat(path, name);
if (size >= pathLen) {
return NULL; // truncated
}
if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
return NULL; // truncated
}
if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
return result;
}
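Both DLL-path fixes replace unbounded strcat() calls with a length check plus jio_snprintf(), whose -1 return value the new code uses to detect truncation. A sketch of the same guard written against C99 snprintf semantics instead, where truncation shows up as a would-be length that exceeds the remaining space (helper name and buffer sizes are illustrative):

```cpp
#include <cstdio>
#include <cstring>

// Append "\\<name>" to a directory already stored in path[0..size), failing
// instead of overflowing. C99 snprintf returns the would-be length, so
// truncation is ret >= remaining space (jio_snprintf returns -1 instead).
static bool append_dll_name(char* path, size_t path_len, size_t size,
                            const char* name) {
  if (size >= path_len) return false;                 // already full
  int ret = snprintf(path + size, path_len - size, "\\%s", name);
  return ret >= 0 && (size_t)ret < path_len - size;   // false on truncation
}

int main() {
  char path[64] = "C:\\Windows\\System32";
  if (append_dll_name(path, sizeof(path), strlen(path), "kernel32.dll")) {
    printf("%s\n", path);
  }
  return 0;
}
```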
@ -3740,68 +3781,134 @@ HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
return NULL;
}
#define MIN_EXIT_MUTEXES 1
#define MAX_EXIT_MUTEXES 16
#define MAX_EXIT_HANDLES 16
#define EXIT_TIMEOUT 1000 /* 1 sec */
struct ExitMutexes {
DWORD count;
HANDLE handles[MAX_EXIT_MUTEXES];
};
static BOOL CALLBACK init_muts_call(PINIT_ONCE, PVOID ppmuts, PVOID*) {
static ExitMutexes muts;
muts.count = os::processor_count();
if (muts.count < MIN_EXIT_MUTEXES) {
muts.count = MIN_EXIT_MUTEXES;
} else if (muts.count > MAX_EXIT_MUTEXES) {
muts.count = MAX_EXIT_MUTEXES;
}
for (DWORD i = 0; i < muts.count; ++i) {
muts.handles[i] = CreateMutex(NULL, FALSE, NULL);
if (muts.handles[i] == NULL) {
return FALSE;
}
}
*((ExitMutexes**)ppmuts) = &muts;
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
return TRUE;
}
int os::win32::exit_process_or_thread(Ept what, int exit_code) {
if (os::win32::has_exit_bug()) {
static INIT_ONCE init_once_muts = INIT_ONCE_STATIC_INIT;
static ExitMutexes* pmuts;
// Basic approach:
// - Each exiting thread registers its intent to exit and then does so.
// - A thread trying to terminate the process must wait for all
// threads currently exiting to complete their exit.
if (!InitOnceExecuteOnce(&init_once_muts, init_muts_call, &pmuts, NULL)) {
warning("ExitMutex initialization failed in %s: %d\n", __FILE__, __LINE__);
} else if (WaitForMultipleObjects(pmuts->count, pmuts->handles,
(what != EPT_THREAD), // exiting process waits for all mutexes
INFINITE) == WAIT_FAILED) {
warning("ExitMutex acquisition failed in %s: %d\n", __FILE__, __LINE__);
if (os::win32::has_exit_bug()) {
// The array holds handles of the threads that have started exiting by calling
// _endthreadex().
// Should be large enough to avoid blocking the exiting thread due to lack of
// a free slot.
static HANDLE handles[MAX_EXIT_HANDLES];
static int handle_count = 0;
static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
static CRITICAL_SECTION crit_sect;
int i, j;
DWORD res;
HANDLE hproc, hthr;
// The first thread that reached this point, initializes the critical section.
if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
} else {
EnterCriticalSection(&crit_sect);
if (what == EPT_THREAD) {
// Remove from the array those handles of the threads that have completed exiting.
for (i = 0, j = 0; i < handle_count; ++i) {
res = WaitForSingleObject(handles[i], 0 /* don't wait */);
if (res == WAIT_TIMEOUT) {
handles[j++] = handles[i];
} else {
if (res != WAIT_OBJECT_0) {
warning("WaitForSingleObject failed in %s: %d\n", __FILE__, __LINE__);
// Don't keep the handle, if we failed waiting for it.
}
CloseHandle(handles[i]);
}
}
// If there's no free slot in the array of the kept handles, we'll have to
// wait until at least one thread completes exiting.
if ((handle_count = j) == MAX_EXIT_HANDLES) {
res = WaitForMultipleObjects(MAX_EXIT_HANDLES, handles, FALSE, EXIT_TIMEOUT);
if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAX_EXIT_HANDLES)) {
i = (res - WAIT_OBJECT_0);
handle_count = MAX_EXIT_HANDLES - 1;
for (; i < handle_count; ++i) {
handles[i] = handles[i + 1];
}
} else {
warning("WaitForMultipleObjects failed in %s: %d\n", __FILE__, __LINE__);
// Don't keep handles, if we failed waiting for them.
for (i = 0; i < MAX_EXIT_HANDLES; ++i) {
CloseHandle(handles[i]);
}
handle_count = 0;
}
}
// Store a duplicate of the current thread handle in the array of handles.
hproc = GetCurrentProcess();
hthr = GetCurrentThread();
if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
0, FALSE, DUPLICATE_SAME_ACCESS)) {
warning("DuplicateHandle failed in %s: %d\n", __FILE__, __LINE__);
} else {
++handle_count;
}
// The current exiting thread has stored its handle in the array, and now
// should leave the critical section before calling _endthreadex().
} else { // what != EPT_THREAD
if (handle_count > 0) {
// Before ending the process, make sure all the threads that had called
// _endthreadex() completed.
res = WaitForMultipleObjects(handle_count, handles, TRUE, EXIT_TIMEOUT);
if (res == WAIT_FAILED) {
warning("WaitForMultipleObjects failed in %s: %d\n", __FILE__, __LINE__);
}
for (i = 0; i < handle_count; ++i) {
CloseHandle(handles[i]);
}
handle_count = 0;
}
// End the process without leaving the critical section.
// This makes sure no other thread executes exit-related code at the same
// time, thus a race is avoided.
if (what == EPT_PROCESS) {
::exit(exit_code);
} else {
_exit(exit_code);
}
}
LeaveCriticalSection(&crit_sect);
}
}
switch (what) {
case EPT_THREAD:
// We are here if either
// - there's no 'race at exit' bug on this OS release;
// - initialization of the critical section failed (unlikely);
// - the current thread has stored its handle and left the critical section.
if (what == EPT_THREAD) {
_endthreadex((unsigned)exit_code);
break;
case EPT_PROCESS:
} else if (what == EPT_PROCESS) {
::exit(exit_code);
break;
case EPT_PROCESS_DIE:
} else {
_exit(exit_code);
break;
}
// should not reach here
// Should not reach here
return exit_code;
}
#undef MIN_EXIT_MUTEXES
#undef MAX_EXIT_MUTEXES
#undef MAX_EXIT_HANDLES
#undef EXIT_TIMEOUT
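Condensed, self-contained sketch of the workaround implemented above, with invented helper names and a fixed array size: each exiting thread parks a duplicate of its own handle in a shared array under a critical section, and the thread that terminates the process waits on those handles first, so no thread is killed in the middle of the CRT exit sequence.
#include <windows.h>
#include <cstdlib>
static CRITICAL_SECTION g_exit_lock;   // initialize once with InitializeCriticalSection()
static HANDLE g_exiting[16];
static int    g_exiting_count = 0;
// Called by a thread just before it calls _endthreadex().
static void register_exiting_thread() {
  EnterCriticalSection(&g_exit_lock);
  if (g_exiting_count < 16 &&
      DuplicateHandle(GetCurrentProcess(), GetCurrentThread(), GetCurrentProcess(),
                      &g_exiting[g_exiting_count], 0, FALSE, DUPLICATE_SAME_ACCESS)) {
    ++g_exiting_count;                  // keep a handle we can wait on later
  }
  LeaveCriticalSection(&g_exit_lock);
}
// Called by the thread that ends the whole process.
static void exit_process_safely(int code) {
  EnterCriticalSection(&g_exit_lock);
  if (g_exiting_count > 0) {
    // Give threads already inside _endthreadex() a moment to finish.
    WaitForMultipleObjects((DWORD)g_exiting_count, g_exiting, TRUE, 1000);
    for (int i = 0; i < g_exiting_count; ++i) {
      CloseHandle(g_exiting[i]);
    }
    g_exiting_count = 0;
  }
  ::exit(code);                         // exit while still holding the lock, as above
}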
void os::win32::setmode_streams() {
_setmode(_fileno(stdin), _O_BINARY);
@ -4047,10 +4154,6 @@ jint os::init_2(void) {
return JNI_OK;
}
void os::init_3(void) {
return;
}
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
DWORD old_status;
@ -4792,27 +4895,46 @@ bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
// 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
// into a single win32 CreateEvent() handle.
//
// Assumption:
// Only one parker can exist on an event, which is why we allocate
// them per-thread. Multiple unparkers can coexist.
//
// _Event transitions in park()
// -1 => -1 : illegal
// 1 => 0 : pass - return immediately
// 0 => -1 : block
// 0 => -1 : block; then set _Event to 0 before returning
//
// _Event serves as a restricted-range semaphore :
// -1 : thread is blocked
// 0 : neutral - thread is running or ready
// 1 : signaled - thread is running or ready
// _Event transitions in unpark()
// 0 => 1 : just return
// 1 => 1 : just return
// -1 => either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1.
//
// _Event serves as a restricted-range semaphore.
// -1 : thread is blocked, i.e. there is a waiter
// 0 : neutral: thread is running or ready,
// could have been signaled after a wait started
// 1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//
// Another possible encoding of _Event would be
// with explicit "PARKED" and "SIGNALED" bits.
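The transition tables above condense into a few lines of code. A minimal sketch of the protocol, using std::atomic in place of Atomic::cmpxchg and a Win32 auto-reset event (assumed to be created elsewhere with CreateEvent(NULL, FALSE, FALSE, NULL)) as the blocking primitive; this illustrates the state machine only and is not the HotSpot implementation:
#include <windows.h>
#include <atomic>
static std::atomic<int> ev_state(0);   // -1: blocked, 0: neutral, 1: signaled
static HANDLE ev_handle = NULL;        // auto-reset event, created during initialization
void sketch_park() {
  int v = ev_state.fetch_sub(1);       // 1 -> 0 consumes the permit; 0 -> -1 will block
  if (v < 1) {
    WaitForSingleObject(ev_handle, INFINITE);
  }
  ev_state.store(0);                   // always leave the event in the neutral state
}
void sketch_unpark() {
  int v = ev_state.exchange(1);        // publish the permit
  if (v < 0) {
    SetEvent(ev_handle);               // a waiter was blocked: wake it
  }
}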
int os::PlatformEvent::park(jlong Millis) {
// Transitions for _Event:
// -1 => -1 : illegal
// 1 => 0 : pass - return immediately
// 0 => -1 : block; then set _Event to 0 before returning
guarantee(_ParkHandle != NULL , "Invariant");
guarantee(Millis > 0 , "Invariant");
int v;
// CONSIDER: defer assigning a CreateEvent() handle to the Event until
// the initial park() operation.
// Consider: use atomic decrement instead of CAS-loop
int v;
for (;;) {
v = _Event;
if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
@ -4860,9 +4982,15 @@ int os::PlatformEvent::park(jlong Millis) {
}
void os::PlatformEvent::park() {
// Transitions for _Event:
// -1 => -1 : illegal
// 1 => 0 : pass - return immediately
// 0 => -1 : block; then set _Event to 0 before returning
guarantee(_ParkHandle != NULL, "Invariant");
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
// Consider: use atomic decrement instead of CAS-loop
int v;
for (;;) {
v = _Event;
@ -4891,11 +5019,11 @@ void os::PlatformEvent::unpark() {
guarantee(_ParkHandle != NULL, "Invariant");
// Transitions for _Event:
// 0 :=> 1
// 1 :=> 1
// -1 :=> either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1.
// 0 => 1 : just return
// 1 => 1 : just return
// -1 => either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means
@ -5091,39 +5219,14 @@ int os::socket_close(int fd) {
return ::closesocket(fd);
}
int os::socket_available(int fd, jint *pbytes) {
int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes);
return (ret < 0) ? 0 : 1;
}
int os::socket(int domain, int type, int protocol) {
return ::socket(domain, type, protocol);
}
int os::listen(int fd, int count) {
return ::listen(fd, count);
}
int os::connect(int fd, struct sockaddr* him, socklen_t len) {
return ::connect(fd, him, len);
}
int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
return ::accept(fd, him, len);
}
int os::sendto(int fd, char* buf, size_t len, uint flags,
struct sockaddr* to, socklen_t tolen) {
return ::sendto(fd, buf, (int)len, flags, to, tolen);
}
int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags,
sockaddr* from, socklen_t* fromlen) {
return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen);
}
int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
return ::recv(fd, buf, (int)nBytes, flags);
}
@ -5136,45 +5239,6 @@ int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
return ::send(fd, buf, (int)nBytes, flags);
}
int os::timeout(int fd, long timeout) {
fd_set tbl;
struct timeval t;
t.tv_sec = timeout / 1000;
t.tv_usec = (timeout % 1000) * 1000;
tbl.fd_count = 1;
tbl.fd_array[0] = fd;
return ::select(1, &tbl, 0, 0, &t);
}
int os::get_host_name(char* name, int namelen) {
return ::gethostname(name, namelen);
}
int os::socket_shutdown(int fd, int howto) {
return ::shutdown(fd, howto);
}
int os::bind(int fd, struct sockaddr* him, socklen_t len) {
return ::bind(fd, him, len);
}
int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
return ::getsockname(fd, him, len);
}
int os::get_sock_opt(int fd, int level, int optname,
char* optval, socklen_t* optlen) {
return ::getsockopt(fd, level, optname, optval, optlen);
}
int os::set_sock_opt(int fd, int level, int optname,
const char* optval, socklen_t optlen) {
return ::setsockopt(fd, level, optname, optval, optlen);
}
// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
#define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
@ -5367,11 +5431,6 @@ inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,
return ::Module32Next(hSnapshot, lpme);
}
inline BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
return true;
}
inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
::GetNativeSystemInfo(lpSystemInfo);
}
View File
@ -210,7 +210,6 @@ public:
static BOOL Module32First(HANDLE,LPMODULEENTRY32);
static BOOL Module32Next(HANDLE,LPMODULEENTRY32);
static BOOL GetNativeSystemInfoAvailable();
static void GetNativeSystemInfo(LPSYSTEM_INFO);
// NUMA calls
View File
@ -910,7 +910,7 @@ void os::workaround_expand_exec_shield_cs_limit() {
*/
char* hint = (char*) (Linux::initial_thread_stack_bottom() -
((StackYellowPages + StackRedPages + 1) * page_size));
char* codebuf = os::reserve_memory(page_size, hint);
char* codebuf = os::attempt_reserve_memory_at(page_size, hint);
if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) {
return; // No matter, we tried, best effort.
}
View File
@ -33,18 +33,50 @@
#include <sys/systeminfo.h>
#include <kstat.h>
#include <picl.h>
#include <dlfcn.h>
#include <link.h>
extern "C" static int PICL_get_l1_data_cache_line_size_helper(picl_nodehdl_t nodeh, void *result);
extern "C" static int PICL_get_l2_cache_line_size_helper(picl_nodehdl_t nodeh, void *result);
extern "C" static int PICL_visit_cpu_helper(picl_nodehdl_t nodeh, void *result);
// Functions from the library we need (signatures should match those in picl.h)
extern "C" {
typedef int (*picl_initialize_func_t)(void);
typedef int (*picl_shutdown_func_t)(void);
typedef int (*picl_get_root_func_t)(picl_nodehdl_t *nodehandle);
typedef int (*picl_walk_tree_by_class_func_t)(picl_nodehdl_t rooth,
const char *classname, void *c_args,
int (*callback_fn)(picl_nodehdl_t hdl, void *args));
typedef int (*picl_get_prop_by_name_func_t)(picl_nodehdl_t nodeh, const char *nm,
picl_prophdl_t *ph);
typedef int (*picl_get_propval_func_t)(picl_prophdl_t proph, void *valbuf, size_t sz);
typedef int (*picl_get_propinfo_func_t)(picl_prophdl_t proph, picl_propinfo_t *pi);
}
class PICL {
// Pointers to functions in the library
picl_initialize_func_t _picl_initialize;
picl_shutdown_func_t _picl_shutdown;
picl_get_root_func_t _picl_get_root;
picl_walk_tree_by_class_func_t _picl_walk_tree_by_class;
picl_get_prop_by_name_func_t _picl_get_prop_by_name;
picl_get_propval_func_t _picl_get_propval;
picl_get_propinfo_func_t _picl_get_propinfo;
// Handle to the library that is returned by dlopen
void *_dl_handle;
bool open_library();
void close_library();
template<typename FuncType> bool bind(FuncType& func, const char* name);
bool bind_library_functions();
// Get a value of the integer property. The value in the tree can be either 32 or 64 bit
// depending on the platform. The result is converted to int.
static int get_int_property(picl_nodehdl_t nodeh, const char* name, int* result) {
int get_int_property(picl_nodehdl_t nodeh, const char* name, int* result) {
picl_propinfo_t pinfo;
picl_prophdl_t proph;
if (picl_get_prop_by_name(nodeh, name, &proph) != PICL_SUCCESS ||
picl_get_propinfo(proph, &pinfo) != PICL_SUCCESS) {
if (_picl_get_prop_by_name(nodeh, name, &proph) != PICL_SUCCESS ||
_picl_get_propinfo(proph, &pinfo) != PICL_SUCCESS) {
return PICL_FAILURE;
}
@ -54,13 +86,13 @@ class PICL {
}
if (pinfo.size == sizeof(int64_t)) {
int64_t val;
if (picl_get_propval(proph, &val, sizeof(int64_t)) != PICL_SUCCESS) {
if (_picl_get_propval(proph, &val, sizeof(int64_t)) != PICL_SUCCESS) {
return PICL_FAILURE;
}
*result = static_cast<int>(val);
} else if (pinfo.size == sizeof(int32_t)) {
int32_t val;
if (picl_get_propval(proph, &val, sizeof(int32_t)) != PICL_SUCCESS) {
if (_picl_get_propval(proph, &val, sizeof(int32_t)) != PICL_SUCCESS) {
return PICL_FAILURE;
}
*result = static_cast<int>(val);
@ -74,6 +106,7 @@ class PICL {
// Visitor and a state machine that visits integer properties and verifies that the
// values are the same. Stores the unique value observed.
class UniqueValueVisitor {
PICL *_picl;
enum {
INITIAL, // Start state, no assignments happened
ASSIGNED, // Assigned a value
@ -81,7 +114,7 @@ class PICL {
} _state;
int _value;
public:
UniqueValueVisitor() : _state(INITIAL) { }
UniqueValueVisitor(PICL* picl) : _picl(picl), _state(INITIAL) { }
int value() {
assert(_state == ASSIGNED, "Precondition");
return _value;
@ -96,71 +129,125 @@ class PICL {
bool is_inconsistent() { return _state == INCONSISTENT; }
void set_inconsistent() { _state = INCONSISTENT; }
static int visit(picl_nodehdl_t nodeh, const char* name, void *arg) {
UniqueValueVisitor *state = static_cast<UniqueValueVisitor*>(arg);
assert(!state->is_inconsistent(), "Precondition");
void visit(picl_nodehdl_t nodeh, const char* name) {
assert(!is_inconsistent(), "Precondition");
int curr;
if (PICL::get_int_property(nodeh, name, &curr) == PICL_SUCCESS) {
if (!state->is_assigned()) { // first iteration
state->set_value(curr);
} else if (curr != state->value()) { // following iterations
state->set_inconsistent();
if (_picl->get_int_property(nodeh, name, &curr) == PICL_SUCCESS) {
if (!is_assigned()) { // first iteration
set_value(curr);
} else if (curr != value()) { // following iterations
set_inconsistent();
}
}
if (state->is_inconsistent()) {
}
};
class CPUVisitor {
UniqueValueVisitor _l1_visitor;
UniqueValueVisitor _l2_visitor;
int _limit; // number of times visit() can be run
public:
CPUVisitor(PICL *picl, int limit) : _l1_visitor(picl), _l2_visitor(picl), _limit(limit) {}
static int visit(picl_nodehdl_t nodeh, void *arg) {
CPUVisitor *cpu_visitor = static_cast<CPUVisitor*>(arg);
UniqueValueVisitor* l1_visitor = cpu_visitor->l1_visitor();
UniqueValueVisitor* l2_visitor = cpu_visitor->l2_visitor();
if (!l1_visitor->is_inconsistent()) {
l1_visitor->visit(nodeh, "l1-dcache-line-size");
}
if (!l2_visitor->is_inconsistent()) {
l2_visitor->visit(nodeh, "l2-cache-line-size");
}
if (l1_visitor->is_inconsistent() && l2_visitor->is_inconsistent()) {
return PICL_WALK_TERMINATE;
}
cpu_visitor->_limit--;
if (cpu_visitor->_limit <= 0) {
return PICL_WALK_TERMINATE;
}
return PICL_WALK_CONTINUE;
}
UniqueValueVisitor* l1_visitor() { return &_l1_visitor; }
UniqueValueVisitor* l2_visitor() { return &_l2_visitor; }
};
int _L1_data_cache_line_size;
int _L2_cache_line_size;
public:
static int get_l1_data_cache_line_size(picl_nodehdl_t nodeh, void *state) {
return UniqueValueVisitor::visit(nodeh, "l1-dcache-line-size", state);
}
static int get_l2_cache_line_size(picl_nodehdl_t nodeh, void *state) {
return UniqueValueVisitor::visit(nodeh, "l2-cache-line-size", state);
static int visit_cpu(picl_nodehdl_t nodeh, void *state) {
return CPUVisitor::visit(nodeh, state);
}
PICL() : _L1_data_cache_line_size(0), _L2_cache_line_size(0) {
if (picl_initialize() == PICL_SUCCESS) {
PICL(bool is_fujitsu) : _L1_data_cache_line_size(0), _L2_cache_line_size(0), _dl_handle(NULL) {
if (!open_library()) {
return;
}
if (_picl_initialize() == PICL_SUCCESS) {
picl_nodehdl_t rooth;
if (picl_get_root(&rooth) == PICL_SUCCESS) {
UniqueValueVisitor L1_state;
// Visit all "cpu" class instances
picl_walk_tree_by_class(rooth, "cpu", &L1_state, PICL_get_l1_data_cache_line_size_helper);
if (L1_state.is_initial()) { // Still initial, iteration found no values
// Try walk all "core" class instances, it might be a Fujitsu machine
picl_walk_tree_by_class(rooth, "core", &L1_state, PICL_get_l1_data_cache_line_size_helper);
if (_picl_get_root(&rooth) == PICL_SUCCESS) {
const char* cpu_class = "cpu";
// If it's a Fujitsu machine, it's a "core"
if (is_fujitsu) {
cpu_class = "core";
}
if (L1_state.is_assigned()) { // Is there a value?
_L1_data_cache_line_size = L1_state.value();
CPUVisitor cpu_visitor(this, os::processor_count());
_picl_walk_tree_by_class(rooth, cpu_class, &cpu_visitor, PICL_visit_cpu_helper);
if (cpu_visitor.l1_visitor()->is_assigned()) { // Is there a value?
_L1_data_cache_line_size = cpu_visitor.l1_visitor()->value();
}
UniqueValueVisitor L2_state;
picl_walk_tree_by_class(rooth, "cpu", &L2_state, PICL_get_l2_cache_line_size_helper);
if (L2_state.is_initial()) {
picl_walk_tree_by_class(rooth, "core", &L2_state, PICL_get_l2_cache_line_size_helper);
}
if (L2_state.is_assigned()) {
_L2_cache_line_size = L2_state.value();
if (cpu_visitor.l2_visitor()->is_assigned()) {
_L2_cache_line_size = cpu_visitor.l2_visitor()->value();
}
}
picl_shutdown();
_picl_shutdown();
}
close_library();
}
unsigned int L1_data_cache_line_size() const { return _L1_data_cache_line_size; }
unsigned int L2_cache_line_size() const { return _L2_cache_line_size; }
};
extern "C" static int PICL_get_l1_data_cache_line_size_helper(picl_nodehdl_t nodeh, void *result) {
return PICL::get_l1_data_cache_line_size(nodeh, result);
extern "C" static int PICL_visit_cpu_helper(picl_nodehdl_t nodeh, void *result) {
return PICL::visit_cpu(nodeh, result);
}
extern "C" static int PICL_get_l2_cache_line_size_helper(picl_nodehdl_t nodeh, void *result) {
return PICL::get_l2_cache_line_size(nodeh, result);
template<typename FuncType>
bool PICL::bind(FuncType& func, const char* name) {
func = reinterpret_cast<FuncType>(dlsym(_dl_handle, name));
return func != NULL;
}
bool PICL::bind_library_functions() {
assert(_dl_handle != NULL, "library should be open");
return bind(_picl_initialize, "picl_initialize" ) &&
bind(_picl_shutdown, "picl_shutdown" ) &&
bind(_picl_get_root, "picl_get_root" ) &&
bind(_picl_walk_tree_by_class, "picl_walk_tree_by_class") &&
bind(_picl_get_prop_by_name, "picl_get_prop_by_name" ) &&
bind(_picl_get_propval, "picl_get_propval" ) &&
bind(_picl_get_propinfo, "picl_get_propinfo" );
}
bool PICL::open_library() {
_dl_handle = dlopen("libpicl.so.1", RTLD_LAZY);
if (_dl_handle == NULL) {
warning("PICL (libpicl.so.1) is missing. Performance will not be optimal.");
return false;
}
if (!bind_library_functions()) {
assert(false, "unexpected PICL API change");
close_library();
return false;
}
return true;
}
void PICL::close_library() {
assert(_dl_handle != NULL, "library should be open");
dlclose(_dl_handle);
_dl_handle = NULL;
}
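open_library(), bind() and close_library() above follow the usual dlopen/dlsym late-binding pattern, which lets the VM start even when libpicl.so.1 is not installed. A self-contained sketch of that pattern against an unrelated, commonly available symbol (cos in libm on Linux; the soname differs per platform), for illustration only:
#include <dlfcn.h>
#include <cstdio>
typedef double (*cos_func_t)(double);
int main() {
  void* handle = dlopen("libm.so.6", RTLD_LAZY);   // open the library lazily
  if (handle == NULL) {
    std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }
  cos_func_t my_cos = reinterpret_cast<cos_func_t>(dlsym(handle, "cos"));
  if (my_cos != NULL) {
    std::printf("cos(0) = %f\n", my_cos(0.0));     // call through the bound pointer
  }
  dlclose(handle);                                 // drop the handle when done
  return 0;
}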
// We need to keep these here as long as we have to build on Solaris
@ -342,7 +429,7 @@ int VM_Version::platform_features(int features) {
}
// Figure out cache line sizes using PICL
PICL picl;
PICL picl((features & sparc64_family_m) != 0);
_L1_data_cache_line_size = picl.L1_data_cache_line_size();
_L2_cache_line_size = picl.L2_cache_line_size();
View File
@ -411,7 +411,7 @@ class CompilerInterfaceVC10 extends CompilerInterface {
"/export:jio_vsnprintf "+
"/export:JVM_GetVersionInfo "+
"/export:JVM_InitAgentProperties");
addAttr(rv, "AdditionalDependencies", "kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;Wsock32.lib;winmm.lib;psapi.lib");
addAttr(rv, "AdditionalDependencies", "kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;Wsock32.lib;winmm.lib;psapi.lib;version.lib");
addAttr(rv, "OutputFile", outDll);
addAttr(rv, "SuppressStartupBanner", "true");
addAttr(rv, "ModuleDefinitionFile", outDir+Util.sep+"vm.def");
View File
@ -1093,9 +1093,8 @@ void ciEnv::register_method(ciMethod* target,
// JVMTI -- compiled method notification (must be done outside lock)
nm->post_compiled_method_load_event();
} else {
// The CodeCache is full. Print out warning and disable compilation.
// The CodeCache is full.
record_failure("code cache is full");
CompileBroker::handle_full_code_cache(CodeCache::get_code_blob_type(comp_level));
}
}
View File
@ -2557,7 +2557,7 @@ methodHandle ClassFileParser::parse_method(bool is_interface,
Array<Method*>* ClassFileParser::parse_methods(bool is_interface,
AccessFlags* promoted_flags,
bool* has_final_method,
bool* has_default_methods,
bool* declares_default_methods,
TRAPS) {
ClassFileStream* cfs = stream();
cfs->guarantee_more(2, CHECK_NULL); // length
@ -2576,11 +2576,11 @@ Array<Method*>* ClassFileParser::parse_methods(bool is_interface,
if (method->is_final()) {
*has_final_method = true;
}
if (is_interface && !(*has_default_methods)
&& !method->is_abstract() && !method->is_static()
&& !method->is_private()) {
// default method
*has_default_methods = true;
// declares_default_methods: the interface declares concrete instance methods (with any
// access flags); used for interface initialization and default method inheritance analysis.
if (is_interface && !(*declares_default_methods)
&& !method->is_abstract() && !method->is_static()) {
*declares_default_methods = true;
}
_methods->at_put(index, method());
}
@ -3739,6 +3739,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
JvmtiCachedClassFileData *cached_class_file = NULL;
Handle class_loader(THREAD, loader_data->class_loader());
bool has_default_methods = false;
bool declares_default_methods = false;
ResourceMark rm(THREAD);
ClassFileStream* cfs = stream();
@ -3976,9 +3977,13 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
Array<Method*>* methods = parse_methods(access_flags.is_interface(),
&promoted_flags,
&has_final_method,
&has_default_methods,
&declares_default_methods,
CHECK_(nullHandle));
if (declares_default_methods) {
has_default_methods = true;
}
// Additional attributes
ClassAnnotationCollector parsed_annotations;
parse_classfile_attributes(&parsed_annotations, CHECK_(nullHandle));
@ -4120,6 +4125,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
this_klass->set_minor_version(minor_version);
this_klass->set_major_version(major_version);
this_klass->set_has_default_methods(has_default_methods);
this_klass->set_declares_default_methods(declares_default_methods);
if (!host_klass.is_null()) {
assert (this_klass->is_anonymous(), "should be the same");
View File
@ -247,7 +247,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
Array<Method*>* parse_methods(bool is_interface,
AccessFlags* promoted_flags,
bool* has_final_method,
bool* has_default_method,
bool* declares_default_methods,
TRAPS);
intArray* sort_methods(Array<Method*>* methods);
View File
@ -553,6 +553,7 @@ ClassLoaderData* ClassLoaderDataGraph::_saved_unloading = NULL;
ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL;
bool ClassLoaderDataGraph::_should_purge = false;
bool ClassLoaderDataGraph::_metaspace_oom = false;
// Add a new class loader data node to the list. Assign the newly created
// ClassLoaderData into the java/lang/ClassLoader object as a hidden field
@ -804,12 +805,17 @@ void ClassLoaderDataGraph::purge() {
ClassLoaderData* list = _unloading;
_unloading = NULL;
ClassLoaderData* next = list;
bool classes_unloaded = false;
while (next != NULL) {
ClassLoaderData* purge_me = next;
next = purge_me->next();
delete purge_me;
classes_unloaded = true;
}
if (classes_unloaded) {
Metaspace::purge();
set_metaspace_oom(false);
}
Metaspace::purge();
}
void ClassLoaderDataGraph::post_class_unload_events(void) {
View File
@ -68,6 +68,9 @@ class ClassLoaderDataGraph : public AllStatic {
static ClassLoaderData* _saved_head;
static ClassLoaderData* _saved_unloading;
static bool _should_purge;
// OOM has been seen in metaspace allocation. Used to prevent some
// allocations until class unloading
static bool _metaspace_oom;
static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
static void post_class_unload_events(void);
@ -107,6 +110,9 @@ class ClassLoaderDataGraph : public AllStatic {
}
}
static bool has_metaspace_oom() { return _metaspace_oom; }
static void set_metaspace_oom(bool value) { _metaspace_oom = value; }
static void free_deallocate_lists();
static void dump_on(outputStream * const out) PRODUCT_RETURN;
View File
@ -455,6 +455,7 @@
template(object_void_signature, "(Ljava/lang/Object;)V") \
template(object_int_signature, "(Ljava/lang/Object;)I") \
template(object_boolean_signature, "(Ljava/lang/Object;)Z") \
template(object_object_signature, "(Ljava/lang/Object;)Ljava/lang/Object;") \
template(string_void_signature, "(Ljava/lang/String;)V") \
template(string_int_signature, "(Ljava/lang/String;)I") \
template(throwable_void_signature, "(Ljava/lang/Throwable;)V") \
@ -746,6 +747,8 @@
do_name( isPrimitive_name, "isPrimitive") \
do_intrinsic(_getSuperclass, java_lang_Class, getSuperclass_name, void_class_signature, F_RN) \
do_name( getSuperclass_name, "getSuperclass") \
do_intrinsic(_Class_cast, java_lang_Class, Class_cast_name, object_object_signature, F_R) \
do_name( Class_cast_name, "cast") \
\
do_intrinsic(_getClassAccessFlags, sun_reflect_Reflection, getClassAccessFlags_name, class_int_signature, F_SN) \
do_name( getClassAccessFlags_name, "getClassAccessFlags") \
View File
@ -229,8 +229,8 @@ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
return blob;
}
void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
return CodeCache::allocate(size, CodeBlobType::NonNMethod, is_critical);
void* BufferBlob::operator new(size_t s, unsigned size) throw() {
return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}
void BufferBlob::free(BufferBlob *blob) {
@ -260,10 +260,7 @@ AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// The parameter 'true' indicates a critical memory allocation.
// This means that CodeCacheMinimumFreeSpace is used, if necessary
const bool is_critical = true;
blob = new (size, is_critical) AdapterBlob(size, cb);
blob = new (size) AdapterBlob(size, cb);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
@ -285,10 +282,7 @@ MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
size += round_to(buffer_size, oopSize);
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// The parameter 'true' indicates a critical memory allocation.
// This means that CodeCacheMinimumFreeSpace is used, if necessary
const bool is_critical = true;
blob = new (size, is_critical) MethodHandlesAdapterBlob(size);
blob = new (size) MethodHandlesAdapterBlob(size);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
@ -336,14 +330,14 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod, true);
void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
}
// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod, true);
void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
}
View File
@ -221,7 +221,7 @@ class BufferBlob: public CodeBlob {
BufferBlob(const char* name, int size);
BufferBlob(const char* name, int size, CodeBuffer* cb);
void* operator new(size_t s, unsigned size, bool is_critical = false) throw();
void* operator new(size_t s, unsigned size) throw();
public:
// Creation
View File
@ -44,6 +44,7 @@
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/compilationPolicy.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
@ -192,16 +193,16 @@ void CodeCache::initialize_heaps() {
}
// Make sure we have enough space for VM internal code
uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
}
guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
// Align reserved sizes of CodeHeaps
size_t non_method_size = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
size_t profiled_size = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
size_t non_method_size = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
size_t profiled_size = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
// Compute initial sizes of CodeHeaps
size_t init_non_method_size = MIN2(InitialCodeCacheSize, non_method_size);
@ -267,6 +268,22 @@ bool CodeCache::heap_available(int code_blob_type) {
}
}
const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
switch(code_blob_type) {
case CodeBlobType::NonNMethod:
return "NonNMethodCodeHeapSize";
break;
case CodeBlobType::MethodNonProfiled:
return "NonProfiledCodeHeapSize";
break;
case CodeBlobType::MethodProfiled:
return "ProfiledCodeHeapSize";
break;
}
ShouldNotReachHere();
return NULL;
}
void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
// Check if heap is needed
if (!heap_available(code_blob_type)) {
@ -332,14 +349,18 @@ CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
return next_blob(get_code_heap(cb), cb);
}
CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
// Do not seize the CodeCache lock here--if the caller has not
// already done so, we are going to lose bigtime, since the code
// cache will contain a garbage CodeBlob until the caller can
// run the constructor for the CodeBlob subclass he is busy
// instantiating.
/**
* Do not seize the CodeCache lock here--if the caller has not
* already done so, we are going to lose bigtime, since the code
* cache will contain a garbage CodeBlob until the caller can
* run the constructor for the CodeBlob subclass he is busy
* instantiating.
*/
CodeBlob* CodeCache::allocate(int size, int code_blob_type) {
// Possibly wakes up the sweeper thread.
NMethodSweeper::notify(code_blob_type);
assert_locked_or_safepoint(CodeCache_lock);
assert(size > 0, "allocation request must be reasonable");
assert(size > 0, err_msg_res("Code cache allocation request must be > 0 but is %d", size));
if (size <= 0) {
return NULL;
}
@ -350,14 +371,18 @@ CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
assert(heap != NULL, "heap is null");
while (true) {
cb = (CodeBlob*)heap->allocate(size, is_critical);
cb = (CodeBlob*)heap->allocate(size);
if (cb != NULL) break;
if (!heap->expand_by(CodeCacheExpansionSize)) {
// Expansion failed
if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
// Fallback solution: Store non-nmethod code in the non-profiled code heap
return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
// Fallback solution: Store non-nmethod code in the non-profiled code heap.
// Note that in the sweeper, we check the reverse_free_ratio of the non-profiled
// code heap and force stack scanning if less than 10% of the code heap is free.
return allocate(size, CodeBlobType::MethodNonProfiled);
}
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CompileBroker::handle_full_code_cache(code_blob_type);
return NULL;
}
if (PrintCodeCacheExtension) {
@ -754,19 +779,6 @@ size_t CodeCache::max_capacity() {
return max_cap;
}
/**
* Returns true if a CodeHeap is full and sets code_blob_type accordingly.
*/
bool CodeCache::is_full(int* code_blob_type) {
FOR_ALL_HEAPS(heap) {
if ((*heap)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
*code_blob_type = (*heap)->code_blob_type();
return true;
}
}
return false;
}
/**
* Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
* is free, reverse_free_ratio() returns 4.
@ -776,9 +788,13 @@ double CodeCache::reverse_free_ratio(int code_blob_type) {
if (heap == NULL) {
return 0;
}
double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0;
double max_capacity = (double)heap->max_capacity();
return max_capacity / unallocated_capacity;
double result = max_capacity / unallocated_capacity;
assert (max_capacity >= unallocated_capacity, "Must be");
assert (result >= 1.0, err_msg_res("reverse_free_ratio must be at least 1. It is %f", result));
return result;
}
size_t CodeCache::bytes_allocated_in_freelists() {
@ -1011,9 +1027,8 @@ void CodeCache::report_codemem_full(int code_blob_type, bool print) {
// Not yet reported for this heap, report
heap->report_full();
if (SegmentedCodeCache) {
warning("%s is full. Compiler has been disabled.", CodeCache::get_code_heap_name(code_blob_type));
warning("Try increasing the code heap size using -XX:%s=",
(code_blob_type == CodeBlobType::MethodNonProfiled) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize");
warning("%s is full. Compiler has been disabled.", get_code_heap_name(code_blob_type));
warning("Try increasing the code heap size using -XX:%s=", get_code_heap_flag_name(code_blob_type));
} else {
warning("CodeCache is full. Compiler has been disabled.");
warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
View File
@ -100,6 +100,8 @@ class CodeCache : AllStatic {
static void add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type);
static CodeHeap* get_code_heap(CodeBlob* cb); // Returns the CodeHeap for the given CodeBlob
static CodeHeap* get_code_heap(int code_blob_type); // Returns the CodeHeap for the given CodeBlobType
// Returns the name of the VM option to set the size of the corresponding CodeHeap
static const char* get_code_heap_flag_name(int code_blob_type);
static bool heap_available(int code_blob_type); // Returns true if an own CodeHeap for the given CodeBlobType is available
static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one continuous chunk of memory for the CodeHeaps
@ -118,16 +120,16 @@ class CodeCache : AllStatic {
static void initialize();
// Allocation/administration
static CodeBlob* allocate(int size, int code_blob_type, bool is_critical = false); // allocates a new CodeBlob
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
static int alignment_unit(); // guaranteed alignment of all CodeBlobs
static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
static void free(CodeBlob* cb); // frees a CodeBlob
static bool contains(void *p); // returns whether p is included
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
static CodeBlob* allocate(int size, int code_blob_type); // allocates a new CodeBlob
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
static int alignment_unit(); // guaranteed alignment of all CodeBlobs
static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
static void free(CodeBlob* cb); // frees a CodeBlob
static bool contains(void *p); // returns whether p is included
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
// Lookup
static CodeBlob* find_blob(void* start); // Returns the CodeBlob containing the given address
@ -180,7 +182,6 @@ class CodeCache : AllStatic {
static size_t unallocated_capacity();
static size_t max_capacity();
static bool is_full(int* code_blob_type);
static double reverse_free_ratio(int code_blob_type);
static bool needs_cache_clean() { return _needs_cache_clean; }
View File
@ -804,10 +804,7 @@ nmethod::nmethod(
#endif // def HAVE_DTRACE_H
void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
// With a SegmentedCodeCache, nmethods are allocated on separate heaps and therefore do not share memory
// with critical CodeBlobs. We define the allocation as critical to make sure all code heap memory is used.
bool is_critical = SegmentedCodeCache;
return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level), is_critical);
return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
}
nmethod::nmethod(
View File
@ -63,7 +63,6 @@ void* VtableStub::operator new(size_t size, int code_size) throw() {
// If changing the name, update the other file accordingly.
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
if (blob == NULL) {
CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return NULL;
}
_chunk = blob->content_begin();
View File
@ -156,8 +156,6 @@ long CompileBroker::_peak_compilation_time = 0;
CompileQueue* CompileBroker::_c2_compile_queue = NULL;
CompileQueue* CompileBroker::_c1_compile_queue = NULL;
GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;
class CompilationLog : public StringEventLog {
public:
@ -187,6 +185,14 @@ class CompilationLog : public StringEventLog {
lm.print("\n");
log(thread, "%s", (const char*)lm);
}
void log_metaspace_failure(const char* reason) {
ResourceMark rm;
StringLogMessage lm;
lm.print("%4d COMPILE PROFILING SKIPPED: %s", -1, reason);
lm.print("\n");
log(JavaThread::current(), "%s", (const char*)lm);
}
};
static CompilationLog* _compilation_log = NULL;
@ -649,13 +655,10 @@ void CompileQueue::free_all() {
lock()->notify_all();
}
// ------------------------------------------------------------------
// CompileQueue::get
//
// Get the next CompileTask from a CompileQueue
/**
* Get the next CompileTask from a CompileQueue
*/
CompileTask* CompileQueue::get() {
NMethodSweeper::possibly_sweep();
MutexLocker locker(lock());
// If _first is NULL we have no more compile jobs. There are two reasons for
// having no compile jobs: First, we compiled everything we wanted. Second,
@ -668,35 +671,16 @@ CompileTask* CompileQueue::get() {
return NULL;
}
if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
// Wait a certain amount of time to possibly do another sweep.
// We must wait until stack scanning has happened so that we can
// transition a method's state from 'not_entrant' to 'zombie'.
long wait_time = NmethodSweepCheckInterval * 1000;
if (FLAG_IS_DEFAULT(NmethodSweepCheckInterval)) {
// Only one thread at a time can do sweeping. Scale the
// wait time according to the number of compiler threads.
// As a result, the next sweep is likely to happen every 100ms
// with an arbitrary number of threads that do sweeping.
wait_time = 100 * CICompilerCount;
}
bool timeout = lock()->wait(!Mutex::_no_safepoint_check_flag, wait_time);
if (timeout) {
MutexUnlocker ul(lock());
NMethodSweeper::possibly_sweep();
}
} else {
// If there are no compilation tasks and we can compile new jobs
// (i.e., there is enough free space in the code cache) there is
// no need to invoke the sweeper. As a result, the hotness of methods
// remains unchanged. This behavior is desired, since we want to keep
// the stable state, i.e., we do not want to evict methods from the
// code cache if it is unnecessary.
// We need a timed wait here, since compiler threads can exit if compilation
// is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
// is not critical and we do not want idle compiler threads to wake up too often.
lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
}
// If there are no compilation tasks and we can compile new jobs
// (i.e., there is enough free space in the code cache) there is
// no need to invoke the sweeper. As a result, the hotness of methods
// remains unchanged. This behavior is desired, since we want to keep
// the stable state, i.e., we do not want to evict methods from the
// code cache if it is unnecessary.
// We need a timed wait here, since compiler threads can exit if compilation
// is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
// is not critical and we do not want idle compiler threads to wake up too often.
lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
}
if (CompileBroker::is_compilation_disabled_forever()) {
@ -886,8 +870,8 @@ void CompileBroker::compilation_init() {
_compilers[1] = new SharkCompiler();
#endif // SHARK
// Start the CompilerThreads
init_compiler_threads(c1_count, c2_count);
// Start the compiler thread(s) and the sweeper thread
init_compiler_sweeper_threads(c1_count, c2_count);
// totalTime performance counter is always created as it is required
// by the implementation of java.lang.management.CompilationMBean.
{
@ -991,13 +975,10 @@ void CompileBroker::compilation_init() {
}
CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
AbstractCompiler* comp, TRAPS) {
CompilerThread* compiler_thread = NULL;
Klass* k =
SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(),
true, CHECK_0);
JavaThread* CompileBroker::make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
AbstractCompiler* comp, bool compiler_thread, TRAPS) {
JavaThread* thread = NULL;
Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK_0);
instanceKlassHandle klass (THREAD, k);
instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_0);
Handle string = java_lang_String::create_from_str(name, CHECK_0);
@ -1015,7 +996,11 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
{
MutexLocker mu(Threads_lock, THREAD);
compiler_thread = new CompilerThread(queue, counters);
if (compiler_thread) {
thread = new CompilerThread(queue, counters);
} else {
thread = new CodeCacheSweeperThread();
}
// At this point the new CompilerThread data-races with this startup
// thread (which I believe is the primordial thread and NOT the VM
// thread). This means Java bytecodes being executed at startup can
@ -1028,12 +1013,12 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
// in that case. However, since this must work and we do not allow
// exceptions anyway, check and abort if this fails.
if (compiler_thread == NULL || compiler_thread->osthread() == NULL){
if (thread == NULL || thread->osthread() == NULL) {
vm_exit_during_initialization("java.lang.OutOfMemoryError",
os::native_thread_creation_failed_msg());
}
java_lang_Thread::set_thread(thread_oop(), compiler_thread);
java_lang_Thread::set_thread(thread_oop(), thread);
// Note that this only sets the JavaThread _priority field, which by
// definition is limited to Java priorities and not OS priorities.
@ -1054,24 +1039,26 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
native_prio = os::java_to_os_priority[NearMaxPriority];
}
}
os::set_native_priority(compiler_thread, native_prio);
os::set_native_priority(thread, native_prio);
java_lang_Thread::set_daemon(thread_oop());
compiler_thread->set_threadObj(thread_oop());
compiler_thread->set_compiler(comp);
Threads::add(compiler_thread);
Thread::start(compiler_thread);
thread->set_threadObj(thread_oop());
if (compiler_thread) {
thread->as_CompilerThread()->set_compiler(comp);
}
Threads::add(thread);
Thread::start(thread);
}
// Let go of Threads_lock before yielding
os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
return compiler_thread;
return thread;
}
void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
void CompileBroker::init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count) {
EXCEPTION_MARK;
#if !defined(ZERO) && !defined(SHARK)
assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
@ -1088,17 +1075,14 @@ void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler
int compiler_count = c1_compiler_count + c2_compiler_count;
_compiler_threads =
new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<CompilerThread*>(compiler_count, true);
char name_buffer[256];
const bool compiler_thread = true;
for (int i = 0; i < c2_compiler_count; i++) {
// Create a name for our thread.
sprintf(name_buffer, "C2 CompilerThread%d", i);
CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
// Shark and C2
CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], CHECK);
_compiler_threads->append(new_thread);
make_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], compiler_thread, CHECK);
}
for (int i = c2_compiler_count; i < compiler_count; i++) {
@ -1106,13 +1090,17 @@ void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler
sprintf(name_buffer, "C1 CompilerThread%d", i);
CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
// C1
CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], CHECK);
_compiler_threads->append(new_thread);
make_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], compiler_thread, CHECK);
}
if (UsePerfData) {
PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, compiler_count, CHECK);
}
if (MethodFlushing) {
// Initialize the sweeper thread
make_thread("Sweeper thread", NULL, NULL, NULL, false, CHECK);
}
}
@ -1759,13 +1747,6 @@ void CompileBroker::compiler_thread_loop() {
// We need this HandleMark to avoid leaking VM handles.
HandleMark hm(thread);
// Check if the CodeCache is full
int code_blob_type = 0;
if (CodeCache::is_full(&code_blob_type)) {
// The CodeHeap for code_blob_type is really full
handle_full_code_cache(code_blob_type);
}
CompileTask* task = queue->get();
if (task == NULL) {
continue;
@ -1773,8 +1754,9 @@ void CompileBroker::compiler_thread_loop() {
// Give compiler threads an extra quantum. They tend to be bursty and
// this helps the compiler to finish up the job.
if( CompilerThreadHintNoPreempt )
if (CompilerThreadHintNoPreempt) {
os::hint_no_preempt();
}
// trace per thread time and compile statistics
CompilerCounters* counters = ((CompilerThread*)thread)->counters();
@ -1843,6 +1825,18 @@ void CompileBroker::init_compiler_thread_log() {
warning("Cannot open log file: %s", file_name);
}
void CompileBroker::log_metaspace_failure() {
const char* message = "some methods may not be compiled because metaspace "
"is out of memory";
if (_compilation_log != NULL) {
_compilation_log->log_metaspace_failure(message);
}
if (PrintCompilation) {
tty->print_cr("COMPILE PROFILING SKIPPED: %s", message);
}
}
// ------------------------------------------------------------------
// CompileBroker::set_should_block
//
@ -2074,8 +2068,10 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
}
/**
* The CodeCache is full. Print out warning and disable compilation
* or try code cache cleaning so compilation can continue later.
* The CodeCache is full. Print warning and disable compilation.
* Schedule code cache cleaning so compilation can continue later.
* This function needs to be called only from CodeCache::allocate(),
* since we currently handle a full code cache uniformly.
*/
void CompileBroker::handle_full_code_cache(int code_blob_type) {
UseInterpreter = true;
@ -2107,10 +2103,6 @@ void CompileBroker::handle_full_code_cache(int code_blob_type) {
if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
NMethodSweeper::log_sweep("disable_compiler");
}
// Switch to 'vm_state'. This ensures that possibly_sweep() can be called
// without having to consider the state in which the current thread is.
ThreadInVMfromUnknown in_vm;
NMethodSweeper::possibly_sweep();
} else {
disable_compilation_forever();
}
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -290,8 +290,6 @@ class CompileBroker: AllStatic {
static CompileQueue* _c2_compile_queue;
static CompileQueue* _c1_compile_queue;
static GrowableArray<CompilerThread*>* _compiler_threads;
// performance counters
static PerfCounter* _perf_total_compilation;
static PerfCounter* _perf_native_compilation;
@ -339,8 +337,8 @@ class CompileBroker: AllStatic {
static volatile jint _print_compilation_warning;
static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS);
static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
static JavaThread* make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, bool compiler_thread, TRAPS);
static void init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count);
static bool compilation_is_complete (methodHandle method, int osr_bci, int comp_level);
static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
static bool is_compile_blocking();
@ -473,6 +471,9 @@ class CompileBroker: AllStatic {
static int get_sum_nmethod_code_size() { return _sum_nmethod_code_size; }
static long get_peak_compilation_time() { return _peak_compilation_time; }
static long get_total_compilation_time() { return _t_total_compilation.milliseconds(); }
// Log that compilation profiling is skipped because metaspace is full.
static void log_metaspace_failure();
};
#endif // SHARE_VM_COMPILER_COMPILEBROKER_HPP
View File
@ -127,41 +127,6 @@ public:
};
class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
size_t _num_processed;
CardTableModRefBS* _ctbs;
int _histo[256];
public:
ClearLoggedCardTableEntryClosure() :
_num_processed(0), _ctbs(G1CollectedHeap::heap()->g1_barrier_set())
{
for (int i = 0; i < 256; i++) _histo[i] = 0;
}
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
unsigned char* ujb = (unsigned char*)card_ptr;
int ind = (int)(*ujb);
_histo[ind]++;
*card_ptr = (jbyte)CardTableModRefBS::clean_card_val();
_num_processed++;
return true;
}
size_t num_processed() { return _num_processed; }
void print_histo() {
gclog_or_tty->print_cr("Card table value histogram:");
for (int i = 0; i < 256; i++) {
if (_histo[i] != 0) {
gclog_or_tty->print_cr(" %d: %d", i, _histo[i]);
}
}
}
};
class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
private:
size_t _num_processed;
@ -475,48 +440,6 @@ bool G1CollectedHeap::is_scavengable(const void* p) {
return !hr->is_humongous();
}
void G1CollectedHeap::check_ct_logs_at_safepoint() {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
CardTableModRefBS* ct_bs = g1_barrier_set();
// Count the dirty cards at the start.
CountNonCleanMemRegionClosure count1(this);
ct_bs->mod_card_iterate(&count1);
int orig_count = count1.n();
// First clear the logged cards.
ClearLoggedCardTableEntryClosure clear;
dcqs.apply_closure_to_all_completed_buffers(&clear);
dcqs.iterate_closure_all_threads(&clear, false);
clear.print_histo();
// Now ensure that there's no dirty cards.
CountNonCleanMemRegionClosure count2(this);
ct_bs->mod_card_iterate(&count2);
if (count2.n() != 0) {
gclog_or_tty->print_cr("Card table has %d entries; %d originally",
count2.n(), orig_count);
}
guarantee(count2.n() == 0, "Card table should be clean.");
RedirtyLoggedCardTableEntryClosure redirty;
dcqs.apply_closure_to_all_completed_buffers(&redirty);
dcqs.iterate_closure_all_threads(&redirty, false);
gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
clear.num_processed(), orig_count);
guarantee(redirty.num_processed() == clear.num_processed(),
err_msg("Redirtied "SIZE_FORMAT" cards, bug cleared "SIZE_FORMAT,
redirty.num_processed(), clear.num_processed()));
CountNonCleanMemRegionClosure count3(this);
ct_bs->mod_card_iterate(&count3);
if (count3.n() != orig_count) {
gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
orig_count, count3.n());
guarantee(count3.n() >= orig_count, "Should have restored them all.");
}
}
// Private class members.
G1CollectedHeap* G1CollectedHeap::_g1h;
@ -5760,14 +5683,10 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
// not copied during the pause.
process_discovered_references(n_workers);
// Weak root processing.
{
if (G1StringDedup::is_enabled()) {
G1STWIsAliveClosure is_alive(this);
G1KeepAliveClosure keep_alive(this);
JNIHandles::weak_oops_do(&is_alive, &keep_alive);
if (G1StringDedup::is_enabled()) {
G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
}
G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
}
_allocator->release_gc_alloc_regions(n_workers, evacuation_info);
View File
@ -797,9 +797,6 @@ protected:
// The closure used to refine a single card.
RefineCardTableEntryClosure* _refine_cte_cl;
// A function to check the consistency of dirty card logs.
void check_ct_logs_at_safepoint();
// A DirtyCardQueueSet that is used to hold cards that contain
// references into the current collection set. This is used to
// update the remembered sets of the regions in the collection
View File
@ -1077,7 +1077,6 @@ IRT_END
address SignatureHandlerLibrary::set_handler_blob() {
BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
if (handler_blob == NULL) {
CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return NULL;
}
address handler = handler_blob->code_begin();
View File
@ -171,13 +171,13 @@ void CodeHeap::clear() {
}
void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
void* CodeHeap::allocate(size_t instance_size) {
size_t number_of_segments = size_to_segments(instance_size + header_size());
assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
// First check if we can satisfy request from freelist
NOT_PRODUCT(verify());
HeapBlock* block = search_freelist(number_of_segments, is_critical);
HeapBlock* block = search_freelist(number_of_segments);
NOT_PRODUCT(verify());
if (block != NULL) {
@ -191,15 +191,6 @@ void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
// Ensure minimum size for allocation to the heap.
number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);
if (!is_critical) {
// Make sure the allocation fits in the unallocated heap without using
// the CodeCacheMinimumFreeSpace that is reserved for critical allocations.
if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
// Fail allocation
return NULL;
}
}
if (_next_segment + number_of_segments <= _number_of_committed_segments) {
mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
HeapBlock* b = block_at(_next_segment);
@ -427,24 +418,17 @@ void CodeHeap::add_to_freelist(HeapBlock* a) {
* Search freelist for an entry on the list with the best fit.
* @return NULL, if no one was found
*/
FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
FreeBlock* CodeHeap::search_freelist(size_t length) {
FreeBlock* found_block = NULL;
FreeBlock* found_prev = NULL;
size_t found_length = 0;
FreeBlock* prev = NULL;
FreeBlock* cur = _freelist;
const size_t critical_boundary = (size_t)high_boundary() - CodeCacheMinimumFreeSpace;
// Search for first block that fits
while(cur != NULL) {
if (cur->length() >= length) {
// Non critical allocations are not allowed to use the last part of the code heap.
// Make sure the end of the allocation doesn't cross into the last part of the code heap.
if (!is_critical && (((size_t)cur + length) > critical_boundary)) {
// The freelist is sorted by address - if one fails, all consecutive will also fail.
break;
}
// Remember block, its previous element, and its length
found_block = cur;
found_prev = prev;
View File
@ -120,7 +120,7 @@ class CodeHeap : public CHeapObj<mtCode> {
// Toplevel freelist management
void add_to_freelist(HeapBlock* b);
FreeBlock* search_freelist(size_t length, bool is_critical);
FreeBlock* search_freelist(size_t length);
// Iteration helpers
void* next_free(HeapBlock* b) const;
@ -140,8 +140,8 @@ class CodeHeap : public CHeapObj<mtCode> {
bool expand_by(size_t size); // expands committed memory by size
// Memory allocation
void* allocate (size_t size, bool is_critical); // allocates a block of size or returns NULL
void deallocate(void* p); // deallocates a block
void* allocate (size_t size); // Allocate 'size' bytes in the code cache or return NULL
void deallocate(void* p); // Deallocate memory
// Attributes
char* low_boundary() const { return _memory.low_boundary (); }
View File
@ -3157,6 +3157,16 @@ void Metaspace::global_initialize() {
SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);
// the min_misc_code_size estimate is based on MetaspaceShared::generate_vtable_methods()
uintx min_misc_code_size = align_size_up(
(MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) *
(sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size,
max_alignment);
if (SharedMiscCodeSize < min_misc_code_size) {
report_out_of_shared_space(SharedMiscCode);
}
// Initialize with the sum of the shared space sizes. The read-only
// and read write metaspace chunks will be allocated out of this and the
// remainder is the misc code and data chunks.
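For a rough sense of the size this new check guards against: assuming a 64-bit platform (sizeof(void*) == 8) and the constants introduced in metaspaceShared.hpp below (num_virtuals = 200, vtbl_list_size = 17, vtbl_method_size = 16, vtbl_common_code_size = 1K), the unaligned estimate works out as in this small sketch. Exact values depend on platform and on max_alignment rounding.

#include <cstdio>

int main() {
  const long num_virtuals          = 200;  // max virtual functions per vtable
  const long vtbl_list_size        = 17;   // entries in the shared vtable list
  const long vtbl_method_size      = 16;   // per-method stub size (x64 estimate)
  const long vtbl_common_code_size = 1024; // shared "common_code" estimate
  const long ptr_size              = 8;    // sizeof(void*) on a 64-bit platform

  long estimate = (num_virtuals * vtbl_list_size) * (ptr_size + vtbl_method_size)
                  + vtbl_common_code_size;
  // 200 * 17 = 3400 vtable slots; 3400 * 24 = 81600; + 1024 = 82624 bytes (~81 KB)
  // before rounding up to max_alignment.
  printf("min_misc_code_size (unaligned) = %ld bytes\n", estimate);
  return 0;
}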

View File

@ -714,12 +714,17 @@ void MetaspaceShared::preload_and_dump(TRAPS) {
int class_list_path_len = (int)strlen(class_list_path_str);
if (class_list_path_len >= 3) {
if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
strcat(class_list_path_str, os::file_separator());
strcat(class_list_path_str, "lib");
if (class_list_path_len < JVM_MAXPATHLEN - 4) {
strncat(class_list_path_str, os::file_separator(), 1);
strncat(class_list_path_str, "lib", 3);
}
}
}
strcat(class_list_path_str, os::file_separator());
strcat(class_list_path_str, "classlist");
class_list_path_len = (int)strlen(class_list_path_str);
if (class_list_path_len < JVM_MAXPATHLEN - 10) {
strncat(class_list_path_str, os::file_separator(), 1);
strncat(class_list_path_str, "classlist", 9);
}
class_list_path = class_list_path_str;
} else {
class_list_path = SharedClassListFile;
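The pattern being applied in this hunk (check the remaining capacity before appending a fixed suffix) can be illustrated with a small standalone sketch. append_if_room and the buffer size are illustrative names and values, not HotSpot APIs; the real code bounds against JVM_MAXPATHLEN and uses os::file_separator().

#include <cstring>
#include <cstdio>

// Append 'suffix' to 'buf' only if it fits (including the terminating NUL).
// Returns true on success, false if the append was skipped.
static bool append_if_room(char* buf, size_t buf_size, const char* suffix) {
  size_t used   = strlen(buf);
  size_t needed = strlen(suffix);
  if (used + needed + 1 > buf_size) {
    return false;               // not enough room; leave buf unchanged
  }
  strncat(buf, suffix, needed);  // bounded append, still NUL-terminated
  return true;
}

int main() {
  char path[32] = "/java/home";
  append_if_room(path, sizeof(path), "/");
  append_if_room(path, sizeof(path), "classlist");
  printf("%s\n", path); // /java/home/classlist
  return 0;
}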

View File

@ -57,11 +57,16 @@ class MetaspaceShared : AllStatic {
static bool _archive_loading_failed;
public:
enum {
vtbl_list_size = 17, // number of entries in the shared space vtable list.
num_virtuals = 200 // maximum number of virtual functions
// If virtual functions are added to Metadata,
// this number needs to be increased. Also,
// SharedMiscCodeSize will need to be increased.
vtbl_list_size = 17, // number of entries in the shared space vtable list.
num_virtuals = 200, // maximum number of virtual functions
// If virtual functions are added to Metadata,
// this number needs to be increased. Also,
// SharedMiscCodeSize will need to be increased.
// The following 2 sizes were based on
// MetaspaceShared::generate_vtable_methods()
vtbl_method_size = 16, // conservative size of the movl and jmp instructions
// for the x64 platform
vtbl_common_code_size = (1*K) // conservative size of the "common_code" for the x64 platform
};
enum {

View File

@ -277,7 +277,7 @@ public:
bool has_stackmap_table() const { return _stackmap_data != NULL; }
void init_fingerprint() {
const uint64_t initval = CONST64(0x8000000000000000);
const uint64_t initval = UCONST64(0x8000000000000000);
_fingerprint = initval;
}
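The one-character change above matters because 0x8000000000000000 does not fit in a signed 64-bit type, so forming it through a signed-constant macro typically draws an overflow warning. A minimal sketch of the distinction, using plain C++ suffixes rather than HotSpot's CONST64/UCONST64 macros:

#include <cstdint>
#include <cstdio>

int main() {
  // 0x8000000000000000 is larger than INT64_MAX, so it cannot be represented
  // as a signed 64-bit constant; as an unsigned 64-bit constant it is exactly
  // representable (2^63).
  uint64_t fingerprint_init = 0x8000000000000000ULL; // what UCONST64 expresses
  printf("%llu\n", (unsigned long long)fingerprint_init); // 9223372036854775808
  return 0;
}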

View File

@ -206,7 +206,8 @@ void ConstantPool::trace_class_resolution(constantPoolHandle this_cp, KlassHandl
}
}
Klass* ConstantPool::klass_at_impl(constantPoolHandle this_cp, int which, TRAPS) {
Klass* ConstantPool::klass_at_impl(constantPoolHandle this_cp, int which,
bool save_resolution_error, TRAPS) {
assert(THREAD->is_Java_thread(), "must be a Java thread");
// A resolved constantPool entry will contain a Klass*, otherwise a Symbol*.
@ -249,7 +250,18 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_cp, int which, TRAPS)
// Failed to resolve class. We must record the errors so that subsequent attempts
// to resolve this constant pool entry fail with the same error (JVMS 5.4.3).
if (HAS_PENDING_EXCEPTION) {
save_and_throw_exception(this_cp, which, constantTag(JVM_CONSTANT_UnresolvedClass), CHECK_0);
if (save_resolution_error) {
save_and_throw_exception(this_cp, which, constantTag(JVM_CONSTANT_UnresolvedClass), CHECK_NULL);
// If CHECK_NULL above doesn't return the exception, that means that
// some other thread has beaten us and has resolved the class.
// To preserve old behavior, we return the resolved class.
entry = this_cp->resolved_klass_at(which);
assert(entry.is_resolved(), "must be resolved if exception was cleared");
assert(entry.get_klass()->is_klass(), "must be resolved to a klass");
return entry.get_klass();
} else {
return NULL; // return the pending exception
}
}
// Make this class loader depend upon the class loader owning the class reference
@ -260,10 +272,10 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_cp, int which, TRAPS)
// skip resolving the constant pool so that this code gets
// called the next time some bytecodes refer to this class.
trace_class_resolution(this_cp, k);
return k();
} else {
this_cp->klass_at_put(which, k());
}
return k();
} else {
this_cp->klass_at_put(which, k());
}
entry = this_cp->resolved_klass_at(which);
assert(entry.is_resolved() && entry.get_klass()->is_klass(), "must be resolved at this point");
@ -573,24 +585,25 @@ void ConstantPool::save_and_throw_exception(constantPoolHandle this_cp, int whic
Symbol* message = exception_message(this_cp, which, tag, PENDING_EXCEPTION);
SystemDictionary::add_resolution_error(this_cp, which, error, message);
// CAS in the tag. If a thread beat us to registering this error that's fine.
// If another thread resolved the reference, this is an error. The resolution
// must deterministically get an error. So why do we save this?
// We save this because jvmti can add classes to the bootclass path after this
// error, so it needs to get the same error if the error is first.
// If another thread resolved the reference, this is a race condition. This
// thread may have had a security manager or something temporary.
// This doesn't deterministically get an error. So why do we save this?
// We save this because jvmti can add classes to the bootclass path after
// this error, so it needs to get the same error if the error is first.
jbyte old_tag = Atomic::cmpxchg((jbyte)error_tag,
(jbyte*)this_cp->tag_addr_at(which), (jbyte)tag.value());
assert(old_tag == error_tag || old_tag == tag.value(), "should not be resolved otherwise");
if (old_tag != error_tag && old_tag != tag.value()) {
// MethodHandles and MethodType don't change to the resolved version.
assert(this_cp->tag_at(which).is_klass(), "Wrong tag value");
// Forget the exception and use the resolved class.
CLEAR_PENDING_EXCEPTION;
}
} else {
// some other thread put this in error state
throw_resolution_error(this_cp, which, CHECK);
}
// This exits with some pending exception
assert(HAS_PENDING_EXCEPTION, "should not be cleared");
}
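The cmpxchg on the tag byte above is the usual "publish one of two outcomes exactly once" pattern: either this thread installs the error tag, or another thread already installed the error tag or a resolved klass, in which case the observed value is used. A hedged, standalone sketch of the same idea with std::atomic (HotSpot's Atomic::cmpxchg has a different signature, and the tag values here are invented):

#include <atomic>
#include <cstdio>

enum Tag : int { kUnresolved = 0, kResolved = 1, kError = 2 }; // illustrative values

// Try to move an entry from kUnresolved to kError. If someone else already
// resolved it (or already recorded the error), keep their value.
static Tag record_error(std::atomic<int>& tag) {
  int expected = kUnresolved;
  if (tag.compare_exchange_strong(expected, kError)) {
    return kError;                       // we published the error state
  }
  return static_cast<Tag>(expected);     // lost the race; observe their state
}

int main() {
  std::atomic<int> tag{kUnresolved};
  printf("first caller sees %d\n", record_error(tag));  // 2 (kError)
  printf("second caller sees %d\n", record_error(tag)); // 2 (already in error)
  return 0;
}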
// Called to resolve constants in the constant pool and return an oop.
// Some constant pool entries cache their resolved oop. This is also
// called to create oops from constants to use in arguments for invokedynamic
@ -627,7 +640,7 @@ oop ConstantPool::resolve_constant_at_impl(constantPoolHandle this_cp, int index
case JVM_CONSTANT_Class:
{
assert(cache_index == _no_index_sentinel, "should not have been set");
Klass* resolved = klass_at_impl(this_cp, index, CHECK_NULL);
Klass* resolved = klass_at_impl(this_cp, index, true, CHECK_NULL);
// ldc wants the java mirror.
result_oop = resolved->java_mirror();
break;
@ -660,7 +673,7 @@ oop ConstantPool::resolve_constant_at_impl(constantPoolHandle this_cp, int index
ref_kind, index, this_cp->method_handle_index_at(index),
callee_index, name->as_C_string(), signature->as_C_string());
KlassHandle callee;
{ Klass* k = klass_at_impl(this_cp, callee_index, CHECK_NULL);
{ Klass* k = klass_at_impl(this_cp, callee_index, true, CHECK_NULL);
callee = KlassHandle(THREAD, k);
}
KlassHandle klass(THREAD, this_cp->pool_holder());

View File

@ -336,7 +336,13 @@ class ConstantPool : public Metadata {
Klass* klass_at(int which, TRAPS) {
constantPoolHandle h_this(THREAD, this);
return klass_at_impl(h_this, which, CHECK_NULL);
return klass_at_impl(h_this, which, true, CHECK_NULL);
}
// Version of klass_at that doesn't save the resolution error, called during deopt
Klass* klass_at_ignore_error(int which, TRAPS) {
constantPoolHandle h_this(THREAD, this);
return klass_at_impl(h_this, which, false, CHECK_NULL);
}
Symbol* klass_name_at(int which); // Returns the name, w/o resolving.
@ -793,7 +799,8 @@ class ConstantPool : public Metadata {
// Implementation of methods that needs an exposed 'this' pointer, in order to
// handle GC while executing the method
static Klass* klass_at_impl(constantPoolHandle this_cp, int which, TRAPS);
static Klass* klass_at_impl(constantPoolHandle this_cp, int which,
bool save_resolution_error, TRAPS);
static oop string_at_impl(constantPoolHandle this_cp, int which, int obj_index, TRAPS);
static void trace_class_resolution(constantPoolHandle this_cp, KlassHandle k);

View File

@ -736,6 +736,41 @@ void InstanceKlass::link_methods(TRAPS) {
}
}
// Eagerly initialize superinterfaces that declare default methods (concrete instance: any access)
void InstanceKlass::initialize_super_interfaces(instanceKlassHandle this_k, TRAPS) {
if (this_k->has_default_methods()) {
for (int i = 0; i < this_k->local_interfaces()->length(); ++i) {
Klass* iface = this_k->local_interfaces()->at(i);
InstanceKlass* ik = InstanceKlass::cast(iface);
if (ik->should_be_initialized()) {
if (ik->has_default_methods()) {
ik->initialize_super_interfaces(ik, THREAD);
}
// Only initialize() interfaces that "declare" concrete methods.
// has_default_methods drives searching superinterfaces since it
// means has_default_methods in its superinterface hierarchy
if (!HAS_PENDING_EXCEPTION && ik->declares_default_methods()) {
ik->initialize(THREAD);
}
if (HAS_PENDING_EXCEPTION) {
Handle e(THREAD, PENDING_EXCEPTION);
CLEAR_PENDING_EXCEPTION;
{
EXCEPTION_MARK;
// Locks object, set state, and notify all waiting threads
this_k->set_initialization_state_and_notify(
initialization_error, THREAD);
// ignore any exception thrown, superclass initialization error is
// thrown below
CLEAR_PENDING_EXCEPTION;
}
THROW_OOP(e());
}
}
}
}
}
void InstanceKlass::initialize_impl(instanceKlassHandle this_k, TRAPS) {
// Make sure klass is linked (verified) before initialization
@ -815,33 +850,11 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_k, TRAPS) {
}
}
// Recursively initialize any superinterfaces that declare default methods
// Only need to recurse if has_default_methods which includes declaring and
// inheriting default methods
if (this_k->has_default_methods()) {
// Step 7.5: initialize any interfaces which have default methods
for (int i = 0; i < this_k->local_interfaces()->length(); ++i) {
Klass* iface = this_k->local_interfaces()->at(i);
InstanceKlass* ik = InstanceKlass::cast(iface);
if (ik->has_default_methods() && ik->should_be_initialized()) {
ik->initialize(THREAD);
if (HAS_PENDING_EXCEPTION) {
Handle e(THREAD, PENDING_EXCEPTION);
CLEAR_PENDING_EXCEPTION;
{
EXCEPTION_MARK;
// Locks object, set state, and notify all waiting threads
this_k->set_initialization_state_and_notify(
initialization_error, THREAD);
// ignore any exception thrown, superclass initialization error is
// thrown below
CLEAR_PENDING_EXCEPTION;
}
DTRACE_CLASSINIT_PROBE_WAIT(
super__failed, InstanceKlass::cast(this_k()), -1, wait);
THROW_OOP(e());
}
}
}
this_k->initialize_super_interfaces(this_k, CHECK);
}
// Step 8
@ -1717,6 +1730,25 @@ jmethodID InstanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle m
return id;
}
// Figure out how many jmethodIDs haven't been allocated, and make
// sure space for them is pre-allocated. This makes getting all
// method ids much, much faster for classes with more than 8
// methods, and has a *substantial* effect on performance with jvmti
// code that loads all jmethodIDs for all classes.
void InstanceKlass::ensure_space_for_methodids(int start_offset) {
int new_jmeths = 0;
int length = methods()->length();
for (int index = start_offset; index < length; index++) {
Method* m = methods()->at(index);
jmethodID id = m->find_jmethod_id_or_null();
if (id == NULL) {
new_jmeths++;
}
}
if (new_jmeths != 0) {
Method::ensure_jmethod_ids(class_loader_data(), new_jmeths);
}
}
// Common code to fetch the jmethodID from the cache or update the
// cache with the new jmethodID. This function should never do anything
@ -2486,7 +2518,7 @@ const char* InstanceKlass::signature_name() const {
// If this is an anonymous class, append a hash to make the name unique
if (is_anonymous()) {
intptr_t hash = (java_mirror() != NULL) ? java_mirror()->identity_hash() : 0;
sprintf(hash_buf, "/" UINTX_FORMAT, (uintx)hash);
jio_snprintf(hash_buf, sizeof(hash_buf), "/" UINTX_FORMAT, (uintx)hash);
hash_len = (int)strlen(hash_buf);
}
@ -2779,19 +2811,18 @@ void InstanceKlass::adjust_default_methods(Method** old_methods, Method** new_me
// On-stack replacement stuff
void InstanceKlass::add_osr_nmethod(nmethod* n) {
// only one compilation can be active
NEEDS_CLEANUP
// This is a short non-blocking critical region, so the no safepoint check is ok.
OsrList_lock->lock_without_safepoint_check();
assert(n->is_osr_method(), "wrong kind of nmethod");
n->set_osr_link(osr_nmethods_head());
set_osr_nmethods_head(n);
// Raise the highest osr level if necessary
if (TieredCompilation) {
Method* m = n->method();
m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
{
// This is a short non-blocking critical region, so the no safepoint check is ok.
MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
assert(n->is_osr_method(), "wrong kind of nmethod");
n->set_osr_link(osr_nmethods_head());
set_osr_nmethods_head(n);
// Raise the highest osr level if necessary
if (TieredCompilation) {
Method* m = n->method();
m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
}
}
// Remember to unlock again
OsrList_lock->unlock();
// Get rid of the osr methods for the same bci that have lower levels.
if (TieredCompilation) {
@ -2807,7 +2838,7 @@ void InstanceKlass::add_osr_nmethod(nmethod* n) {
void InstanceKlass::remove_osr_nmethod(nmethod* n) {
// This is a short non-blocking critical region, so the no safepoint check is ok.
OsrList_lock->lock_without_safepoint_check();
MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
assert(n->is_osr_method(), "wrong kind of nmethod");
nmethod* last = NULL;
nmethod* cur = osr_nmethods_head();
@ -2844,13 +2875,27 @@ void InstanceKlass::remove_osr_nmethod(nmethod* n) {
}
m->set_highest_osr_comp_level(max_level);
}
// Remember to unlock again
OsrList_lock->unlock();
}
int InstanceKlass::mark_osr_nmethods(const Method* m) {
// This is a short non-blocking critical region, so the no safepoint check is ok.
MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
nmethod* osr = osr_nmethods_head();
int found = 0;
while (osr != NULL) {
assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
if (osr->method() == m) {
osr->mark_for_deoptimization();
found++;
}
osr = osr->osr_link();
}
return found;
}
nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_level, bool match_level) const {
// This is a short non-blocking critical region, so the no safepoint check is ok.
OsrList_lock->lock_without_safepoint_check();
MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
nmethod* osr = osr_nmethods_head();
nmethod* best = NULL;
while (osr != NULL) {
@ -2866,14 +2911,12 @@ nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_le
if (match_level) {
if (osr->comp_level() == comp_level) {
// Found a match - return it.
OsrList_lock->unlock();
return osr;
}
} else {
if (best == NULL || (osr->comp_level() > best->comp_level())) {
if (osr->comp_level() == CompLevel_highest_tier) {
// Found the best possible - return it.
OsrList_lock->unlock();
return osr;
}
best = osr;
@ -2882,7 +2925,6 @@ nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_le
}
osr = osr->osr_link();
}
OsrList_lock->unlock();
if (best != NULL && best->comp_level() >= comp_level && match_level == false) {
return best;
}
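The recurring change in this file, replacing explicit lock_without_safepoint_check()/unlock() pairs with a MutexLockerEx scoped object, is the standard RAII pattern: the early returns in lookup_osr_nmethod no longer need to remember to unlock. A generic sketch with an invented ScopedLock over std::mutex (not a HotSpot class, and without the no-safepoint-check behavior):

#include <mutex>
#include <cstdio>

// Illustrative scoped locker; MutexLockerEx plays the same role in HotSpot,
// additionally suppressing the safepoint check.
class ScopedLock {
  std::mutex& _m;
 public:
  explicit ScopedLock(std::mutex& m) : _m(m) { _m.lock(); }
  ~ScopedLock() { _m.unlock(); }              // runs on every exit path
};

static std::mutex osr_list_lock;

int lookup(int wanted, const int* levels, int n) {
  ScopedLock ml(osr_list_lock);                // held for the whole scope
  for (int i = 0; i < n; i++) {
    if (levels[i] == wanted) {
      return i;                                // early return: unlock is automatic
    }
  }
  return -1;                                   // same here
}

int main() {
  int levels[] = {1, 3, 4};
  printf("%d\n", lookup(4, levels, 3)); // 2
  return 0;
}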

View File

@ -199,13 +199,14 @@ class InstanceKlass: public Klass {
bool _has_unloaded_dependent;
enum {
_misc_rewritten = 1 << 0, // methods rewritten.
_misc_has_nonstatic_fields = 1 << 1, // for sizing with UseCompressedOops
_misc_should_verify_class = 1 << 2, // allow caching of preverification
_misc_is_anonymous = 1 << 3, // has embedded _host_klass field
_misc_is_contended = 1 << 4, // marked with contended annotation
_misc_has_default_methods = 1 << 5, // class/superclass/implemented interfaces has default methods
_misc_has_been_redefined = 1 << 6 // class has been redefined
_misc_rewritten = 1 << 0, // methods rewritten.
_misc_has_nonstatic_fields = 1 << 1, // for sizing with UseCompressedOops
_misc_should_verify_class = 1 << 2, // allow caching of preverification
_misc_is_anonymous = 1 << 3, // has embedded _host_klass field
_misc_is_contended = 1 << 4, // marked with contended annotation
_misc_has_default_methods = 1 << 5, // class/superclass/implemented interfaces has default methods
_misc_declares_default_methods = 1 << 6, // directly declares default methods (any access)
_misc_has_been_redefined = 1 << 7 // class has been redefined
};
u2 _misc_flags;
u2 _minor_version; // minor version number of class file
@ -651,6 +652,17 @@ class InstanceKlass: public Klass {
}
}
bool declares_default_methods() const {
return (_misc_flags & _misc_declares_default_methods) != 0;
}
void set_declares_default_methods(bool b) {
if (b) {
_misc_flags |= _misc_declares_default_methods;
} else {
_misc_flags &= ~_misc_declares_default_methods;
}
}
// for adding methods, ConstMethod::UNSET_IDNUM means no more ids available
inline u2 next_method_idnum();
void set_initial_method_idnum(u2 value) { _idnum_allocated_count = value; }
@ -686,6 +698,7 @@ class InstanceKlass: public Klass {
jmethodID** to_dealloc_jmeths_p);
static void get_jmethod_id_length_value(jmethodID* cache, size_t idnum,
size_t *length_p, jmethodID* id_p);
void ensure_space_for_methodids(int start_offset = 0);
jmethodID jmethod_id_or_null(Method* method);
// annotations support
@ -742,6 +755,7 @@ class InstanceKlass: public Klass {
void set_osr_nmethods_head(nmethod* h) { _osr_nmethods_head = h; };
void add_osr_nmethod(nmethod* n);
void remove_osr_nmethod(nmethod* n);
int mark_osr_nmethods(const Method* m);
nmethod* lookup_osr_nmethod(const Method* m, int bci, int level, bool match_level) const;
// Breakpoint support (see methods on Method* for details)
@ -1022,6 +1036,7 @@ private:
static bool link_class_impl (instanceKlassHandle this_k, bool throw_verifyerror, TRAPS);
static bool verify_code (instanceKlassHandle this_k, bool throw_verifyerror, TRAPS);
static void initialize_impl (instanceKlassHandle this_k, TRAPS);
static void initialize_super_interfaces (instanceKlassHandle this_k, TRAPS);
static void eager_initialize_impl (instanceKlassHandle this_k);
static void set_initialization_state_and_notify_impl (instanceKlassHandle this_k, ClassState state, TRAPS);
static void call_class_initializer_impl (instanceKlassHandle this_k, TRAPS);

View File

@ -368,6 +368,13 @@ void Method::print_invocation_count() {
// Build a MethodData* object to hold information about this method
// collected in the interpreter.
void Method::build_interpreter_method_data(methodHandle method, TRAPS) {
// Do not profile the method if metaspace has hit an OOM previously
// allocating profiling data. Callers clear pending exception so don't
// add one here.
if (ClassLoaderDataGraph::has_metaspace_oom()) {
return;
}
// Do not profile method if current thread holds the pending list lock,
// which avoids deadlock for acquiring the MethodData_lock.
if (InstanceRefKlass::owns_pending_list_lock((JavaThread*)THREAD)) {
@ -379,7 +386,13 @@ void Method::build_interpreter_method_data(methodHandle method, TRAPS) {
MutexLocker ml(MethodData_lock, THREAD);
if (method->method_data() == NULL) {
ClassLoaderData* loader_data = method->method_holder()->class_loader_data();
MethodData* method_data = MethodData::allocate(loader_data, method, CHECK);
MethodData* method_data = MethodData::allocate(loader_data, method, THREAD);
if (HAS_PENDING_EXCEPTION) {
CompileBroker::log_metaspace_failure();
ClassLoaderDataGraph::set_metaspace_oom(true);
return; // return the exception (which is cleared)
}
method->set_method_data(method_data);
if (PrintMethodData && (Verbose || WizardMode)) {
ResourceMark rm(THREAD);
@ -392,9 +405,19 @@ void Method::build_interpreter_method_data(methodHandle method, TRAPS) {
}
MethodCounters* Method::build_method_counters(Method* m, TRAPS) {
// Do not profile the method if metaspace has hit an OOM previously
if (ClassLoaderDataGraph::has_metaspace_oom()) {
return NULL;
}
methodHandle mh(m);
ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
MethodCounters* counters = MethodCounters::allocate(loader_data, CHECK_NULL);
MethodCounters* counters = MethodCounters::allocate(loader_data, THREAD);
if (HAS_PENDING_EXCEPTION) {
CompileBroker::log_metaspace_failure();
ClassLoaderDataGraph::set_metaspace_oom(true);
return NULL; // return the exception (which is cleared)
}
if (!mh->init_method_counters(counters)) {
MetadataFactory::free_metadata(loader_data, counters);
}
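Both hunks above follow the same "fail once, then stop trying" shape: the first metaspace allocation failure sets a sticky flag and is logged, and later profiling allocations bail out up front instead of retrying. A compact sketch of that shape with invented names (has_oom flag, allocate_profile, and a plain malloc standing in for MethodData/MethodCounters allocation):

#include <cstdio>
#include <cstdlib>

static bool g_metaspace_oom = false;   // sticky flag, set on first failure

// Hypothetical profile record; stands in for MethodData/MethodCounters.
struct Profile { int counters[4]; };

static Profile* allocate_profile() {
  if (g_metaspace_oom) {
    return NULL;                       // a previous allocation failed; don't retry
  }
  Profile* p = static_cast<Profile*>(malloc(sizeof(Profile)));
  if (p == NULL) {
    g_metaspace_oom = true;            // remember the failure and log it once
    fprintf(stderr, "profile allocation failed; disabling further profiling\n");
  }
  return p;
}

int main() {
  Profile* p = allocate_profile();
  printf("%s\n", p != NULL ? "allocated" : "skipped");
  free(p);
  return 0;
}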
@ -1295,6 +1318,10 @@ void Method::init_intrinsic_id() {
vmIntrinsics::ID id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
if (id != vmIntrinsics::_none) {
set_intrinsic_id(id);
if (id == vmIntrinsics::_Class_cast) {
// Even if the intrinsic is rejected, we want to inline this simple method.
set_force_inline(true);
}
return;
}
@ -1704,59 +1731,98 @@ void BreakpointInfo::clear(Method* method) {
// jmethodID handling
// This is a block allocating object, sort of like JNIHandleBlock, only a
// lot simpler. There aren't many of these, they aren't long, they are rarely
// deleted and so we can do some suboptimal things.
// lot simpler.
// It's allocated on the CHeap because once we allocate a jmethodID, we can
// never get rid of it.
// It would be nice to be able to parameterize the number of methods for
// the null_class_loader but then we'd have to turn this and ClassLoaderData
// into templates.
// I feel like this brain dead class should exist somewhere in the STL
static const int min_block_size = 8;
class JNIMethodBlockNode : public CHeapObj<mtClass> {
friend class JNIMethodBlock;
Method** _methods;
int _number_of_methods;
int _top;
JNIMethodBlockNode* _next;
public:
JNIMethodBlockNode(int num_methods = min_block_size);
~JNIMethodBlockNode() { FREE_C_HEAP_ARRAY(Method*, _methods, mtInternal); }
void ensure_methods(int num_addl_methods) {
if (_top < _number_of_methods) {
num_addl_methods -= _number_of_methods - _top;
if (num_addl_methods <= 0) {
return;
}
}
if (_next == NULL) {
_next = new JNIMethodBlockNode(MAX2(num_addl_methods, min_block_size));
} else {
_next->ensure_methods(num_addl_methods);
}
}
};
class JNIMethodBlock : public CHeapObj<mtClass> {
enum { number_of_methods = 8 };
Method* _methods[number_of_methods];
int _top;
JNIMethodBlock* _next;
JNIMethodBlockNode _head;
JNIMethodBlockNode *_last_free;
public:
static Method* const _free_method;
JNIMethodBlock() : _next(NULL), _top(0) {
for (int i = 0; i< number_of_methods; i++) _methods[i] = _free_method;
JNIMethodBlock(int initial_capacity = min_block_size)
: _head(initial_capacity), _last_free(&_head) {}
void ensure_methods(int num_addl_methods) {
_last_free->ensure_methods(num_addl_methods);
}
Method** add_method(Method* m) {
if (_top < number_of_methods) {
// top points to the next free entry.
int i = _top;
_methods[i] = m;
_top++;
return &_methods[i];
} else if (_top == number_of_methods) {
// if the next free entry ran off the block see if there's a free entry
for (int i = 0; i< number_of_methods; i++) {
if (_methods[i] == _free_method) {
_methods[i] = m;
return &_methods[i];
for (JNIMethodBlockNode* b = _last_free; b != NULL; b = b->_next) {
if (b->_top < b->_number_of_methods) {
// top points to the next free entry.
int i = b->_top;
b->_methods[i] = m;
b->_top++;
_last_free = b;
return &(b->_methods[i]);
} else if (b->_top == b->_number_of_methods) {
// if the next free entry ran off the block see if there's a free entry
for (int i = 0; i < b->_number_of_methods; i++) {
if (b->_methods[i] == _free_method) {
b->_methods[i] = m;
_last_free = b;
return &(b->_methods[i]);
}
}
// Only check each block once for frees. They're very unlikely.
// Increment top past the end of the block.
b->_top++;
}
// need to allocate a next block.
if (b->_next == NULL) {
b->_next = _last_free = new JNIMethodBlockNode();
}
// Only check each block once for frees. They're very unlikely.
// Increment top past the end of the block.
_top++;
}
// need to allocate a next block.
if (_next == NULL) {
_next = new JNIMethodBlock();
}
return _next->add_method(m);
guarantee(false, "Should always allocate a free block");
return NULL;
}
bool contains(Method** m) {
for (JNIMethodBlock* b = this; b != NULL; b = b->_next) {
for (int i = 0; i< number_of_methods; i++) {
if (&(b->_methods[i]) == m) {
if (m == NULL) return false;
for (JNIMethodBlockNode* b = &_head; b != NULL; b = b->_next) {
if (b->_methods <= m && m < b->_methods + b->_number_of_methods) {
// This is a bit of extra checking, for two reasons. One is
// that contains() deals with pointers that are passed in by
// JNI code, so making sure that the pointer is aligned
// correctly is valuable. The other is that <= and > are
// technically not defined on pointers, so the if guard can
// pass spuriously; no modern compiler is likely to make that
// a problem, though (and if one did, the guard could also
// fail spuriously, which would be bad).
ptrdiff_t idx = m - b->_methods;
if (b->_methods + idx == m) {
return true;
}
}
@ -1775,9 +1841,9 @@ class JNIMethodBlock : public CHeapObj<mtClass> {
// During class unloading the methods are cleared, which is different
// than freed.
void clear_all_methods() {
for (JNIMethodBlock* b = this; b != NULL; b = b->_next) {
for (int i = 0; i< number_of_methods; i++) {
_methods[i] = NULL;
for (JNIMethodBlockNode* b = &_head; b != NULL; b = b->_next) {
for (int i = 0; i< b->_number_of_methods; i++) {
b->_methods[i] = NULL;
}
}
}
@ -1785,9 +1851,9 @@ class JNIMethodBlock : public CHeapObj<mtClass> {
int count_methods() {
// count all allocated methods
int count = 0;
for (JNIMethodBlock* b = this; b != NULL; b = b->_next) {
for (int i = 0; i< number_of_methods; i++) {
if (_methods[i] != _free_method) count++;
for (JNIMethodBlockNode* b = &_head; b != NULL; b = b->_next) {
for (int i = 0; i< b->_number_of_methods; i++) {
if (b->_methods[i] != _free_method) count++;
}
}
return count;
@ -1798,6 +1864,36 @@ class JNIMethodBlock : public CHeapObj<mtClass> {
// Something that can't be mistaken for an address or a markOop
Method* const JNIMethodBlock::_free_method = (Method*)55;
JNIMethodBlockNode::JNIMethodBlockNode(int num_methods) : _next(NULL), _top(0) {
_number_of_methods = MAX2(num_methods, min_block_size);
_methods = NEW_C_HEAP_ARRAY(Method*, _number_of_methods, mtInternal);
for (int i = 0; i < _number_of_methods; i++) {
_methods[i] = JNIMethodBlock::_free_method;
}
}
void Method::ensure_jmethod_ids(ClassLoaderData* loader_data, int capacity) {
ClassLoaderData* cld = loader_data;
if (!SafepointSynchronize::is_at_safepoint()) {
// Have to add jmethod_ids() to class loader data thread-safely.
// Also have to add the method to the list safely, which the cld lock
// protects as well.
MutexLockerEx ml(cld->metaspace_lock(), Mutex::_no_safepoint_check_flag);
if (cld->jmethod_ids() == NULL) {
cld->set_jmethod_ids(new JNIMethodBlock(capacity));
} else {
cld->jmethod_ids()->ensure_methods(capacity);
}
} else {
// At safepoint, we are single threaded and can set this.
if (cld->jmethod_ids() == NULL) {
cld->set_jmethod_ids(new JNIMethodBlock(capacity));
} else {
cld->jmethod_ids()->ensure_methods(capacity);
}
}
}
// Add a method id to the jmethod_ids
jmethodID Method::make_jmethod_id(ClassLoaderData* loader_data, Method* m) {
ClassLoaderData* cld = loader_data;
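The restructuring above turns the fixed 8-entry blocks into heap-allocated nodes of arbitrary size plus a _last_free hint, so ensure_jmethod_ids can pre-grow the chain in a single step. A stripped-down sketch of the same growable block list; BlockNode/BlockList are illustrative names, error handling is omitted, and (like jmethodIDs) nodes are never freed here.

#include <cstdio>
#include <algorithm>

static const int kMinBlock = 8;

struct BlockNode {
  int*       slots;
  int        capacity;
  int        top;          // next free index
  BlockNode* next;
  explicit BlockNode(int cap) : capacity(std::max(cap, kMinBlock)), top(0), next(NULL) {
    slots = new int[capacity];
  }
};

struct BlockList {
  BlockNode  head;
  BlockNode* last_free;    // hint: first node that may still have room
  explicit BlockList(int initial) : head(initial), last_free(&head) {}

  // Guarantee capacity for 'extra' more entries, growing the chain if needed.
  void ensure(int extra) {
    for (BlockNode* b = last_free; b != NULL; b = b->next) {
      extra -= (b->capacity - b->top);
      if (extra <= 0) return;
      if (b->next == NULL) b->next = new BlockNode(extra);
    }
  }

  int* add(int value) {
    for (BlockNode* b = last_free; ; b = b->next) {
      if (b->top < b->capacity) {
        b->slots[b->top] = value;
        last_free = b;
        return &b->slots[b->top++];
      }
      if (b->next == NULL) b->next = new BlockNode(kMinBlock);
    }
  }
};

int main() {
  BlockList ids(4);
  ids.ensure(100);                 // one pre-grow instead of many small ones
  for (int i = 0; i < 100; i++) ids.add(i);
  printf("stored 100 ids\n");
  return 0;
}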

View File

@ -729,6 +729,11 @@ class Method : public Metadata {
static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh);
static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid);
// Ensure there is enough capacity in the internal tracking data
// structures to hold the number of jmethodIDs you plan to generate.
// This saves substantial time doing allocations.
static void ensure_jmethod_ids(ClassLoaderData* loader_data, int capacity);
// Use resolve_jmethod_id() in situations where the caller is expected
// to provide a valid jmethodID; the only sanity checks are in asserts;
// result guaranteed not to be NULL.
@ -813,6 +818,10 @@ class Method : public Metadata {
return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
}
int mark_osr_nmethods() {
return method_holder()->mark_osr_nmethods(this);
}
nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
}

View File

@ -45,9 +45,10 @@ class objArrayOopDesc : public arrayOopDesc {
private:
// Give size of objArrayOop in HeapWords minus the header
static int array_size(int length) {
const int OopsPerHeapWord = HeapWordSize/heapOopSize;
const uint OopsPerHeapWord = HeapWordSize/heapOopSize;
assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0),
"Else the following (new) computation would be in error");
uint res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
#ifdef ASSERT
// The old code is left in for sanity-checking; it'll
// go away pretty soon. XXX
@ -55,16 +56,15 @@ private:
// oop->length() * HeapWordsPerOop;
// With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer.
// The oop elements are aligned up to wordSize
const int HeapWordsPerOop = heapOopSize/HeapWordSize;
int old_res;
const uint HeapWordsPerOop = heapOopSize/HeapWordSize;
uint old_res;
if (HeapWordsPerOop > 0) {
old_res = length * HeapWordsPerOop;
} else {
old_res = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
old_res = align_size_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord;
}
#endif // ASSERT
int res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
assert(res == old_res, "Inconsistency between old and new.");
#endif // ASSERT
return res;
}
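The unsigned arithmetic above is easy to sanity-check by hand. Assuming compressed oops on a 64-bit VM (heapOopSize == 4, HeapWordSize == 8, so OopsPerHeapWord == 2), a sketch of the rounding-up computation:

#include <cstdio>

int main() {
  const unsigned HeapWordSize    = 8;  // bytes per heap word (64-bit VM)
  const unsigned heapOopSize     = 4;  // bytes per oop with compressed oops
  const unsigned OopsPerHeapWord = HeapWordSize / heapOopSize; // 2

  unsigned length = 5;                 // array of 5 object references
  unsigned words  = (length + OopsPerHeapWord - 1) / OopsPerHeapWord;
  printf("%u oops -> %u heap words\n", length, words); // 5 oops -> 3 heap words
  return 0;
}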

View File

@ -150,7 +150,7 @@ class typeArrayOopDesc : public arrayOopDesc {
DEBUG_ONLY(BasicType etype = Klass::layout_helper_element_type(lh));
assert(length <= arrayOopDesc::max_array_length(etype), "no overflow");
julong size_in_bytes = length;
julong size_in_bytes = (juint)length;
size_in_bytes <<= element_shift;
size_in_bytes += instance_header_size;
julong size_in_words = ((size_in_bytes + (HeapWordSize-1)) >> LogHeapWordSize);

View File

@ -476,6 +476,9 @@
product(bool, DoEscapeAnalysis, true, \
"Perform escape analysis") \
\
product(double, EscapeAnalysisTimeout, 20. DEBUG_ONLY(+40.), \
"Abort EA when it reaches time limit (in sec)") \
\
develop(bool, ExitEscapeAnalysisOnTimeout, true, \
"Exit or throw assert in EA when it reaches time limit") \
\

View File

@ -939,7 +939,8 @@ int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
if (!(call->req() > TypeFunc::Parms &&
call->in(TypeFunc::Parms) != NULL &&
call->in(TypeFunc::Parms)->is_Con())) {
call->in(TypeFunc::Parms)->is_Con() &&
call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
assert(in_dump() != 0, "OK if dumping");
tty->print("[bad uncommon trap]");
return 0;

View File

@ -281,9 +281,11 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
Block *pred = _phc._cfg.get_block_for_node(b->pred(j));
Node *copy;
assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
// Rematerialize constants instead of copying them
if( m->is_Mach() && m->as_Mach()->is_Con() &&
m->as_Mach()->rematerialize() ) {
// Rematerialize constants instead of copying them.
// We do this only for immediate constants, we avoid constant table loads
// because that will unsafely extend the live range of the constant table base.
if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() &&
m->as_Mach()->rematerialize()) {
copy = m->clone();
// Insert the copy in the predecessor basic block
pred->add_inst(copy);
@ -317,8 +319,8 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
// At this point it is unsafe to extend live ranges (6550579).
// Rematerialize only constants as we do for Phi above.
if(m->is_Mach() && m->as_Mach()->is_Con() &&
m->as_Mach()->rematerialize()) {
if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() &&
m->as_Mach()->rematerialize()) {
copy = m->clone();
// Insert the copy in the basic block, just before us
b->insert_node(copy, l++);

View File

@ -535,7 +535,6 @@ void Compile::init_scratch_buffer_blob(int const_size) {
if (scratch_buffer_blob() == NULL) {
// Let CompilerBroker disable further compilations.
record_failure("Not enough space for scratch buffer in CodeCache");
CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return;
}
}

View File

@ -38,6 +38,8 @@
ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
_nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
_in_worklist(C->comp_arena()),
_next_pidx(0),
_collecting(true),
_verify(false),
_compile(C),
@ -125,13 +127,19 @@ bool ConnectionGraph::compute_escape() {
if (C->root() != NULL) {
ideal_nodes.push(C->root());
}
// Processed ideal nodes are unique on ideal_nodes list
// but several ideal nodes are mapped to the phantom_obj.
// To avoid duplicated entries on the following worklists
// add the phantom_obj only once to them.
ptnodes_worklist.append(phantom_obj);
java_objects_worklist.append(phantom_obj);
for( uint next = 0; next < ideal_nodes.size(); ++next ) {
Node* n = ideal_nodes.at(next);
// Create PointsTo nodes and add them to Connection Graph. Called
// only once per ideal node since ideal_nodes is Unique_Node list.
add_node_to_connection_graph(n, &delayed_worklist);
PointsToNode* ptn = ptnode_adr(n->_idx);
if (ptn != NULL) {
if (ptn != NULL && ptn != phantom_obj) {
ptnodes_worklist.append(ptn);
if (ptn->is_JavaObject()) {
java_objects_worklist.append(ptn->as_JavaObject());
@ -415,7 +423,7 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de
}
case Op_CreateEx: {
// assume that all exception objects globally escape
add_java_object(n, PointsToNode::GlobalEscape);
map_ideal_node(n, phantom_obj);
break;
}
case Op_LoadKlass:
@ -1074,13 +1082,8 @@ bool ConnectionGraph::complete_connection_graph(
// on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
// Set limit to 20 to catch situation when something did go wrong and
// bailout Escape Analysis.
// Also limit build time to 30 sec (60 in debug VM).
// Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
#define CG_BUILD_ITER_LIMIT 20
#ifdef ASSERT
#define CG_BUILD_TIME_LIMIT 60.0
#else
#define CG_BUILD_TIME_LIMIT 30.0
#endif
// Propagate GlobalEscape and ArgEscape escape states and check that
// we still have non-escaping objects. The method pushes onto _worklist
@ -1091,12 +1094,13 @@ bool ConnectionGraph::complete_connection_graph(
// Now propagate references to all JavaObject nodes.
int java_objects_length = java_objects_worklist.length();
elapsedTimer time;
bool timeout = false;
int new_edges = 1;
int iterations = 0;
do {
while ((new_edges > 0) &&
(iterations++ < CG_BUILD_ITER_LIMIT) &&
(time.seconds() < CG_BUILD_TIME_LIMIT)) {
(iterations++ < CG_BUILD_ITER_LIMIT)) {
double start_time = time.seconds();
time.start();
new_edges = 0;
// Propagate references to phantom_object for nodes pushed on _worklist
@ -1105,7 +1109,26 @@ bool ConnectionGraph::complete_connection_graph(
for (int next = 0; next < java_objects_length; ++next) {
JavaObjectNode* ptn = java_objects_worklist.at(next);
new_edges += add_java_object_edges(ptn, true);
#define SAMPLE_SIZE 4
if ((next % SAMPLE_SIZE) == 0) {
// Each 4 iterations calculate how much time it will take
// to complete graph construction.
time.stop();
double stop_time = time.seconds();
double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
double time_until_end = time_per_iter * (double)(java_objects_length - next);
if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
timeout = true;
break; // Timeout
}
start_time = stop_time;
time.start();
}
#undef SAMPLE_SIZE
}
if (timeout) break;
if (new_edges > 0) {
// Update escape states on each iteration if graph was updated.
if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
@ -1113,9 +1136,12 @@ bool ConnectionGraph::complete_connection_graph(
}
}
time.stop();
if (time.seconds() >= EscapeAnalysisTimeout) {
timeout = true;
break;
}
}
if ((iterations < CG_BUILD_ITER_LIMIT) &&
(time.seconds() < CG_BUILD_TIME_LIMIT)) {
if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
time.start();
// Find fields which have unknown value.
int fields_length = oop_fields_worklist.length();
@ -1128,18 +1154,21 @@ bool ConnectionGraph::complete_connection_graph(
}
}
time.stop();
if (time.seconds() >= EscapeAnalysisTimeout) {
timeout = true;
break;
}
} else {
new_edges = 0; // Bailout
}
} while (new_edges > 0);
// Bailout if passed limits.
if ((iterations >= CG_BUILD_ITER_LIMIT) ||
(time.seconds() >= CG_BUILD_TIME_LIMIT)) {
if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
Compile* C = _compile;
if (C->log() != NULL) {
C->log()->begin_elem("connectionGraph_bailout reason='reached ");
C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time");
C->log()->text("%s", timeout ? "time" : "iterations");
C->log()->end_elem(" limit'");
}
assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
@ -1156,7 +1185,6 @@ bool ConnectionGraph::complete_connection_graph(
#endif
#undef CG_BUILD_ITER_LIMIT
#undef CG_BUILD_TIME_LIMIT
// Find fields initialized by NULL for non-escaping Allocations.
int non_escaped_length = non_escaped_worklist.length();
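The sampling logic added above amortizes the cost of reading the clock: instead of timing every object, it times every SAMPLE_SIZE-th one, extrapolates how long the remaining work will take, and bails out once the projection crosses the budget. A self-contained sketch of that projection; the budget and the work function are stand-ins for EscapeAnalysisTimeout and add_java_object_edges().

#include <chrono>
#include <cstdio>

// Pretend unit of work; in the real code this is add_java_object_edges().
static void do_work_item() { /* ... */ }

int main() {
  using clock = std::chrono::steady_clock;
  const double budget_sec  = 20.0;   // stand-in for EscapeAnalysisTimeout
  const int    sample_size = 4;      // re-check the projection every 4 items
  const int    total_items = 100000;

  bool timeout = false;
  auto start = clock::now();
  auto sample_start = start;
  for (int i = 0; i < total_items; i++) {
    do_work_item();
    if ((i % sample_size) == 0 && i > 0) {
      auto now = clock::now();
      double elapsed   = std::chrono::duration<double>(now - start).count();
      double per_item  = std::chrono::duration<double>(now - sample_start).count() / sample_size;
      double projected = elapsed + per_item * (total_items - i);
      if (projected >= budget_sec) { timeout = true; break; }
      sample_start = now;
    }
  }
  printf(timeout ? "bailed out on projected timeout\n" : "finished in budget\n");
  return 0;
}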
@ -1280,8 +1308,8 @@ int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_w
}
}
}
while(_worklist.length() > 0) {
PointsToNode* use = _worklist.pop();
for (int l = 0; l < _worklist.length(); l++) {
PointsToNode* use = _worklist.at(l);
if (PointsToNode::is_base_use(use)) {
// Add reference from jobj to field and from field to jobj (field's base).
use = PointsToNode::get_use_node(use)->as_Field();
@ -1328,6 +1356,8 @@ int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_w
add_field_uses_to_worklist(use->as_Field());
}
}
_worklist.clear();
_in_worklist.Reset();
return new_edges;
}
@ -1906,7 +1936,7 @@ void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
return;
}
Compile* C = _compile;
ptadr = new (C->comp_arena()) LocalVarNode(C, n, es);
ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
_nodes.at_put(n->_idx, ptadr);
}
@ -1917,7 +1947,7 @@ void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
return;
}
Compile* C = _compile;
ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es);
ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
_nodes.at_put(n->_idx, ptadr);
}
@ -1933,7 +1963,7 @@ void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offse
es = PointsToNode::GlobalEscape;
}
Compile* C = _compile;
FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop);
FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
_nodes.at_put(n->_idx, field);
}
@ -1947,7 +1977,7 @@ void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
return;
}
Compile* C = _compile;
ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es);
ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
_nodes.at_put(n->_idx, ptadr);
// Add edge from arraycopy node to source object.
(void)add_edge(ptadr, src);

View File

@ -125,6 +125,8 @@ class LocalVarNode;
class FieldNode;
class ArraycopyNode;
class ConnectionGraph;
// ConnectionGraph nodes
class PointsToNode : public ResourceObj {
GrowableArray<PointsToNode*> _edges; // List of nodes this node points to
@ -137,6 +139,7 @@ class PointsToNode : public ResourceObj {
Node* const _node; // Ideal node corresponding to this PointsTo node.
const int _idx; // Cached ideal node's _idx
const uint _pidx; // Index of this node
public:
typedef enum {
@ -165,17 +168,9 @@ public:
} NodeFlags;
PointsToNode(Compile *C, Node* n, EscapeState es, NodeType type):
_edges(C->comp_arena(), 2, 0, NULL),
_uses (C->comp_arena(), 2, 0, NULL),
_node(n),
_idx(n->_idx),
_type((u1)type),
_escape((u1)es),
_fields_escape((u1)es),
_flags(ScalarReplaceable) {
assert(n != NULL && es != UnknownEscape, "sanity");
}
inline PointsToNode(ConnectionGraph* CG, Node* n, EscapeState es, NodeType type);
uint pidx() const { return _pidx; }
Node* ideal_node() const { return _node; }
int idx() const { return _idx; }
@ -243,14 +238,14 @@ public:
class LocalVarNode: public PointsToNode {
public:
LocalVarNode(Compile *C, Node* n, EscapeState es):
PointsToNode(C, n, es, LocalVar) {}
LocalVarNode(ConnectionGraph *CG, Node* n, EscapeState es):
PointsToNode(CG, n, es, LocalVar) {}
};
class JavaObjectNode: public PointsToNode {
public:
JavaObjectNode(Compile *C, Node* n, EscapeState es):
PointsToNode(C, n, es, JavaObject) {
JavaObjectNode(ConnectionGraph *CG, Node* n, EscapeState es):
PointsToNode(CG, n, es, JavaObject) {
if (es > NoEscape)
set_scalar_replaceable(false);
}
@ -262,8 +257,8 @@ class FieldNode: public PointsToNode {
const bool _is_oop; // Field points to object
bool _has_unknown_base; // Has phantom_object base
public:
FieldNode(Compile *C, Node* n, EscapeState es, int offs, bool is_oop):
PointsToNode(C, n, es, Field),
FieldNode(ConnectionGraph *CG, Node* n, EscapeState es, int offs, bool is_oop):
PointsToNode(CG, n, es, Field),
_offset(offs), _is_oop(is_oop),
_has_unknown_base(false) {}
@ -284,8 +279,8 @@ public:
class ArraycopyNode: public PointsToNode {
public:
ArraycopyNode(Compile *C, Node* n, EscapeState es):
PointsToNode(C, n, es, Arraycopy) {}
ArraycopyNode(ConnectionGraph *CG, Node* n, EscapeState es):
PointsToNode(CG, n, es, Arraycopy) {}
};
// Iterators for PointsTo node's edges:
@ -323,11 +318,14 @@ public:
class ConnectionGraph: public ResourceObj {
friend class PointsToNode;
private:
GrowableArray<PointsToNode*> _nodes; // Map from ideal nodes to
// ConnectionGraph nodes.
GrowableArray<PointsToNode*> _worklist; // Nodes to be processed
VectorSet _in_worklist;
uint _next_pidx;
bool _collecting; // Indicates whether escape information
// is still being collected. If false,
@ -353,6 +351,8 @@ private:
}
uint nodes_size() const { return _nodes.length(); }
uint next_pidx() { return _next_pidx++; }
// Add nodes to ConnectionGraph.
void add_local_var(Node* n, PointsToNode::EscapeState es);
void add_java_object(Node* n, PointsToNode::EscapeState es);
@ -396,15 +396,26 @@ private:
int add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist);
// Put node on worklist if it is (or was) not there.
void add_to_worklist(PointsToNode* pt) {
_worklist.push(pt);
return;
inline void add_to_worklist(PointsToNode* pt) {
PointsToNode* ptf = pt;
uint pidx_bias = 0;
if (PointsToNode::is_base_use(pt)) {
// Create a separate entry in _in_worklist for a marked base edge
// because _worklist may have an entry for a normal edge pointing
// to the same node. To separate them use _next_pidx as bias.
ptf = PointsToNode::get_use_node(pt)->as_Field();
pidx_bias = _next_pidx;
}
if (!_in_worklist.test_set(ptf->pidx() + pidx_bias)) {
_worklist.append(pt);
}
}
// Put on worklist all uses of this node.
void add_uses_to_worklist(PointsToNode* pt) {
for (UseIterator i(pt); i.has_next(); i.next())
_worklist.push(i.get());
inline void add_uses_to_worklist(PointsToNode* pt) {
for (UseIterator i(pt); i.has_next(); i.next()) {
add_to_worklist(i.get());
}
}
// Put on worklist all field's uses and related field nodes.
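The test_set-based add_to_worklist above is a dedup-on-insert worklist: a bit vector records which node ids are already queued, so the array does not grow quadratically when the same node is re-discovered through many uses. A minimal sketch with std::vector<bool> in place of HotSpot's VectorSet; the pidx-bias trick for marked base edges is omitted.

#include <vector>
#include <cstdio>

struct Worklist {
  std::vector<int>  items;
  std::vector<bool> queued;          // one bit per node id

  explicit Worklist(int max_ids) : queued(max_ids, false) {}

  void add(int id) {
    if (!queued[id]) {               // test_set: only enqueue the first time
      queued[id] = true;
      items.push_back(id);
    }
  }
};

int main() {
  Worklist wl(16);
  int uses[] = {3, 7, 3, 3, 7, 9};   // duplicates are common when walking uses
  for (int u : uses) wl.add(u);
  printf("%zu unique entries queued\n", wl.items.size()); // 3 unique entries queued
  return 0;
}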
@ -517,8 +528,8 @@ private:
}
// Helper functions
bool is_oop_field(Node* n, int offset, bool* unsafe);
static Node* get_addp_base(Node *addp);
static Node* find_second_addp(Node* addp, Node* n);
static Node* get_addp_base(Node *addp);
static Node* find_second_addp(Node* addp, Node* n);
// offset of a field reference
int address_offset(Node* adr, PhaseTransform *phase);
@ -587,4 +598,17 @@ public:
#endif
};
inline PointsToNode::PointsToNode(ConnectionGraph *CG, Node* n, EscapeState es, NodeType type):
_edges(CG->_compile->comp_arena(), 2, 0, NULL),
_uses (CG->_compile->comp_arena(), 2, 0, NULL),
_node(n),
_idx(n->_idx),
_pidx(CG->next_pidx()),
_type((u1)type),
_escape((u1)es),
_fields_escape((u1)es),
_flags(ScalarReplaceable) {
assert(n != NULL && es != UnknownEscape, "sanity");
}
#endif // SHARE_VM_OPTO_ESCAPE_HPP

View File

@ -464,9 +464,7 @@ Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &re
iop == Op_CreateEx || // Create-exception must start block
iop == Op_CheckCastPP
) {
// select the node n
// remove n from worklist and retain the order of remaining nodes
worklist.remove((uint)i);
worklist.map(i,worklist.pop());
return n;
}
@ -552,9 +550,7 @@ Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &re
assert(idx >= 0, "index should be set");
Node *n = worklist[(uint)idx]; // Get the winner
// select the node n
// remove n from worklist and retain the order of remaining nodes
worklist.remove((uint)idx);
worklist.map((uint)idx, worklist.pop()); // Compress worklist
return n;
}

View File

@ -268,6 +268,7 @@ class LibraryCallKit : public GraphKit {
bool inline_fp_conversions(vmIntrinsics::ID id);
bool inline_number_methods(vmIntrinsics::ID id);
bool inline_reference_get();
bool inline_Class_cast();
bool inline_aescrypt_Block(vmIntrinsics::ID id);
bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
@ -869,6 +870,8 @@ bool LibraryCallKit::try_to_inline(int predicate) {
case vmIntrinsics::_Reference_get: return inline_reference_get();
case vmIntrinsics::_Class_cast: return inline_Class_cast();
case vmIntrinsics::_aescrypt_encryptBlock:
case vmIntrinsics::_aescrypt_decryptBlock: return inline_aescrypt_Block(intrinsic_id());
@ -3546,6 +3549,89 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
return true;
}
//-------------------------inline_Class_cast-------------------
bool LibraryCallKit::inline_Class_cast() {
Node* mirror = argument(0); // Class
Node* obj = argument(1);
const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
if (mirror_con == NULL) {
return false; // dead path (mirror->is_top()).
}
if (obj == NULL || obj->is_top()) {
return false; // dead path
}
const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
// First, see if Class.cast() can be folded statically.
// java_mirror_type() returns non-null for compile-time Class constants.
ciType* tm = mirror_con->java_mirror_type();
if (tm != NULL && tm->is_klass() &&
tp != NULL && tp->klass() != NULL) {
if (!tp->klass()->is_loaded()) {
// Don't use intrinsic when class is not loaded.
return false;
} else {
int static_res = C->static_subtype_check(tm->as_klass(), tp->klass());
if (static_res == Compile::SSC_always_true) {
// isInstance() is true - fold the code.
set_result(obj);
return true;
} else if (static_res == Compile::SSC_always_false) {
// Don't use intrinsic, have to throw ClassCastException.
// If the reference is null, the non-intrinsic bytecode will
// be optimized appropriately.
return false;
}
}
}
// Bailout intrinsic and do normal inlining if exception path is frequent.
if (too_many_traps(Deoptimization::Reason_intrinsic)) {
return false;
}
// Generate dynamic checks.
// Class.cast() is java implementation of _checkcast bytecode.
// Do checkcast (Parse::do_checkcast()) optimizations here.
mirror = null_check(mirror);
// If mirror is dead, only null-path is taken.
if (stopped()) {
return true;
}
// Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
RegionNode* region = new RegionNode(PATH_LIMIT);
record_for_igvn(region);
// Now load the mirror's klass metaobject, and null-check it.
// If kls is null, we have a primitive mirror and
// nothing is an instance of a primitive type.
Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
Node* res = top();
if (!stopped()) {
Node* bad_type_ctrl = top();
// Do checkcast optimizations.
res = gen_checkcast(obj, kls, &bad_type_ctrl);
region->init_req(_bad_type_path, bad_type_ctrl);
}
if (region->in(_prim_path) != top() ||
region->in(_bad_type_path) != top()) {
// Let Interpreter throw ClassCastException.
PreserveJVMState pjvms(this);
set_control(_gvn.transform(region));
uncommon_trap(Deoptimization::Reason_intrinsic,
Deoptimization::Action_maybe_recompile);
}
if (!stopped()) {
set_result(res);
}
return true;
}
//--------------------------inline_native_subtype_check------------------------
// This intrinsic takes the JNI calls out of the heart of
// UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
@ -4611,6 +4697,10 @@ bool LibraryCallKit::inline_arraycopy() {
Node* dest_offset = argument(3); // type: int
Node* length = argument(4); // type: int
// Check for allocation before we add nodes that would confuse
// tightly_coupled_allocation()
AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
// The following tests must be performed
// (1) src and dest are arrays.
// (2) src and dest arrays must have elements of the same BasicType
@ -4784,7 +4874,6 @@ bool LibraryCallKit::inline_arraycopy() {
return true;
}
AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL,
// Create LoadRange and LoadKlass nodes for use during macro expansion here
// so the compiler has a chance to eliminate them: during macro expansion,

View File

@ -1257,6 +1257,16 @@ Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
result = new ConvI2LNode(phase->transform(result));
}
#endif
// Boxing/unboxing can be done from signed & unsigned loads (e.g. LoadUB -> ... -> LoadB pair).
// Need to preserve unboxing load type if it is unsigned.
switch(this->Opcode()) {
case Op_LoadUB:
result = new AndINode(phase->transform(result), phase->intcon(0xFF));
break;
case Op_LoadUS:
result = new AndINode(phase->transform(result), phase->intcon(0xFFFF));
break;
}
return result;
}
}
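The masking above exists because eliminating the box can leave a signed load feeding a place where an unsigned load (LoadUB/LoadUS) was expected, so zero extension has to be reapplied. A small sketch of the difference, with the AND mask standing in for the inserted AndINode:

#include <cstdio>

int main() {
  signed char stored = (signed char)0x80;  // byte value 0x80 in memory

  int sign_extended = stored;              // what a signed LoadB produces: -128
  int zero_extended = stored & 0xFF;       // what LoadUB must see: 128

  printf("signed: %d, masked: %d\n", sign_extended, zero_extended); // signed: -128, masked: 128
  return 0;
}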

View File

@ -610,7 +610,7 @@ Node *AndLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// convert masks which would cause a sign extension of the integer
// value. This check includes UI2L masks (0x00000000FFFFFFFF) which
// would be optimized away later in Identity.
if (op == Op_ConvI2L && (mask & CONST64(0xFFFFFFFF80000000)) == 0) {
if (op == Op_ConvI2L && (mask & UCONST64(0xFFFFFFFF80000000)) == 0) {
Node* andi = new AndINode(in1->in(1), phase->intcon(mask));
andi = phase->transform(andi);
return new ConvI2LNode(andi);

View File

@ -1166,7 +1166,6 @@ CodeBuffer* Compile::init_buffer(uint* blk_starts) {
// Have we run out of code space?
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return NULL;
}
// Configure the code buffer.
@ -1491,7 +1490,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return;
}
@ -1648,7 +1646,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// One last check for failed CodeBuffer::expand:
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return;
}

View File

@ -951,8 +951,9 @@ class JNI_ArgumentPusherVaArg : public JNI_ArgumentPusher {
// Optimized path if we have the bitvector form of signature
void iterate( uint64_t fingerprint ) {
if ( fingerprint == UCONST64(-1) ) SignatureIterator::iterate();// Must be too many arguments
else {
if (fingerprint == (uint64_t)CONST64(-1)) {
SignatureIterator::iterate(); // Must be too many arguments
} else {
_return_type = (BasicType)((fingerprint >> static_feature_size) &
result_feature_mask);
@ -1022,8 +1023,9 @@ class JNI_ArgumentPusherArray : public JNI_ArgumentPusher {
// Optimized path if we have the bitvector form of signature
void iterate( uint64_t fingerprint ) {
if ( fingerprint == UCONST64(-1) ) SignatureIterator::iterate(); // Must be too many arguments
else {
if (fingerprint == (uint64_t)CONST64(-1)) {
SignatureIterator::iterate(); // Must be too many arguments
} else {
_return_type = (BasicType)((fingerprint >> static_feature_size) &
result_feature_mask);
assert(fingerprint, "Fingerprint should not be 0");

View File

@ -2583,7 +2583,14 @@ ATTRIBUTE_PRINTF(3, 0)
int jio_vsnprintf(char *str, size_t count, const char *fmt, va_list args) {
// see bug 4399518, 4417214
if ((intptr_t)count <= 0) return -1;
return vsnprintf(str, count, fmt, args);
int result = vsnprintf(str, count, fmt, args);
if ((result > 0 && (size_t)result >= count) || result == -1) {
str[count - 1] = '\0';
result = -1;
}
return result;
}
ATTRIBUTE_PRINTF(3, 0)
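The jio_vsnprintf fix above normalizes the two ways vsnprintf can report trouble (a return value >= count on truncation, or -1 on some platforms) into one contract: the buffer is always NUL-terminated and -1 is returned whenever the output did not fit. A standalone sketch of the same wrapper; the names are invented and this is not the HotSpot function itself.

#include <cstdarg>
#include <cstdint>
#include <cstdio>

// Sketch of a truncation-normalizing wrapper around vsnprintf.
static int bounded_vsnprintf(char* str, size_t count, const char* fmt, va_list args) {
  if ((intptr_t)count <= 0) return -1;
  int result = vsnprintf(str, count, fmt, args);
  if ((result > 0 && (size_t)result >= count) || result == -1) {
    str[count - 1] = '\0';   // guarantee termination even when truncated
    result = -1;             // single error signal for the caller
  }
  return result;
}

static int bounded_snprintf(char* str, size_t count, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  int r = bounded_vsnprintf(str, count, fmt, args);
  va_end(args);
  return r;
}

int main() {
  char buf[8];
  int r = bounded_snprintf(buf, sizeof(buf), "%s", "this string is too long");
  printf("r=%d buf=\"%s\"\n", r, buf); // r=-1 buf="this st"
  return 0;
}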
@ -3271,8 +3278,10 @@ static inline arrayOop check_array(JNIEnv *env, jobject arr, bool type_array_onl
THROW_0(vmSymbols::java_lang_NullPointerException());
}
oop a = JNIHandles::resolve_non_null(arr);
if (!a->is_array() || (type_array_only && !a->is_typeArray())) {
if (!a->is_array()) {
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "Argument is not an array");
} else if (type_array_only && !a->is_typeArray()) {
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "Argument is not an array of primitive type");
}
return arrayOop(a);
}

View File

@ -41,6 +41,7 @@
void JvmtiClassFileReconstituter::write_field_infos() {
HandleMark hm(thread());
Array<AnnotationArray*>* fields_anno = ikh()->fields_annotations();
Array<AnnotationArray*>* fields_type_anno = ikh()->fields_type_annotations();
// Compute the real number of Java fields
int java_fields = ikh()->java_fields_count();
@ -55,6 +56,7 @@ void JvmtiClassFileReconstituter::write_field_infos() {
// int offset = ikh()->field_offset( index );
int generic_signature_index = fs.generic_signature_index();
AnnotationArray* anno = fields_anno == NULL ? NULL : fields_anno->at(fs.index());
AnnotationArray* type_anno = fields_type_anno == NULL ? NULL : fields_type_anno->at(fs.index());
// JVMSpec| field_info {
// JVMSpec| u2 access_flags;
@ -80,6 +82,9 @@ void JvmtiClassFileReconstituter::write_field_infos() {
if (anno != NULL) {
++attr_count; // has RuntimeVisibleAnnotations attribute
}
if (type_anno != NULL) {
++attr_count; // has RuntimeVisibleTypeAnnotations attribute
}
write_u2(attr_count);
@ -97,6 +102,9 @@ void JvmtiClassFileReconstituter::write_field_infos() {
if (anno != NULL) {
write_annotations_attribute("RuntimeVisibleAnnotations", anno);
}
if (type_anno != NULL) {
write_annotations_attribute("RuntimeVisibleTypeAnnotations", type_anno);
}
}
}
@ -537,6 +545,7 @@ void JvmtiClassFileReconstituter::write_method_info(methodHandle method) {
AnnotationArray* anno = method->annotations();
AnnotationArray* param_anno = method->parameter_annotations();
AnnotationArray* default_anno = method->annotation_default();
AnnotationArray* type_anno = method->type_annotations();
// skip generated default interface methods
if (method->is_overpass()) {
@ -572,6 +581,9 @@ void JvmtiClassFileReconstituter::write_method_info(methodHandle method) {
if (param_anno != NULL) {
++attr_count; // has RuntimeVisibleParameterAnnotations attribute
}
if (type_anno != NULL) {
++attr_count; // has RuntimeVisibleTypeAnnotations attribute
}
write_u2(attr_count);
if (const_method->code_size() > 0) {
@ -596,6 +608,9 @@ void JvmtiClassFileReconstituter::write_method_info(methodHandle method) {
if (param_anno != NULL) {
write_annotations_attribute("RuntimeVisibleParameterAnnotations", param_anno);
}
if (type_anno != NULL) {
write_annotations_attribute("RuntimeVisibleTypeAnnotations", type_anno);
}
}
// Write the class attributes portion of ClassFile structure
@ -605,6 +620,7 @@ void JvmtiClassFileReconstituter::write_class_attributes() {
u2 inner_classes_length = inner_classes_attribute_length();
Symbol* generic_signature = ikh()->generic_signature();
AnnotationArray* anno = ikh()->class_annotations();
AnnotationArray* type_anno = ikh()->class_type_annotations();
int attr_count = 0;
if (generic_signature != NULL) {
@ -622,6 +638,9 @@ void JvmtiClassFileReconstituter::write_class_attributes() {
if (anno != NULL) {
++attr_count; // has RuntimeVisibleAnnotations attribute
}
if (type_anno != NULL) {
++attr_count; // has RuntimeVisibleTypeAnnotations attribute
}
if (cpool()->operands() != NULL) {
++attr_count;
}
@ -643,6 +662,9 @@ void JvmtiClassFileReconstituter::write_class_attributes() {
if (anno != NULL) {
write_annotations_attribute("RuntimeVisibleAnnotations", anno);
}
if (type_anno != NULL) {
write_annotations_attribute("RuntimeVisibleTypeAnnotations", type_anno);
}
if (cpool()->operands() != NULL) {
write_bootstrapmethod_attribute();
}

View File

@ -2263,6 +2263,8 @@ JvmtiEnv::GetClassMethods(oop k_mirror, jint* method_count_ptr, jmethodID** meth
int result_length = instanceK_h->methods()->length();
jmethodID* result_list = (jmethodID*)jvmtiMalloc(result_length * sizeof(jmethodID));
int index;
bool jmethodids_found = true;
if (JvmtiExport::can_maintain_original_method_order()) {
// Use the original method ordering indices stored in the class, so we can emit
// jmethodIDs in the order they appeared in the class file
@ -2270,14 +2272,40 @@ JvmtiEnv::GetClassMethods(oop k_mirror, jint* method_count_ptr, jmethodID** meth
Method* m = instanceK_h->methods()->at(index);
int original_index = instanceK_h->method_ordering()->at(index);
assert(original_index >= 0 && original_index < result_length, "invalid original method index");
jmethodID id = m->jmethod_id();
jmethodID id;
if (jmethodids_found) {
id = m->find_jmethod_id_or_null();
if (id == NULL) {
// If we find an uninitialized value, make sure there is
// enough space for all the uninitialized values we might
// find.
instanceK_h->ensure_space_for_methodids(index);
jmethodids_found = false;
id = m->jmethod_id();
}
} else {
id = m->jmethod_id();
}
result_list[original_index] = id;
}
} else {
// otherwise just copy in any order
for (index = 0; index < result_length; index++) {
Method* m = instanceK_h->methods()->at(index);
jmethodID id = m->jmethod_id();
jmethodID id;
if (jmethodids_found) {
id = m->find_jmethod_id_or_null();
if (id == NULL) {
// If we find an uninitialized value, make sure there is
// enough space for all the uninitialized values we might
// find.
instanceK_h->ensure_space_for_methodids(index);
jmethodids_found = false;
id = m->jmethod_id();
}
} else {
id = m->jmethod_id();
}
result_list[index] = id;
}
}
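Note: both loops above share one pattern: probe cheaply with find_jmethod_id_or_null, and on the first miss reserve space for all IDs that might still be missing (ensure_space_for_methodids) before falling back to the allocating jmethod_id() call. A standalone sketch of that probe-then-allocate pattern, with a made-up IdCache standing in for the per-class jmethodID cache (not HotSpot code):

  #include <cstdio>
  #include <vector>

  // Illustrative stand-in for a per-class ID cache: 0 means "not allocated yet".
  struct IdCache {
    std::vector<int> ids;
    int next_id;
    explicit IdCache(size_t n) : ids(n, 0), next_id(1) {}

    int  find_or_zero(size_t i) const { return ids[i]; }                  // cheap probe
    void ensure_space(size_t /*first_missing*/) { /* reserve storage once */ }
    int  get_or_create(size_t i) {                                        // slower path
      return ids[i] != 0 ? ids[i] : (ids[i] = next_id++);
    }
  };

  int main() {
    IdCache cache(4);
    cache.get_or_create(1);                  // pretend one ID already exists
    bool all_found_so_far = true;
    for (size_t i = 0; i < 4; i++) {
      int id;
      if (all_found_so_far) {
        id = cache.find_or_zero(i);
        if (id == 0) {
          cache.ensure_space(i);             // make room once for everything still missing
          all_found_so_far = false;
          id = cache.get_or_create(i);
        }
      } else {
        id = cache.get_or_create(i);
      }
      std::printf("index %zu -> id %d\n", i, id);
    }
    return 0;
  }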

View File

@ -1569,6 +1569,29 @@ bool VM_RedefineClasses::rewrite_cp_refs(instanceKlassHandle scratch_class,
return false;
}
// rewrite constant pool references in the class_type_annotations:
if (!rewrite_cp_refs_in_class_type_annotations(scratch_class, THREAD)) {
// propagate failure back to caller
return false;
}
// rewrite constant pool references in the fields_type_annotations:
if (!rewrite_cp_refs_in_fields_type_annotations(scratch_class, THREAD)) {
// propagate failure back to caller
return false;
}
// rewrite constant pool references in the methods_type_annotations:
if (!rewrite_cp_refs_in_methods_type_annotations(scratch_class, THREAD)) {
// propagate failure back to caller
return false;
}
// There can be type annotations in the Code part of a method_info attribute.
// These annotations are not accessible, even by reflection.
// Currently they are not even parsed by the ClassFileParser.
// If runtime access is added they will also need to be rewritten.
// rewrite source file name index:
u2 source_file_name_idx = scratch_class->source_file_name_index();
if (source_file_name_idx != 0) {
@ -2239,6 +2262,588 @@ bool VM_RedefineClasses::rewrite_cp_refs_in_methods_default_annotations(
} // end rewrite_cp_refs_in_methods_default_annotations()
// Rewrite constant pool references in a class_type_annotations field.
bool VM_RedefineClasses::rewrite_cp_refs_in_class_type_annotations(
instanceKlassHandle scratch_class, TRAPS) {
AnnotationArray* class_type_annotations = scratch_class->class_type_annotations();
if (class_type_annotations == NULL || class_type_annotations->length() == 0) {
// no class_type_annotations so nothing to do
return true;
}
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("class_type_annotations length=%d", class_type_annotations->length()));
int byte_i = 0; // byte index into class_type_annotations
return rewrite_cp_refs_in_type_annotations_typeArray(class_type_annotations,
byte_i, "ClassFile", THREAD);
} // end rewrite_cp_refs_in_class_type_annotations()
// Rewrite constant pool references in a fields_type_annotations field.
bool VM_RedefineClasses::rewrite_cp_refs_in_fields_type_annotations(
instanceKlassHandle scratch_class, TRAPS) {
Array<AnnotationArray*>* fields_type_annotations = scratch_class->fields_type_annotations();
if (fields_type_annotations == NULL || fields_type_annotations->length() == 0) {
// no fields_type_annotations so nothing to do
return true;
}
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("fields_type_annotations length=%d", fields_type_annotations->length()));
for (int i = 0; i < fields_type_annotations->length(); i++) {
AnnotationArray* field_type_annotations = fields_type_annotations->at(i);
if (field_type_annotations == NULL || field_type_annotations->length() == 0) {
// this field does not have any annotations so skip it
continue;
}
int byte_i = 0; // byte index into field_type_annotations
if (!rewrite_cp_refs_in_type_annotations_typeArray(field_type_annotations,
byte_i, "field_info", THREAD)) {
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("bad field_type_annotations at %d", i));
// propagate failure back to caller
return false;
}
}
return true;
} // end rewrite_cp_refs_in_fields_type_annotations()
// Rewrite constant pool references in a methods_type_annotations field.
bool VM_RedefineClasses::rewrite_cp_refs_in_methods_type_annotations(
instanceKlassHandle scratch_class, TRAPS) {
for (int i = 0; i < scratch_class->methods()->length(); i++) {
Method* m = scratch_class->methods()->at(i);
AnnotationArray* method_type_annotations = m->constMethod()->type_annotations();
if (method_type_annotations == NULL || method_type_annotations->length() == 0) {
// this method does not have any annotations so skip it
continue;
}
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("methods type_annotations length=%d", method_type_annotations->length()));
int byte_i = 0; // byte index into method_type_annotations
if (!rewrite_cp_refs_in_type_annotations_typeArray(method_type_annotations,
byte_i, "method_info", THREAD)) {
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("bad method_type_annotations at %d", i));
// propagate failure back to caller
return false;
}
}
return true;
} // end rewrite_cp_refs_in_methods_type_annotations()
// Rewrite constant pool references in a type_annotations
// field. This "structure" is adapted from the
// RuntimeVisibleTypeAnnotations_attribute described in
// section 4.7.20 of the Java SE 8 Edition of the VM spec:
//
// type_annotations_typeArray {
// u2 num_annotations;
// type_annotation annotations[num_annotations];
// }
//
bool VM_RedefineClasses::rewrite_cp_refs_in_type_annotations_typeArray(
AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
const char * location_mesg, TRAPS) {
if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
// not enough room for num_annotations field
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("length() is too small for num_annotations field"));
return false;
}
u2 num_annotations = Bytes::get_Java_u2((address)
type_annotations_typeArray->adr_at(byte_i_ref));
byte_i_ref += 2;
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("num_type_annotations=%d", num_annotations));
int calc_num_annotations = 0;
for (; calc_num_annotations < num_annotations; calc_num_annotations++) {
if (!rewrite_cp_refs_in_type_annotation_struct(type_annotations_typeArray,
byte_i_ref, location_mesg, THREAD)) {
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("bad type_annotation_struct at %d", calc_num_annotations));
// propagate failure back to caller
return false;
}
}
assert(num_annotations == calc_num_annotations, "sanity check");
if (byte_i_ref != type_annotations_typeArray->length()) {
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("read wrong amount of bytes at end of processing "
"type_annotations_typeArray (%d of %d bytes were read)",
byte_i_ref, type_annotations_typeArray->length()));
return false;
}
return true;
} // end rewrite_cp_refs_in_type_annotations_typeArray()
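Note: every multi-byte value read above comes out of the raw attribute bytes in class-file (big-endian) order via Bytes::get_Java_u2. A tiny standalone equivalent, with a made-up helper name:

  #include <cstdint>
  #include <cstdio>

  // Attribute bytes store multi-byte values in class-file (big-endian) order;
  // this is the standalone equivalent of a "get Java u2" helper.
  static uint16_t read_java_u2(const uint8_t* p) {
    return (uint16_t)((p[0] << 8) | p[1]);
  }

  int main() {
    const uint8_t attr[] = { 0x00, 0x02 };  // num_annotations == 2
    std::printf("num_annotations = %u\n", (unsigned)read_java_u2(attr));
    return 0;
  }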
// Rewrite constant pool references in a type_annotation
// field. This "structure" is adapted from the
// RuntimeVisibleTypeAnnotations_attribute described in
// section 4.7.20 of the Java SE 8 Edition of the VM spec:
//
// type_annotation {
// u1 target_type;
// union {
// type_parameter_target;
// supertype_target;
// type_parameter_bound_target;
// empty_target;
// method_formal_parameter_target;
// throws_target;
// localvar_target;
// catch_target;
// offset_target;
// type_argument_target;
// } target_info;
// type_path target_path;
// annotation anno;
// }
//
bool VM_RedefineClasses::rewrite_cp_refs_in_type_annotation_struct(
AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
const char * location_mesg, TRAPS) {
if (!skip_type_annotation_target(type_annotations_typeArray,
byte_i_ref, location_mesg, THREAD)) {
return false;
}
if (!skip_type_annotation_type_path(type_annotations_typeArray,
byte_i_ref, THREAD)) {
return false;
}
if (!rewrite_cp_refs_in_annotation_struct(type_annotations_typeArray,
byte_i_ref, THREAD)) {
return false;
}
return true;
} // end rewrite_cp_refs_in_type_annotation_struct()
// Read, verify and skip over the target_type and target_info part
// so that rewriting can continue in the later parts of the struct.
//
// u1 target_type;
// union {
// type_parameter_target;
// supertype_target;
// type_parameter_bound_target;
// empty_target;
// method_formal_parameter_target;
// throws_target;
// localvar_target;
// catch_target;
// offset_target;
// type_argument_target;
// } target_info;
//
bool VM_RedefineClasses::skip_type_annotation_target(
AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
const char * location_mesg, TRAPS) {
if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
// not enough room for a target_type let alone the rest of a type_annotation
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("length() is too small for a target_type"));
return false;
}
u1 target_type = type_annotations_typeArray->at(byte_i_ref);
byte_i_ref += 1;
RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("target_type=0x%.2x", target_type));
RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("location=%s", location_mesg));
// Skip over target_info
switch (target_type) {
case 0x00:
// kind: type parameter declaration of generic class or interface
// location: ClassFile
case 0x01:
// kind: type parameter declaration of generic method or constructor
// location: method_info
{
// struct:
// type_parameter_target {
// u1 type_parameter_index;
// }
//
if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("length() is too small for a type_parameter_target"));
return false;
}
u1 type_parameter_index = type_annotations_typeArray->at(byte_i_ref);
byte_i_ref += 1;
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("type_parameter_target: type_parameter_index=%d",
type_parameter_index));
} break;
case 0x10:
// kind: type in extends clause of class or interface declaration
// (including the direct superclass of an anonymous class declaration),
// or in implements clause of interface declaration
// location: ClassFile
{
// struct:
// supertype_target {
// u2 supertype_index;
// }
//
if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("length() is too small for a supertype_target"));
return false;
}
u2 supertype_index = Bytes::get_Java_u2((address)
type_annotations_typeArray->adr_at(byte_i_ref));
byte_i_ref += 2;
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("supertype_target: supertype_index=%d", supertype_index));
} break;
case 0x11:
// kind: type in bound of type parameter declaration of generic class or interface
// location: ClassFile
case 0x12:
// kind: type in bound of type parameter declaration of generic method or constructor
// location: method_info
{
// struct:
// type_parameter_bound_target {
// u1 type_parameter_index;
// u1 bound_index;
// }
//
if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("length() is too small for a type_parameter_bound_target"));
return false;
}
u1 type_parameter_index = type_annotations_typeArray->at(byte_i_ref);
byte_i_ref += 1;
u1 bound_index = type_annotations_typeArray->at(byte_i_ref);
byte_i_ref += 1;
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("type_parameter_bound_target: type_parameter_index=%d, bound_index=%d",
type_parameter_index, bound_index));
} break;
case 0x13:
// kind: type in field declaration
// location: field_info
case 0x14:
// kind: return type of method, or type of newly constructed object
// location: method_info
case 0x15:
// kind: receiver type of method or constructor
// location: method_info
{
// struct:
// empty_target {
// }
//
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("empty_target"));
} break;
case 0x16:
// kind: type in formal parameter declaration of method, constructor, or lambda expression
// location: method_info
{
// struct:
// formal_parameter_target {
// u1 formal_parameter_index;
// }
//
if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("length() is too small for a formal_parameter_target"));
return false;
}
u1 formal_parameter_index = type_annotations_typeArray->at(byte_i_ref);
byte_i_ref += 1;
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("formal_parameter_target: formal_parameter_index=%d",
formal_parameter_index));
} break;
case 0x17:
// kind: type in throws clause of method or constructor
// location: method_info
{
// struct:
// throws_target {
// u2 throws_type_index
// }
//
if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("length() is too small for a throws_target"));
return false;
}
u2 throws_type_index = Bytes::get_Java_u2((address)
type_annotations_typeArray->adr_at(byte_i_ref));
byte_i_ref += 2;
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("throws_target: throws_type_index=%d", throws_type_index));
} break;
case 0x40:
// kind: type in local variable declaration
// location: Code
case 0x41:
// kind: type in resource variable declaration
// location: Code
{
// struct:
// localvar_target {
// u2 table_length;
// struct {
// u2 start_pc;
// u2 length;
// u2 index;
// } table[table_length];
// }
//
if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
// not enough room for a table_length let alone the rest of a localvar_target
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("length() is too small for a localvar_target table_length"));
return false;
}
u2 table_length = Bytes::get_Java_u2((address)
type_annotations_typeArray->adr_at(byte_i_ref));
byte_i_ref += 2;
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("localvar_target: table_length=%d", table_length));
int table_struct_size = 2 + 2 + 2; // 3 u2 variables per table entry
int table_size = table_length * table_struct_size;
if ((byte_i_ref + table_size) > type_annotations_typeArray->length()) {
// not enough room for a table
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("length() is too small for a table array of length %d", table_length));
return false;
}
// Skip over table
byte_i_ref += table_size;
} break;
case 0x42:
// kind: type in exception parameter declaration
// location: Code
{
// struct:
// catch_target {
// u2 exception_table_index;
// }
//
if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("length() is too small for a catch_target"));
return false;
}
u2 exception_table_index = Bytes::get_Java_u2((address)
type_annotations_typeArray->adr_at(byte_i_ref));
byte_i_ref += 2;
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("catch_target: exception_table_index=%d", exception_table_index));
} break;
case 0x43:
// kind: type in instanceof expression
// location: Code
case 0x44:
// kind: type in new expression
// location: Code
case 0x45:
// kind: type in method reference expression using ::new
// location: Code
case 0x46:
// kind: type in method reference expression using ::Identifier
// location: Code
{
// struct:
// offset_target {
// u2 offset;
// }
//
if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("length() is too small for a offset_target"));
return false;
}
u2 offset = Bytes::get_Java_u2((address)
type_annotations_typeArray->adr_at(byte_i_ref));
byte_i_ref += 2;
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("offset_target: offset=%d", offset));
} break;
case 0x47:
// kind: type in cast expression
// location: Code
case 0x48:
// kind: type argument for generic constructor in new expression or
// explicit constructor invocation statement
// location: Code
case 0x49:
// kind: type argument for generic method in method invocation expression
// location: Code
case 0x4A:
// kind: type argument for generic constructor in method reference expression using ::new
// location: Code
case 0x4B:
// kind: type argument for generic method in method reference expression using ::Identifier
// location: Code
{
// struct:
// type_argument_target {
// u2 offset;
// u1 type_argument_index;
// }
//
if ((byte_i_ref + 3) > type_annotations_typeArray->length()) {
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("length() is too small for a type_argument_target"));
return false;
}
u2 offset = Bytes::get_Java_u2((address)
type_annotations_typeArray->adr_at(byte_i_ref));
byte_i_ref += 2;
u1 type_argument_index = type_annotations_typeArray->at(byte_i_ref);
byte_i_ref += 1;
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("type_argument_target: offset=%d, type_argument_index=%d",
offset, type_argument_index));
} break;
default:
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("unknown target_type"));
#ifdef ASSERT
ShouldNotReachHere();
#endif
return false;
}
return true;
} // end skip_type_annotation_target()
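Note: except for localvar_target, whose size depends on table_length, each target_info skipped above has a fixed size determined by target_type (JVMS 4.7.20.1). A compact standalone sketch of that mapping (illustrative only, not the HotSpot implementation):

  #include <cstdio>

  // Fixed target_info sizes, in bytes, by target_type (JVMS 4.7.20.1).
  // Returns -1 for localvar_target (0x40/0x41), whose size is variable,
  // and -2 for an unknown target_type.
  static int target_info_size(unsigned char target_type) {
    switch (target_type) {
      case 0x00: case 0x01:                                   return 1;   // type_parameter_target
      case 0x10:                                              return 2;   // supertype_target
      case 0x11: case 0x12:                                   return 2;   // type_parameter_bound_target
      case 0x13: case 0x14: case 0x15:                        return 0;   // empty_target
      case 0x16:                                              return 1;   // formal_parameter_target
      case 0x17:                                              return 2;   // throws_target
      case 0x40: case 0x41:                                   return -1;  // localvar_target (variable)
      case 0x42:                                              return 2;   // catch_target
      case 0x43: case 0x44: case 0x45: case 0x46:             return 2;   // offset_target
      case 0x47: case 0x48: case 0x49: case 0x4A: case 0x4B:  return 3;   // type_argument_target
      default:                                                return -2;  // unknown
    }
  }

  int main() {
    std::printf("0x14 -> %d bytes\n", target_info_size(0x14));  // empty_target: 0
    return 0;
  }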
// Read, verify and skip over the type_path part so that rewriting
// can continue in the later parts of the struct.
//
// type_path {
// u1 path_length;
// {
// u1 type_path_kind;
// u1 type_argument_index;
// } path[path_length];
// }
//
bool VM_RedefineClasses::skip_type_annotation_type_path(
AnnotationArray* type_annotations_typeArray, int &byte_i_ref, TRAPS) {
if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
// not enough room for a path_length let alone the rest of the type_path
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("length() is too small for a type_path"));
return false;
}
u1 path_length = type_annotations_typeArray->at(byte_i_ref);
byte_i_ref += 1;
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("type_path: path_length=%d", path_length));
int calc_path_length = 0;
for (; calc_path_length < path_length; calc_path_length++) {
if ((byte_i_ref + 1 + 1) > type_annotations_typeArray->length()) {
// not enough room for a path
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("length() is too small for path entry %d of %d",
calc_path_length, path_length));
return false;
}
u1 type_path_kind = type_annotations_typeArray->at(byte_i_ref);
byte_i_ref += 1;
u1 type_argument_index = type_annotations_typeArray->at(byte_i_ref);
byte_i_ref += 1;
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("type_path: path[%d]: type_path_kind=%d, type_argument_index=%d",
calc_path_length, type_path_kind, type_argument_index));
if (type_path_kind > 3 || (type_path_kind != 3 && type_argument_index != 0)) {
// inconsistent type_path values
RC_TRACE_WITH_THREAD(0x02000000, THREAD,
("inconsistent type_path values"));
return false;
}
}
assert(path_length == calc_path_length, "sanity check");
return true;
} // end skip_type_annotation_type_path()
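Note: the type_path walked above is just a u1 path_length followed by path_length two-byte entries, so its total size is 1 + 2 * path_length. A standalone sketch, with made-up names:

  #include <cstdio>

  // Total size in bytes of a JVMS 4.7.20.2 type_path that starts at 'p',
  // or -1 if it would run past the 'avail' bytes that are available.
  static int type_path_size(const unsigned char* p, int avail) {
    if (avail < 1) return -1;
    int path_length = p[0];
    int size = 1 + 2 * path_length;  // u1 path_length + path_length * (u1 kind + u1 index)
    return (size <= avail) ? size : -1;
  }

  int main() {
    const unsigned char path[] = { 2, 0, 0, 3, 1 };  // path_length == 2
    std::printf("type_path size = %d\n", type_path_size(path, (int)sizeof(path)));  // prints 5
    return 0;
  }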
// Rewrite constant pool references in the method's stackmap table.
// These "structures" are adapted from the StackMapTable_attribute that
// is described in section 4.8.4 of the 6.0 version of the VM spec
@ -3223,23 +3828,6 @@ void VM_RedefineClasses::compute_added_deleted_matching_methods() {
void VM_RedefineClasses::swap_annotations(instanceKlassHandle the_class,
instanceKlassHandle scratch_class) {
// Since there is currently no rewriting of type annotations indexes
// into the CP, we null out type annotations on scratch_class before
// we swap annotations with the_class rather than facing the
// possibility of shipping annotations with broken indexes to
// Java-land.
ClassLoaderData* loader_data = scratch_class->class_loader_data();
AnnotationArray* new_class_type_annotations = scratch_class->class_type_annotations();
if (new_class_type_annotations != NULL) {
MetadataFactory::free_array<u1>(loader_data, new_class_type_annotations);
scratch_class->annotations()->set_class_type_annotations(NULL);
}
Array<AnnotationArray*>* new_field_type_annotations = scratch_class->fields_type_annotations();
if (new_field_type_annotations != NULL) {
Annotations::free_contents(loader_data, new_field_type_annotations);
scratch_class->annotations()->set_fields_type_annotations(NULL);
}
// Swap annotation fields values
Annotations* old_annotations = the_class->annotations();
the_class->set_annotations(scratch_class->annotations());

View File

@ -452,6 +452,17 @@ class VM_RedefineClasses: public VM_Operation {
instanceKlassHandle scratch_class, TRAPS);
bool rewrite_cp_refs_in_element_value(
AnnotationArray* class_annotations, int &byte_i_ref, TRAPS);
bool rewrite_cp_refs_in_type_annotations_typeArray(
AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
const char * location_mesg, TRAPS);
bool rewrite_cp_refs_in_type_annotation_struct(
AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
const char * location_mesg, TRAPS);
bool skip_type_annotation_target(
AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
const char * location_mesg, TRAPS);
bool skip_type_annotation_type_path(
AnnotationArray* type_annotations_typeArray, int &byte_i_ref, TRAPS);
bool rewrite_cp_refs_in_fields_annotations(
instanceKlassHandle scratch_class, TRAPS);
void rewrite_cp_refs_in_method(methodHandle method,
@ -463,6 +474,12 @@ class VM_RedefineClasses: public VM_Operation {
instanceKlassHandle scratch_class, TRAPS);
bool rewrite_cp_refs_in_methods_parameter_annotations(
instanceKlassHandle scratch_class, TRAPS);
bool rewrite_cp_refs_in_class_type_annotations(
instanceKlassHandle scratch_class, TRAPS);
bool rewrite_cp_refs_in_fields_type_annotations(
instanceKlassHandle scratch_class, TRAPS);
bool rewrite_cp_refs_in_methods_type_annotations(
instanceKlassHandle scratch_class, TRAPS);
void rewrite_cp_refs_in_stack_map_table(methodHandle method, TRAPS);
void rewrite_cp_refs_in_verification_type_info(
address& stackmap_addr_ref, address stackmap_end, u2 frame_i,

View File

@ -36,6 +36,7 @@
#include "runtime/reflection.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/exceptions.hpp"
/*
@ -55,26 +56,30 @@
bool MethodHandles::_enabled = false; // set true after successful native linkage
MethodHandlesAdapterBlob* MethodHandles::_adapter_code = NULL;
//------------------------------------------------------------------------------
// MethodHandles::generate_adapters
//
void MethodHandles::generate_adapters() {
if (SystemDictionary::MethodHandle_klass() == NULL) return;
/**
* Generates method handle adapters. Returns 'false' if memory allocation
* failed and true otherwise.
*/
bool MethodHandles::generate_adapters() {
if (SystemDictionary::MethodHandle_klass() == NULL) {
return true;
}
assert(_adapter_code == NULL, "generate only once");
ResourceMark rm;
TraceTime timer("MethodHandles adapters generation", TraceStartupTime);
_adapter_code = MethodHandlesAdapterBlob::create(adapter_code_size);
if (_adapter_code == NULL)
vm_exit_out_of_memory(adapter_code_size, OOM_MALLOC_ERROR,
"CodeCache: no room for MethodHandles adapters");
{
CodeBuffer code(_adapter_code);
MethodHandlesAdapterGenerator g(&code);
g.generate();
code.log_section_sizes("MethodHandlesAdapterBlob");
if (_adapter_code == NULL) {
return false;
}
CodeBuffer code(_adapter_code);
MethodHandlesAdapterGenerator g(&code);
g.generate();
code.log_section_sizes("MethodHandlesAdapterBlob");
return true;
}
//------------------------------------------------------------------------------
@ -1401,7 +1406,9 @@ JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class))
}
if (enable_MH) {
MethodHandles::generate_adapters();
if (MethodHandles::generate_adapters() == false) {
THROW_MSG(vmSymbols::java_lang_VirtualMachineError(), "Out of space in CodeCache for method handle adapters");
}
MethodHandles::set_enabled(true);
}
}

View File

@ -69,7 +69,7 @@ class MethodHandles: AllStatic {
enum { _suppress_defc = 1, _suppress_name = 2, _suppress_type = 4 };
// Generate MethodHandles adapters.
static void generate_adapters();
static bool generate_adapters();
// Called from MethodHandlesAdapterGenerator.
static address generate_method_handle_interpreter_entry(MacroAssembler* _masm, vmIntrinsics::ID iid);

View File

@ -802,8 +802,7 @@ UNSAFE_END
static inline void throw_new(JNIEnv *env, const char *ename) {
char buf[100];
strcpy(buf, "java/lang/");
strcat(buf, ename);
jio_snprintf(buf, 100, "%s%s", "java/lang/", ename);
jclass cls = env->FindClass(buf);
if (env->ExceptionCheck()) {
env->ExceptionClear();

View File

@ -282,7 +282,7 @@ WB_END
// NMT picks it up correctly
WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
jlong addr = 0;
addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
return addr;
WB_END
@ -291,7 +291,7 @@ WB_END
WB_ENTRY(jlong, WB_NMTMallocWithPseudoStack(JNIEnv* env, jobject o, jlong size, jint pseudo_stack))
address pc = (address)(size_t)pseudo_stack;
NativeCallStack stack(&pc, 1);
return (jlong)os::malloc(size, mtTest, stack);
return (jlong)(uintptr_t)os::malloc(size, mtTest, stack);
WB_END
// Free the memory allocated by NMTAllocTest
@ -326,15 +326,6 @@ WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
return MemTracker::tracking_level() == NMT_detail;
WB_END
WB_ENTRY(void, WB_NMTOverflowHashBucket(JNIEnv* env, jobject o, jlong num))
address pc = (address)1;
for (jlong index = 0; index < num; index ++) {
NativeCallStack stack(&pc, 1);
os::malloc(0, mtTest, stack);
pc += MallocSiteTable::hash_buckets();
}
WB_END
WB_ENTRY(jboolean, WB_NMTChangeTrackingLevel(JNIEnv* env))
// Test that we can downgrade NMT levels but not upgrade them.
if (MemTracker::tracking_level() == NMT_off) {
@ -365,6 +356,12 @@ WB_ENTRY(jboolean, WB_NMTChangeTrackingLevel(JNIEnv* env))
return MemTracker::tracking_level() == NMT_minimal;
}
WB_END
WB_ENTRY(jint, WB_NMTGetHashSize(JNIEnv* env, jobject o))
int hash_size = MallocSiteTable::hash_buckets();
assert(hash_size > 0, "NMT hash_size should be > 0");
return (jint)hash_size;
WB_END
#endif // INCLUDE_NMT
static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
@ -386,19 +383,10 @@ WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method, jbool
CHECK_JNI_EXCEPTION_(env, result);
MutexLockerEx mu(Compile_lock);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
nmethod* code;
if (is_osr) {
int bci = InvocationEntryBci;
while ((code = mh->lookup_osr_nmethod_for(bci, CompLevel_none, false)) != NULL) {
code->mark_for_deoptimization();
++result;
bci = code->osr_entry_bci() + 1;
}
} else {
code = mh->code();
}
if (code != NULL) {
code->mark_for_deoptimization();
result += mh->mark_osr_nmethods();
} else if (mh->code() != NULL) {
mh->code()->mark_for_deoptimization();
++result;
}
result += CodeCache::mark_for_deoptimization(mh());
@ -518,16 +506,6 @@ class AlwaysFalseClosure : public BoolObjectClosure {
static AlwaysFalseClosure always_false;
class VM_WhiteBoxCleanMethodData : public VM_WhiteBoxOperation {
public:
VM_WhiteBoxCleanMethodData(MethodData* mdo) : _mdo(mdo) { }
void doit() {
_mdo->clean_method_data(&always_false);
}
private:
MethodData* _mdo;
};
WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
CHECK_JNI_EXCEPTION(env);
@ -543,8 +521,8 @@ WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
for (int i = 0; i < arg_count; i++) {
mdo->set_arg_modified(i, 0);
}
VM_WhiteBoxCleanMethodData op(mdo);
VMThread::execute(&op);
MutexLockerEx mu(mdo->extra_data_lock());
mdo->clean_method_data(&always_false);
}
mh->clear_not_c1_compilable();
@ -566,13 +544,13 @@ WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
WB_END
template <typename T>
static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, bool (*TAt)(const char*, T*)) {
static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, bool (*TAt)(const char*, T*, bool, bool)) {
if (name == NULL) {
return false;
}
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
const char* flag_name = env->GetStringUTFChars(name, NULL);
bool result = (*TAt)(flag_name, value);
bool result = (*TAt)(flag_name, value, true, true);
env->ReleaseStringUTFChars(name, flag_name);
return result;
}
@ -619,6 +597,24 @@ static jobject doubleBox(JavaThread* thread, JNIEnv* env, jdouble value) {
return box(thread, env, vmSymbols::java_lang_Double(), vmSymbols::Double_valueOf_signature(), value);
}
static Flag* getVMFlag(JavaThread* thread, JNIEnv* env, jstring name) {
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
const char* flag_name = env->GetStringUTFChars(name, NULL);
Flag* result = Flag::find_flag(flag_name, strlen(flag_name), true, true);
env->ReleaseStringUTFChars(name, flag_name);
return result;
}
WB_ENTRY(jboolean, WB_IsConstantVMFlag(JNIEnv* env, jobject o, jstring name))
Flag* flag = getVMFlag(thread, env, name);
return (flag != NULL) && flag->is_constant_in_binary();
WB_END
WB_ENTRY(jboolean, WB_IsLockedVMFlag(JNIEnv* env, jobject o, jstring name))
Flag* flag = getVMFlag(thread, env, name);
return (flag != NULL) && !(flag->is_unlocked() || flag->is_unlocker());
WB_END
WB_ENTRY(jobject, WB_GetBooleanVMFlag(JNIEnv* env, jobject o, jstring name))
bool result;
if (GetVMFlag <bool> (thread, env, name, &result, &CommandLineFlags::boolAt)) {
@ -794,20 +790,24 @@ WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jbo
ThreadToNativeFromVM ttn(thread);
jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string());
CHECK_JNI_EXCEPTION_(env, NULL);
result = env->NewObjectArray(2, clazz, NULL);
result = env->NewObjectArray(3, clazz, NULL);
if (result == NULL) {
return result;
}
jobject obj = integerBox(thread, env, code->comp_level());
jobject level = integerBox(thread, env, code->comp_level());
CHECK_JNI_EXCEPTION_(env, NULL);
env->SetObjectArrayElement(result, 0, obj);
env->SetObjectArrayElement(result, 0, level);
jbyteArray insts = env->NewByteArray(insts_size);
CHECK_JNI_EXCEPTION_(env, NULL);
env->SetByteArrayRegion(insts, 0, insts_size, (jbyte*) code->insts_begin());
env->SetObjectArrayElement(result, 1, insts);
jobject id = integerBox(thread, env, code->compile_id());
CHECK_JNI_EXCEPTION_(env, NULL);
env->SetObjectArrayElement(result, 2, id);
return result;
WB_END
@ -989,9 +989,9 @@ static JNINativeMethod methods[] = {
{CC"NMTCommitMemory", CC"(JJ)V", (void*)&WB_NMTCommitMemory },
{CC"NMTUncommitMemory", CC"(JJ)V", (void*)&WB_NMTUncommitMemory },
{CC"NMTReleaseMemory", CC"(JJ)V", (void*)&WB_NMTReleaseMemory },
{CC"NMTOverflowHashBucket", CC"(J)V", (void*)&WB_NMTOverflowHashBucket},
{CC"NMTIsDetailSupported",CC"()Z", (void*)&WB_NMTIsDetailSupported},
{CC"NMTChangeTrackingLevel", CC"()Z", (void*)&WB_NMTChangeTrackingLevel},
{CC"NMTGetHashSize", CC"()I", (void*)&WB_NMTGetHashSize },
#endif // INCLUDE_NMT
{CC"deoptimizeAll", CC"()V", (void*)&WB_DeoptimizeAll },
{CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Executable;Z)I",
@ -1018,6 +1018,8 @@ static JNINativeMethod methods[] = {
CC"(Ljava/lang/reflect/Executable;II)Z", (void*)&WB_EnqueueMethodForCompilation},
{CC"clearMethodState",
CC"(Ljava/lang/reflect/Executable;)V", (void*)&WB_ClearMethodState},
{CC"isConstantVMFlag", CC"(Ljava/lang/String;)Z", (void*)&WB_IsConstantVMFlag},
{CC"isLockedVMFlag", CC"(Ljava/lang/String;)Z", (void*)&WB_IsLockedVMFlag},
{CC"setBooleanVMFlag", CC"(Ljava/lang/String;Z)V",(void*)&WB_SetBooleanVMFlag},
{CC"setIntxVMFlag", CC"(Ljava/lang/String;J)V",(void*)&WB_SetIntxVMFlag},
{CC"setUintxVMFlag", CC"(Ljava/lang/String;J)V",(void*)&WB_SetUintxVMFlag},

View File

@ -54,7 +54,7 @@
#endif // INCLUDE_ALL_GCS
// Note: This is a special bug reporting site for the JVM
#define DEFAULT_VENDOR_URL_BUG "http://bugreport.sun.com/bugreport/crash.jsp"
#define DEFAULT_VENDOR_URL_BUG "http://bugreport.java.com/bugreport/crash.jsp"
#define DEFAULT_JAVA_LAUNCHER "generic"
// Disable options not supported in this release, with a warning if they
@ -306,6 +306,9 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
{ "ReflectionWrapResolutionErrors",JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "VerifyReflectionBytecodes", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "AutoShutdownNMT", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "NmethodSweepFraction", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "NmethodSweepCheckInterval", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "CodeCacheMinimumFreeSpace", JDK_Version::jdk(9), JDK_Version::jdk(10) },
#ifndef ZERO
{ "UseFastAccessorMethods", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "UseFastEmptyMethods", JDK_Version::jdk(9), JDK_Version::jdk(10) },
@ -2528,7 +2531,7 @@ bool Arguments::check_vm_args_consistency() {
// Check lower bounds of the code cache
// Template Interpreter code is approximately 3X larger in debug builds.
uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
if (InitialCodeCacheSize < (uintx)os::vm_page_size()) {
jio_fprintf(defaultStream::error_stream(),
"Invalid InitialCodeCacheSize=%dK. Must be at least %dK.\n", InitialCodeCacheSize/K,
@ -2564,10 +2567,11 @@ bool Arguments::check_vm_args_consistency() {
status = false;
}
status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction");
status &= verify_interval(NmethodSweepActivity, 0, 2000, "NmethodSweepActivity");
status &= verify_interval(CodeCacheMinBlockLength, 1, 100, "CodeCacheMinBlockLength");
status &= verify_interval(CodeCacheSegmentSize, 1, 1024, "CodeCacheSegmentSize");
status &= verify_interval(StartAggressiveSweepingAt, 0, 100, "StartAggressiveSweepingAt");
int min_number_of_compiler_threads = get_min_number_of_compiler_threads();
// The default CICompilerCount's value is CI_COMPILER_COUNT.
@ -3992,12 +3996,6 @@ jint Arguments::apply_ergo() {
#endif
#endif
// Set NmethodSweepFraction after the size of the code cache is adapted (in case of tiered)
if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M));
}
// Set heap size based on available physical memory
set_heap_size();
@ -4065,13 +4063,6 @@ jint Arguments::apply_ergo() {
}
#ifndef PRODUCT
if (CompileTheWorld) {
// Force NmethodSweeper to sweep whole CodeCache each time.
if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
NmethodSweepFraction = 1;
}
}
if (!LogVMOutput && FLAG_IS_DEFAULT(LogVMOutput)) {
if (use_vm_log()) {
LogVMOutput = true;

View File

@ -1173,7 +1173,7 @@ Deoptimization::get_method_data(JavaThread* thread, methodHandle m,
void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
// in case of an unresolved klass entry, load the class.
if (constant_pool->tag_at(index).is_unresolved_klass()) {
Klass* tk = constant_pool->klass_at(index, CHECK);
Klass* tk = constant_pool->klass_at_ignore_error(index, CHECK);
return;
}

View File

@ -28,6 +28,7 @@
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/os.hpp"
#include "utilities/ostream.hpp"
#include "utilities/macros.hpp"
#include "utilities/top.hpp"
@ -634,8 +635,8 @@ static void trace_flag_changed(const char* name, const T old_value, const T new_
e.commit();
}
bool CommandLineFlags::boolAt(const char* name, size_t len, bool* value) {
Flag* result = Flag::find_flag(name, len);
bool CommandLineFlags::boolAt(const char* name, size_t len, bool* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_bool()) return false;
*value = result->get_bool();
@ -662,8 +663,8 @@ void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, Fla
faddr->set_origin(origin);
}
bool CommandLineFlags::intxAt(const char* name, size_t len, intx* value) {
Flag* result = Flag::find_flag(name, len);
bool CommandLineFlags::intxAt(const char* name, size_t len, intx* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_intx()) return false;
*value = result->get_intx();
@ -690,8 +691,8 @@ void CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, Fla
faddr->set_origin(origin);
}
bool CommandLineFlags::uintxAt(const char* name, size_t len, uintx* value) {
Flag* result = Flag::find_flag(name, len);
bool CommandLineFlags::uintxAt(const char* name, size_t len, uintx* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_uintx()) return false;
*value = result->get_uintx();
@ -718,8 +719,8 @@ void CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, F
faddr->set_origin(origin);
}
bool CommandLineFlags::uint64_tAt(const char* name, size_t len, uint64_t* value) {
Flag* result = Flag::find_flag(name, len);
bool CommandLineFlags::uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_uint64_t()) return false;
*value = result->get_uint64_t();
@ -746,8 +747,8 @@ void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t va
faddr->set_origin(origin);
}
bool CommandLineFlags::size_tAt(const char* name, size_t len, size_t* value) {
Flag* result = Flag::find_flag(name, len);
bool CommandLineFlags::size_tAt(const char* name, size_t len, size_t* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_size_t()) return false;
*value = result->get_size_t();
@ -774,8 +775,8 @@ void CommandLineFlagsEx::size_tAtPut(CommandLineFlagWithType flag, size_t value,
faddr->set_origin(origin);
}
bool CommandLineFlags::doubleAt(const char* name, size_t len, double* value) {
Flag* result = Flag::find_flag(name, len);
bool CommandLineFlags::doubleAt(const char* name, size_t len, double* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_double()) return false;
*value = result->get_double();
@ -802,8 +803,8 @@ void CommandLineFlagsEx::doubleAtPut(CommandLineFlagWithType flag, double value,
faddr->set_origin(origin);
}
bool CommandLineFlags::ccstrAt(const char* name, size_t len, ccstr* value) {
Flag* result = Flag::find_flag(name, len);
bool CommandLineFlags::ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_ccstr()) return false;
*value = result->get_ccstr();
@ -818,15 +819,12 @@ bool CommandLineFlags::ccstrAtPut(const char* name, size_t len, ccstr* value, Fl
trace_flag_changed<EventStringFlagChanged, const char*>(name, old_value, *value, origin);
char* new_value = NULL;
if (*value != NULL) {
new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1, mtInternal);
strcpy(new_value, *value);
new_value = os::strdup_check_oom(*value);
}
result->set_ccstr(new_value);
if (result->is_default() && old_value != NULL) {
// Prior value is NOT heap allocated, but was a literal constant.
char* old_value_to_free = NEW_C_HEAP_ARRAY(char, strlen(old_value)+1, mtInternal);
strcpy(old_value_to_free, old_value);
old_value = old_value_to_free;
old_value = os::strdup_check_oom(old_value);
}
*value = old_value;
result->set_origin(origin);
@ -838,8 +836,7 @@ void CommandLineFlagsEx::ccstrAtPut(CommandLineFlagWithType flag, ccstr value, F
guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type");
ccstr old_value = faddr->get_ccstr();
trace_flag_changed<EventStringFlagChanged, const char*>(faddr->_name, old_value, value, origin);
char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value)+1, mtInternal);
strcpy(new_value, value);
char* new_value = os::strdup_check_oom(value);
faddr->set_ccstr(new_value);
if (!faddr->is_default() && old_value != NULL) {
// Prior value is heap allocated so free it.

View File

@ -379,38 +379,38 @@ class SizeTFlagSetting {
class CommandLineFlags {
public:
static bool boolAt(const char* name, size_t len, bool* value);
static bool boolAt(const char* name, bool* value) { return boolAt(name, strlen(name), value); }
static bool boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false);
static bool boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false) { return boolAt(name, strlen(name), value, allow_locked, return_flag); }
static bool boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin);
static bool boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); }
static bool intxAt(const char* name, size_t len, intx* value);
static bool intxAt(const char* name, intx* value) { return intxAt(name, strlen(name), value); }
static bool intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false);
static bool intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, return_flag); }
static bool intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin);
static bool intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); }
static bool uintxAt(const char* name, size_t len, uintx* value);
static bool uintxAt(const char* name, uintx* value) { return uintxAt(name, strlen(name), value); }
static bool uintxAt(const char* name, size_t len, uintx* value, bool allow_locked = false, bool return_flag = false);
static bool uintxAt(const char* name, uintx* value, bool allow_locked = false, bool return_flag = false) { return uintxAt(name, strlen(name), value, allow_locked, return_flag); }
static bool uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin);
static bool uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }
static bool size_tAt(const char* name, size_t len, size_t* value);
static bool size_tAt(const char* name, size_t* value) { return size_tAt(name, strlen(name), value); }
static bool size_tAt(const char* name, size_t len, size_t* value, bool allow_locked = false, bool return_flag = false);
static bool size_tAt(const char* name, size_t* value, bool allow_locked = false, bool return_flag = false) { return size_tAt(name, strlen(name), value, allow_locked, return_flag); }
static bool size_tAtPut(const char* name, size_t len, size_t* value, Flag::Flags origin);
static bool size_tAtPut(const char* name, size_t* value, Flag::Flags origin) { return size_tAtPut(name, strlen(name), value, origin); }
static bool uint64_tAt(const char* name, size_t len, uint64_t* value);
static bool uint64_tAt(const char* name, uint64_t* value) { return uint64_tAt(name, strlen(name), value); }
static bool uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked = false, bool return_flag = false);
static bool uint64_tAt(const char* name, uint64_t* value, bool allow_locked = false, bool return_flag = false) { return uint64_tAt(name, strlen(name), value, allow_locked, return_flag); }
static bool uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin);
static bool uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
static bool doubleAt(const char* name, size_t len, double* value);
static bool doubleAt(const char* name, double* value) { return doubleAt(name, strlen(name), value); }
static bool doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false);
static bool doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false) { return doubleAt(name, strlen(name), value, allow_locked, return_flag); }
static bool doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin);
static bool doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); }
static bool ccstrAt(const char* name, size_t len, ccstr* value);
static bool ccstrAt(const char* name, ccstr* value) { return ccstrAt(name, strlen(name), value); }
static bool ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false);
static bool ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false) { return ccstrAt(name, strlen(name), value, allow_locked, return_flag); }
// Contract: Flag will make private copy of the incoming value.
// Outgoing value is always malloc-ed, and caller MUST call free.
static bool ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin);
@ -2066,9 +2066,6 @@ class CommandLineFlags {
"Provide more detailed and expensive TLAB statistics " \
"(with PrintTLAB)") \
\
EMBEDDED_ONLY(product(bool, LowMemoryProtection, true, \
"Enable LowMemoryProtection")) \
\
product_pd(bool, NeverActAsServerClassMachine, \
"Never act like a server-class machine") \
\
@ -2984,12 +2981,6 @@ class CommandLineFlags {
product(intx, SafepointTimeoutDelay, 10000, \
"Delay in milliseconds for option SafepointTimeout") \
\
product(intx, NmethodSweepFraction, 16, \
"Number of invocations of sweeper to cover all nmethods") \
\
product(intx, NmethodSweepCheckInterval, 5, \
"Compilers wake up every n seconds to possibly sweep nmethods") \
\
product(intx, NmethodSweepActivity, 10, \
"Removes cold nmethods from code cache if > 0. Higher values " \
"result in more aggressive sweeping") \
@ -3378,9 +3369,6 @@ class CommandLineFlags {
product_pd(uintx, NonNMethodCodeHeapSize, \
"Size of code heap with non-nmethods (in bytes)") \
\
product(uintx, CodeCacheMinimumFreeSpace, 500*K, \
"When less than X space left, we stop compiling") \
\
product_pd(uintx, CodeCacheExpansionSize, \
"Code cache expansion size (in bytes)") \
\
@ -3393,6 +3381,11 @@ class CommandLineFlags {
product(bool, UseCodeCacheFlushing, true, \
"Remove cold/old nmethods from the code cache") \
\
product(uintx, StartAggressiveSweepingAt, 10, \
"Start aggressive sweeping if X[%] of the code cache is free." \
"Segmented code cache: X[%] of the non-profiled heap." \
"Non-segmented code cache: X[%] of the total code cache") \
\
/* interpreter debugging */ \
develop(intx, BinarySwitchThreshold, 5, \
"Minimal number of lookupswitch entries for rewriting to binary " \

View File

@ -187,19 +187,22 @@ void InterfaceSupport::zap_dead_locals_old() {
# endif
// invocation counter for InterfaceSupport::deoptimizeAll/zombieAll functions
int deoptimizeAllCounter = 0;
int zombieAllCounter = 0;
void InterfaceSupport::zombieAll() {
if (is_init_completed() && zombieAllCounter > ZombieALotInterval) {
// This method is called by all threads when a thread makes a
// transition to VM state (for example, runtime calls).
// Divide the number of calls by the number of threads to avoid
// making the ZombieAll event frequency depend on the number of threads.
int value = zombieAllCounter / Threads::number_of_threads();
if (is_init_completed() && value > ZombieALotInterval) {
zombieAllCounter = 0;
VM_ZombieAll op;
VMThread::execute(&op);
} else {
zombieAllCounter++;
}
zombieAllCounter++;
}
void InterfaceSupport::unlinkSymbols() {
@ -208,12 +211,17 @@ void InterfaceSupport::unlinkSymbols() {
}
void InterfaceSupport::deoptimizeAll() {
if (is_init_completed() ) {
if (DeoptimizeALot && deoptimizeAllCounter > DeoptimizeALotInterval) {
// This method is called by all threads when a thread makes a
// transition to VM state (for example, runtime calls).
// Divide the number of calls by the number of threads to avoid
// making the DeoptimizeAll event frequency depend on the number of threads.
int value = deoptimizeAllCounter / Threads::number_of_threads();
if (is_init_completed()) {
if (DeoptimizeALot && value > DeoptimizeALotInterval) {
deoptimizeAllCounter = 0;
VM_DeoptimizeAll op;
VMThread::execute(&op);
} else if (DeoptimizeRandom && (deoptimizeAllCounter & 0x1f) == (os::random() & 0x1f)) {
} else if (DeoptimizeRandom && (value & 0x1F) == (os::random() & 0x1F)) {
VM_DeoptimizeAll op;
VMThread::execute(&op);
}

View File

@ -705,25 +705,35 @@ int JDK_Version::compare(const JDK_Version& other) const {
}
void JDK_Version::to_string(char* buffer, size_t buflen) const {
assert(buffer && buflen > 0, "call with useful buffer");
size_t index = 0;
if (!is_valid()) {
jio_snprintf(buffer, buflen, "%s", "(uninitialized)");
} else if (is_partially_initialized()) {
jio_snprintf(buffer, buflen, "%s", "(uninitialized) pre-1.6.0");
} else {
index += jio_snprintf(
int rc = jio_snprintf(
&buffer[index], buflen - index, "%d.%d", _major, _minor);
if (rc == -1) return;
index += rc;
if (_micro > 0) {
index += jio_snprintf(&buffer[index], buflen - index, ".%d", _micro);
rc = jio_snprintf(&buffer[index], buflen - index, ".%d", _micro);
}
if (_update > 0) {
index += jio_snprintf(&buffer[index], buflen - index, "_%02d", _update);
rc = jio_snprintf(&buffer[index], buflen - index, "_%02d", _update);
if (rc == -1) return;
index += rc;
}
if (_special > 0) {
index += jio_snprintf(&buffer[index], buflen - index, "%c", _special);
rc = jio_snprintf(&buffer[index], buflen - index, "%c", _special);
if (rc == -1) return;
index += rc;
}
if (_build > 0) {
index += jio_snprintf(&buffer[index], buflen - index, "-b%02d", _build);
rc = jio_snprintf(&buffer[index], buflen - index, "-b%02d", _build);
if (rc == -1) return;
index += rc;
}
}
}
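Note: the rewritten to_string above appends into one fixed buffer and stops advancing index as soon as jio_snprintf reports truncation with -1. The same shape with standard snprintf, which instead reports truncation by returning a value greater than or equal to the remaining space, looks like this (buffer size and version numbers are made up):

  #include <cstdio>

  int main() {
    char buffer[16];
    const size_t buflen = sizeof(buffer);
    size_t index = 0;

    // Append "major.minor"; stop (keeping what fits) if the formatter reports truncation.
    int rc = std::snprintf(&buffer[index], buflen - index, "%d.%d", 9, 0);
    if (rc < 0 || (size_t)rc >= buflen - index) return 0;
    index += (size_t)rc;

    // Append the build suffix the same way, never advancing 'index' past the end.
    rc = std::snprintf(&buffer[index], buflen - index, "-b%02d", 39);
    if (rc < 0 || (size_t)rc >= buflen - index) return 0;
    index += (size_t)rc;

    std::printf("%s\n", buffer);  // prints "9.0-b39"
    return 0;
  }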

View File

@ -61,7 +61,7 @@ Mutex* SymbolTable_lock = NULL;
Mutex* StringTable_lock = NULL;
Monitor* StringDedupQueue_lock = NULL;
Mutex* StringDedupTable_lock = NULL;
Mutex* CodeCache_lock = NULL;
Monitor* CodeCache_lock = NULL;
Mutex* MethodData_lock = NULL;
Mutex* RetData_lock = NULL;
Monitor* VMOperationQueue_lock = NULL;
@ -205,7 +205,7 @@ void mutex_init() {
}
def(ParGCRareEvent_lock , Mutex , leaf , true );
def(DerivedPointerTableGC_lock , Mutex, leaf, true );
def(CodeCache_lock , Mutex , special, true );
def(CodeCache_lock , Monitor, special, true );
def(Interrupt_lock , Monitor, special, true ); // used for interrupt processing
def(RawMonitor_lock , Mutex, special, true );
def(OopMapCacheAlloc_lock , Mutex, leaf, true ); // used for oop_map_cache allocation.

View File

@ -53,7 +53,7 @@ extern Mutex* SymbolTable_lock; // a lock on the symbol table
extern Mutex* StringTable_lock; // a lock on the interned string table
extern Monitor* StringDedupQueue_lock; // a lock on the string deduplication queue
extern Mutex* StringDedupTable_lock; // a lock on the string deduplication table
extern Mutex* CodeCache_lock; // a lock on the CodeCache, rank is special, use MutexLockerEx
extern Monitor* CodeCache_lock; // a lock on the CodeCache, rank is special, use MutexLockerEx
extern Mutex* MethodData_lock; // a lock on installation of method data
extern Mutex* RetData_lock; // a lock on installation of RetData inside method data
extern Mutex* DerivedPointerTableGC_lock; // a lock to protect the derived pointer table

View File

@ -571,17 +571,6 @@ void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
#if INCLUDE_NMT
// NMT cannot track a malloc allocation size > MAX_MALLOC_SIZE, which is
// (1GB - 1) on a 32-bit system. It is not an issue on a 64-bit system, where
// MAX_MALLOC_SIZE = ((1 << 62) - 1).
// VM code does not have such large malloc allocations. However, one can come
// from an Unsafe call.
if (MemTracker::tracking_level() >= NMT_summary && size > MAX_MALLOC_SIZE) {
return NULL;
}
#endif
#ifdef ASSERT
// checking for the WatcherThread and crash_protection first
// since os::malloc can be called when the libjvm.{dll,so} is
@ -652,12 +641,6 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) {
}
void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
#if INCLUDE_NMT
// See comments in os::malloc() above
if (MemTracker::tracking_level() >= NMT_summary && size > MAX_MALLOC_SIZE) {
return NULL;
}
#endif
#ifndef ASSERT
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));

View File

@ -158,7 +158,6 @@ class os: AllStatic {
static void init_globals(void) { // Called from init_globals() in init.cpp
init_globals_ext();
}
static void init_3(void); // Called at the end of vm init
// File names are case-insensitive on windows only
// Override me as needed
@ -680,28 +679,10 @@ class os: AllStatic {
// SocketInterface (ex HPI SocketInterface )
static int socket(int domain, int type, int protocol);
static int socket_close(int fd);
static int socket_shutdown(int fd, int howto);
static int recv(int fd, char* buf, size_t nBytes, uint flags);
static int send(int fd, char* buf, size_t nBytes, uint flags);
static int raw_send(int fd, char* buf, size_t nBytes, uint flags);
static int timeout(int fd, long timeout);
static int listen(int fd, int count);
static int connect(int fd, struct sockaddr* him, socklen_t len);
static int bind(int fd, struct sockaddr* him, socklen_t len);
static int accept(int fd, struct sockaddr* him, socklen_t* len);
static int recvfrom(int fd, char* buf, size_t nbytes, uint flags,
struct sockaddr* from, socklen_t* fromlen);
static int get_sock_name(int fd, struct sockaddr* him, socklen_t* len);
static int sendto(int fd, char* buf, size_t len, uint flags,
struct sockaddr* to, socklen_t tolen);
static int socket_available(int fd, jint* pbytes);
static int get_sock_opt(int fd, int level, int optname,
char* optval, socklen_t* optlen);
static int set_sock_opt(int fd, int level, int optname,
const char* optval, socklen_t optlen);
static int get_host_name(char* name, int namelen);
static struct hostent* get_host_by_name(char* name);
// Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal)

View File

@ -2421,8 +2421,6 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
// CodeCache is full, disable compilation
// Ought to log this but compile log is only per compile thread
// and we're some non descript Java thread.
MutexUnlocker mu(AdapterHandlerLibrary_lock);
CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return NULL; // Out of CodeCache space
}
entry->relocate(new_adapter->content_begin());
@ -2594,9 +2592,6 @@ void AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
CompileTask::print_compilation(tty, nm, method->is_static() ? "(static)" : "");
}
nm->post_compiled_method_load_event();
} else {
// CodeCache is full, disable compilation
CompileBroker::handle_full_code_cache(CodeBlobType::MethodNonProfiled);
}
}

View File

@ -158,7 +158,7 @@ void SignatureIterator::iterate_parameters( uint64_t fingerprint ) {
uint64_t saved_fingerprint = fingerprint;
// Check for too many arguments
if ( fingerprint == UCONST64(-1) ) {
if (fingerprint == (uint64_t)CONST64(-1)) {
SignatureIterator::iterate_parameters();
return;
}

View File

@ -243,7 +243,7 @@ class Fingerprinter: public SignatureIterator {
}
if (mh->size_of_parameters() > max_size_of_parameters ) {
_fingerprint = UCONST64(-1);
_fingerprint = (uint64_t)CONST64(-1);
mh->constMethod()->set_fingerprint(_fingerprint);
return _fingerprint;
}

Some files were not shown because too many files have changed in this diff.