Merge

commit c8b6b35f12

.hgtags
@@ -209,3 +209,5 @@ d7ad0dfaa41151bd3a9ae46725b0aec3730a9cd0 jdk8-b84
 1872c12529090e1c1dbf567f02ad7ae6231b8f0c jdk8-b85
 da9a4c9312816451884aa6db6f18be51a07bff13 jdk8-b86
 5ebf6c63714de2c9dcf831074086d31daec819df jdk8-b87
+e517701a4d0e25ae9c7945bca6e1762a8c5d8aa6 jdk8-b88
+4dec41b3c5e3bb616f0c6f15830d940905aa5d16 jdk8-b89
@@ -209,3 +209,5 @@ fd1a5574cf68af24bfd52decc37ac6361afb278a jdk8-b78
 7fc358f5943676b82f1dccd3152b1ac07d92e38b jdk8-b85
 df9b5240f0a76c91cfe1a5b39da4d08df56e05be jdk8-b86
 b9415faa7066a4d3b16d466556d5428446918d95 jdk8-b87
+e1a929afcfc492470d50be0b6b0e8dc77d3760b9 jdk8-b88
+892a0196d10c67f3a12f0eefb0bb536e423d8868 jdk8-b89
@@ -411,6 +411,8 @@ define SetupNativeCompilation
       $1_EXTRA_LDFLAGS+="-implib:$$($1_OBJECT_DIR)/$$($1_LIBRARY).lib"
     endif

+    $1_EXTRA_LDFLAGS_SUFFIX += $(GLOBAL_LDFLAGS_SUFFIX)
+
     ifneq (,$$($1_DEBUG_SYMBOLS))
       ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
         ifeq ($(OPENJDK_TARGET_OS), windows)
@@ -549,6 +551,8 @@ define SetupNativeCompilation
     endif
   endif

+  $1_EXTRA_LDFLAGS_SUFFIX += $(GLOBAL_LDFLAGS_SUFFIX)
+
  $$($1_TARGET) : $$($1_EXPECTED_OBJS) $$($1_RES) $$($1_GEN_MANIFEST)
    $$(call LINKING_EXE_MSG,$$($1_BASENAME))
    $$($1_LDEXE) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) $(EXE_OUT_OPTION)$$($1_TARGET) \
@@ -142,6 +142,7 @@ CORE_PKGS = \
 java.util.prefs \
 java.util.regex \
 java.util.spi \
+java.util.stream \
 java.util.zip \
 javax.accessibility \
 javax.activation \
@@ -390,6 +390,17 @@ $(COREAPI_OPTIONS_FILE): $(COREAPI_OVERVIEW)
 $(call OptionPair,-tag,specdefault:X) ; \
 $(call OptionPair,-tag,Note:X) ; \
 $(call OptionPair,-tag,ToDo:X) ; \
+$(call OptionPair,-tag,apiNote:a:API Note:) ; \
+$(call OptionPair,-tag,implSpec:a:Implementation Requirements:) ; \
+$(call OptionPair,-tag,implNote:a:Implementation Note:) ; \
+$(call OptionPair,-tag,param) ; \
+$(call OptionPair,-tag,return) ; \
+$(call OptionPair,-tag,throws) ; \
+$(call OptionPair,-tag,since) ; \
+$(call OptionPair,-tag,version) ; \
+$(call OptionPair,-tag,serialData) ; \
+$(call OptionPair,-tag,factory) ; \
+$(call OptionPair,-tag,see) ; \
 $(call OptionPair,-tag,$(TAG_JLS)) ; \
 $(call OptionOnly,-splitIndex) ; \
 $(call OptionPair,-overview,$(COREAPI_OVERVIEW)) ; \
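The added "-tag name:a:Header" options register the new informational javadoc tags and the headings they are rendered under. A minimal sketch of their use in source (the method and doc text are illustrative, not from this changeset):

    /**
     * Returns the size of this buffer.
     *
     * @apiNote  Rendered under the "API Note:" heading registered above.
     * @implSpec Rendered under "Implementation Requirements:".
     * @implNote Rendered under "Implementation Note:".
     */
    public int size() { return count; }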
@@ -209,3 +209,5 @@ a45bb25a67c7517b45f00c9682e317f46fecbba9 jdk8-b83
 9583a6431596bac1959d2d8828f5ea217843dd12 jdk8-b85
 44a8ce4a759f2668ff434661a93ff462ea472478 jdk8-b86
 f1709874d55a06bc3d5dfa02dbcdfbc59f4cba34 jdk8-b87
+4e3a881ebb1ee96ce0872508b0066d74f310dbfa jdk8-b88
+fe4150590ee597f4e125fea950aa3b352622cc2d jdk8-b89
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -258,6 +258,19 @@ public class UnionGen implements com.sun.tools.corba.se.idl.UnionGen, JavaGenera
 {
   Vector labels = vectorizeLabels (u.branches (), true);

+  if (Util.javaName(utype).equals ("boolean")) {
+    stream.println( "" ) ;
+    stream.println( "  private void verifyDefault (boolean discriminator)" ) ;
+    stream.println( "  {" ) ;
+    if (labels.contains ("true"))
+      stream.println ("    if ( discriminator )");
+    else
+      stream.println ("    if ( !discriminator )");
+    stream.println( "        throw new org.omg.CORBA.BAD_OPERATION();" ) ;
+    stream.println( "  }" ) ;
+    return;
+  }
+
   stream.println( "" ) ;
   stream.println( "  private void verifyDefault( " + Util.javaName(utype) +
                   " value )" ) ;
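Assembled from the string literals above, the generated guard for a boolean-discriminated union has roughly this shape (a sketch; whitespace approximate, and the negated form is emitted when "true" is not among the case labels):

    private void verifyDefault (boolean discriminator)
    {
        if ( discriminator )
            throw new org.omg.CORBA.BAD_OPERATION();
    }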
@@ -763,7 +776,7 @@ public class UnionGen implements com.sun.tools.corba.se.idl.UnionGen, JavaGenera
     stream.println (indent + "if (" + disName + ')');

     if (firstBranch == null)
-      stream.println (indent + "  throw new org.omg.CORBA.BAD_OPERATION ();");
+      stream.println (indent + "  value._default(" + disName + ");");
     else {
       stream.println (indent + '{');
       index = readBranch (index, indent + "  ", firstBranch.typedef.name (),
@@ -774,7 +787,7 @@ public class UnionGen implements com.sun.tools.corba.se.idl.UnionGen, JavaGenera
     stream.println (indent + "else");

     if (secondBranch == null)
-      stream.println (indent + "  throw new org.omg.CORBA.BAD_OPERATION ();");
+      stream.println (indent + "  value._default(" + disName + ");");
     else {
       stream.println (indent + '{');
       index = readBranch (index, indent + "  ", secondBranch.typedef.name (),
@@ -924,23 +937,25 @@ public class UnionGen implements com.sun.tools.corba.se.idl.UnionGen, JavaGenera
       firstBranch = secondBranch;
       secondBranch = tmp;
     }
-    stream.println (indent + "if (" + disName + ')');
-    if (firstBranch == null)
-      stream.println (indent + "  throw new org.omg.CORBA.BAD_OPERATION ();");
-    else
-    {
-      stream.println (indent + '{');
-      index = writeBranch (index, indent + "  ", name, firstBranch.typedef, stream);
-      stream.println (indent + '}');
-    }
-    stream.println (indent + "else");
-    if (secondBranch == null)
-      stream.println (indent + "  throw new org.omg.CORBA.BAD_OPERATION ();");
-    else
-    {
-      stream.println (indent + '{');
-      index = writeBranch (index, indent + "  ", name, secondBranch.typedef, stream);
-      stream.println (indent + '}');
+    if (firstBranch != null && secondBranch != null) {
+      stream.println (indent + "if (" + disName + ')');
+      stream.println (indent + '{');
+      index = writeBranch (index, indent + "  ", name, firstBranch.typedef, stream);
+      stream.println (indent + '}');
+      stream.println (indent + "else");
+      stream.println (indent + '{');
+      index = writeBranch (index, indent + "  ", name, secondBranch.typedef, stream);
+      stream.println (indent + '}');
+    } else if (firstBranch != null) {
+      stream.println (indent + "if (" + disName + ')');
+      stream.println (indent + '{');
+      index = writeBranch (index, indent + "  ", name, firstBranch.typedef, stream);
+      stream.println (indent + '}');
+    } else {
+      stream.println (indent + "if (!" + disName + ')');
+      stream.println (indent + '{');
+      index = writeBranch (index, indent + "  ", name, secondBranch.typedef, stream);
+      stream.println (indent + '}');
+    }
     }
   }
   return index;
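The rewritten logic only emits an "else" arm when both branches exist, so the generated stream code stays well-formed when a branch is absent. Pieced together from the println calls, the emitted code takes one of two shapes (a sketch; the comments stand for the per-type marshalling statements writeBranch produces):

    // both branches present:
    if (discriminator)
    {
        // <write first branch>
    }
    else
    {
        // <write second branch>
    }
    // only one branch present: a single guarded block, no dangling else
    if (discriminator)        // or: if (!discriminator), for the second branch
    {
        // <write that branch>
    }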
@@ -337,3 +337,7 @@ a947f40fb536e5b9e0aa210cf26abb430f80887a hs25-b26
 d4c2667846607042370760e23f64c3ab9350e60d jdk8-b87
 01d5f04e64dc2d64625b2db2056f5ed4de918a45 hs25-b29
 c4af77d2045476c56fbf3f914b336bb1b7cd18af hs25-b30
+8482058e74bc8c1a890e6f3be3eff192dba6ce67 jdk8-b88
+4ec91349972255650f97bedfd07e6423e02428cf hs25-b31
+9c1fe0b419b40a9ecdd1653cc9af1b6d67a12c46 jdk8-b89
+69494caf57908ba2c8efa9eaaa472b4d1875588a hs25-b32
@@ -1,22 +1,22 @@
 <html>
 <head>
 <title>
-C2 Replay
+Replay
 </title>
 </head>
 <body>

-<h1>C2 compiler replay</h1>
+<h1>Compiler replay</h1>
 <p>
-The C2 compiler replay is a function to repeat the compiling process from a crashed java process in compiled method<br>
+The compiler replay is a function to repeat the compiling process from a crashed java process in compiled method<br>
 This function only exists in debug version of VM
 </p>
 <h2>Usage</h2>
 <pre>
-First, use SA to attach to the core file, if suceeded, do
-clhsdb>dumpreplaydata <address> | -a | <thread_id> [> replay.txt]
+First, use SA to attach to the core file, if succeeded, do
+hsdb> dumpreplaydata <address> | -a | <thread_id> [> replay.txt]
 create file replay.txt, address is address of Method, or nmethod(CodeBlob)
-clhsdb>buildreplayjars [all | boot | app]
+hsdb> buildreplayjars [all | boot | app]
 create files:
 all:
 app.jar, boot.jar
@@ -26,16 +26,16 @@ First, use SA to attach to the core file, if suceeded, do
 app.jar
 exit SA now.
 Second, use the obtained replay text file, replay.txt and jar files, app.jar and boot.jar, using debug version of java
 java -Xbootclasspath/p:boot.jar -cp app.jar -XX:ReplayDataFile=<datafile> -XX:+ReplayCompiles ....
 This will replay the compiling process.

 With ReplayCompiles, the replay will recompile all the methods in app.jar, and in boot.jar to emulate the process in java app.

 notes:
 1) Most time, we don't need the boot.jar which is the classes loaded from JDK. It will be only modified when an agent(JVMDI) is running and modifies the classes.
 2) If encounter error as "<flag>" not found, that means the SA is using a VMStructs which is different from the one with corefile. In this case, SA has a utility tool vmstructsdump which is located at agent/src/os/<os>/proc/<os_platform>

 Use this tool to dump VM type library:
 vmstructsdump libjvm.so > <type_name>.db

 set env SA_TYPEDB=<type_name>.db (refer different shell for set envs)
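Put end to end, the documented flow is the following session (a sketch of the commands quoted above; the core-file name and the sa-jdi invocation are illustrative assumptions, not part of this changeset):

    $ java -cp $JAVA_HOME/lib/sa-jdi.jar sun.jvm.hotspot.CLHSDB $JAVA_HOME/bin/java core.1234
    hsdb> dumpreplaydata -a > replay.txt
    hsdb> buildreplayjars all
    hsdb> quit
    $ java -Xbootclasspath/p:boot.jar -cp app.jar \
          -XX:ReplayDataFile=replay.txt -XX:+ReplayCompiles ...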
@@ -15,7 +15,7 @@ GUI tools. Command line HSDB (CLHSDB) tool is alternative to SA GUI tool HSDB.
 <p>
 There is also JavaScript based SA command line interface called <a href="jsdb.html">jsdb</a>.
 But, CLHSDB supports Unix shell-like (or dbx/gdb-like) command line interface with
 support for output redirection/appending (familiar >, >>), command history and so on.
 Each CLHSDB command can have zero or more arguments and optionally end with output redirection
 (or append) to a file. Commands may be stored in a file and run using <b>source</b> command.
 <b>help</b> command prints usage message for all supported commands (or a specific command)
@@ -49,7 +49,7 @@ Available commands:
 dumpheap [ file ] <font color="red">dump heap in hprof binary format</font>
 dumpideal -a | id <font color="red">dump ideal graph like debug flag -XX:+PrintIdeal</font>
 dumpilt -a | id <font color="red">dump inline tree for C2 compilation</font>
 dumpreplaydata <address> | -a | <thread_id> [>replay.txt] <font color="red">dump replay data into a file</font>
 echo [ true | false ] <font color="red">turn on/off command echo mode</font>
 examine [ address/count ] | [ address,address] <font color="red">show contents of memory from given address</font>
 field [ type [ name fieldtype isStatic offset address ] ] <font color="red">print info about a field of HotSpot type</font>
@@ -96,11 +96,11 @@ Available commands:

 <h3>JavaScript integration</h3>

 <p>Few CLHSDB commands are already implemented in JavaScript. It is possible to extend CLHSDB command set
 by implementing more commands in a JavaScript file and by loading it by <b>jsload</b> command. <b>jseval</b>
 command may be used to evaluate arbitrary JavaScript expression from a string. Any JavaScript function
 may be exposed as a CLHSDB command by registering it using JavaScript <b><code>registerCommand</code></b>
 function. This function accepts command name, usage and name of the JavaScript implementation function
 as arguments.
 </p>

@@ -127,11 +127,11 @@ hsdb> jsload test.js
 </code>
 </pre>

-<h3>C2 Compilation Replay</h3>
+<h3>Compilation Replay</h3>
 <p>
 When a java process crashes in compiled method, usually a core file is saved.
-The C2 replay function can reproduce the compiling process in the core.
-<a href="c2replay.html">c2replay.html</a>
+The replay function can reproduce the compiling process in the core.
+<a href="cireplay.html">cireplay.html</a>

 </body>
 </html>
@@ -204,7 +204,7 @@ Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_lookupByName0(
   jstring objectName, jstring symbolName)
 {
   struct ps_prochandle* ph = get_proc_handle(env, this_obj);
-  if (ph->core != NULL) {
+  if (ph != NULL && ph->core != NULL) {
     return lookupByNameIncore(env, ph, this_obj, objectName, symbolName);
   }
@@ -238,10 +238,13 @@ JNIEXPORT jobject JNICALL Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_loo
   const char* sym = NULL;

   struct ps_prochandle* ph = get_proc_handle(env, this_obj);
-  sym = symbol_for_pc(ph, (uintptr_t) addr, &offset);
-  if (sym == NULL) return 0;
-  return (*env)->CallObjectMethod(env, this_obj, createClosestSymbol_ID,
-                            (*env)->NewStringUTF(env, sym), (jlong)offset);
+  if (ph != NULL && ph->core != NULL) {
+    sym = symbol_for_pc(ph, (uintptr_t) addr, &offset);
+    if (sym == NULL) return 0;
+    return (*env)->CallObjectMethod(env, this_obj, createClosestSymbol_ID,
+                            (*env)->NewStringUTF(env, sym), (jlong)offset);
+  }
+  return 0;
 }

 /** called from Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_readBytesFromProcess0 */
@@ -279,7 +282,7 @@ Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_readBytesFromProcess0(
   jbyteArray array;

   struct ps_prochandle* ph = get_proc_handle(env, this_obj);
-  if (ph->core != NULL) {
+  if (ph != NULL && ph->core != NULL) {
     return readBytesFromCore(env, ph, this_obj, addr, numBytes);
   }
@@ -394,9 +397,9 @@ bool fill_java_threads(JNIEnv* env, jobject this_obj, struct ps_prochandle* ph)
 /* For core file only, called from
  * Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_getThreadIntegerRegisterSet0
  */
-jlongArray getThreadIntegerRegisterSetFromCore(JNIEnv *env, jobject this_obj, long lwp_id) {
+jlongArray getThreadIntegerRegisterSetFromCore(JNIEnv *env, jobject this_obj, long lwp_id, struct ps_prochandle* ph) {
   if (!_threads_filled) {
-    if (!fill_java_threads(env, this_obj, get_proc_handle(env, this_obj))) {
+    if (!fill_java_threads(env, this_obj, ph)) {
       throw_new_debugger_exception(env, "Failed to fill in threads");
       return 0;
     } else {
@@ -409,7 +412,6 @@ jlongArray getThreadIntegerRegisterSetFromCore(JNIEnv *env, jobject this_obj, lo
   jlongArray array;
   jlong *regs;

-  struct ps_prochandle* ph = get_proc_handle(env, this_obj);
   if (get_lwp_regs(ph, lwp_id, &gregs) != true) {
     THROW_NEW_DEBUGGER_EXCEPTION_("get_thread_regs failed for a lwp", 0);
   }
@@ -521,8 +523,8 @@ Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_getThreadIntegerRegisterSet0(
   print_debug("getThreadRegisterSet0 called\n");

   struct ps_prochandle* ph = get_proc_handle(env, this_obj);
-  if (ph->core != NULL) {
-    return getThreadIntegerRegisterSetFromCore(env, this_obj, thread_id);
+  if (ph != NULL && ph->core != NULL) {
+    return getThreadIntegerRegisterSetFromCore(env, this_obj, thread_id, ph);
   }

   kern_return_t result;
@@ -705,8 +707,8 @@ JNF_COCOA_ENTER(env);
   task_t gTask = 0;
   result = task_for_pid(mach_task_self(), jpid, &gTask);
   if (result != KERN_SUCCESS) {
-    print_error("attach: task_for_pid(%d) failed (%d)\n", (int)jpid, result);
-    THROW_NEW_DEBUGGER_EXCEPTION("Can't attach to the process");
+    print_error("attach: task_for_pid(%d) failed: '%s' (%d)\n", (int)jpid, mach_error_string(result), result);
+    THROW_NEW_DEBUGGER_EXCEPTION("Can't attach to the process. Could be caused by an incorrect pid or lack of privileges.");
   }
   putTask(env, this_obj, gTask);

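The improved diagnostic relies on mach_error_string(), which maps a kern_return_t to readable text. The pattern, stated standalone (function name illustrative):

    #include <stdio.h>
    #include <mach/mach.h>
    #include <mach/mach_error.h>

    /* Report why task_for_pid() failed, as in the hunk above. */
    static void report_attach_failure(pid_t pid, kern_return_t kr) {
      fprintf(stderr, "attach: task_for_pid(%d) failed: '%s' (%d)\n",
              (int)pid, mach_error_string(kr), (int)kr);
    }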
@@ -199,10 +199,10 @@ static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr)
 //---------------------------------------------------------------
 // Part of the class sharing workaround:
 //
-// With class sharing, pages are mapped from classes[_g].jsa file.
+// With class sharing, pages are mapped from classes.jsa file.
 // The read-only class sharing pages are mapped as MAP_SHARED,
 // PROT_READ pages. These pages are not dumped into core dump.
-// With this workaround, these pages are read from classes[_g].jsa.
+// With this workaround, these pages are read from classes.jsa.

 // FIXME: !HACK ALERT!
 // The format of sharing achive file header is needed to read shared heap
@@ -298,14 +298,12 @@ static bool init_classsharing_workaround(struct ps_prochandle* ph) {
   lib_info* lib = ph->libs;
   while (lib != NULL) {
     // we are iterating over shared objects from the core dump. look for
-    // libjvm[_g].so.
+    // libjvm.so.
     const char *jvm_name = 0;
 #ifdef __APPLE__
-    if ((jvm_name = strstr(lib->name, "/libjvm.dylib")) != 0 ||
-        (jvm_name = strstr(lib->name, "/libjvm_g.dylib")) != 0)
+    if ((jvm_name = strstr(lib->name, "/libjvm.dylib")) != 0)
 #else
-    if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0 ||
-        (jvm_name = strstr(lib->name, "/libjvm_g.so")) != 0)
+    if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0)
 #endif // __APPLE__
     {
       char classes_jsa[PATH_MAX];
@@ -389,7 +387,7 @@ static bool init_classsharing_workaround(struct ps_prochandle* ph) {
   }

   ph->core->classes_jsa_fd = fd;
-  // add read-only maps from classes[_g].jsa to the list of maps
+  // add read-only maps from classes.jsa to the list of maps
   for (m = 0; m < NUM_SHARED_MAPS; m++) {
     if (header._space[m]._read_only) {
       base = (uintptr_t) header._space[m]._base;

@@ -195,10 +195,10 @@ static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr)
 //---------------------------------------------------------------
 // Part of the class sharing workaround:
 //
-// With class sharing, pages are mapped from classes[_g].jsa file.
+// With class sharing, pages are mapped from classes.jsa file.
 // The read-only class sharing pages are mapped as MAP_SHARED,
 // PROT_READ pages. These pages are not dumped into core dump.
-// With this workaround, these pages are read from classes[_g].jsa.
+// With this workaround, these pages are read from classes.jsa.

 // FIXME: !HACK ALERT!
 // The format of sharing achive file header is needed to read shared heap
@@ -284,10 +284,9 @@ static bool init_classsharing_workaround(struct ps_prochandle* ph) {
   lib_info* lib = ph->libs;
   while (lib != NULL) {
     // we are iterating over shared objects from the core dump. look for
-    // libjvm[_g].so.
+    // libjvm.so.
     const char *jvm_name = 0;
-    if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0 ||
-        (jvm_name = strstr(lib->name, "/libjvm_g.so")) != 0) {
+    if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0) {
       char classes_jsa[PATH_MAX];
       struct FileMapHeader header;
       size_t n = 0;
@@ -371,7 +370,7 @@ static bool init_classsharing_workaround(struct ps_prochandle* ph) {
   }

   ph->core->classes_jsa_fd = fd;
-  // add read-only maps from classes[_g].jsa to the list of maps
+  // add read-only maps from classes.jsa to the list of maps
   for (m = 0; m < NUM_SHARED_MAPS; m++) {
     if (header._space[m]._read_only) {
       base = (uintptr_t) header._space[m]._base;

@@ -589,8 +589,7 @@ init_classsharing_workaround(void *cd, const prmap_t* pmap, const char* obj_name
   JNIEnv* env = dbg->env;
   jobject this_obj = dbg->this_obj;
   const char* jvm_name = 0;
-  if ((jvm_name = strstr(obj_name, "libjvm.so")) != NULL ||
-      (jvm_name = strstr(obj_name, "libjvm_g.so")) != NULL) {
+  if ((jvm_name = strstr(obj_name, "libjvm.so")) != NULL) {
     jvm_name = obj_name;
   } else {
     return 0;
|
||||
|
||||
struct ps_prochandle* ph = (struct ps_prochandle*) env->GetLongField(this_obj, p_ps_prochandle_ID);
|
||||
|
||||
// initialize classes[_g].jsa file descriptor field.
|
||||
// initialize classes.jsa file descriptor field.
|
||||
dbg->env->SetIntField(this_obj, classes_jsa_fd_ID, -1);
|
||||
|
||||
// check whether class sharing is on by reading variable "UseSharedSpaces"
|
||||
@@ -641,7 +640,7 @@ init_classsharing_workaround(void *cd, const prmap_t* pmap, const char* obj_name

   print_debug("looking for %s\n", classes_jsa);

-  // open the classes[_g].jsa
+  // open the classes.jsa
   int fd = libsaproc_open(classes_jsa, O_RDONLY);
   if (fd < 0) {
     char errMsg[ERR_MSG_SIZE];
@@ -651,7 +650,7 @@ init_classsharing_workaround(void *cd, const prmap_t* pmap, const char* obj_name
     print_debug("opened shared archive file %s\n", classes_jsa);
   }

-  // parse classes[_g].jsa
+  // parse classes.jsa
   struct FileMapHeader* pheader = (struct FileMapHeader*) malloc(sizeof(struct FileMapHeader));
   if (pheader == NULL) {
     close(fd);
@@ -798,8 +797,8 @@ static void attach_internal(JNIEnv* env, jobject this_obj, jstring cmdLine, jboo
   if (! isProcess) {
    /*
     * With class sharing, shared perm. gen heap is allocated in with MAP_SHARED|PROT_READ.
-    * These pages are mapped from the file "classes[_g].jsa". MAP_SHARED pages are not dumped
-    * in Solaris core.To read shared heap pages, we have to read classes[_g].jsa file.
+    * These pages are mapped from the file "classes.jsa". MAP_SHARED pages are not dumped
+    * in Solaris core.To read shared heap pages, we have to read classes.jsa file.
     */
     Pobject_iter(ph, init_classsharing_workaround, &dbg);
     exception = env->ExceptionOccurred();
@@ -24,20 +24,29 @@

 package sun.jvm.hotspot;

-import java.io.PrintStream;
-import java.net.*;
-import java.rmi.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.bsd.*;
-import sun.jvm.hotspot.debugger.proc.*;
-import sun.jvm.hotspot.debugger.remote.*;
-import sun.jvm.hotspot.debugger.windbg.*;
-import sun.jvm.hotspot.debugger.linux.*;
-import sun.jvm.hotspot.memory.*;
-import sun.jvm.hotspot.oops.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
+import java.rmi.RemoteException;
+
+import sun.jvm.hotspot.debugger.Debugger;
+import sun.jvm.hotspot.debugger.DebuggerException;
+import sun.jvm.hotspot.debugger.JVMDebugger;
+import sun.jvm.hotspot.debugger.MachineDescription;
+import sun.jvm.hotspot.debugger.MachineDescriptionAMD64;
+import sun.jvm.hotspot.debugger.MachineDescriptionIA64;
+import sun.jvm.hotspot.debugger.MachineDescriptionIntelX86;
+import sun.jvm.hotspot.debugger.MachineDescriptionSPARC32Bit;
+import sun.jvm.hotspot.debugger.MachineDescriptionSPARC64Bit;
+import sun.jvm.hotspot.debugger.NoSuchSymbolException;
+import sun.jvm.hotspot.debugger.bsd.BsdDebuggerLocal;
+import sun.jvm.hotspot.debugger.linux.LinuxDebuggerLocal;
+import sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal;
+import sun.jvm.hotspot.debugger.remote.RemoteDebugger;
+import sun.jvm.hotspot.debugger.remote.RemoteDebuggerClient;
+import sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer;
+import sun.jvm.hotspot.debugger.windbg.WindbgDebuggerLocal;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.TypeDataBase;
+import sun.jvm.hotspot.utilities.PlatformInfo;
+import sun.jvm.hotspot.utilities.UnsupportedPlatformException;

 /** <P> This class wraps much of the basic functionality and is the
  * highest-level factory for VM data structures. It makes it simple
@@ -475,7 +484,7 @@ public class HotSpotAgent {
     }

     private void setupJVMLibNamesSolaris() {
-        jvmLibNames = new String[] { "libjvm.so", "libjvm_g.so", "gamma_g" };
+        jvmLibNames = new String[] { "libjvm.so" };
     }

     //
@@ -507,7 +516,7 @@ public class HotSpotAgent {
     }

     private void setupJVMLibNamesWin32() {
-        jvmLibNames = new String[] { "jvm.dll", "jvm_g.dll" };
+        jvmLibNames = new String[] { "jvm.dll" };
     }

     //
@@ -547,7 +556,7 @@ public class HotSpotAgent {
     }

     private void setupJVMLibNamesLinux() {
-        jvmLibNames = new String[] { "libjvm.so", "libjvm_g.so" };
+        jvmLibNames = new String[] { "libjvm.so" };
     }

     //
@@ -572,7 +581,7 @@ public class HotSpotAgent {
     }

     private void setupJVMLibNamesBsd() {
-        jvmLibNames = new String[] { "libjvm.so", "libjvm_g.so" };
+        jvmLibNames = new String[] { "libjvm.so" };
     }

     //
@@ -595,7 +604,7 @@ public class HotSpotAgent {
     }

     private void setupJVMLibNamesDarwin() {
-        jvmLibNames = new String[] { "libjvm.dylib", "libjvm_g.dylib" };
+        jvmLibNames = new String[] { "libjvm.dylib" };
     }

     /** Convenience routine which should be called by per-platform
@@ -24,9 +24,9 @@

 package sun.jvm.hotspot;

-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.types.basic.*;
+import sun.jvm.hotspot.debugger.SymbolLookup;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.basic.BasicVtblAccess;

 public class LinuxVtblAccess extends BasicVtblAccess {
   private String vt;
@@ -35,8 +35,7 @@ public class LinuxVtblAccess extends BasicVtblAccess {
                          String[] dllNames) {
     super(symbolLookup, dllNames);

-    if (symbolLookup.lookup("libjvm.so", "__vt_10JavaThread") != null ||
-        symbolLookup.lookup("libjvm_g.so", "__vt_10JavaThread") != null) {
+    if (symbolLookup.lookup("libjvm.so", "__vt_10JavaThread") != null) {
       // old C++ ABI
       vt = "__vt_";
     } else {
@@ -93,10 +93,11 @@ public class ciEnv extends VMObject {
     CompileTask task = task();
     Method method = task.method();
     int entryBci = task.osrBci();
+    int compLevel = task.compLevel();
     Klass holder = method.getMethodHolder();
     out.println("compile " + holder.getName().asString() + " " +
                 OopUtilities.escapeString(method.getName().asString()) + " " +
                 method.getSignature().asString() + " " +
-                entryBci);
+                entryBci + " " + compLevel);
   }
 }
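With the extra field, a dumped replay entry now ends with the compilation level. For example, a line of the form (values illustrative):

    compile java/lang/String hashCode ()I -1 4

that is: class, method, signature, entry BCI (-1 for a normal, non-OSR compile), and the tier the crashed VM used.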
@@ -78,6 +78,8 @@ public class NMethod extends CodeBlob {
      current sweep traversal index. */
   private static CIntegerField stackTraversalMarkField;

+  private static CIntegerField compLevelField;
+
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {
@@ -113,7 +115,7 @@ public class NMethod extends CodeBlob {
     osrEntryPointField          = type.getAddressField("_osr_entry_point");
     lockCountField              = type.getJIntField("_lock_count");
     stackTraversalMarkField     = type.getCIntegerField("_stack_traversal_mark");
-
+    compLevelField              = type.getCIntegerField("_comp_level");
     pcDescSize = db.lookupType("PcDesc").getSize();
   }

@@ -530,7 +532,7 @@ public class NMethod extends CodeBlob {
     out.println("compile " + holder.getName().asString() + " " +
                 OopUtilities.escapeString(method.getName().asString()) + " " +
                 method.getSignature().asString() + " " +
-                getEntryBCI());
+                getEntryBCI() + " " + getCompLevel());

   }

@@ -551,4 +553,5 @@ public class NMethod extends CodeBlob {
   private int getHandlerTableOffset() { return (int) handlerTableOffsetField.getValue(addr); }
   private int getNulChkTableOffset()  { return (int) nulChkTableOffsetField .getValue(addr); }
   private int getNMethodEndOffset()   { return (int) nmethodEndOffsetField  .getValue(addr); }
+  private int getCompLevel()          { return (int) compLevelField         .getValue(addr); }
 }
@@ -46,10 +46,12 @@ public class CompileTask extends VMObject {
     Type type      = db.lookupType("CompileTask");
     methodField    = type.getAddressField("_method");
     osrBciField    = new CIntField(type.getCIntegerField("_osr_bci"), 0);
+    compLevelField = new CIntField(type.getCIntegerField("_comp_level"), 0);
   }

   private static AddressField methodField;
   private static CIntField osrBciField;
+  private static CIntField compLevelField;

   public CompileTask(Address addr) {
     super(addr);
@@ -63,4 +65,8 @@ public class CompileTask extends VMObject {
   public int osrBci() {
     return (int)osrBciField.getValue(getAddress());
   }
+
+  public int compLevel() {
+    return (int)compLevelField.getValue(getAddress());
+  }
 }
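This is the usual SA mirroring idiom: a CIntField wraps the VM-structs entry for a C++ field and reads the integer at that offset from a live address. A minimal usage sketch (the address variable is illustrative):

    CompileTask task = new CompileTask(addr); // addr: address of the VM's CompileTask
    int level = task.compLevel();             // reads _comp_level through the CIntField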
@@ -24,17 +24,28 @@

 package sun.jvm.hotspot.debugger.bsd;

-import java.io.*;
-import java.net.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.x86.*;
-import sun.jvm.hotspot.debugger.cdbg.*;
-import sun.jvm.hotspot.utilities.*;
-import sun.jvm.hotspot.runtime.VM;
-import sun.jvm.hotspot.runtime.Threads;
-import java.lang.reflect.*;
-
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.debugger.DebuggerBase;
+import sun.jvm.hotspot.debugger.DebuggerException;
+import sun.jvm.hotspot.debugger.DebuggerUtilities;
+import sun.jvm.hotspot.debugger.MachineDescription;
+import sun.jvm.hotspot.debugger.NotInHeapException;
+import sun.jvm.hotspot.debugger.OopHandle;
+import sun.jvm.hotspot.debugger.ReadResult;
+import sun.jvm.hotspot.debugger.ThreadProxy;
+import sun.jvm.hotspot.debugger.UnalignedAddressException;
+import sun.jvm.hotspot.debugger.UnmappedAddressException;
+import sun.jvm.hotspot.debugger.cdbg.CDebugger;
+import sun.jvm.hotspot.debugger.cdbg.ClosestSymbol;
+import sun.jvm.hotspot.debugger.cdbg.LoadObject;
+import sun.jvm.hotspot.runtime.JavaThread;
+import sun.jvm.hotspot.runtime.Threads;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.utilities.PlatformInfo;

 /** <P> An implementation of the JVMDebugger interface. The basic debug
     facilities are implemented through ptrace interface in the JNI code
@@ -246,10 +257,8 @@ public class BsdDebuggerLocal extends DebuggerBase implements BsdDebugger {
     /* called from attach methods */
     private void findABIVersion() throws DebuggerException {
         String libjvmName = isDarwin ? "libjvm.dylib" : "libjvm.so";
-        String libjvm_gName = isDarwin? "libjvm_g.dylib" : "libjvm_g.so";
         String javaThreadVt = isDarwin ? "_vt_10JavaThread" : "__vt_10JavaThread";
-        if (lookupByName0(libjvmName, javaThreadVt) != 0 ||
-            lookupByName0(libjvm_gName, javaThreadVt) != 0) {
+        if (lookupByName0(libjvmName, javaThreadVt) != 0) {
            // old C++ ABI
            useGCC32ABI = false;
        } else {
@@ -24,14 +24,25 @@

 package sun.jvm.hotspot.debugger.linux;

-import java.io.*;
-import java.net.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.x86.*;
-import sun.jvm.hotspot.debugger.cdbg.*;
-import sun.jvm.hotspot.utilities.*;
-import java.lang.reflect.*;
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.debugger.DebuggerBase;
+import sun.jvm.hotspot.debugger.DebuggerException;
+import sun.jvm.hotspot.debugger.DebuggerUtilities;
+import sun.jvm.hotspot.debugger.MachineDescription;
+import sun.jvm.hotspot.debugger.NotInHeapException;
+import sun.jvm.hotspot.debugger.OopHandle;
+import sun.jvm.hotspot.debugger.ReadResult;
+import sun.jvm.hotspot.debugger.ThreadProxy;
+import sun.jvm.hotspot.debugger.UnalignedAddressException;
+import sun.jvm.hotspot.debugger.UnmappedAddressException;
+import sun.jvm.hotspot.debugger.cdbg.CDebugger;
+import sun.jvm.hotspot.debugger.cdbg.ClosestSymbol;
+import sun.jvm.hotspot.debugger.cdbg.LoadObject;
+import sun.jvm.hotspot.utilities.PlatformInfo;

 /** <P> An implementation of the JVMDebugger interface. The basic debug
     facilities are implemented through ptrace interface in the JNI code
@@ -238,8 +249,7 @@ public class LinuxDebuggerLocal extends DebuggerBase implements LinuxDebugger {

     /* called from attach methods */
     private void findABIVersion() throws DebuggerException {
-        if (lookupByName0("libjvm.so", "__vt_10JavaThread") != 0 ||
-            lookupByName0("libjvm_g.so", "__vt_10JavaThread") != 0) {
+        if (lookupByName0("libjvm.so", "__vt_10JavaThread") != 0) {
            // old C++ ABI
            useGCC32ABI = false;
        } else {
@@ -117,8 +117,6 @@ public class JMap extends Tool {
         mode = MODE_HEAP_SUMMARY;
       } else if (modeFlag.equals("-histo")) {
         mode = MODE_HISTOGRAM;
-      } else if (modeFlag.equals("-permstat")) {
-        mode = MODE_CLSTATS;
       } else if (modeFlag.equals("-clstats")) {
         mode = MODE_CLSTATS;
       } else if (modeFlag.equals("-finalizerinfo")) {
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -81,22 +81,25 @@ ifeq ($(INCLUDE_ALL_GCS), false)
 cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
 cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp compactibleFreeListSpace.cpp \
 concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp \
-freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
-concurrentG1Refine.cpp concurrentG1RefineThread.cpp concurrentMark.cpp concurrentMarkThread.cpp \
-dirtyCardQueue.cpp g1AllocRegion.cpp g1BlockOffsetTable.cpp g1CollectedHeap.cpp g1GCPhaseTimes.cpp \
-g1CollectorPolicy.cpp g1ErgoVerbose.cpp g1_globals.cpp g1HRPrinter.cpp g1MarkSweep.cpp \
-g1MMUTracker.cpp g1MonitoringSupport.cpp g1RemSet.cpp g1SATBCardTableModRefBS.cpp heapRegion.cpp \
-heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp ptrQueue.cpp \
-satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp adjoiningGenerations.cpp \
-adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp cardTableExtension.cpp \
-gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp parallelScavengeHeap.cpp parMarkBitMap.cpp \
-pcTasks.cpp psAdaptiveSizePolicy.cpp psCompactionManager.cpp psGCAdaptivePolicyCounters.cpp \
-psGenerationCounters.cpp psMarkSweep.cpp psMarkSweepDecorator.cpp psOldGen.cpp psParallelCompact.cpp \
-psPromotionLAB.cpp psPromotionManager.cpp psScavenge.cpp psTasks.cpp psVirtualspace.cpp \
-psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp parCardTableModRefBS.cpp \
-parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp gSpaceCounters.cpp allocationStats.cpp \
-spaceCounters.cpp gcAdaptivePolicyCounters.cpp mutableNUMASpace.cpp immutableSpace.cpp \
-immutableSpace.cpp g1MemoryPool.cpp psMemoryPool.cpp yieldingWorkGroup.cpp g1Log.cpp
+freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp \
+collectionSetChooser.cpp concurrentG1Refine.cpp concurrentG1RefineThread.cpp \
+concurrentMark.cpp concurrentMarkThread.cpp dirtyCardQueue.cpp g1AllocRegion.cpp \
+g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \
+g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
+g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
+g1RemSet.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
+heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
+ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
+adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
+cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
+parallelScavengeHeap.cpp parMarkBitMap.cpp pcTasks.cpp psAdaptiveSizePolicy.cpp \
+psCompactionManager.cpp psGCAdaptivePolicyCounters.cpp psGenerationCounters.cpp \
+psMarkSweep.cpp psMarkSweepDecorator.cpp psMemoryPool.cpp psOldGen.cpp \
+psParallelCompact.cpp psPromotionLAB.cpp psPromotionManager.cpp psScavenge.cpp \
+psTasks.cpp psVirtualspace.cpp psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp \
+parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \
+gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \
+mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp
 endif

 ifeq ($(INCLUDE_NMT), false)
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013

 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=30
+HS_BUILD_NUMBER=32

 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
@@ -52,7 +52,7 @@ CXX=cl.exe
 # improving the quality of crash log stack traces involving jvm.dll.

 # These are always used in all compiles
-CXX_FLAGS=/nologo /W3 /WX
+CXX_FLAGS=$(EXTRA_CFLAGS) /nologo /W3 /WX

 # Let's add debug information when Full Debug Symbols is enabled
 !if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
@@ -193,7 +193,7 @@ ifdef COOKED_BUILD_NUMBER
 MAKE_ARGS += JDK_BUILD_NUMBER=$(COOKED_BUILD_NUMBER)
 endif

-NMAKE= MAKEFLAGS= MFLAGS= nmake -NOLOGO
+NMAKE= MAKEFLAGS= MFLAGS= EXTRA_CFLAGS="$(EXTRA_CFLAGS)" nmake -NOLOGO
 ifndef SYSTEM_UNAME
 SYSTEM_UNAME := $(shell uname)
 export SYSTEM_UNAME
hotspot/src/cpu/sparc/vm/compiledIC_sparc.cpp (new file, 193 lines)
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
+#include "code/icBuffer.hpp"
+#include "code/nmethod.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/safepoint.hpp"
+#ifdef COMPILER2
+#include "opto/matcher.hpp"
+#endif
+
+// Release the CompiledICHolder* associated with this call site is there is one.
+void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
+  // This call site might have become stale so inspect it carefully.
+  NativeCall* call = nativeCall_at(call_site->addr());
+  if (is_icholder_entry(call->destination())) {
+    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
+    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
+  }
+}
+
+bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
+  // This call site might have become stale so inspect it carefully.
+  NativeCall* call = nativeCall_at(call_site->addr());
+  return is_icholder_entry(call->destination());
+}
+
+//-----------------------------------------------------------------------------
+// High-level access to an inline cache. Guaranteed to be MT-safe.
+
+CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
+  : _ic_call(call)
+{
+  address ic_call = call->instruction_address();
+
+  assert(ic_call != NULL, "ic_call address must be set");
+  assert(nm != NULL, "must pass nmethod");
+  assert(nm->contains(ic_call), "must be in nmethod");
+
+  // Search for the ic_call at the given address.
+  RelocIterator iter(nm, ic_call, ic_call+1);
+  bool ret = iter.next();
+  assert(ret == true, "relocInfo must exist at this address");
+  assert(iter.addr() == ic_call, "must find ic_call");
+  if (iter.type() == relocInfo::virtual_call_type) {
+    virtual_call_Relocation* r = iter.virtual_call_reloc();
+    _is_optimized = false;
+    _value = nativeMovConstReg_at(r->cached_value());
+  } else {
+    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
+    _is_optimized = true;
+    _value = NULL;
+  }
+}
+
+// ----------------------------------------------------------------------------
+
+#define __ _masm.
+void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
+#ifdef COMPILER2
+  // Stub is fixed up when the corresponding call is converted from calling
+  // compiled code to calling interpreted code.
+  // set (empty), G5
+  // jmp -1
+
+  address mark = cbuf.insts_mark();  // Get mark within main instrs section.
+
+  MacroAssembler _masm(&cbuf);
+
+  address base =
+  __ start_a_stub(to_interp_stub_size()*2);
+  if (base == NULL) return;  // CodeBuffer::expand failed.
+
+  // Static stub relocation stores the instruction address of the call.
+  __ relocate(static_stub_Relocation::spec(mark));
+
+  __ set_metadata(NULL, as_Register(Matcher::inline_cache_reg_encode()));
+
+  __ set_inst_mark();
+  AddressLiteral addrlit(-1);
+  __ JUMP(addrlit, G3, 0);
+
+  __ delayed()->nop();
+
+  // Update current stubs pointer and restore code_end.
+  __ end_a_stub();
+#else
+  ShouldNotReachHere();
+#endif
+}
+#undef __
+
+int CompiledStaticCall::to_interp_stub_size() {
+  // This doesn't need to be accurate but it must be larger or equal to
+  // the real size of the stub.
+  return (NativeMovConstReg::instruction_size +  // sethi/setlo;
+          NativeJump::instruction_size +          // sethi; jmp; nop
+          (TraceJumps ? 20 * BytesPerInstWord : 0) );
+}
+
+// Relocation entries for call stub, compiled java to interpreter.
+int CompiledStaticCall::reloc_to_interp_stub() {
+  return 10;  // 4 in emit_java_to_interp + 1 in Java_Static_Call
+}
+
+void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
+  address stub = find_stub();
+  guarantee(stub != NULL, "stub not found");
+
+  if (TraceICs) {
+    ResourceMark rm;
+    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
+                  instruction_address(),
+                  callee->name_and_sig_as_C_string());
+  }
+
+  // Creation also verifies the object.
+  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
+  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+
+  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(),
+         "a) MT-unsafe modification of inline cache");
+  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,
+         "b) MT-unsafe modification of inline cache");
+
+  // Update stub.
+  method_holder->set_data((intptr_t)callee());
+  jump->set_jump_destination(entry);
+
+  // Update jump to call.
+  set_destination_mt_safe(stub);
+}
+
+void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
+  // Reset stub.
+  address stub = static_stub->addr();
+  assert(stub != NULL, "stub not found");
+  // Creation also verifies the object.
+  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
+  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+  method_holder->set_data(0);
+  jump->set_jump_destination((address)-1);
+}
+
+//-----------------------------------------------------------------------------
+// Non-product mode code
+#ifndef PRODUCT
+
+void CompiledStaticCall::verify() {
+  // Verify call.
+  NativeCall::verify();
+  if (os::is_MP()) {
+    verify_alignment();
+  }
+
+  // Verify stub.
+  address stub = find_stub();
+  assert(stub != NULL, "no stub found for static call");
+  // Creation also verifies the object.
+  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
+  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+
+  // Verify state.
+  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
+}
+
+#endif // !PRODUCT
@@ -30,4 +30,6 @@ const int BytesPerInstWord = 4;

 const int StackAlignmentInBytes = (2*wordSize);

+#define SUPPORTS_NATIVE_CX8
+
 #endif // CPU_SPARC_VM_GLOBALDEFINITIONS_SPARC_HPP
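SUPPORTS_NATIVE_CX8 tells the shared VM code that the platform performs an atomic 8-byte compare-and-swap natively (casx on SPARC), so 64-bit atomics need no lock-based fallback. Illustrative of the guarantee, not VM code:

    #include <cstdint>

    // A 64-bit CAS the hardware supports directly; sketch using a GCC/Clang builtin.
    bool cas64(volatile int64_t* addr, int64_t expected, int64_t desired) {
      return __atomic_compare_exchange_n(addr, &expected, desired, /*weak=*/false,
                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }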
@@ -23,7 +23,12 @@
  * questions.
  */

-#if defined(__GNUC__) && (__GNUC__ >= 4)
+// Note: please do not change these without also changing jni_md.h in the JDK
+// repository
+#ifndef __has_attribute
+  #define __has_attribute(x) 0
+#endif
+#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
 #define JNIEXPORT __attribute__((visibility("default")))
 #define JNIIMPORT __attribute__((visibility("default")))
 #else
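The same feature test stated standalone: probing __has_attribute first keeps the check safe on compilers that predate it, where only the GCC version comparison applies (EXPORT_SYM is an illustrative name):

    #ifndef __has_attribute
      #define __has_attribute(x) 0   /* pre-__has_attribute compilers */
    #endif

    #if (defined(__GNUC__) && ((__GNUC__ > 4) || \
         ((__GNUC__ == 4) && (__GNUC_MINOR__ > 2)))) || __has_attribute(visibility)
      #define EXPORT_SYM __attribute__((visibility("default")))
    #else
      #define EXPORT_SYM
    #endif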
@@ -1655,53 +1655,6 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
   return ra_->C->scratch_emit_size(this);
 }

-//=============================================================================
-
-// emit call stub, compiled java to interpretor
-void emit_java_to_interp(CodeBuffer &cbuf ) {
-
-  // Stub is fixed up when the corresponding call is converted from calling
-  // compiled code to calling interpreted code.
-  // set (empty), G5
-  // jmp -1
-
-  address mark = cbuf.insts_mark();  // get mark within main instrs section
-
-  MacroAssembler _masm(&cbuf);
-
-  address base =
-  __ start_a_stub(Compile::MAX_stubs_size);
-  if (base == NULL) return;  // CodeBuffer::expand failed
-
-  // static stub relocation stores the instruction address of the call
-  __ relocate(static_stub_Relocation::spec(mark));
-
-  __ set_metadata(NULL, reg_to_register_object(Matcher::inline_cache_reg_encode()));
-
-  __ set_inst_mark();
-  AddressLiteral addrlit(-1);
-  __ JUMP(addrlit, G3, 0);
-
-  __ delayed()->nop();
-
-  // Update current stubs pointer and restore code_end.
-  __ end_a_stub();
-}
-
-// size of call stub, compiled java to interpretor
-uint size_java_to_interp() {
-  // This doesn't need to be accurate but it must be larger or equal to
-  // the real size of the stub.
-  return (NativeMovConstReg::instruction_size +  // sethi/setlo;
-          NativeJump::instruction_size +          // sethi; jmp; nop
-          (TraceJumps ? 20 * BytesPerInstWord : 0) );
-}
-// relocation entries for call stub, compiled java to interpretor
-uint reloc_java_to_interp() {
-  return 10;  // 4 in emit_java_to_interp + 1 in Java_Static_Call
-}
-
-
 //=============================================================================
 #ifndef PRODUCT
 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
@@ -2576,15 +2529,15 @@ encode %{
   enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
     // who we intended to call.
-    if ( !_method ) {
+    if (!_method) {
       emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type);
     } else if (_optimized_virtual) {
       emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type);
     } else {
       emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type);
     }
-    if( _method ) {  // Emit stub for static call
-      emit_java_to_interp(cbuf);
+    if (_method) {  // Emit stub for static call.
+      CompiledStaticCall::emit_to_interp_stub(cbuf);
     }
   %}

hotspot/src/cpu/x86/vm/compiledIC_x86.cpp (new file, 180 lines)
@@ -0,0 +1,180 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "code/compiledIC.hpp"
|
||||
#include "code/icBuffer.hpp"
|
||||
#include "code/nmethod.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
|
||||
// Release the CompiledICHolder* associated with this call site is there is one.
|
||||
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
|
||||
// This call site might have become stale so inspect it carefully.
|
||||
NativeCall* call = nativeCall_at(call_site->addr());
|
||||
if (is_icholder_entry(call->destination())) {
|
||||
NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
|
||||
InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
|
||||
}
|
||||
}
|
||||
|
||||
bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
|
||||
// This call site might have become stale so inspect it carefully.
|
||||
NativeCall* call = nativeCall_at(call_site->addr());
|
||||
return is_icholder_entry(call->destination());
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// High-level access to an inline cache. Guaranteed to be MT-safe.
|
||||
|
||||
CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
|
||||
: _ic_call(call)
|
||||
{
|
||||
address ic_call = call->instruction_address();
|
||||
|
||||
assert(ic_call != NULL, "ic_call address must be set");
|
||||
assert(nm != NULL, "must pass nmethod");
|
||||
assert(nm->contains(ic_call), "must be in nmethod");
|
||||
|
||||
// Search for the ic_call at the given address.
|
||||
RelocIterator iter(nm, ic_call, ic_call+1);
|
||||
bool ret = iter.next();
|
||||
assert(ret == true, "relocInfo must exist at this address");
|
||||
assert(iter.addr() == ic_call, "must find ic_call");
|
||||
if (iter.type() == relocInfo::virtual_call_type) {
|
||||
virtual_call_Relocation* r = iter.virtual_call_reloc();
|
||||
_is_optimized = false;
|
||||
_value = nativeMovConstReg_at(r->cached_value());
|
||||
} else {
|
||||
assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
|
||||
_is_optimized = true;
|
||||
_value = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
#define __ _masm.
|
||||
void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
|
||||
// Stub is fixed up when the corresponding call is converted from
|
||||
// calling compiled code to calling interpreted code.
|
||||
// movq rbx, 0
|
||||
// jmp -5 # to self
|
||||
|
||||
address mark = cbuf.insts_mark(); // Get mark within main instrs section.
|
||||
|
||||
// Note that the code buffer's insts_mark is always relative to insts.
|
||||
// That's why we must use the macroassembler to generate a stub.
|
||||
MacroAssembler _masm(&cbuf);
|
||||
|
||||
address base =
|
||||
__ start_a_stub(to_interp_stub_size()*2);
|
||||
if (base == NULL) return; // CodeBuffer::expand failed.
|
||||
// Static stub relocation stores the instruction address of the call.
|
||||
__ relocate(static_stub_Relocation::spec(mark), Assembler::imm_operand);
|
||||
// Static stub relocation also tags the Method* in the code-stream.
|
||||
__ mov_metadata(rbx, (Metadata*) NULL); // Method is zapped till fixup time.
|
||||
// This is recognized as unresolved by relocs/nativeinst/ic code.
|
||||
__ jump(RuntimeAddress(__ pc()));
|
||||
|
||||
// Update current stubs pointer and restore insts_end.
|
||||
__ end_a_stub();
|
||||
}
|
||||
#undef __
|
||||
|
||||
int CompiledStaticCall::to_interp_stub_size() {
|
||||
return NOT_LP64(10) // movl; jmp
|
||||
LP64_ONLY(15); // movq (1+1+8); jmp (1+4)
|
||||
}
|
||||
|
||||
// Relocation entries for call stub, compiled java to interpreter.
|
||||
int CompiledStaticCall::reloc_to_interp_stub() {
|
||||
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
|
||||
}
|
||||
|
||||
void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
|
||||
address stub = find_stub();
|
||||
guarantee(stub != NULL, "stub not found");
|
||||
|
||||
if (TraceICs) {
|
||||
ResourceMark rm;
|
||||
tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
|
||||
instruction_address(),
|
||||
callee->name_and_sig_as_C_string());
|
||||
}
|
||||
|
||||
// Creation also verifies the object.
|
||||
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
|
||||
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
|
||||
|
||||
assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(),
|
||||
"a) MT-unsafe modification of inline cache");
|
||||
assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,
|
||||
"b) MT-unsafe modification of inline cache");
|
||||
|
||||
// Update stub.
|
||||
method_holder->set_data((intptr_t)callee());
|
||||
jump->set_jump_destination(entry);
|
||||
|
||||
// Update jump to call.
|
||||
set_destination_mt_safe(stub);
|
||||
}
|
||||
|
||||
void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
|
||||
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
|
||||
// Reset stub.
|
||||
address stub = static_stub->addr();
|
||||
assert(stub != NULL, "stub not found");
|
||||
// Creation also verifies the object.
|
||||
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
|
||||
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
|
||||
method_holder->set_data(0);
|
||||
jump->set_jump_destination((address)-1);
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Non-product mode code
|
||||
#ifndef PRODUCT
|
||||
|
||||
void CompiledStaticCall::verify() {
|
||||
// Verify call.
|
||||
NativeCall::verify();
|
||||
if (os::is_MP()) {
|
||||
verify_alignment();
|
||||
}
|
||||
|
||||
// Verify stub.
|
||||
address stub = find_stub();
|
||||
assert(stub != NULL, "no stub found for static call");
|
||||
// Creation also verifies the object.
|
||||
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
|
||||
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
|
||||
|
||||
// Verify state.
|
||||
assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
|
||||
}
|
||||
|
||||
#endif // !PRODUCT
|
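As a sanity check on the size constants above, here is a small standalone sketch (not HotSpot code) of the LP64 arithmetic; the x86-64 encodings assumed are REX.W + (B8+rd) + imm64 for the movq and E9 + rel32 for the jmp.

#include <cassert>

int to_interp_stub_size_lp64() {
  const int movq_bytes = 1 + 1 + 8;  // REX.W prefix + (B8+rd) opcode + imm64
  const int jmp_bytes  = 1 + 4;      // E9 opcode + rel32
  return movq_bytes + jmp_bytes;     // 15, matching LP64_ONLY(15) above
}

int main() {
  assert(to_interp_stub_size_lp64() == 15);
  return 0;
}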
@ -27,4 +27,6 @@

const int StackAlignmentInBytes = 16;

#define SUPPORTS_NATIVE_CX8

#endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
@ -28,7 +28,13 @@

#if defined(SOLARIS) || defined(LINUX) || defined(_ALLBSD_SOURCE)

#if defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2)

// Note: please do not change these without also changing jni_md.h in the JDK
// repository
#ifndef __has_attribute
  #define __has_attribute(x) 0
#endif
#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
  #define JNIEXPORT __attribute__((visibility("default")))
  #define JNIIMPORT __attribute__((visibility("default")))
#else
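For context, a minimal compilable sketch of the feature-test idiom this hunk introduces: define __has_attribute to 0 on compilers that lack the probe, so one #if covers both the GCC version check and the Clang attribute check. MY_EXPORT is an illustrative name, not part of the JDK.

#ifndef __has_attribute
#define __has_attribute(x) 0   // pre-Clang compilers lack the probe
#endif

#if (defined(__GNUC__) && (__GNUC__ >= 4)) || __has_attribute(visibility)
#define MY_EXPORT __attribute__((visibility("default")))
#else
#define MY_EXPORT
#endif

// Stays visible even when the library is built with -fvisibility=hidden.
MY_EXPORT int my_exported_function(void) { return 42; }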
@ -1256,43 +1256,6 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  }
}

//=============================================================================

// emit call stub, compiled java to interpreter
void emit_java_to_interp(CodeBuffer &cbuf) {
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // mov rbx,0
  // jmp -1

  address mark = cbuf.insts_mark();  // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL) return;  // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM32);
  // static stub relocation also tags the Method* in the code-stream.
  __ mov_metadata(rbx, (Metadata*)NULL);  // method is zapped till fixup time
  // This is recognized as unresolved by relocs/nativeInst/ic code
  __ jump(RuntimeAddress(__ pc()));

  __ end_a_stub();
  // Update current stubs pointer and restore insts_end.
}
// size of call stub, compiled java to interpreter
uint size_java_to_interp() {
  return 10;  // movl; jmp
}
// relocation entries for call stub, compiled java to interpreter
uint reloc_java_to_interp() {
  return 4;  // 3 in emit_java_to_interp + 1 in Java_Static_Call
}

//=============================================================================
#ifndef PRODUCT
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
@ -1909,8 +1872,8 @@ encode %{
    emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                   static_call_Relocation::spec(), RELOC_IMM32 );
  }
  if (_method) {  // Emit stub for static call
    emit_java_to_interp(cbuf);
  if (_method) {  // Emit stub for static call.
    CompiledStaticCall::emit_to_interp_stub(cbuf);
  }
%}
@ -1387,48 +1387,6 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const
  return (offset < 0x80) ? 5 : 8;  // REX
}

//=============================================================================

// emit call stub, compiled java to interpreter
void emit_java_to_interp(CodeBuffer& cbuf)
{
  // Stub is fixed up when the corresponding call is converted from
  // calling compiled code to calling interpreted code.
  // movq rbx, 0
  // jmp -5 # to self

  address mark = cbuf.insts_mark();  // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL) return;  // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64);
  // static stub relocation also tags the Method* in the code-stream.
  __ mov_metadata(rbx, (Metadata*) NULL);  // method is zapped till fixup time
  // This is recognized as unresolved by relocs/nativeinst/ic code
  __ jump(RuntimeAddress(__ pc()));

  // Update current stubs pointer and restore insts_end.
  __ end_a_stub();
}

// size of call stub, compiled java to interpreter
uint size_java_to_interp()
{
  return 15;  // movq (1+1+8); jmp (1+4)
}

// relocation entries for call stub, compiled java to interpreter
uint reloc_java_to_interp()
{
  return 4;  // 3 in emit_java_to_interp + 1 in Java_Static_Call
}

//=============================================================================
#ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
@ -2078,8 +2036,8 @@ encode %{
                   RELOC_DISP32);
  }
  if (_method) {
    // Emit stub for static call
    emit_java_to_interp(cbuf);
    // Emit stub for static call.
    CompiledStaticCall::emit_to_interp_stub(cbuf);
  }
%}
122 hotspot/src/cpu/zero/vm/compiledIC_zero.cpp (Normal file)
@ -0,0 +1,122 @@
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Release the CompiledICHolder* associated with this call site if there is one.
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
  // This call site might have become stale so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  if (is_icholder_entry(call->destination())) {
    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
  }
}

bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
  // This call site might have become stale so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  return is_icholder_entry(call->destination());
}

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.

CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
  : _ic_call(call)
{
  address ic_call = call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass nmethod");
  assert(nm->contains(ic_call), "must be in nmethod");

  // Search for the ic_call at the given address.
  RelocIterator iter(nm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");
  if (iter.type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter.virtual_call_reloc();
    _is_optimized = false;
    _value = nativeMovConstReg_at(r->cached_value());
  } else {
    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

// ----------------------------------------------------------------------------

void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
  ShouldNotReachHere(); // Only needed for COMPILER2.
}

int CompiledStaticCall::to_interp_stub_size() {
  ShouldNotReachHere(); // Only needed for COMPILER2.
  return 0;
}

// Relocation entries for call stub, compiled java to interpreter.
int CompiledStaticCall::reloc_to_interp_stub() {
  ShouldNotReachHere(); // Only needed for COMPILER2.
  return 0;
}

void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
  ShouldNotReachHere(); // Only needed for COMPILER2.
}

void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
  ShouldNotReachHere(); // Only needed for COMPILER2.
}

//-----------------------------------------------------------------------------
// Non-product mode code.
#ifndef PRODUCT

void CompiledStaticCall::verify() {
  ShouldNotReachHere(); // Only needed for COMPILER2.
}

#endif // !PRODUCT
@ -212,7 +212,13 @@ int CppInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {

  // Update the invocation counter
  if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
    InvocationCounter *counter = method->invocation_counter();
    MethodCounters* mcs = method->method_counters();
    if (mcs == NULL) {
      CALL_VM_NOCHECK(mcs = InterpreterRuntime::build_method_counters(thread, method));
      if (HAS_PENDING_EXCEPTION)
        goto unwind_and_return;
    }
    InvocationCounter *counter = mcs->invocation_counter();
    counter->increment();
    if (counter->reached_InvocationLimit()) {
      CALL_VM_NOCHECK(
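The hunk above moves the invocation counter behind a lazily built MethodCounters object. A standalone sketch of that allocate-on-first-use pattern (illustrative names only, not the HotSpot API; the real code must also handle a failed allocation via the pending-exception path):

#include <cstdio>

struct Counters { int invocations = 0; };

struct Method {
  Counters* counters = nullptr;           // allocated on demand, not per-Method up front
  Counters* get_or_build_counters() {
    if (counters == nullptr) counters = new Counters();  // the VM may fail here and unwind
    return counters;
  }
};

int main() {
  Method m;
  m.get_or_build_counters()->invocations++;    // first call allocates
  m.get_or_build_counters()->invocations++;    // later calls reuse
  std::printf("%d\n", m.counters->invocations); // prints 2
  delete m.counters;
  return 0;
}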
@ -25,7 +25,13 @@
 */


#if defined(__GNUC__) && (__GNUC__ >= 4)

// Note: please do not change these without also changing jni_md.h in the JDK
// repository
#ifndef __has_attribute
  #define __has_attribute(x) 0
#endif
#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
  #define JNIEXPORT __attribute__((visibility("default")))
  #define JNIIMPORT __attribute__((visibility("default")))
#else
@ -1230,10 +1230,6 @@ bool os::dll_build_name(char* buffer, size_t buflen,
  return retval;
}

const char* os::get_current_directory(char *buf, int buflen) {
  return getcwd(buf, buflen);
}

// check if addr is inside libjvm.so
bool os::address_is_in_vm(address addr) {
  static address libjvm_base_addr;
@ -2080,9 +2076,10 @@ static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
    flags |= MAP_FIXED;
  }

  // Map uncommitted pages PROT_READ and PROT_WRITE, change access
  // to PROT_EXEC if executable when we commit the page.
  addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE,
  // Map reserved/uncommitted pages PROT_NONE so we fail early if we
  // touch an uncommitted page. Otherwise, the read/write might
  // succeed if we have enough swap space to back the physical page.
  addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
                       flags, -1, 0);

  if (addr != MAP_FAILED) {
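The PROT_NONE change above separates reserving address space from committing it. A minimal POSIX sketch of that reserve-then-commit pattern, assuming Linux/BSD mmap semantics and a 4K page:

#include <sys/mman.h>
#include <cassert>
#include <cstring>

int main() {
  const size_t page = 4096;
  // Reserve: no access rights, so a stray touch faults immediately
  // instead of silently consuming swap-backed memory.
  char* base = (char*)mmap(nullptr, 16 * page, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);
  // Commit the first page: raise protection so it may be read and written.
  int rc = mprotect(base, page, PROT_READ | PROT_WRITE);
  assert(rc == 0);
  memset(base, 0xAB, page);   // OK: committed
  // base[page] = 0;          // would SIGSEGV: still PROT_NONE
  munmap(base, 16 * page);
  return 0;
}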
@ -119,6 +119,7 @@ int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
Mutex* os::Linux::_createThread_lock = NULL;
pthread_t os::Linux::_main_thread;
int os::Linux::_page_size = -1;
const int os::Linux::_vm_default_page_size = (8 * K);
bool os::Linux::_is_floating_stack = false;
bool os::Linux::_is_NPTL = false;
bool os::Linux::_supports_fast_thread_cpu_time = false;
@ -1662,10 +1663,6 @@ bool os::dll_build_name(char* buffer, size_t buflen,
  return retval;
}

const char* os::get_current_directory(char *buf, int buflen) {
  return getcwd(buf, buflen);
}

// check if addr is inside libjvm.so
bool os::address_is_in_vm(address addr) {
  static address libjvm_base_addr;
@ -2906,9 +2903,10 @@ static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
    flags |= MAP_FIXED;
  }

  // Map uncommitted pages PROT_READ and PROT_WRITE, change access
  // to PROT_EXEC if executable when we commit the page.
  addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE,
  // Map reserved/uncommitted pages PROT_NONE so we fail early if we
  // touch an uncommitted page. Otherwise, the read/write might
  // succeed if we have enough swap space to back the physical page.
  addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
                       flags, -1, 0);

  if (addr != MAP_FAILED) {
@ -4249,6 +4247,15 @@ void os::init(void) {
  Linux::clock_init();
  initial_time_count = os::elapsed_counter();
  pthread_mutex_init(&dl_mutex, NULL);

  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages. The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > (int)Linux::vm_default_page_size()) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*Linux::vm_default_page_size()), vm_page_size()) / vm_page_size();
  }
}

// To install functions for atexit system call
@ -4302,8 +4309,8 @@ jint os::init_2(void)
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
                    2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::page_size());
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() +
                    (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size());

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
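The guard-page rescaling in os::init above keeps the shadow zone's byte budget constant when the platform page is larger than the 8K tuning baseline. A standalone sketch of the same arithmetic (round_to here is an assumed helper mirroring HotSpot's round-up-to-multiple):

#include <cstdio>

long round_to(long x, long multiple) {   // round x up to a multiple
  return ((x + multiple - 1) / multiple) * multiple;
}

int main() {
  const long default_page = 8 * 1024;    // 8K, the tuning baseline
  const long vm_page      = 64 * 1024;   // e.g. a 64K-page system
  long shadow_pages = 20;                // tuned against 8K pages
  // Same byte budget in 64K pages: ceil(20*8K / 64K) = 3.
  shadow_pages = round_to(shadow_pages * default_page, vm_page) / vm_page;
  std::printf("%ld\n", shadow_pages);    // prints 3
  return 0;
}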
@ -70,6 +70,7 @@ class Linux {
  static pthread_t _main_thread;
  static Mutex* _createThread_lock;
  static int _page_size;
  static const int _vm_default_page_size;

  static julong available_memory();
  static julong physical_memory() { return _physical_memory; }
@ -116,6 +117,8 @@ class Linux {
  static int page_size(void) { return _page_size; }
  static void set_page_size(int val) { _page_size = val; }

  static int vm_default_page_size(void) { return _vm_default_page_size; }

  static address ucontext_get_pc(ucontext_t* uc);
  static intptr_t* ucontext_get_sp(ucontext_t* uc);
  static intptr_t* ucontext_get_fp(ucontext_t* uc);
@ -251,3 +251,11 @@ bool os::has_allocatable_memory_limit(julong* limit) {
  return true;
#endif
}

const char* os::get_current_directory(char *buf, size_t buflen) {
  return getcwd(buf, buflen);
}

FILE* os::open(int fd, const char* mode) {
  return ::fdopen(fd, mode);
}
@ -824,7 +824,7 @@ void os::init_system_properties_values() {
    // allocate new buffer and initialize
    info = (Dl_serinfo*)malloc(_info.dls_size);
    if (info == NULL) {
      vm_exit_out_of_memory(_info.dls_size,
      vm_exit_out_of_memory(_info.dls_size, OOM_MALLOC_ERROR,
                            "init_system_properties_values info");
    }
    info->dls_size = _info.dls_size;
@ -866,7 +866,7 @@ void os::init_system_properties_values() {
    common_path = malloc(bufsize);
    if (common_path == NULL) {
      free(info);
      vm_exit_out_of_memory(bufsize,
      vm_exit_out_of_memory(bufsize, OOM_MALLOC_ERROR,
                            "init_system_properties_values common_path");
    }
    sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch);
@ -879,7 +879,7 @@ void os::init_system_properties_values() {
    if (library_path == NULL) {
      free(info);
      free(common_path);
      vm_exit_out_of_memory(bufsize,
      vm_exit_out_of_memory(bufsize, OOM_MALLOC_ERROR,
                            "init_system_properties_values library_path");
    }
    library_path[0] = '\0';
@ -1623,7 +1623,8 @@ void os::thread_local_storage_at_put(int index, void* value) {
  // %%% this is used only in threadLocalStorage.cpp
  if (thr_setspecific((thread_key_t)index, value)) {
    if (errno == ENOMEM) {
      vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space");
      vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
                            "thr_setspecific: out of swap space");
    } else {
      fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
                    "(%s)", strerror(errno)));
@ -1915,10 +1916,6 @@ bool os::dll_build_name(char* buffer, size_t buflen,
  return retval;
}

const char* os::get_current_directory(char *buf, int buflen) {
  return getcwd(buf, buflen);
}

// check if addr is inside libjvm.so
bool os::address_is_in_vm(address addr) {
  static address libjvm_base_addr;
@ -1221,8 +1221,10 @@ bool os::dll_build_name(char *buffer, size_t buflen,

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, int buflen) {
  return _getcwd(buf, buflen);
const char* os::get_current_directory(char *buf, size_t buflen) {
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX) n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
@ -4098,6 +4100,10 @@ int os::open(const char *path, int oflag, int mode) {
  return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
}

FILE* os::open(int fd, const char* mode) {
  return ::_fdopen(fd, mode);
}

// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
  WIN32_FIND_DATA fd;
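The Windows hunk above clamps the new size_t length before handing it to _getcwd, which takes an int. A standalone sketch of that clamp (illustrative only; note the platform code checks after casting, which works because the check guards which value is used):

#include <climits>
#include <cstddef>
#include <cstdio>

int clamp_to_int(std::size_t n) {
  return (n > (std::size_t)INT_MAX) ? INT_MAX : (int)n;
}

int main() {
  std::printf("%d\n", clamp_to_int((std::size_t)42));           // 42
  std::printf("%d\n", clamp_to_int((std::size_t)INT_MAX + 7));  // INT_MAX, not a wrapped negative
  return 0;
}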
@ -178,7 +178,7 @@ static void current_stack_region(address* bottom, size_t* size) {
  // JVM needs to know exact stack location, abort if it fails
  if (rslt != 0) {
    if (rslt == ENOMEM) {
      vm_exit_out_of_memory(0, "pthread_getattr_np");
      vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
    } else {
      fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
    }
@ -710,7 +710,7 @@ static void current_stack_region(address * bottom, size_t * size) {
  // JVM needs to know exact stack location, abort if it fails
  if (rslt != 0) {
    if (rslt == ENOMEM) {
      vm_exit_out_of_memory(0, "pthread_getattr_np");
      vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
    } else {
      fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
    }
@ -313,7 +313,7 @@ static void current_stack_region(address *bottom, size_t *size) {
  int res = pthread_getattr_np(pthread_self(), &attr);
  if (res != 0) {
    if (res == ENOMEM) {
      vm_exit_out_of_memory(0, "pthread_getattr_np");
      vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
    }
    else {
      fatal(err_msg("pthread_getattr_np failed with errno = %d", res));
@ -591,7 +591,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
  // on the thread stack, which could get a mapping error when touched.
  address addr = (address) info->si_addr;
  if (sig == SIGBUS && info->si_code == BUS_OBJERR && info->si_errno == ENOMEM) {
    vm_exit_out_of_memory(0, "Out of swap space to map in thread stack.");
    vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "Out of swap space to map in thread stack.");
  }

  VMError err(t, sig, pc, info, ucVoid);
@ -745,7 +745,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
  // on the thread stack, which could get a mapping error when touched.
  address addr = (address) info->si_addr;
  if (sig == SIGBUS && info->si_code == BUS_OBJERR && info->si_errno == ENOMEM) {
    vm_exit_out_of_memory(0, "Out of swap space to map in thread stack.");
    vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "Out of swap space to map in thread stack.");
  }

  VMError err(t, sig, pc, info, ucVoid);
@ -213,6 +213,7 @@ int main(int argc, char *argv[])
  AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._HPP_file._name));
  AD.addInclude(AD._CPP_file, "memory/allocation.inline.hpp");
  AD.addInclude(AD._CPP_file, "asm/macroAssembler.inline.hpp");
  AD.addInclude(AD._CPP_file, "code/compiledIC.hpp");
  AD.addInclude(AD._CPP_file, "code/vmreg.hpp");
  AD.addInclude(AD._CPP_file, "gc_interface/collectedHeap.inline.hpp");
  AD.addInclude(AD._CPP_file, "oops/compiledICHolder.hpp");
@ -44,7 +44,7 @@ AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
  CodeSection* cs = code->insts();
  cs->clear_mark();  // new assembler kills old mark
  if (cs->start() == NULL) {
    vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s",
    vm_exit_out_of_memory(0, OOM_MMAP_ERROR, err_msg("CodeCache: no room for %s",
                                     code->name()));
  }
  _code_section = cs;
@ -483,7 +483,8 @@ ciKlass* ciEnv::get_klass_by_index_impl(constantPoolHandle cpool,
  {
    // We have to lock the cpool to keep the oop from being resolved
    // while we are accessing it.
    MonitorLockerEx ml(cpool->lock());
    oop cplock = cpool->lock();
    ObjectLocker ol(cplock, THREAD, cplock != NULL);
    constantTag tag = cpool->tag_at(index);
    if (tag.is_klass()) {
      // The klass has been inserted into the constant pool
@ -1149,23 +1150,9 @@ void ciEnv::record_out_of_memory_failure() {
  record_method_not_compilable("out of memory");
}

fileStream* ciEnv::_replay_data_stream = NULL;

void ciEnv::dump_replay_data() {
void ciEnv::dump_replay_data(outputStream* out) {
  VM_ENTRY_MARK;
  MutexLocker ml(Compile_lock);
  if (_replay_data_stream == NULL) {
    _replay_data_stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(ReplayDataFile);
    if (_replay_data_stream == NULL) {
      fatal(err_msg("Can't open %s for replay data", ReplayDataFile));
    }
  }
  dump_replay_data(_replay_data_stream);
}


void ciEnv::dump_replay_data(outputStream* out) {
  ASSERT_IN_VM;
  ResourceMark rm;
#if INCLUDE_JVMTI
  out->print_cr("JvmtiExport can_access_local_variables %d", _jvmti_can_access_local_variables);
@ -1178,13 +1165,15 @@ void ciEnv::dump_replay_data(outputStream* out) {
  for (int i = 0; i < objects->length(); i++) {
    objects->at(i)->dump_replay_data(out);
  }
  Method* method = task()->method();
  int entry_bci = task()->osr_bci();
  CompileTask* task = this->task();
  Method* method = task->method();
  int entry_bci = task->osr_bci();
  int comp_level = task->comp_level();
  // Klass holder = method->method_holder();
  out->print_cr("compile %s %s %s %d",
  out->print_cr("compile %s %s %s %d %d",
                method->klass_name()->as_quoted_ascii(),
                method->name()->as_quoted_ascii(),
                method->signature()->as_quoted_ascii(),
                entry_bci);
                entry_bci, comp_level);
  out->flush();
}
@ -46,8 +46,6 @@ class ciEnv : StackObj {
  friend class CompileBroker;
  friend class Dependencies;  // for get_object, during logging

  static fileStream* _replay_data_stream;

private:
  Arena* _arena;  // Alias for _ciEnv_arena except in init_shared_objects()
  Arena _ciEnv_arena;
@ -451,10 +449,6 @@ public:
  // RedefineClasses support
  void metadata_do(void f(Metadata*)) { _factory->metadata_do(f); }

  // Dump the compilation replay data for this ciEnv to
  // ReplayDataFile, creating the file if needed.
  void dump_replay_data();

  // Dump the compilation replay data for the ciEnv to the stream.
  void dump_replay_data(outputStream* out);
};
@ -196,7 +196,6 @@ class ciMethod : public ciMetadata {
  // Analysis and profiling.
  //
  // Usage note: liveness_at_bci and init_vars should be wrapped in ResourceMarks.
  bool uses_monitors() const { return _uses_monitors; }  // this one should go away, it has a misleading name
  bool has_monitor_bytecodes() const { return _uses_monitors; }
  bool has_balanced_monitors();

@ -89,7 +89,7 @@ class CompileReplay : public StackObj {
    loader = Handle(thread, SystemDictionary::java_system_loader());
    stream = fopen(filename, "rt");
    if (stream == NULL) {
      fprintf(stderr, "Can't open replay file %s\n", filename);
      fprintf(stderr, "ERROR: Can't open replay file %s\n", filename);
    }
    buffer_length = 32;
    buffer = NEW_RESOURCE_ARRAY(char, buffer_length);
@ -327,7 +327,6 @@ class CompileReplay : public StackObj {
    if (had_error()) {
      tty->print_cr("Error while parsing line %d: %s\n", line_no, _error_message);
      tty->print_cr("%s", buffer);
      assert(false, "error");
      return;
    }
    pos = 0;
@ -370,11 +369,47 @@ class CompileReplay : public StackObj {
    }
  }

  // compile <klass> <name> <signature> <entry_bci>
  // validation of comp_level
  bool is_valid_comp_level(int comp_level) {
    const int msg_len = 256;
    char* msg = NULL;
    if (!is_compile(comp_level)) {
      msg = NEW_RESOURCE_ARRAY(char, msg_len);
      jio_snprintf(msg, msg_len, "%d isn't compilation level", comp_level);
    } else if (!TieredCompilation && (comp_level != CompLevel_highest_tier)) {
      msg = NEW_RESOURCE_ARRAY(char, msg_len);
      switch (comp_level) {
        case CompLevel_simple:
          jio_snprintf(msg, msg_len, "compilation level %d requires Client VM or TieredCompilation", comp_level);
          break;
        case CompLevel_full_optimization:
          jio_snprintf(msg, msg_len, "compilation level %d requires Server VM", comp_level);
          break;
        default:
          jio_snprintf(msg, msg_len, "compilation level %d requires TieredCompilation", comp_level);
      }
    }
    if (msg != NULL) {
      report_error(msg);
      return false;
    }
    return true;
  }

  // compile <klass> <name> <signature> <entry_bci> <comp_level>
  void process_compile(TRAPS) {
    // methodHandle method;
    Method* method = parse_method(CHECK);
    int entry_bci = parse_int("entry_bci");
    const char* comp_level_label = "comp_level";
    int comp_level = parse_int(comp_level_label);
    // old version w/o comp_level
    if (had_error() && (error_message() == comp_level_label)) {
      comp_level = CompLevel_full_optimization;
    }
    if (!is_valid_comp_level(comp_level)) {
      return;
    }
    Klass* k = method->method_holder();
    ((InstanceKlass*)k)->initialize(THREAD);
    if (HAS_PENDING_EXCEPTION) {
@ -389,12 +424,12 @@ class CompileReplay : public StackObj {
      }
    }
    // Make sure the existence of a prior compile doesn't stop this one
    nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, CompLevel_full_optimization, true) : method->code();
    nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code();
    if (nm != NULL) {
      nm->make_not_entrant();
    }
    replay_state = this;
    CompileBroker::compile_method(method, entry_bci, CompLevel_full_optimization,
    CompileBroker::compile_method(method, entry_bci, comp_level,
                                  methodHandle(), 0, "replay", THREAD);
    replay_state = NULL;
    reset();
@ -551,7 +586,7 @@ class CompileReplay : public StackObj {
        if (parsed_two_word == i) continue;

      default:
        ShouldNotReachHere();
        fatal(err_msg_res("Unexpected tag: %d", cp->tag_at(i).value()));
        break;
      }

@ -819,6 +854,11 @@ int ciReplay::replay_impl(TRAPS) {
    ReplaySuppressInitializers = 1;
  }

  if (FLAG_IS_DEFAULT(ReplayDataFile)) {
    tty->print_cr("ERROR: no compiler replay data file specified (use -XX:ReplayDataFile=replay_pid12345.txt).");
    return 1;
  }

  // Load and parse the replay data
  CompileReplay rp(ReplayDataFile, THREAD);
  int exit_code = 0;
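process_compile above stays backward compatible with old replay files by treating the trailing comp_level field as optional. A standalone sketch of that parse-with-fallback idiom (names and file format here are illustrative, not the replay parser itself):

#include <cstdio>
#include <sstream>
#include <string>

int parse_compile_line(const std::string& line, int default_level) {
  std::istringstream in(line);
  std::string klass, name, sig;
  int entry_bci = 0, comp_level = 0;
  in >> klass >> name >> sig >> entry_bci;
  if (!(in >> comp_level)) comp_level = default_level;  // old format: no level field
  return comp_level;
}

int main() {
  std::printf("%d\n", parse_compile_line("Foo bar ()V -1 3", 4));  // 3 (new format)
  std::printf("%d\n", parse_compile_line("Foo bar ()V -1", 4));    // 4 (fallback)
  return 0;
}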
@ -75,8 +75,8 @@ ConstantPool* BytecodeConstantPool::create_constant_pool(TRAPS) const {
    int idx = i + _orig->length();
    switch (entry._tag) {
      case BytecodeCPEntry::UTF8:
        cp->symbol_at_put(idx, entry._u.utf8);
        entry._u.utf8->increment_refcount();
        cp->symbol_at_put(idx, entry._u.utf8);
        break;
      case BytecodeCPEntry::KLASS:
        cp->unresolved_klass_at_put(
@ -2027,7 +2027,6 @@ methodHandle ClassFileParser::parse_method(bool is_interface,
  u2 method_parameters_length = 0;
  u1* method_parameters_data = NULL;
  bool method_parameters_seen = false;
  bool method_parameters_four_byte_flags;
  bool parsed_code_attribute = false;
  bool parsed_checked_exceptions_attribute = false;
  bool parsed_stackmap_attribute = false;
@ -2241,26 +2240,14 @@ methodHandle ClassFileParser::parse_method(bool is_interface,
        }
        method_parameters_seen = true;
        method_parameters_length = cfs->get_u1_fast();
        // Track the actual size (note: this is written for clarity; a
        // decent compiler will CSE and constant-fold this into a single
        // expression)
        // Use the attribute length to figure out the size of flags
        if (method_attribute_length == (method_parameters_length * 6u) + 1u) {
          method_parameters_four_byte_flags = true;
        } else if (method_attribute_length == (method_parameters_length * 4u) + 1u) {
          method_parameters_four_byte_flags = false;
        } else {
        if (method_attribute_length != (method_parameters_length * 4u) + 1u) {
          classfile_parse_error(
            "Invalid MethodParameters method attribute length %u in class file",
            method_attribute_length, CHECK_(nullHandle));
        }
        method_parameters_data = cfs->get_u1_buffer();
        cfs->skip_u2_fast(method_parameters_length);
        if (method_parameters_four_byte_flags) {
          cfs->skip_u4_fast(method_parameters_length);
        } else {
          cfs->skip_u2_fast(method_parameters_length);
        }
        cfs->skip_u2_fast(method_parameters_length);
        // ignore this attribute if it cannot be reflected
        if (!SystemDictionary::Parameter_klass_loaded())
          method_parameters_length = 0;
@ -2423,13 +2410,8 @@ methodHandle ClassFileParser::parse_method(bool is_interface,
    for (int i = 0; i < method_parameters_length; i++) {
      elem[i].name_cp_index = Bytes::get_Java_u2(method_parameters_data);
      method_parameters_data += 2;
      if (method_parameters_four_byte_flags) {
        elem[i].flags = Bytes::get_Java_u4(method_parameters_data);
        method_parameters_data += 4;
      } else {
        elem[i].flags = Bytes::get_Java_u2(method_parameters_data);
        method_parameters_data += 2;
      }
      elem[i].flags = Bytes::get_Java_u2(method_parameters_data);
      method_parameters_data += 2;
    }
  }
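After this hunk, a MethodParameters entry is always a u2 name index plus a u2 flags word (4 bytes), preceded by one u1 count byte, so the attribute length must be count*4 + 1. A standalone sketch of that validation:

#include <cstdio>

bool valid_method_parameters_length(unsigned count, unsigned attr_len) {
  // One leading u1 count byte, then count entries of (u2 name + u2 flags).
  return attr_len == count * 4u + 1u;
}

int main() {
  std::printf("%d\n", valid_method_parameters_length(2, 9));   // 1: 2*4+1 accepted
  std::printf("%d\n", valid_method_parameters_length(2, 13));  // 0: old 6-byte-per-entry form rejected
  return 0;
}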
@ -304,7 +304,19 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {

  inline void assert_property(bool b, const char* msg, TRAPS) {
#ifdef ASSERT
    if (!b) { fatal(msg); }
    if (!b) {
      ResourceMark rm(THREAD);
      fatal(err_msg(msg, _class_name->as_C_string()));
    }
#endif
  }

  inline void assert_property(bool b, const char* msg, int index, TRAPS) {
#ifdef ASSERT
    if (!b) {
      ResourceMark rm(THREAD);
      fatal(err_msg(msg, index, _class_name->as_C_string()));
    }
#endif
  }

@ -312,7 +324,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
    if (_need_verify) {
      guarantee_property(property, msg, index, CHECK);
    } else {
      assert_property(property, msg, CHECK);
      assert_property(property, msg, index, CHECK);
    }
  }

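The upgrade above turns a fixed assertion string into a printf-style format that receives the index and the class name. A standalone sketch of that reporting shape (illustrative names; the real code routes through err_msg and fatal):

#include <cstdio>

void assert_property(bool ok, const char* fmt, int index, const char* class_name) {
  if (!ok) {
    // fmt is expected to contain %d then %s, mirroring the err_msg(msg, index, name) call.
    std::fprintf(stderr, fmt, index, class_name);
    std::fprintf(stderr, "\n");
  }
}

int main() {
  assert_property(false, "Illegal entry %d in class file %s", 7, "Foo");
  return 0;
}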
@ -1345,9 +1345,10 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
      tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_class_counter, buffer);
      // Preload all classes to get around uncommon traps
      // Iterate over all methods in class
      int comp_level = CompilationPolicy::policy()->initial_compile_level();
      for (int n = 0; n < k->methods()->length(); n++) {
        methodHandle m (THREAD, k->methods()->at(n));
        if (CompilationPolicy::can_be_compiled(m)) {
        if (CompilationPolicy::can_be_compiled(m, comp_level)) {

          if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
            // Give sweeper a chance to keep up with CTW
@ -1356,7 +1357,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
            _codecache_sweep_counter = 0;
          }
          // Force compilation
          CompileBroker::compile_method(m, InvocationEntryBci, CompilationPolicy::policy()->initial_compile_level(),
          CompileBroker::compile_method(m, InvocationEntryBci, comp_level,
                                        methodHandle(), 0, "CTW", THREAD);
          if (HAS_PENDING_EXCEPTION) {
            clear_pending_exception_if_not_oom(CHECK);
@ -53,6 +53,7 @@
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
@ -65,17 +66,19 @@

ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;

ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) :
ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
  _class_loader(h_class_loader()),
  _is_anonymous(is_anonymous), _keep_alive(is_anonymous),  // initially
  _metaspace(NULL), _unloading(false), _klasses(NULL),
  _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL),
  _next(NULL), _dependencies(),
  _next(NULL), _dependencies(dependencies),
  _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) {
    // empty
}

void ClassLoaderData::init_dependencies(TRAPS) {
  assert(!Universe::is_fully_initialized(), "should only be called when initializing");
  assert(is_the_null_class_loader_data(), "should only call this for the null class loader");
  _dependencies.init(CHECK);
}

@ -277,6 +280,9 @@ void ClassLoaderData::remove_class(Klass* scratch_class) {
void ClassLoaderData::unload() {
  _unloading = true;

  // Tell serviceability tools these classes are unloading
  classes_do(InstanceKlass::notify_unload_class);

  if (TraceClassLoaderData) {
    ResourceMark rm;
    tty->print("[ClassLoaderData: unload loader data "PTR_FORMAT, this);
@ -300,6 +306,9 @@ bool ClassLoaderData::is_alive(BoolObjectClosure* is_alive_closure) const {


ClassLoaderData::~ClassLoaderData() {
  // Release C heap structures for all the classes.
  classes_do(InstanceKlass::release_C_heap_structures);

  Metaspace *m = _metaspace;
  if (m != NULL) {
    _metaspace = NULL;
@ -423,7 +432,7 @@ void ClassLoaderData::free_deallocate_list() {
// These anonymous class loaders are to contain classes used for JSR292
ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) {
  // Add a new class loader data to the graph.
  return ClassLoaderDataGraph::add(NULL, loader, CHECK_NULL);
  return ClassLoaderDataGraph::add(loader, true, CHECK_NULL);
}

const char* ClassLoaderData::loader_name() {
@ -495,19 +504,22 @@ ClassLoaderData* ClassLoaderDataGraph::_head = NULL;
ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL;
ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL;


// Add a new class loader data node to the list. Assign the newly created
// ClassLoaderData into the java/lang/ClassLoader object as a hidden field
ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle loader, TRAPS) {
  // Not assigned a class loader data yet.
  // Create one.
  ClassLoaderData* *list_head = &_head;
  ClassLoaderData* next = _head;
ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRAPS) {
  // We need to allocate all the oops for the ClassLoaderData before allocating the
  // actual ClassLoaderData object.
  ClassLoaderData::Dependencies dependencies(CHECK_NULL);

  bool is_anonymous = (cld_addr == NULL);
  ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous);
  No_Safepoint_Verifier no_safepoints;  // we mustn't GC until we've installed the
                                        // ClassLoaderData in the graph since the CLD
                                        // contains unhandled oops

  if (cld_addr != NULL) {
  ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous, dependencies);


  if (!is_anonymous) {
    ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader());
    // First, Atomically set it
    ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL);
    if (old != NULL) {
@ -519,6 +531,9 @@ ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle lo

  // We won the race, and therefore the task of adding the data to the list of
  // class loader data
  ClassLoaderData** list_head = &_head;
  ClassLoaderData* next = _head;

  do {
    cld->set_next(next);
    ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next);
@ -531,10 +546,6 @@ ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle lo
                     cld->loader_name());
        tty->print_cr("]");
      }
      // Create dependencies after the CLD is added to the list. Otherwise,
      // the GC will not find the CLD and the _class_loader field will
      // not be updated.
      cld->init_dependencies(CHECK_NULL);
      return cld;
    }
    next = exchanged;
@ -665,6 +676,8 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
      dead->unload();
      data = data->next();
      // Remove from loader list.
      // This class loader data will no longer be found
      // in the ClassLoaderDataGraph.
      if (prev != NULL) {
        prev->set_next(data);
      } else {
@ -686,6 +699,7 @@ void ClassLoaderDataGraph::purge() {
    next = purge_me->next();
    delete purge_me;
  }
  Metaspace::purge();
}

// CDS support

@ -62,7 +62,7 @@ class ClassLoaderDataGraph : public AllStatic {
  // CMS support.
  static ClassLoaderData* _saved_head;

  static ClassLoaderData* add(ClassLoaderData** loader_data_addr, Handle class_loader, TRAPS);
  static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
public:
  static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
  static void purge();
@ -100,6 +100,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
                           Thread* THREAD);
public:
  Dependencies() : _list_head(NULL) {}
  Dependencies(TRAPS) : _list_head(NULL) {
    init(CHECK);
  }
  void add(Handle dependency, TRAPS);
  void init(TRAPS);
  void oops_do(OopClosure* f);
@ -150,7 +153,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
  void set_next(ClassLoaderData* next) { _next = next; }
  ClassLoaderData* next() const        { return _next; }

  ClassLoaderData(Handle h_class_loader, bool is_anonymous);
  ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies);
  ~ClassLoaderData();

  void set_metaspace(Metaspace* m) { _metaspace = m; }
@ -190,7 +193,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
  static void init_null_class_loader_data() {
    assert(_the_null_class_loader_data == NULL, "cannot initialize twice");
    assert(ClassLoaderDataGraph::_head == NULL, "cannot initialize twice");
    _the_null_class_loader_data = new ClassLoaderData((oop)NULL, false);

    // We explicitly initialize the Dependencies object at a later phase in the initialization
    _the_null_class_loader_data = new ClassLoaderData((oop)NULL, false, Dependencies());
    ClassLoaderDataGraph::_head = _the_null_class_loader_data;
    assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");
    if (DumpSharedSpaces) {

@ -43,10 +43,9 @@ inline ClassLoaderData *ClassLoaderDataGraph::find_or_create(Handle loader, TRAP
  assert(loader() != NULL,"Must be a class loader");
  // Gets the class loader data out of the java/lang/ClassLoader object, if non-null
  // it's already in the loader_data, so no need to add
  ClassLoaderData** loader_data_addr = java_lang_ClassLoader::loader_data_addr(loader());
  ClassLoaderData* loader_data_id = *loader_data_addr;
  if (loader_data_id) {
    return loader_data_id;
  ClassLoaderData* loader_data = java_lang_ClassLoader::loader_data(loader());
  if (loader_data) {
    return loader_data;
  }
  return ClassLoaderDataGraph::add(loader_data_addr, loader, THREAD);
  return ClassLoaderDataGraph::add(loader, false, THREAD);
}
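ClassLoaderDataGraph::add above installs the new node at the head of a lock-free singly linked list by retrying a compare-and-swap. A standalone sketch of that insertion, with std::atomic standing in for Atomic::cmpxchg_ptr:

#include <atomic>
#include <cstdio>

struct Node { int id; Node* next; };
std::atomic<Node*> head{nullptr};

void push(Node* n) {
  Node* expected = head.load();
  do {
    n->next = expected;  // link to whatever the head currently is
  } while (!head.compare_exchange_weak(expected, n));  // retry if another thread raced us
}

int main() {
  static Node a{1, nullptr}, b{2, nullptr};
  push(&a);
  push(&b);
  for (Node* p = head.load(); p != nullptr; p = p->next)
    std::printf("%d\n", p->id);  // prints 2 then 1
  return 0;
}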
@ -27,7 +27,6 @@
#include "classfile/systemDictionary.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "services/classLoadingService.hpp"
#include "utilities/hashtable.inline.hpp"


@ -156,19 +155,7 @@ bool Dictionary::do_unloading() {
      if (k_def_class_loader_data == loader_data) {
        // This is the defining entry, so the referred class is about
        // to be unloaded.
        // Notify the debugger and clean up the class.
        class_was_unloaded = true;
        // notify the debugger
        if (JvmtiExport::should_post_class_unload()) {
          JvmtiExport::post_class_unload(ik);
        }

        // notify ClassLoadingService of class unload
        ClassLoadingService::notify_class_unloaded(ik);

        // Clean up C heap
        ik->release_C_heap_structures();
        ik->constants()->release_C_heap_structures();
      }
      // Also remove this system dictionary entry.
      purge_entry = true;
@ -315,14 +315,18 @@ Handle java_lang_String::char_converter(Handle java_string, jchar from_char, jch
  return string;
}

jchar* java_lang_String::as_unicode_string(oop java_string, int& length) {
jchar* java_lang_String::as_unicode_string(oop java_string, int& length, TRAPS) {
  typeArrayOop value = java_lang_String::value(java_string);
  int offset = java_lang_String::offset(java_string);
  length = java_lang_String::length(java_string);

  jchar* result = NEW_RESOURCE_ARRAY(jchar, length);
  for (int index = 0; index < length; index++) {
    result[index] = value->char_at(index + offset);
  jchar* result = NEW_RESOURCE_ARRAY_RETURN_NULL(jchar, length);
  if (result != NULL) {
    for (int index = 0; index < length; index++) {
      result[index] = value->char_at(index + offset);
    }
  } else {
    THROW_MSG_0(vmSymbols::java_lang_OutOfMemoryError(), "could not allocate Unicode string");
  }
  return result;
}

@ -153,7 +153,7 @@ class java_lang_String : AllStatic {
  static char* as_utf8_string(oop java_string, char* buf, int buflen);
  static char* as_utf8_string(oop java_string, int start, int len);
  static char* as_platform_dependent_str(Handle java_string, TRAPS);
  static jchar* as_unicode_string(oop java_string, int& length);
  static jchar* as_unicode_string(oop java_string, int& length, TRAPS);
  // produce an ascii string with all other values quoted using \u####
  static char* as_quoted_ascii(oop java_string);


@ -735,7 +735,7 @@ oop StringTable::intern(oop string, TRAPS)
  ResourceMark rm(THREAD);
  int length;
  Handle h_string (THREAD, string);
  jchar* chars = java_lang_String::as_unicode_string(string, length);
  jchar* chars = java_lang_String::as_unicode_string(string, length, CHECK_NULL);
  oop result = intern(h_string, chars, length, CHECK_NULL);
  return result;
}
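The as_unicode_string change above swaps an abort-on-failure allocation for one that can return null plus an explicit exception path. A standalone sketch of that failure-aware allocation shape (nothrow new stands in for NEW_RESOURCE_ARRAY_RETURN_NULL, and the oom flag for the thrown OutOfMemoryError):

#include <cstdio>
#include <new>

unsigned short* alloc_unicode(int length, bool& oom) {
  // Allocation that reports failure instead of aborting the process.
  unsigned short* p = new (std::nothrow) unsigned short[length];
  oom = (p == nullptr);
  return p;
}

int main() {
  bool oom = false;
  unsigned short* buf = alloc_unicode(16, oom);
  if (oom) { std::fprintf(stderr, "would throw OutOfMemoryError\n"); return 1; }
  buf[0] = 65;
  std::printf("%u\n", buf[0]);  // 65
  delete[] buf;
  return 0;
}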
@ -830,7 +830,7 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
      Klass *kk;
      {
        MutexLocker mu(SystemDictionary_lock, THREAD);
        kk = find_class(name, ik->class_loader_data());
        kk = find_class(d_index, d_hash, name, ik->class_loader_data());
      }
      if (kk != NULL) {
        // No clean up is needed if the shared class has been entered
@ -517,13 +517,18 @@
  template(sun_management_ManagementFactory, "sun/management/ManagementFactory") \
  template(sun_management_Sensor, "sun/management/Sensor") \
  template(sun_management_Agent, "sun/management/Agent") \
  template(sun_management_DiagnosticCommandImpl, "sun/management/DiagnosticCommandImpl") \
  template(sun_management_GarbageCollectorImpl, "sun/management/GarbageCollectorImpl") \
  template(sun_management_ManagementFactoryHelper, "sun/management/ManagementFactoryHelper") \
  template(getDiagnosticCommandMBean_name, "getDiagnosticCommandMBean") \
  template(getDiagnosticCommandMBean_signature, "()Lcom/sun/management/DiagnosticCommandMBean;") \
  template(getGcInfoBuilder_name, "getGcInfoBuilder") \
  template(getGcInfoBuilder_signature, "()Lsun/management/GcInfoBuilder;") \
  template(com_sun_management_GcInfo, "com/sun/management/GcInfo") \
  template(com_sun_management_GcInfo_constructor_signature, "(Lsun/management/GcInfoBuilder;JJJ[Ljava/lang/management/MemoryUsage;[Ljava/lang/management/MemoryUsage;[Ljava/lang/Object;)V") \
  template(createGCNotification_name, "createGCNotification") \
  template(createGCNotification_signature, "(JLjava/lang/String;Ljava/lang/String;Ljava/lang/String;Lcom/sun/management/GcInfo;)V") \
  template(createDiagnosticFrameworkNotification_name, "createDiagnosticFrameworkNotification") \
  template(createMemoryPoolMBean_name, "createMemoryPoolMBean") \
  template(createMemoryManagerMBean_name, "createMemoryManagerMBean") \
  template(createGarbageCollectorMBean_name, "createGarbageCollectorMBean") \
@ -463,8 +463,10 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
}
#endif //PRODUCT


nmethod* CodeCache::find_and_remove_saved_code(Method* m) {
/**
 * Remove and return nmethod from the saved code list in order to reanimate it.
 */
nmethod* CodeCache::reanimate_saved_code(Method* m) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
@ -479,7 +481,7 @@ nmethod* CodeCache::find_and_remove_saved_code(Method* m) {
      saved->set_speculatively_disconnected(false);
      saved->set_saved_nmethod_link(NULL);
      if (PrintMethodFlushing) {
        saved->print_on(tty, " ### nmethod is reconnected\n");
        saved->print_on(tty, " ### nmethod is reconnected");
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
@ -496,6 +498,9 @@ nmethod* CodeCache::find_and_remove_saved_code(Method* m) {
  return NULL;
}

/**
 * Remove nmethod from the saved code list in order to discard it permanently.
 */
void CodeCache::remove_saved_code(nmethod* nm) {
  // For conc swpr this will be called with CodeCache_lock taken by caller
  assert_locked_or_safepoint(CodeCache_lock);
@ -529,7 +534,7 @@ void CodeCache::speculatively_disconnect(nmethod* nm) {
  nm->set_saved_nmethod_link(_saved_nmethods);
  _saved_nmethods = nm;
  if (PrintMethodFlushing) {
    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
    nm->print_on(tty, " ### nmethod is speculatively disconnected");
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;

@ -57,7 +57,7 @@ class CodeCache : AllStatic {
  static int _number_of_nmethods_with_dependencies;
  static bool _needs_cache_clean;
  static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
  static nmethod* _saved_nmethods;          // linked via nm->saved_nmethod_look()
  static nmethod* _saved_nmethods;          // Linked list of speculatively disconnected nmethods.

  static void verify_if_often() PRODUCT_RETURN;

@ -168,7 +168,7 @@ class CodeCache : AllStatic {
  static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
  static void clear_inline_caches();  // clear all inline caches

  static nmethod* find_and_remove_saved_code(Method* m);
  static nmethod* reanimate_saved_code(Method* m);
  static void remove_saved_code(nmethod* nm);
  static void speculatively_disconnect(nmethod* nm);

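reanimate_saved_code above is a scan-unlink-return over a singly linked list of saved nmethods. A standalone sketch of that pattern (the field and type names here are illustrative; the real version also holds CodeCache_lock and resets nmethod state):

#include <cstdio>

struct NMethod { int method_id; NMethod* link; };

NMethod* remove_and_return(NMethod*& head, int key) {
  NMethod* prev = nullptr;
  for (NMethod* cur = head; cur != nullptr; prev = cur, cur = cur->link) {
    if (cur->method_id == key) {
      if (prev == nullptr) head = cur->link;  // unlink from the head
      else prev->link = cur->link;            // unlink from the middle
      cur->link = nullptr;
      return cur;                             // caller re-installs ("reanimates") it
    }
  }
  return nullptr;                             // not on the saved list
}

int main() {
  static NMethod a{1, nullptr}, b{2, &a};
  NMethod* head = &b;                         // list: 2 -> 1
  NMethod* got = remove_and_return(head, 1);
  std::printf("%d %d\n", got ? got->method_id : -1, head->method_id);  // 1 2
  return 0;
}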
@ -45,25 +45,6 @@
|
||||
// Every time a compiled IC is changed or its type is being accessed,
|
||||
// either the CompiledIC_lock must be set or we must be at a safe point.
|
||||
|
||||
|
||||
// Release the CompiledICHolder* associated with this call site is there is one.
|
||||
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
|
||||
// This call site might have become stale so inspect it carefully.
|
||||
NativeCall* call = nativeCall_at(call_site->addr());
|
||||
if (is_icholder_entry(call->destination())) {
|
||||
NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
|
||||
InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
// This call site might have become stale so inspect it carefully.
NativeCall* call = nativeCall_at(call_site->addr());
return is_icholder_entry(call->destination());
}

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.
@ -488,33 +469,6 @@ bool CompiledIC::is_icholder_entry(address entry) {
return (cb != NULL && cb->is_adapter_blob());
}

CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
: _ic_call(call)
{
address ic_call = call->instruction_address();

assert(ic_call != NULL, "ic_call address must be set");
assert(nm != NULL, "must pass nmethod");
assert(nm->contains(ic_call), "must be in nmethod");

// search for the ic_call at the given address
RelocIterator iter(nm, ic_call, ic_call+1);
bool ret = iter.next();
assert(ret == true, "relocInfo must exist at this address");
assert(iter.addr() == ic_call, "must find ic_call");
if (iter.type() == relocInfo::virtual_call_type) {
virtual_call_Relocation* r = iter.virtual_call_reloc();
_is_optimized = false;
_value = nativeMovConstReg_at(r->cached_value());
} else {
assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
_is_optimized = true;
_value = NULL;
}
}

// ----------------------------------------------------------------------------

void CompiledStaticCall::set_to_clean() {
@ -549,33 +503,6 @@ bool CompiledStaticCall::is_call_to_interpreted() const {
return nm->stub_contains(destination());
}

void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
address stub=find_stub();
guarantee(stub != NULL, "stub not found");

if (TraceICs) {
ResourceMark rm;
tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
instruction_address(),
callee->name_and_sig_as_C_string());
}

NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), "a) MT-unsafe modification of inline cache");
assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, "b) MT-unsafe modification of inline cache");

// Update stub
method_holder->set_data((intptr_t)callee());
jump->set_jump_destination(entry);

// Update jump to call
set_destination_mt_safe(stub);
}

void CompiledStaticCall::set(const StaticCallInfo& info) {
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
@ -618,19 +545,6 @@ void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
}
}

void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
// Reset stub
address stub = static_stub->addr();
assert(stub!=NULL, "stub not found");
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
method_holder->set_data(0);
jump->set_jump_destination((address)-1);
}

address CompiledStaticCall::find_stub() {
// Find reloc. information containing this call-site
RelocIterator iter((nmethod*)NULL, instruction_address());
@ -668,19 +582,16 @@ void CompiledIC::verify() {
|| is_optimized() || is_megamorphic(), "sanity check");
}

void CompiledIC::print() {
print_compiled_ic();
tty->cr();
}

void CompiledIC::print_compiled_ic() {
tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination(), is_optimized() ? NULL : cached_value());
}

void CompiledStaticCall::print() {
tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address());
if (is_clean()) {
@ -693,21 +604,4 @@ void CompiledStaticCall::print() {
tty->cr();
}

void CompiledStaticCall::verify() {
// Verify call
NativeCall::verify();
if (os::is_MP()) {
verify_alignment();
}

// Verify stub
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

// Verify state
assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}

#endif
#endif // !PRODUCT

@ -304,6 +304,11 @@ class CompiledStaticCall: public NativeCall {
friend CompiledStaticCall* compiledStaticCall_at(address native_call);
friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);

// Code
static void emit_to_interp_stub(CodeBuffer &cbuf);
static int to_interp_stub_size();
static int reloc_to_interp_stub();

// State
bool is_clean() const;
bool is_call_to_compiled() const;
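[Editor's note] For readers following the CompiledStaticCall code above: its verify() reduces to checking that a call site is in exactly one of three states. A toy, standalone C++ sketch of that state check (invented names, not the HotSpot classes; the real code derives the state from the patched branch destination):

    #include <cassert>

    // The three call-site states the verify() code above checks for.
    enum class CallState { Clean, ToInterpreted, ToCompiled };

    struct StaticCallSite {
      CallState state = CallState::Clean;
      void set_to_interpreted() { state = CallState::ToInterpreted; }
      void set_to_clean()       { state = CallState::Clean; }
      void verify() const {
        // mirrors: assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted())
        assert(state == CallState::Clean ||
               state == CallState::ToCompiled ||
               state == CallState::ToInterpreted);
      }
    };

    int main() {
      StaticCallSite cs;
      cs.set_to_interpreted();
      cs.verify();
    }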
@ -67,7 +67,7 @@ StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size,
intptr_t size = round_to(buffer_size, 2*BytesPerWord);
BufferBlob* blob = BufferBlob::create(name, size);
if( blob == NULL) {
vm_exit_out_of_memory(size, err_msg("CodeCache: no room for %s", name));
vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, err_msg("CodeCache: no room for %s", name));
}
_stub_interface = stub_interface;
_buffer_size = blob->content_size();

@ -60,7 +60,7 @@ void* VtableStub::operator new(size_t size, int code_size) {
const int bytes = chunk_factor * real_size + pd_code_alignment();
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
if (blob == NULL) {
vm_exit_out_of_memory(bytes, "CodeCache: no room for vtable chunks");
vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks");
}
_chunk = blob->content_begin();
_chunk_end = _chunk + bytes;
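[Editor's note] The hunks above add an out-of-memory category argument to the VM exit path. A standalone C++ sketch of the pattern (names invented, not the HotSpot API): the exit routine now distinguishes malloc-style from mmap-style failures, which lets the error report suggest the right remedy.

    #include <cstdio>
    #include <cstdlib>

    enum OomKind { OOM_MALLOC, OOM_MMAP };

    // Report which kind of native allocation failed, then exit.
    [[noreturn]] void exit_out_of_memory(std::size_t size, OomKind kind, const char* msg) {
      std::fprintf(stderr, "OOM (%s, %zu bytes): %s\n",
                   kind == OOM_MALLOC ? "malloc" : "mmap", size, msg);
      std::exit(1);
    }

    int main() {
      void* blob = std::malloc(64);
      if (blob == nullptr) {
        exit_out_of_memory(64, OOM_MALLOC, "CodeCache: no room for vtable chunks");
      }
      std::free(blob);
    }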
@ -65,7 +65,7 @@ HS_DTRACE_PROBE_DECL8(hotspot, method__compile__begin,
HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
char*, intptr_t, char*, intptr_t, char*, intptr_t, char*, intptr_t, bool);

#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name) \
#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \
{ \
Symbol* klass_name = (method)->klass_name(); \
Symbol* name = (method)->name(); \
@ -77,8 +77,7 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
signature->bytes(), signature->utf8_length()); \
}

#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, \
comp_name, success) \
#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) \
{ \
Symbol* klass_name = (method)->klass_name(); \
Symbol* name = (method)->name(); \
@ -92,7 +91,7 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,

#else /* USDT2 */

#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name) \
#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \
{ \
Symbol* klass_name = (method)->klass_name(); \
Symbol* name = (method)->name(); \
@ -104,8 +103,7 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
(char *) signature->bytes(), signature->utf8_length()); \
}

#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, \
comp_name, success) \
#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) \
{ \
Symbol* klass_name = (method)->klass_name(); \
Symbol* name = (method)->name(); \
@ -120,8 +118,8 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,

#else // ndef DTRACE_ENABLED

#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name)
#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, comp_name, success)
#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)
#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)

#endif // ndef DTRACE_ENABLED
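[Editor's note] The probe-macro hunks above all make the same change: the `compiler` argument was never used by the probe body, so it is dropped and every call site shrinks to one line. A minimal standalone C++ sketch of that kind of macro slimming (not the real HotSpot probe macros):

    #include <cstdio>

    // After the change: only the arguments the body actually uses remain.
    #define COMPILE_BEGIN_PROBE(method, comp_name) \
      std::printf("compile begin: %s by %s\n", (method), (comp_name))

    int main() {
      COMPILE_BEGIN_PROBE("Foo::bar", "C2");
    }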
@ -1229,7 +1227,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
if (method->is_not_compilable(comp_level)) return NULL;

if (UseCodeCacheFlushing) {
nmethod* saved = CodeCache::find_and_remove_saved_code(method());
nmethod* saved = CodeCache::reanimate_saved_code(method());
if (saved != NULL) {
method->set_code(method, saved);
return saved;
@ -1288,9 +1286,9 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
method->jmethod_id();
}

// If the compiler is shut off due to code cache flushing or otherwise,
// If the compiler is shut off due to code cache getting full
// fail out now so blocking compiles don't hang the java thread
if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) {
if (!should_compile_new_jobs()) {
CompilationPolicy::policy()->delay_compilation(method());
return NULL;
}
@ -1766,8 +1764,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
// Save information about this method in case of failure.
set_last_compile(thread, method, is_osr, task_level);

DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler(task_level), method,
compiler_name(task_level));
DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level));
}

// Allocate a new set of JNI handles.
@ -1842,13 +1839,14 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
}
}
}
// simulate crash during compilation
assert(task->compile_id() != CICrashAt, "just as planned");
}
pop_jni_handle_block();

methodHandle method(thread, task->method());

DTRACE_METHOD_COMPILE_END_PROBE(compiler(task_level), method,
compiler_name(task_level), task->is_success());
DTRACE_METHOD_COMPILE_END_PROBE(method, compiler_name(task_level), task->is_success());

collect_statistics(thread, time, task);
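[Editor's note] The CICrashAt assert added above is a test hook: when the current compile id matches a configured value, the assert fires and simulates a compiler crash in debug builds. A standalone C++ sketch of the idea (flag name reused, the plumbing is invented):

    #include <cassert>

    static int CICrashAt = -1; // disabled by default

    void invoke_compiler(int compile_id) {
      // ... compile the method ...
      assert(compile_id != CICrashAt && "just as planned"); // simulated crash point
    }

    int main() {
      invoke_compiler(42); // passes while CICrashAt stays -1
    }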
@ -193,7 +193,8 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
CardGeneration(rs, initial_byte_size, level, ct),
_dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
_debug_collection_type(Concurrent_collection_type)
_debug_collection_type(Concurrent_collection_type),
_did_compact(false)
{
HeapWord* bottom = (HeapWord*) _virtual_space.low();
HeapWord* end = (HeapWord*) _virtual_space.high();
@ -917,18 +918,15 @@ void ConcurrentMarkSweepGeneration::compute_new_size() {
return;
}

// Compute some numbers about the state of the heap.
const size_t used_after_gc = used();
const size_t capacity_after_gc = capacity();
// The heap has been compacted but not reset yet.
// Any metric such as free() or used() will be incorrect.

CardGeneration::compute_new_size();

// Reset again after a possible resizing
cmsSpace()->reset_after_compaction();

assert(used() == used_after_gc && used_after_gc <= capacity(),
err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
" capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
if (did_compact()) {
cmsSpace()->reset_after_compaction();
}
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
@ -1578,6 +1576,8 @@ bool CMSCollector::shouldConcurrentCollect() {
return false;
}

void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }

// Clear _expansion_cause fields of constituent generations
void CMSCollector::clear_expansion_cause() {
_cmsGen->clear_expansion_cause();
@ -1675,7 +1675,6 @@ void CMSCollector::collect(bool full,
}
acquire_control_and_collect(full, clear_all_soft_refs);
_full_gcs_since_conc_gc++;

}

void CMSCollector::request_full_gc(unsigned int full_gc_count) {
@ -1857,6 +1856,7 @@ NOT_PRODUCT(
}
}

set_did_compact(should_compact);
if (should_compact) {
// If the collection is being acquired from the background
// collector, there may be references on the discovered
@ -2444,8 +2444,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
// initial marking in checkpointRootsInitialWork has been completed
if (VerifyDuringGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
gclog_or_tty->print("Verify before initial mark: ");
Universe::verify();
Universe::verify("Verify before initial mark: ");
}
{
bool res = markFromRoots(false);
@ -2456,8 +2455,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
case FinalMarking:
if (VerifyDuringGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
gclog_or_tty->print("Verify before re-mark: ");
Universe::verify();
Universe::verify("Verify before re-mark: ");
}
checkpointRootsFinal(false, clear_all_soft_refs,
init_mark_was_synchronous);
@ -2468,8 +2466,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
// final marking in checkpointRootsFinal has been completed
if (VerifyDuringGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
gclog_or_tty->print("Verify before sweep: ");
Universe::verify();
Universe::verify("Verify before sweep: ");
}
sweep(false);
assert(_collectorState == Resizing, "Incorrect state");
@ -2484,8 +2481,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
// The heap has been resized.
if (VerifyDuringGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
gclog_or_tty->print("Verify before reset: ");
Universe::verify();
Universe::verify("Verify before reset: ");
}
reset(false);
assert(_collectorState == Idling, "Collector state should "
@ -2722,6 +2718,7 @@ void CMSCollector::gc_epilogue(bool full) {
Chunk::clean_chunk_pool();
}

set_did_compact(false);
_between_prologue_and_epilogue = false; // ready for next cycle
}

@ -2853,8 +2850,8 @@ class VerifyMarkedClosure: public BitMapClosure {
bool failed() { return _failed; }
};

bool CMSCollector::verify_after_remark() {
gclog_or_tty->print(" [Verifying CMS Marking... ");
bool CMSCollector::verify_after_remark(bool silent) {
if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
static bool init = false;

@ -2915,7 +2912,7 @@ bool CMSCollector::verify_after_remark() {
warning("Unrecognized value %d for CMSRemarkVerifyVariant",
CMSRemarkVerifyVariant);
}
gclog_or_tty->print(" done] ");
if (!silent) gclog_or_tty->print(" done] ");
return true;
}
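[Editor's note] The verify_after_remark() change above threads a `silent` flag through so callers can suppress the bracketed progress output. A standalone C++ sketch of the pattern, assuming a global default in the spirit of VerifySilently (the surrounding names are invented):

    #include <cstdio>

    static bool VerifySilently = false;

    // The default argument is read at call time, so flipping the global
    // changes the behavior of existing one-argument-free call sites.
    bool verify_after_remark(bool silent = VerifySilently) {
      if (!silent) std::printf(" [Verifying CMS Marking... ");
      bool ok = true; // ... verification work ...
      if (!silent) std::printf(" done] ");
      return ok;
    }

    int main() {
      verify_after_remark();      // prints the progress brackets
      verify_after_remark(true);  // silent
    }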
@ -3426,8 +3423,9 @@ bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
assert_locked_or_safepoint(Heap_lock);
assert_lock_strong(freelistLock());
// XXX Fix when compaction is implemented.
warning("Shrinking of CMS not yet implemented");
if (PrintGCDetails && Verbose) {
warning("Shrinking of CMS not yet implemented");
}
return;
}

@ -6010,26 +6008,23 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
&cmsDrainMarkingStackClosure,
NULL);
}
verify_work_stacks_empty();
}

// This is the point where the entire marking should have completed.
verify_work_stacks_empty();

if (should_unload_classes()) {
{
TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);

// Follow SystemDictionary roots and unload classes
// Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);

// Follow CodeCache roots and unload any methods marked for unloading
// Unload nmethods.
CodeCache::do_unloading(&_is_alive_closure, purged_class);

cmsDrainMarkingStackClosure.do_void();
verify_work_stacks_empty();

// Update subklass/sibling/implementor links in KlassKlass descendants
// Prune dead klasses from subklass/sibling/implementor lists.
Klass::clean_weak_klass_links(&_is_alive_closure);
// Nothing should have been pushed onto the working stacks.
verify_work_stacks_empty();
}

{
@ -6043,11 +6038,10 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
// Need to check if we really scanned the StringTable.
if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty);
// Now clean up stale oops in StringTable
// Delete entries for dead interned strings.
StringTable::unlink(&_is_alive_closure);
}

verify_work_stacks_empty();
// Restore any preserved marks as a result of mark stack or
// work queue overflow
restore_preserved_marks_if_any(); // done single-threaded for now

@ -604,6 +604,8 @@ class CMSCollector: public CHeapObj<mtGC> {
ConcurrentMarkSweepPolicy* _collector_policy;
ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

void set_did_compact(bool v);

// XXX Move these to CMSStats ??? FIX ME !!!
elapsedTimer _inter_sweep_timer; // time between sweeps
elapsedTimer _intra_sweep_timer; // time _in_ sweeps
@ -990,7 +992,7 @@ class CMSCollector: public CHeapObj<mtGC> {

// debugging
void verify();
bool verify_after_remark();
bool verify_after_remark(bool silent = VerifySilently);
void verify_ok_to_terminate() const PRODUCT_RETURN;
void verify_work_stacks_empty() const PRODUCT_RETURN;
void verify_overflow_empty() const PRODUCT_RETURN;
@ -1081,6 +1083,10 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {

CollectionTypes _debug_collection_type;

// True if a compacting collection was done.
bool _did_compact;
bool did_compact() { return _did_compact; }

// Fraction of current occupancy at which to start a CMS collection which
// will collect this generation (at least).
double _initiating_occupancy;
@ -1121,6 +1127,8 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// Adaptive size policy
CMSAdaptiveSizePolicy* size_policy();

void set_did_compact(bool v) { _did_compact = v; }

bool refs_discovery_is_atomic() const { return false; }
bool refs_discovery_is_mt() const {
// Note: CMS does MT-discovery during the parallel-remark
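[Editor's note] The _did_compact flag added across the CMS hunks above follows a simple three-step protocol: record at collection time whether a compaction happened, consult the flag in compute_new_size() before resetting the space, and clear it in the GC epilogue. A standalone C++ sketch of that lifecycle (illustrative, not the CMS classes):

    struct Generation {
      bool did_compact_ = false;
      void set_did_compact(bool v) { did_compact_ = v; }
      bool did_compact() const { return did_compact_; }

      void collect(bool should_compact) {
        set_did_compact(should_compact);
        // ... collection work, possibly compacting ...
      }
      void compute_new_size() {
        // ... resize policy ...
        if (did_compact()) {
          // reset_after_compaction() is only meaningful after a compaction
        }
      }
      void gc_epilogue() { set_did_compact(false); } // ready for next cycle
    };

    int main() {
      Generation gen;
      gen.collect(true);
      gen.compute_new_size();
      gen.gc_epilogue();
    }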
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,40 +26,12 @@
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/space.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "gc_implementation/g1/g1HotCardCache.hpp"

// Possible sizes for the card counts cache: odd primes that roughly double in size.
// (See jvmtiTagMap.cpp).

#define MAX_SIZE ((size_t) -1)

size_t ConcurrentG1Refine::_cc_cache_sizes[] = {
16381, 32771, 76831, 150001, 307261,
614563, 1228891, 2457733, 4915219, 9830479,
19660831, 39321619, 78643219, 157286461, MAX_SIZE
};

ConcurrentG1Refine::ConcurrentG1Refine() :
_card_counts(NULL), _card_epochs(NULL),
_n_card_counts(0), _max_cards(0), _max_n_card_counts(0),
_cache_size_index(0), _expand_card_counts(false),
_hot_cache(NULL),
_def_use_cache(false), _use_cache(false),
// We initialize the epochs of the array to 0. By initializing
// _n_periods to 1 and not 0 we automatically invalidate all the
// entries on the array. Otherwise we might accidentally think that
// we claimed a card that was in fact never set (see CR7033292).
_n_periods(1),
_threads(NULL), _n_threads(0)
ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
_threads(NULL), _n_threads(0),
_hot_card_cache(g1h)
{

// Ergonomically select initial concurrent refinement parameters
if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
@ -75,13 +47,17 @@ ConcurrentG1Refine::ConcurrentG1Refine() :
FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
}
set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));

_n_worker_threads = thread_num();
// We need one extra thread to do the young gen rset size sampling.
_n_threads = _n_worker_threads + 1;

reset_threshold_step();

_threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads, mtGC);

int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();

ConcurrentG1RefineThread *next = NULL;
for (int i = _n_threads - 1; i >= 0; i--) {
ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
@ -100,74 +76,8 @@ void ConcurrentG1Refine::reset_threshold_step() {
}
}

int ConcurrentG1Refine::thread_num() {
return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
}

void ConcurrentG1Refine::init() {
if (G1ConcRSLogCacheSize > 0) {
_g1h = G1CollectedHeap::heap();

_max_cards = _g1h->max_capacity() >> CardTableModRefBS::card_shift;
_max_n_card_counts = _max_cards * G1MaxHotCardCountSizePercent / 100;

size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
guarantee(_max_cards < max_card_num, "card_num representation");

// We need _n_card_counts to be less than _max_n_card_counts here
// so that the expansion call (below) actually allocates the
// _counts and _epochs arrays.
assert(_n_card_counts == 0, "pre-condition");
assert(_max_n_card_counts > 0, "pre-condition");

// Find the index into cache size array that is of a size that's
// large enough to hold desired_sz.
size_t desired_sz = _max_cards / InitialCacheFraction;
int desired_sz_index = 0;
while (_cc_cache_sizes[desired_sz_index] < desired_sz) {
desired_sz_index += 1;
assert(desired_sz_index < MAX_CC_CACHE_INDEX, "invariant");
}
assert(desired_sz_index < MAX_CC_CACHE_INDEX, "invariant");

// If the desired_sz value is between two sizes then
// _cc_cache_sizes[desired_sz_index-1] < desired_sz <= _cc_cache_sizes[desired_sz_index]
// we will start with the lower size in the optimistic expectation that
// we will not need to expand up. Note desired_sz_index could also be 0.
if (desired_sz_index > 0 &&
_cc_cache_sizes[desired_sz_index] > desired_sz) {
desired_sz_index -= 1;
}

if (!expand_card_count_cache(desired_sz_index)) {
// Allocation was unsuccessful - exit
vm_exit_during_initialization("Could not reserve enough space for card count cache");
}
assert(_n_card_counts > 0, "post-condition");
assert(_cache_size_index == desired_sz_index, "post-condition");

Copy::fill_to_bytes(&_card_counts[0],
_n_card_counts * sizeof(CardCountCacheEntry));
Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));

ModRefBarrierSet* bs = _g1h->mr_bs();
guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
_ct_bs = (CardTableModRefBS*)bs;
_ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());

_def_use_cache = true;
_use_cache = true;
_hot_cache_size = (1 << G1ConcRSLogCacheSize);
_hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
_n_hot = 0;
_hot_cache_idx = 0;

// For refining the cards in the hot cache in parallel
int n_workers = (ParallelGCThreads > 0 ?
_g1h->workers()->total_workers() : 1);
_hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
_hot_cache_par_claimed_idx = 0;
}
_hot_card_cache.initialize();
}

void ConcurrentG1Refine::stop() {
@ -188,17 +98,6 @@ void ConcurrentG1Refine::reinitialize_threads() {
}

ConcurrentG1Refine::~ConcurrentG1Refine() {
if (G1ConcRSLogCacheSize > 0) {
// Please see the comment in allocate_card_count_cache
// for why we call os::malloc() and os::free() directly.
assert(_card_counts != NULL, "Logic");
os::free(_card_counts, mtGC);
assert(_card_epochs != NULL, "Logic");
os::free(_card_epochs, mtGC);

assert(_hot_cache != NULL, "Logic");
FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
}
if (_threads != NULL) {
for (int i = 0; i < _n_threads; i++) {
delete _threads[i];
@ -215,317 +114,10 @@ void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
}
}

bool ConcurrentG1Refine::is_young_card(jbyte* card_ptr) {
HeapWord* start = _ct_bs->addr_for(card_ptr);
HeapRegion* r = _g1h->heap_region_containing(start);
if (r != NULL && r->is_young()) {
return true;
}
// This card is not associated with a heap region
// so can't be young.
return false;
}

jbyte* ConcurrentG1Refine::add_card_count(jbyte* card_ptr, int* count, bool* defer) {
unsigned new_card_num = ptr_2_card_num(card_ptr);
unsigned bucket = hash(new_card_num);
assert(0 <= bucket && bucket < _n_card_counts, "Bounds");

CardCountCacheEntry* count_ptr = &_card_counts[bucket];
CardEpochCacheEntry* epoch_ptr = &_card_epochs[bucket];

// We have to construct a new entry if we haven't updated the counts
// during the current period, or if the count was updated for a
// different card number.
unsigned int new_epoch = (unsigned int) _n_periods;
julong new_epoch_entry = make_epoch_entry(new_card_num, new_epoch);

while (true) {
// Fetch the previous epoch value
julong prev_epoch_entry = epoch_ptr->_value;
julong cas_res;

if (extract_epoch(prev_epoch_entry) != new_epoch) {
// This entry has not yet been updated during this period.
// Note: we update the epoch value atomically to ensure
// that there is only one winner that updates the cached
// card_ptr value even though all the refine threads share
// the same epoch value.

cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
(volatile jlong*)&epoch_ptr->_value,
(jlong) prev_epoch_entry);

if (cas_res == prev_epoch_entry) {
// We have successfully won the race to update the
// epoch and card_num value. Make it look like the
// count and eviction count were previously cleared.
count_ptr->_count = 1;
count_ptr->_evict_count = 0;
*count = 0;
// We can defer the processing of card_ptr
*defer = true;
return card_ptr;
}
// We did not win the race to update the epoch field, so some other
// thread must have done it. The value that gets returned by CAS
// should be the new epoch value.
assert(extract_epoch(cas_res) == new_epoch, "unexpected epoch");
// We could 'continue' here or just re-read the previous epoch value
prev_epoch_entry = epoch_ptr->_value;
}

// The epoch entry for card_ptr has been updated during this period.
unsigned old_card_num = extract_card_num(prev_epoch_entry);

// The card count that will be returned to caller
*count = count_ptr->_count;

// Are we updating the count for the same card?
if (new_card_num == old_card_num) {
// Same card - just update the count. We could have more than one
// thread racing to update count for the current card. It should be
// OK not to use a CAS as the only penalty should be some missed
// increments of the count which delays identifying the card as "hot".

if (*count < max_jubyte) count_ptr->_count++;
// We can defer the processing of card_ptr
*defer = true;
return card_ptr;
}

// Different card - evict old card info
if (count_ptr->_evict_count < max_jubyte) count_ptr->_evict_count++;
if (count_ptr->_evict_count > G1CardCountCacheExpandThreshold) {
// Trigger a resize the next time we clear
_expand_card_counts = true;
}

cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
(volatile jlong*)&epoch_ptr->_value,
(jlong) prev_epoch_entry);

if (cas_res == prev_epoch_entry) {
// We successfully updated the card num value in the epoch entry
count_ptr->_count = 0; // initialize counter for new card num
jbyte* old_card_ptr = card_num_2_ptr(old_card_num);

// Even though the region containing the card at old_card_num was not
// in the young list when old_card_num was recorded in the epoch
// cache it could have been added to the free list and subsequently
// added to the young list in the intervening time. See CR 6817995.
// We do not deal with this case here - it will be handled in
// HeapRegion::oops_on_card_seq_iterate_careful after it has been
// determined that the region containing the card has been allocated
// to, and it's safe to check the young type of the region.

// We do not want to defer processing of card_ptr in this case
// (we need to refine old_card_ptr and card_ptr)
*defer = false;
return old_card_ptr;
}
// Someone else beat us - try again.
}
}

jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
int count;
jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
assert(cached_ptr != NULL, "bad cached card ptr");

// We've just inserted a card pointer into the card count cache
// and got back the card that we just inserted or (evicted) the
// previous contents of that count slot.

// The card we got back could be in a young region. When the
// returned card (if evicted) was originally inserted, we had
// determined that its containing region was not young. However
// it is possible for the region to be freed during a cleanup
// pause, then reallocated and tagged as young which will result
// in the returned card residing in a young region.
//
// We do not deal with this case here - the change from non-young
// to young could be observed at any time - it will be handled in
// HeapRegion::oops_on_card_seq_iterate_careful after it has been
// determined that the region containing the card has been allocated
// to.

// The card pointer we obtained from card count cache is not hot
// so do not store it in the cache; return it for immediate
// refining.
if (count < G1ConcRSHotCardLimit) {
return cached_ptr;
}

// Otherwise, the pointer we got from the _card_counts cache is hot.
jbyte* res = NULL;
MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
if (_n_hot == _hot_cache_size) {
res = _hot_cache[_hot_cache_idx];
_n_hot--;
}
// Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
_hot_cache[_hot_cache_idx] = cached_ptr;
_hot_cache_idx++;
if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0;
_n_hot++;

// The card obtained from the hot card cache could be in a young
// region. See above on how this can happen.

return res;
}

void ConcurrentG1Refine::clean_up_cache(int worker_i,
G1RemSet* g1rs,
DirtyCardQueue* into_cset_dcq) {
assert(!use_cache(), "cache should be disabled");
int start_idx;

while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
int end_idx = start_idx + _hot_cache_par_chunk_size;

if (start_idx ==
Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
// The current worker has successfully claimed the chunk [start_idx..end_idx)
end_idx = MIN2(end_idx, _n_hot);
for (int i = start_idx; i < end_idx; i++) {
jbyte* entry = _hot_cache[i];
if (entry != NULL) {
if (g1rs->concurrentRefineOneCard(entry, worker_i, true)) {
// 'entry' contains references that point into the current
// collection set. We need to record 'entry' in the DCQS
// that's used for that purpose.
//
// The only time we care about recording cards that contain
// references that point into the collection set is during
// RSet updating while within an evacuation pause.
// In this case worker_i should be the id of a GC worker thread
assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id");
into_cset_dcq->enqueue(entry);
}
}
}
}
}
}

// The arrays used to hold the card counts and the epochs must have
// a 1:1 correspondence. Hence they are allocated and freed together
// Returns true if the allocations of both the counts and epochs
// were successful; false otherwise.
bool ConcurrentG1Refine::allocate_card_count_cache(size_t n,
CardCountCacheEntry** counts,
CardEpochCacheEntry** epochs) {
// We call the allocation/free routines directly for the counts
// and epochs arrays. The NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY
// macros call AllocateHeap and FreeHeap respectively.
// AllocateHeap will call vm_exit_out_of_memory in the event
// of an allocation failure and abort the JVM. With the
// _counts/epochs arrays we only need to abort the JVM if the
// initial allocation of these arrays fails.
//
// Additionally AllocateHeap/FreeHeap do some tracing of
// allocate/free calls so calling one without calling the
// other can cause inconsistencies in the tracing. So we
// call neither.

assert(*counts == NULL, "out param");
assert(*epochs == NULL, "out param");

size_t counts_size = n * sizeof(CardCountCacheEntry);
size_t epochs_size = n * sizeof(CardEpochCacheEntry);

*counts = (CardCountCacheEntry*) os::malloc(counts_size, mtGC);
if (*counts == NULL) {
// allocation was unsuccessful
return false;
}

*epochs = (CardEpochCacheEntry*) os::malloc(epochs_size, mtGC);
if (*epochs == NULL) {
// allocation was unsuccessful - free counts array
assert(*counts != NULL, "must be");
os::free(*counts, mtGC);
*counts = NULL;
return false;
}

// We successfully allocated both counts and epochs
return true;
}

// Returns true if the card counts/epochs cache was
// successfully expanded; false otherwise.
bool ConcurrentG1Refine::expand_card_count_cache(int cache_size_idx) {
// Can we expand the card count and epoch tables?
if (_n_card_counts < _max_n_card_counts) {
assert(cache_size_idx >= 0 && cache_size_idx < MAX_CC_CACHE_INDEX, "oob");

size_t cache_size = _cc_cache_sizes[cache_size_idx];
// Make sure we don't go bigger than we will ever need
cache_size = MIN2(cache_size, _max_n_card_counts);

// Should we expand the card count and card epoch tables?
if (cache_size > _n_card_counts) {
// We have been asked to allocate new, larger, arrays for
// the card counts and the epochs. Attempt the allocation
// of both before we free the existing arrays in case
// the allocation is unsuccessful...
CardCountCacheEntry* counts = NULL;
CardEpochCacheEntry* epochs = NULL;

if (allocate_card_count_cache(cache_size, &counts, &epochs)) {
// Allocation was successful.
// We can just free the old arrays; we're
// not interested in preserving the contents
if (_card_counts != NULL) os::free(_card_counts, mtGC);
if (_card_epochs != NULL) os::free(_card_epochs, mtGC);

// Cache the size of the arrays and the index that got us there.
_n_card_counts = cache_size;
_cache_size_index = cache_size_idx;

_card_counts = counts;
_card_epochs = epochs;

// We successfully allocated/expanded the caches.
return true;
}
}
}

// We did not successfully expand the caches.
return false;
}

void ConcurrentG1Refine::clear_and_record_card_counts() {
if (G1ConcRSLogCacheSize == 0) {
return;
}

double start = os::elapsedTime();

if (_expand_card_counts) {
int new_idx = _cache_size_index + 1;

if (expand_card_count_cache(new_idx)) {
// Allocation was successful and _n_card_counts has
// been updated to the new size. We only need to clear
// the epochs so we don't read a bogus epoch value
// when inserting a card into the hot card cache.
Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
}
_expand_card_counts = false;
}

int this_epoch = (int) _n_periods;
assert((this_epoch+1) <= max_jint, "too many periods");
// Update epoch
_n_periods++;
double cc_clear_time_ms = (os::elapsedTime() - start) * 1000;
_g1h->g1_policy()->phase_times()->record_cc_clear_time_ms(cc_clear_time_ms);
int ConcurrentG1Refine::thread_num() {
int n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
: ParallelGCThreads;
return MAX2<int>(n_threads, 1);
}

void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
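[Editor's note] The machinery deleted above (card count/epoch tables plus a small evicting FIFO of "hot" cards) moves into the new G1HotCardCache/G1CardCounts classes. A standalone, single-threaded C++ sketch of the core idea, with invented names and a hard-coded stand-in for G1ConcRSHotCardLimit: cold cards are refined immediately, hot ones are parked in a ring buffer and may evict an older hot card for processing.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    class HotCardCache {
      std::vector<uint8_t>     counts_; // per-card refinement count
      std::vector<std::size_t> cache_;  // ring buffer of hot card indices
      std::size_t idx_ = 0, n_hot_ = 0;
      static const unsigned HotLimit = 4; // stands in for G1ConcRSHotCardLimit
    public:
      HotCardCache(std::size_t n_cards, std::size_t cache_size)
        : counts_(n_cards, 0), cache_(cache_size, 0) {}

      // Returns the card to refine now, or SIZE_MAX if processing is deferred.
      std::size_t insert(std::size_t card) {
        if (counts_[card] < HotLimit) {
          counts_[card]++;
          return card;              // still cold: refine immediately
        }
        std::size_t evicted = SIZE_MAX;
        if (n_hot_ == cache_.size()) evicted = cache_[idx_]; else n_hot_++;
        cache_[idx_] = card;        // hot: defer, possibly evicting an older card
        idx_ = (idx_ + 1) % cache_.size();
        return evicted;
      }
    };

    int main() {
      HotCardCache c(128, 8);
      for (int i = 0; i < 6; i++) c.insert(7); // card 7 turns hot, gets deferred
    }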
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,13 +25,15 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP

#include "gc_implementation/g1/g1HotCardCache.hpp"
#include "memory/allocation.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalDefinitions.hpp"

// Forward decl
class ConcurrentG1RefineThread;
class G1CollectedHeap;
class G1HotCardCache;
class G1RemSet;

class ConcurrentG1Refine: public CHeapObj<mtGC> {
@ -61,141 +63,14 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {

int _thread_threshold_step;

// We delay the refinement of 'hot' cards using the hot card cache.
G1HotCardCache _hot_card_cache;

// Reset the threshold step value based on the current zone boundaries.
void reset_threshold_step();

// The cache for card refinement.
bool _use_cache;
bool _def_use_cache;

size_t _n_periods; // Used as clearing epoch

// An evicting cache of the number of times each card
// is accessed. Reduces, but does not eliminate, the amount
// of duplicated processing of dirty cards.

enum SomePrivateConstants {
epoch_bits = 32,
card_num_shift = epoch_bits,
epoch_mask = AllBits,
card_num_mask = AllBits,

// The initial cache size is approximately this fraction
// of a maximal cache (i.e. the size needed for all cards
// in the heap)
InitialCacheFraction = 512
};

const static julong card_num_mask_in_place =
(julong) card_num_mask << card_num_shift;

typedef struct {
julong _value; // | card_num | epoch |
} CardEpochCacheEntry;

julong make_epoch_entry(unsigned int card_num, unsigned int epoch) {
assert(0 <= card_num && card_num < _max_cards, "Bounds");
assert(0 <= epoch && epoch <= _n_periods, "must be");

return ((julong) card_num << card_num_shift) | epoch;
}

unsigned int extract_epoch(julong v) {
return (v & epoch_mask);
}

unsigned int extract_card_num(julong v) {
return (v & card_num_mask_in_place) >> card_num_shift;
}

typedef struct {
unsigned char _count;
unsigned char _evict_count;
} CardCountCacheEntry;

CardCountCacheEntry* _card_counts;
CardEpochCacheEntry* _card_epochs;

// The current number of buckets in the card count cache
size_t _n_card_counts;

// The number of cards for the entire reserved heap
size_t _max_cards;

// The max number of buckets for the card counts and epochs caches.
// This is the maximum that the counts and epochs will grow to.
// It is specified as a fraction or percentage of _max_cards using
// G1MaxHotCardCountSizePercent.
size_t _max_n_card_counts;

// Possible sizes of the cache: odd primes that roughly double in size.
// (See jvmtiTagMap.cpp).
enum {
MAX_CC_CACHE_INDEX = 15 // maximum index into the cache size array.
};

static size_t _cc_cache_sizes[MAX_CC_CACHE_INDEX];

// The index in _cc_cache_sizes corresponding to the size of
// _card_counts.
int _cache_size_index;

bool _expand_card_counts;

const jbyte* _ct_bot;

jbyte** _hot_cache;
int _hot_cache_size;
int _n_hot;
int _hot_cache_idx;

int _hot_cache_par_chunk_size;
volatile int _hot_cache_par_claimed_idx;

// Needed to workaround 6817995
CardTableModRefBS* _ct_bs;
G1CollectedHeap* _g1h;

// Helper routine for expand_card_count_cache().
// The arrays used to hold the card counts and the epochs must have
// a 1:1 correspondence. Hence they are allocated and freed together.
// Returns true if the allocations of both the counts and epochs
// were successful; false otherwise.
bool allocate_card_count_cache(size_t n,
CardCountCacheEntry** counts,
CardEpochCacheEntry** epochs);

// Expands the arrays that hold the card counts and epochs
// to the cache size at index. Returns true if the expansion/
// allocation was successful; false otherwise.
bool expand_card_count_cache(int index);

// hash a given key (index of card_ptr) with the specified size
static unsigned int hash(size_t key, size_t size) {
return (unsigned int) (key % size);
}

// hash a given key (index of card_ptr)
unsigned int hash(size_t key) {
return hash(key, _n_card_counts);
}

unsigned int ptr_2_card_num(jbyte* card_ptr) {
return (unsigned int) (card_ptr - _ct_bot);
}

jbyte* card_num_2_ptr(unsigned int card_num) {
return (jbyte*) (_ct_bot + card_num);
}

// Returns the count of this card after incrementing it.
jbyte* add_card_count(jbyte* card_ptr, int* count, bool* defer);

// Returns true if this card is in a young region
bool is_young_card(jbyte* card_ptr);

public:
ConcurrentG1Refine();
ConcurrentG1Refine(G1CollectedHeap* g1h);
~ConcurrentG1Refine();

void init(); // Accomplish some initialization that has to wait.
@ -206,34 +81,6 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
// Iterate over the conc refine threads
void threads_do(ThreadClosure *tc);

// If this is the first entry for the slot, writes into the cache and
// returns NULL. If it causes an eviction, returns the evicted pointer.
// Otherwise, it's a cache hit, and returns NULL.
jbyte* cache_insert(jbyte* card_ptr, bool* defer);

// Process the cached entries.
void clean_up_cache(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);

// Set up for parallel processing of the cards in the hot cache
void clear_hot_cache_claimed_index() {
_hot_cache_par_claimed_idx = 0;
}

// Discard entries in the hot cache.
void clear_hot_cache() {
_hot_cache_idx = 0; _n_hot = 0;
}

bool hot_cache_is_empty() { return _n_hot == 0; }

bool use_cache() { return _use_cache; }
void set_use_cache(bool b) {
if (b) _use_cache = _def_use_cache;
else _use_cache = false;
}

void clear_and_record_card_counts();

static int thread_num();

void print_worker_threads_on(outputStream* st) const;
@ -250,6 +97,8 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
int worker_thread_num() const { return _n_worker_threads; }

int thread_threshold_step() const { return _thread_threshold_step; }

G1HotCardCache* hot_card_cache() { return &_hot_card_cache; }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP
@ -1273,10 +1273,9 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {

if (VerifyDuringGC) {
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(before)");
Universe::heap()->prepare_for_verify();
Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking);
Universe::verify(VerifyOption_G1UsePrevMarking,
" VerifyDuringGC:(before)");
}

G1CollectorPolicy* g1p = g1h->g1_policy();
@ -1300,10 +1299,9 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
// Verify the heap w.r.t. the previous marking bitmap.
if (VerifyDuringGC) {
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(overflow)");
Universe::heap()->prepare_for_verify();
Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking);
Universe::verify(VerifyOption_G1UsePrevMarking,
" VerifyDuringGC:(overflow)");
}

// Clear the marking state because we will be restarting
@ -1323,10 +1321,9 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {

if (VerifyDuringGC) {
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(after)");
Universe::heap()->prepare_for_verify();
Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UseNextMarking);
Universe::verify(VerifyOption_G1UseNextMarking,
" VerifyDuringGC:(after)");
}
assert(!restart_for_overflow(), "sanity");
// Completely reset the marking state since marking completed
@ -1972,10 +1969,9 @@ void ConcurrentMark::cleanup() {

if (VerifyDuringGC) {
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(before)");
Universe::heap()->prepare_for_verify();
Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking);
Universe::verify(VerifyOption_G1UsePrevMarking,
" VerifyDuringGC:(before)");
}

G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
@ -2127,10 +2123,9 @@ void ConcurrentMark::cleanup() {

if (VerifyDuringGC) {
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(after)");
Universe::heap()->prepare_for_verify();
Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking);
Universe::verify(VerifyOption_G1UsePrevMarking,
" VerifyDuringGC:(after)");
}

g1h->verify_region_sets_optional();
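[Editor's note] Every verify hunk above makes the same substitution: instead of the caller printing a label and then invoking verify(silent, option), the label is handed to verify and printed there. A standalone C++ sketch of that call-shape change (invented stand-ins, not the Universe API):

    #include <cstdio>
    #include <string>

    enum VerifyOption { UsePrevMarking, UseNextMarking };

    // The label travels with the call, so the printing is centralized here.
    void verify(VerifyOption option, const std::string& msg) {
      std::printf("%s(option %d)\n", msg.c_str(), (int)option);
      // ... heap verification ...
    }

    int main() {
      verify(UsePrevMarking, " VerifyDuringGC:(before)");
    }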
@ -77,7 +77,7 @@ void G1BlockOffsetSharedArray::resize(size_t new_word_size) {
assert(delta > 0, "just checking");
if (!_vs.expand_by(delta)) {
// Do better than this for Merlin
vm_exit_out_of_memory(delta, "offset table expansion");
vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion");
}
assert(_vs.high() == high + delta, "invalid expansion");
// Initialization of the contents is left to the
hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp (new file, 212 lines)
@ -0,0 +1,212 @@
|
||||
/*
|
||||
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/g1/g1CardCounts.hpp"
|
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"

void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
  if (has_count_table()) {
    check_card_num(from_card_num,
                   err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
    assert(from_card_num < to_card_num,
           err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
                   from_card_num, to_card_num));
    assert(to_card_num <= _committed_max_card_num,
           err_msg("to card num out of range: "
                   "to: "SIZE_FORMAT ", "
                   "max: "SIZE_FORMAT,
                   to_card_num, _committed_max_card_num));

    to_card_num = MIN2(_committed_max_card_num, to_card_num);

    Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num));
  }
}

G1CardCounts::G1CardCounts(G1CollectedHeap *g1h):
  _g1h(g1h), _card_counts(NULL),
  _reserved_max_card_num(0), _committed_max_card_num(0),
  _committed_size(0) {}

void G1CardCounts::initialize() {
  assert(_g1h->max_capacity() > 0, "initialization order");
  assert(_g1h->capacity() == 0, "initialization order");

  if (G1ConcRSHotCardLimit > 0) {
    // The max value we can store in the counts table is
    // max_jubyte. Guarantee the value of the hot
    // threshold limit is no more than this.
    guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");

    ModRefBarrierSet* bs = _g1h->mr_bs();
    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
    _ct_bs = (CardTableModRefBS*)bs;
    _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());

    // Allocate/Reserve the counts table
    size_t reserved_bytes = _g1h->max_capacity();
    _reserved_max_card_num = reserved_bytes >> CardTableModRefBS::card_shift;

    size_t reserved_size = _reserved_max_card_num * sizeof(jbyte);
    ReservedSpace rs(ReservedSpace::allocation_align_size_up(reserved_size));
    if (!rs.is_reserved()) {
      warning("Could not reserve enough space for the card counts table");
      guarantee(!has_reserved_count_table(), "should be NULL");
      return;
    }

    MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

    _card_counts_storage.initialize(rs, 0);
    _card_counts = (jubyte*) _card_counts_storage.low();
  }
}

void G1CardCounts::resize(size_t heap_capacity) {
  // Expand the card counts table to handle a heap with the given capacity.

  if (!has_reserved_count_table()) {
    // Don't expand if we failed to reserve the card counts table.
    return;
  }

  assert(_committed_size ==
         ReservedSpace::allocation_align_size_up(_committed_size),
         err_msg("Unaligned? committed_size: " SIZE_FORMAT, _committed_size));

  // Verify that the committed space for the card counts
  // matches our committed max card num.
  size_t prev_committed_size = _committed_size;
  size_t prev_committed_card_num = prev_committed_size / sizeof(jbyte);
  assert(prev_committed_card_num == _committed_max_card_num,
         err_msg("Card mismatch: "
                 "prev: " SIZE_FORMAT ", "
                 "committed: "SIZE_FORMAT,
                 prev_committed_card_num, _committed_max_card_num));

  size_t new_size = (heap_capacity >> CardTableModRefBS::card_shift) * sizeof(jbyte);
  size_t new_committed_size = ReservedSpace::allocation_align_size_up(new_size);
  size_t new_committed_card_num =
    MIN2(_reserved_max_card_num, new_committed_size / sizeof(jbyte));

  if (_committed_max_card_num < new_committed_card_num) {
    // we need to expand the backing store for the card counts
    size_t expand_size = new_committed_size - prev_committed_size;

    if (!_card_counts_storage.expand_by(expand_size)) {
      warning("Card counts table backing store commit failure");
      return;
    }
    assert(_card_counts_storage.committed_size() == new_committed_size,
           "expansion commit failure");

    _committed_size = new_committed_size;
    _committed_max_card_num = new_committed_card_num;

    clear_range(prev_committed_card_num, _committed_max_card_num);
  }
}

uint G1CardCounts::add_card_count(jbyte* card_ptr) {
  // Returns the number of times the card has been refined.
  // If we failed to reserve/commit the counts table, return 0.
  // If card_ptr is beyond the committed end of the counts table,
  // return 0.
  // Otherwise return the actual count.
  // Unless G1ConcRSHotCardLimit has been set appropriately,
  // returning 0 will result in the card being considered
  // cold and will be refined immediately.
  uint count = 0;
  if (has_count_table()) {
    size_t card_num = ptr_2_card_num(card_ptr);
    if (card_num < _committed_max_card_num) {
      count = (uint) _card_counts[card_num];
      if (count < G1ConcRSHotCardLimit) {
        _card_counts[card_num] += 1;
      }
      assert(_card_counts[card_num] <= G1ConcRSHotCardLimit,
             err_msg("Refinement count overflow? "
                     "new count: "UINT32_FORMAT,
                     (uint) _card_counts[card_num]));
    }
  }
  return count;
}

bool G1CardCounts::is_hot(uint count) {
  return (count >= G1ConcRSHotCardLimit);
}

void G1CardCounts::clear_region(HeapRegion* hr) {
  assert(!hr->isHumongous(), "Should have been cleared");
  if (has_count_table()) {
    HeapWord* bottom = hr->bottom();

    // We use the last address in hr as hr could be the
    // last region in the heap. In which case trying to find
    // the card for hr->end() will be an OOB access to the
    // card table.
    HeapWord* last = hr->end() - 1;
    assert(_g1h->g1_committed().contains(last),
           err_msg("last not in committed: "
                   "last: " PTR_FORMAT ", "
                   "committed: [" PTR_FORMAT ", " PTR_FORMAT ")",
                   last,
                   _g1h->g1_committed().start(),
                   _g1h->g1_committed().end()));

    const jbyte* from_card_ptr = _ct_bs->byte_for_const(bottom);
    const jbyte* last_card_ptr = _ct_bs->byte_for_const(last);

#ifdef ASSERT
    HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
    assert(start_addr == hr->bottom(), "alignment");
    HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
    assert((last_addr + CardTableModRefBS::card_size_in_words) == hr->end(), "alignment");
#endif // ASSERT

    // Clear the counts for the (exclusive) card range.
    size_t from_card_num = ptr_2_card_num(from_card_ptr);
    size_t to_card_num = ptr_2_card_num(last_card_ptr) + 1;
    clear_range(from_card_num, to_card_num);
  }
}

void G1CardCounts::clear_all() {
  assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise");
  clear_range((size_t)0, _committed_max_card_num);
}

G1CardCounts::~G1CardCounts() {
  if (has_reserved_count_table()) {
    _card_counts_storage.release();
  }
}

126
hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp
Normal file
@@ -0,0 +1,126 @@
/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP

#include "memory/allocation.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

class CardTableModRefBS;
class G1CollectedHeap;
class HeapRegion;

// Table to track the number of times a card has been refined. Once
// a card has been refined a certain number of times, it is
// considered 'hot' and its refinement is delayed by inserting the
// card into the hot card cache. The card will then be refined when
// it is evicted from the hot card cache, or when the hot card cache
// is 'drained' during the next evacuation pause.

class G1CardCounts: public CHeapObj<mtGC> {
  G1CollectedHeap* _g1h;

  // The table of counts
  jubyte* _card_counts;

  // Max capacity of the reserved space for the counts table
  size_t _reserved_max_card_num;

  // Max capacity of the committed space for the counts table
  size_t _committed_max_card_num;

  // Size of committed space for the counts table
  size_t _committed_size;

  // CardTable bottom.
  const jbyte* _ct_bot;

  // Barrier set
  CardTableModRefBS* _ct_bs;

  // The virtual memory backing the counts table
  VirtualSpace _card_counts_storage;

  // Returns true if the card counts table has been reserved.
  bool has_reserved_count_table() { return _card_counts != NULL; }

  // Returns true if the card counts table has been reserved and committed.
  bool has_count_table() {
    return has_reserved_count_table() && _committed_max_card_num > 0;
  }

  void check_card_num(size_t card_num, const char* msg) {
    assert(card_num >= 0 && card_num < _committed_max_card_num, msg);
  }

  size_t ptr_2_card_num(const jbyte* card_ptr) {
    assert(card_ptr >= _ct_bot,
           err_msg("Invalid card pointer: "
                   "card_ptr: " PTR_FORMAT ", "
                   "_ct_bot: " PTR_FORMAT,
                   card_ptr, _ct_bot));
    size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
    check_card_num(card_num,
                   err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
    return card_num;
  }

  jbyte* card_num_2_ptr(size_t card_num) {
    check_card_num(card_num,
                   err_msg("card num out of range: "SIZE_FORMAT, card_num));
    return (jbyte*) (_ct_bot + card_num);
  }

  // Clear the counts table for the given (exclusive) index range.
  void clear_range(size_t from_card_num, size_t to_card_num);

 public:
  G1CardCounts(G1CollectedHeap* g1h);
  ~G1CardCounts();

  void initialize();

  // Resize the committed space for the card counts table in
  // response to a resize of the committed space for the heap.
  void resize(size_t heap_capacity);

  // Increments the refinement count for the given card.
  // Returns the pre-increment count value.
  uint add_card_count(jbyte* card_ptr);

  // Returns true if the given count is high enough to be considered
  // 'hot'; false otherwise.
  bool is_hot(uint count);

  // Clears the card counts for the cards spanned by the region
  void clear_region(HeapRegion* hr);

  // Clear the entire card counts table during GC.
  // Updates the policy stats with the duration.
  void clear_all();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP

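The counts table is easier to reason about in isolation. Below is a minimal standalone sketch (not HotSpot code) of the same idea: one saturating byte counter per card, with a configurable hot threshold. The CardCountsModel name, the card indices, and the hot limit of 4 are illustrative stand-ins for the real CardTableModRefBS geometry and the G1ConcRSHotCardLimit flag.

// Standalone sketch, assuming simplified card geometry (not HotSpot code).
#include <cstdint>
#include <cstdio>
#include <vector>

class CardCountsModel {
  std::vector<uint8_t> _counts;   // one byte per card, like the jubyte* table
  uint8_t _hot_limit;             // stands in for G1ConcRSHotCardLimit

public:
  CardCountsModel(size_t num_cards, uint8_t hot_limit)
    : _counts(num_cards, 0), _hot_limit(hot_limit) {}

  // Returns the pre-increment count, saturating at the hot limit so
  // the byte-sized counter can never overflow.
  unsigned add_card_count(size_t card_num) {
    unsigned count = _counts[card_num];
    if (count < _hot_limit) {
      _counts[card_num] += 1;
    }
    return count;
  }

  bool is_hot(unsigned count) const { return count >= _hot_limit; }
};

int main() {
  CardCountsModel counts(1024, 4);  // 1024 cards, hot after 4 refinements
  for (int i = 0; i < 6; i++) {
    unsigned c = counts.add_card_count(42);
    std::printf("refinement %d: pre-count=%u hot=%d\n", i, c, counts.is_hot(c));
  }
  return 0;
}

Once the pre-increment count reaches the limit, every subsequent query reports the card as hot, which is exactly the condition the hot card cache uses to delay refinement.
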
@@ -96,7 +96,7 @@ public:
    _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
  {}
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
-   bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
+   bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
    // This path is executed by the concurrent refine or mutator threads,
    // concurrently, and so we do not care if card_ptr contains references
    // that point into the collection set.

@@ -1271,9 +1271,8 @@ double G1CollectedHeap::verify(bool guard, const char* msg) {
  if (guard && total_collections() >= VerifyGCStartAt) {
    double verify_start = os::elapsedTime();
    HandleMark hm;  // Discard invalid handles created during verification
-   gclog_or_tty->print(msg);
    prepare_for_verify();
-   Universe::verify(false /* silent */, VerifyOption_G1UsePrevMarking);
+   Universe::verify(VerifyOption_G1UsePrevMarking, msg);
    verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
  }

@@ -1304,7 +1303,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,

  print_heap_before_gc();

- size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
+ size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();

  HRSPhaseSetter x(HRSPhaseFullGC);
  verify_region_sets_optional();

@@ -1425,6 +1424,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,

  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge();
+ MetaspaceAux::verify_metrics();

  // Note: since we've just done a full GC, concurrent
  // marking is no longer active. Therefore we need not

@@ -1452,9 +1452,10 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
    _hr_printer.end_gc(true /* full */, (size_t) total_collections());
  }

- if (_cg1r->use_cache()) {
-   _cg1r->clear_and_record_card_counts();
-   _cg1r->clear_hot_cache();
+ G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
+ if (hot_card_cache->use_cache()) {
+   hot_card_cache->reset_card_counts();
+   hot_card_cache->reset_hot_cache();
  }

  // Rebuild remembered sets of all regions.

@@ -1767,6 +1768,8 @@ void G1CollectedHeap::update_committed_space(HeapWord* old_end,
  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  // Tell the BOT about the update.
  _bot_shared->resize(_g1_committed.word_size());
+ // Tell the hot card cache about the update
+ _cg1r->hot_card_cache()->resize_card_counts(capacity());
}

bool G1CollectedHeap::expand(size_t expand_bytes) {

@@ -1831,7 +1834,7 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
    if (G1ExitOnExpansionFailure &&
        _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
      // We had head room...
-     vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
+     vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
    }
  }
  return successful;

@@ -1843,33 +1846,32 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
    ReservedSpace::page_align_size_down(shrink_bytes);
  aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                         HeapRegion::GrainBytes);
- uint num_regions_deleted = 0;
- MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
+ uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
+
+ uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
  HeapWord* old_end = (HeapWord*) _g1_storage.high();
- assert(mr.end() == old_end, "post-condition");
+ size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;

  ergo_verbose3(ErgoHeapSizing,
                "shrink the heap",
                ergo_format_byte("requested shrinking amount")
                ergo_format_byte("aligned shrinking amount")
                ergo_format_byte("attempted shrinking amount"),
-               shrink_bytes, aligned_shrink_bytes, mr.byte_size());
- if (mr.byte_size() > 0) {
+               shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
+ if (num_regions_removed > 0) {
+   _g1_storage.shrink_by(shrunk_bytes);
+   HeapWord* new_end = (HeapWord*) _g1_storage.high();

    if (_hr_printer.is_active()) {
-     HeapWord* curr = mr.end();
-     while (curr > mr.start()) {
+     HeapWord* curr = old_end;
+     while (curr > new_end) {
        HeapWord* curr_end = curr;
        curr -= HeapRegion::GrainWords;
        _hr_printer.uncommit(curr, curr_end);
      }
-     assert(curr == mr.start(), "post-condition");
    }

-   _g1_storage.shrink_by(mr.byte_size());
-   HeapWord* new_end = (HeapWord*) _g1_storage.high();
-   assert(mr.start() == new_end, "post-condition");
-
-   _expansion_regions += num_regions_deleted;
+   _expansion_regions += num_regions_removed;
    update_committed_space(old_end, new_end);
    HeapRegionRemSet::shrink_heap(n_regions());
    g1_policy()->record_new_heap_size(n_regions());

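The rewritten shrink_helper sizes the request in whole regions rather than raw bytes: it rounds the requested amount down to a region count, asks the region sequence to remove that many regions, and only then derives the byte total actually shrunk. A standalone sketch of that arithmetic (not HotSpot code); the 8 MB region size and 100 MB request are arbitrary example values:

// Standalone sketch, assuming an 8 MB region size (not HotSpot code).
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t grain_bytes = 8 * 1024 * 1024;    // stands in for HeapRegion::GrainBytes
  uint64_t shrink_bytes      = 100 * 1024 * 1024;  // requested shrink amount

  // Only whole regions can be uncommitted, so round the request down
  // to a region count first, then derive the bytes actually shrunk.
  uint64_t num_regions_to_remove = shrink_bytes / grain_bytes;
  uint64_t shrunk_bytes = num_regions_to_remove * grain_bytes;

  std::printf("requested: %llu bytes, removing %llu regions (%llu bytes)\n",
              (unsigned long long)shrink_bytes,
              (unsigned long long)num_regions_to_remove,
              (unsigned long long)shrunk_bytes);
  return 0;
}

With these numbers, a 100 MB request removes 12 regions (96 MB); the shortfall is simply reported via the ergo trace rather than asserted, which is why the post-condition asserts could be dropped.
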
@@ -1955,13 +1957,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  assert(n_rem_sets > 0, "Invariant.");

- HeapRegionRemSetIterator** iter_arr =
-   NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC);
- for (int i = 0; i < n_queues; i++) {
-   iter_arr[i] = new HeapRegionRemSetIterator();
- }
- _rem_set_iterator = iter_arr;

  _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);

@@ -2007,7 +2002,7 @@ jint G1CollectedHeap::initialize() {
  Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");

- _cg1r = new ConcurrentG1Refine();
+ _cg1r = new ConcurrentG1Refine(this);

  // Reserve the maximum.

@@ -2068,6 +2063,9 @@ jint G1CollectedHeap::initialize() {
                           (HeapWord*) _g1_reserved.end(),
                           _expansion_regions);

+ // Do later initialization work for concurrent refinement.
+ _cg1r->init();
+
  // 6843694 - ensure that the maximum region index can fit
  // in the remembered set structures.
  const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;

@@ -2085,20 +2083,20 @@ jint G1CollectedHeap::initialize() {

  _g1h = this;

  _in_cset_fast_test_length = max_regions();
  _in_cset_fast_test_base =
    NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);

  // We're biasing _in_cset_fast_test to avoid subtracting the
  // beginning of the heap every time we want to index; basically
  // it's the same with what we do with the card table.
  _in_cset_fast_test = _in_cset_fast_test_base -
    ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);

  // Clear the _cset_fast_test bitmap in anticipation of adding
  // regions to the incremental collection set for the first
  // evacuation pause.
  clear_cset_fast_test();

  // Create the ConcurrentMark data structure and thread.
  // (Must do this late, so that "max_regions" is defined.)

@@ -2160,9 +2158,6 @@ jint G1CollectedHeap::initialize() {
  // counts and that mechanism.
  SpecializationStats::clear();

- // Do later initialization work for concurrent refinement.
- _cg1r->init();
-
  // Here we allocate the dummy full region that is required by the
  // G1AllocRegion class. If we don't pass an address in the reserved
  // space here, lots of asserts fire.

@@ -2321,7 +2316,8 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                                 bool concurrent,
                                                 int worker_i) {
  // Clean cards in the hot card cache
- concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
+ G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
+ hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);

  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  int n_completed_buffers = 0;

@@ -3614,7 +3610,7 @@ G1CollectedHeap::setup_surviving_young_words() {
  uint array_length = g1_policy()->young_cset_region_length();
  _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
  if (_surviving_young_words == NULL) {
-   vm_exit_out_of_memory(sizeof(size_t) * array_length,
+   vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
                          "Not enough space for young surv words summary.");
  }
  memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));

@@ -4397,7 +4393,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
    PADDING_ELEM_NUM;
  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  if (_surviving_young_words_base == NULL)
-   vm_exit_out_of_memory(array_length * sizeof(size_t),
+   vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
                          "Not enough space for young surv histo.");
  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));

@@ -5079,10 +5075,9 @@ g1_process_strong_roots(bool is_scavenging,
}

void
-G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
-                                       OopClosure* non_root_closure) {
+G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
  CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
- SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
+ SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
}

// Weak Reference Processing support

@@ -5612,8 +5607,11 @@ void G1CollectedHeap::evacuate_collection_set() {
  NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)

  g1_rem_set()->prepare_for_oops_into_collection_set_do();
- concurrent_g1_refine()->set_use_cache(false);
- concurrent_g1_refine()->clear_hot_cache_claimed_index();
+
+ // Disable the hot card cache.
+ G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
+ hot_card_cache->reset_hot_cache_claimed_index();
+ hot_card_cache->set_use_cache(false);

  uint n_workers;
  if (G1CollectedHeap::use_parallel_gc_threads()) {

@@ -5695,8 +5693,11 @@ void G1CollectedHeap::evacuate_collection_set() {
  release_gc_alloc_regions(n_workers);
  g1_rem_set()->cleanup_after_oops_into_collection_set_do();

- concurrent_g1_refine()->clear_hot_cache();
- concurrent_g1_refine()->set_use_cache(true);
+ // Reset and re-enable the hot card cache.
+ // Note the counts for the cards in the regions in the
+ // collection set are reset when the collection set is freed.
+ hot_card_cache->reset_hot_cache();
+ hot_card_cache->set_use_cache(true);

  finalize_for_evac_failure();

@@ -5758,6 +5759,12 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
  assert(!hr->is_empty(), "the region should not be empty");
  assert(free_list != NULL, "pre-condition");

+ // Clear the card counts for this region.
+ // Note: we only need to do this if the region is not young
+ // (since we don't refine cards in young regions).
+ if (!hr->is_young()) {
+   _cg1r->hot_card_cache()->reset_card_counts(hr);
+ }
  *pre_used += hr->used();
  hr->hr_clear(par, true /* clear_space */);
  free_list->add_as_head(hr);

@@ -786,9 +786,6 @@ protected:
  // concurrently after the collection.
  DirtyCardQueueSet _dirty_card_queue_set;

- // The Heap Region Rem Set Iterator.
- HeapRegionRemSetIterator** _rem_set_iterator;
-
  // The closure used to refine a single card.
  RefineCardTableEntryClosure* _refine_cte_cl;

@@ -827,8 +824,7 @@ protected:
  // Apply "blk" to all the weak roots of the system. These include
  // JNI weak roots, the code cache, system dictionary, symbol table,
  // string table, and referents of reachable weak refs.
- void g1_process_weak_roots(OopClosure* root_closure,
-                            OopClosure* non_root_closure);
+ void g1_process_weak_roots(OopClosure* root_closure);

  // Frees a non-humongous region by initializing its contents and
  // adding it to the free list that's passed as a parameter (this is

|
||||
G1RemSet* g1_rem_set() const { return _g1_rem_set; }
|
||||
ModRefBarrierSet* mr_bs() const { return _mr_bs; }
|
||||
|
||||
// The rem set iterator.
|
||||
HeapRegionRemSetIterator* rem_set_iterator(int i) {
|
||||
return _rem_set_iterator[i];
|
||||
}
|
||||
|
||||
HeapRegionRemSetIterator* rem_set_iterator() {
|
||||
return _rem_set_iterator[0];
|
||||
}
|
||||
|
||||
unsigned get_gc_time_stamp() {
|
||||
return _gc_time_stamp;
|
||||
}
|
||||
|
@@ -309,7 +309,8 @@ G1CollectorPolicy::G1CollectorPolicy() :

void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
- set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
+ size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
+ set_max_alignment(MAX2(card_table_alignment, min_alignment()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -155,11 +155,6 @@ void WorkerDataArray<T>::verify() {

G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
  _max_gc_threads(max_gc_threads),
- _min_clear_cc_time_ms(-1.0),
- _max_clear_cc_time_ms(-1.0),
- _cur_clear_cc_time_ms(0.0),
- _cum_clear_cc_time_ms(0.0),
- _num_cc_clears(0L),
  _last_gc_worker_start_times_ms(_max_gc_threads, "%.1lf", false),
  _last_ext_root_scan_times_ms(_max_gc_threads, "%.1lf"),
  _last_satb_filtering_times_ms(_max_gc_threads, "%.1lf"),

@@ -212,11 +207,11 @@ void G1GCPhaseTimes::note_gc_end() {
    _last_gc_worker_times_ms.set(i, worker_time);

    double worker_known_time = _last_ext_root_scan_times_ms.get(i) +
                               _last_satb_filtering_times_ms.get(i) +
                               _last_update_rs_times_ms.get(i) +
                               _last_scan_rs_times_ms.get(i) +
                               _last_obj_copy_times_ms.get(i) +
                               _last_termination_times_ms.get(i);

    double worker_other_time = worker_time - worker_known_time;
    _last_gc_worker_other_times_ms.set(i, worker_other_time);

@@ -285,15 +280,6 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
  }
  print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
  print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
- if (Verbose && G1Log::finest()) {
-   print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
-   print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
-   print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
-   print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
-   if (_num_cc_clears > 0) {
-     print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
-   }
- }
  double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
  print_stats(1, "Other", misc_time_ms);
  if (_cur_verify_before_time_ms > 0.0) {

@@ -311,19 +297,3 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
    print_stats(2, "Verify After", _cur_verify_after_time_ms);
  }
}

- void G1GCPhaseTimes::record_cc_clear_time_ms(double ms) {
-   if (!(Verbose && G1Log::finest())) {
-     return;
-   }
-
-   if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms) {
-     _min_clear_cc_time_ms = ms;
-   }
-   if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms) {
-     _max_clear_cc_time_ms = ms;
-   }
-   _cur_clear_cc_time_ms = ms;
-   _cum_clear_cc_time_ms += ms;
-   _num_cc_clears++;
- }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -133,13 +133,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
  double _cur_ref_proc_time_ms;
  double _cur_ref_enq_time_ms;

- // Card Table Count Cache stats
- double _min_clear_cc_time_ms;  // min
- double _max_clear_cc_time_ms;  // max
- double _cur_clear_cc_time_ms;  // clearing time during current pause
- double _cum_clear_cc_time_ms;  // cumulative clearing time
- jlong  _num_cc_clears;         // number of times the card count cache has been cleared
-
  double _cur_collection_start_sec;
  double _root_region_scan_wait_time_ms;

@@ -227,8 +220,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
    _root_region_scan_wait_time_ms = time_ms;
  }

- void record_cc_clear_time_ms(double ms);
-
  void record_young_free_cset_time_ms(double time_ms) {
    _recorded_young_free_cset_time_ms = time_ms;
  }

148
hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp
Normal file
@@ -0,0 +1,148 @@
/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1HotCardCache.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "runtime/atomic.hpp"

G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
  _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}

void G1HotCardCache::initialize() {
  if (default_use_cache()) {
    _use_cache = true;

    _hot_cache_size = (1 << G1ConcRSLogCacheSize);
    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);

    _n_hot = 0;
    _hot_cache_idx = 0;

    // For refining the cards in the hot cache in parallel
    int n_workers = (ParallelGCThreads > 0 ?
                     _g1h->workers()->total_workers() : 1);
    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
    _hot_cache_par_claimed_idx = 0;

    _card_counts.initialize();
  }
}

G1HotCardCache::~G1HotCardCache() {
  if (default_use_cache()) {
    assert(_hot_cache != NULL, "Logic");
    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
  }
}

jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
  uint count = _card_counts.add_card_count(card_ptr);
  if (!_card_counts.is_hot(count)) {
    // The card is not hot so do not store it in the cache;
    // return it for immediate refining.
    return card_ptr;
  }

  // Otherwise, the card is hot.
  jbyte* res = NULL;
  MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
  if (_n_hot == _hot_cache_size) {
    res = _hot_cache[_hot_cache_idx];
    _n_hot--;
  }

  // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
  _hot_cache[_hot_cache_idx] = card_ptr;
  _hot_cache_idx++;

  if (_hot_cache_idx == _hot_cache_size) {
    // Wrap around
    _hot_cache_idx = 0;
  }
  _n_hot++;

  return res;
}

void G1HotCardCache::drain(int worker_i,
                           G1RemSet* g1rs,
                           DirtyCardQueue* into_cset_dcq) {
  if (!default_use_cache()) {
    assert(_hot_cache == NULL, "Logic");
    return;
  }

  assert(_hot_cache != NULL, "Logic");
  assert(!use_cache(), "cache should be disabled");
  int start_idx;

  while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
    int end_idx = start_idx + _hot_cache_par_chunk_size;

    if (start_idx ==
        Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
      // The current worker has successfully claimed the chunk [start_idx..end_idx)
      end_idx = MIN2(end_idx, _n_hot);
      for (int i = start_idx; i < end_idx; i++) {
        jbyte* card_ptr = _hot_cache[i];
        if (card_ptr != NULL) {
          if (g1rs->refine_card(card_ptr, worker_i, true)) {
            // The part of the heap spanned by the card contains references
            // that point into the current collection set.
            // We need to record the card pointer in the DirtyCardQueueSet
            // that we use for such cards.
            //
            // The only time we care about recording cards that contain
            // references that point into the collection set is during
            // RSet updating while within an evacuation pause.
            // In this case worker_i should be the id of a GC worker thread
            assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
            assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
                   err_msg("incorrect worker id: "INT32_FORMAT, worker_i));

            into_cset_dcq->enqueue(card_ptr);
          }
        }
      }
    }
  }
  // The existing entries in the hot card cache, which were just refined
  // above, are discarded prior to re-enabling the cache near the end of the GC.
}

void G1HotCardCache::resize_card_counts(size_t heap_capacity) {
  _card_counts.resize(heap_capacity);
}

void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
  _card_counts.clear_region(hr);
}

void G1HotCardCache::reset_card_counts() {
  _card_counts.clear_all();
}

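drain() lets several worker threads empty the cache cooperatively: each worker claims a fixed-size chunk of indices with a compare-and-swap on _hot_cache_par_claimed_idx, so no entry is refined twice. Below is a standalone sketch of that claiming protocol (not HotSpot code), using std::atomic in place of HotSpot's Atomic::cmpxchg; the worker count, chunk size, and printf payload are arbitrary.

// Standalone sketch of CAS-based chunk claiming (not HotSpot code).
#include <algorithm>
#include <atomic>
#include <cstdio>
#include <functional>
#include <thread>
#include <vector>

static std::atomic<int> claimed_idx(0);   // plays the role of _hot_cache_par_claimed_idx

// Each worker repeatedly claims [start, start + chunk); the
// compare-exchange guarantees each chunk goes to exactly one worker.
void drain_worker(int worker_id, const std::vector<int>& entries, int chunk) {
  int start;
  while ((start = claimed_idx.load()) < (int)entries.size()) {
    int end = start + chunk;
    if (claimed_idx.compare_exchange_strong(start, end)) {
      end = std::min(end, (int)entries.size());
      for (int i = start; i < end; i++) {
        std::printf("worker %d refines entry %d\n", worker_id, entries[i]);
      }
    }
  }
}

int main() {
  std::vector<int> entries(64);
  for (int i = 0; i < 64; i++) entries[i] = i;
  std::vector<std::thread> workers;
  for (int w = 0; w < 4; w++) {
    workers.emplace_back(drain_worker, w, std::cref(entries), 8);
  }
  for (auto& t : workers) t.join();
  return 0;
}

The claim counter only ever advances, so the loop terminates once it passes the number of entries, and a failed CAS simply means another worker won that chunk.
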
128
hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp
Normal file
@@ -0,0 +1,128 @@
/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP

#include "gc_implementation/g1/g1_globals.hpp"
#include "gc_implementation/g1/g1CardCounts.hpp"
#include "memory/allocation.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/globalDefinitions.hpp"

class DirtyCardQueue;
class G1CollectedHeap;
class G1RemSet;
class HeapRegion;

// An evicting cache of cards that have been logged by the G1 post
// write barrier. Placing a card in the cache delays the refinement
// of the card until the card is evicted, or the cache is drained
// during the next evacuation pause.
//
// The first thing the G1 post write barrier does is to check whether
// the card containing the updated pointer is already dirty and, if
// so, skips the remaining code in the barrier.
//
// Delaying the refinement of a card will make the card fail the
// first is_dirty check in the write barrier, skipping the remainder
// of the write barrier.
//
// This can significantly reduce the overhead of the write barrier
// code, increasing throughput.

class G1HotCardCache: public CHeapObj<mtGC> {
  G1CollectedHeap* _g1h;

  // The card cache table
  jbyte** _hot_cache;

  int _hot_cache_size;
  int _n_hot;
  int _hot_cache_idx;

  int _hot_cache_par_chunk_size;
  volatile int _hot_cache_par_claimed_idx;

  bool _use_cache;

  G1CardCounts _card_counts;

  bool default_use_cache() const {
    return (G1ConcRSLogCacheSize > 0);
  }

 public:
  G1HotCardCache(G1CollectedHeap* g1h);
  ~G1HotCardCache();

  void initialize();

  bool use_cache() { return _use_cache; }

  void set_use_cache(bool b) {
    _use_cache = (b ? default_use_cache() : false);
  }

  // Returns the card to be refined or NULL.
  //
  // Increments the count for the given card. If the card is not 'hot',
  // it is returned for immediate refining. Otherwise the card is
  // added to the hot card cache.
  // If there is enough room in the hot card cache for the card we're
  // adding, NULL is returned and no further action is needed.
  // If we evict a card from the cache to make room for the new card,
  // the evicted card is then returned for refinement.
  jbyte* insert(jbyte* card_ptr);

  // Refine the cards that have been delayed as a result of
  // being in the cache.
  void drain(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);

  // Set up for parallel processing of the cards in the hot cache
  void reset_hot_cache_claimed_index() {
    _hot_cache_par_claimed_idx = 0;
  }

  // Resets the hot card cache and discards the entries.
  void reset_hot_cache() {
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
    assert(Thread::current()->is_VM_thread(), "Current thread should be the VMthread");
    _hot_cache_idx = 0; _n_hot = 0;
  }

  bool hot_cache_is_empty() { return _n_hot == 0; }

  // Resizes the card counts table to match the given capacity
  void resize_card_counts(size_t heap_capacity);

  // Zeros the values in the card counts table for entire committed heap
  void reset_card_counts();

  // Zeros the values in the card counts table for the given region
  void reset_card_counts(HeapRegion* hr);
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP

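The insert() contract documented above — return the incoming card if it is cold, NULL if the cache absorbed it, or an evicted victim if the ring was full — is the core of the cache. Below is a standalone sketch (not HotSpot code) of that ring-buffer behavior, with plain ints standing in for jbyte* card pointers and a std::mutex for HotCardCache_lock.

// Standalone sketch of the evicting ring buffer (not HotSpot code).
#include <cstdio>
#include <mutex>
#include <vector>

class HotCacheModel {
  std::vector<int> _cache;
  int _size, _n_hot, _idx;
  std::mutex _lock;

public:
  explicit HotCacheModel(int size)
    : _cache(size, -1), _size(size), _n_hot(0), _idx(0) {}

  // Returns -1 if the card was absorbed by the cache, otherwise the
  // evicted card that must now be refined immediately.
  int insert(int card) {
    std::lock_guard<std::mutex> guard(_lock);
    int evicted = -1;
    if (_n_hot == _size) {          // full: evict the oldest slot
      evicted = _cache[_idx];
      _n_hot--;
    }
    _cache[_idx] = card;            // overwrite the freed/empty slot
    if (++_idx == _size) _idx = 0;  // wrap around
    _n_hot++;
    return evicted;
  }
};

int main() {
  HotCacheModel cache(4);
  for (int card = 100; card < 107; card++) {
    int evicted = cache.insert(card);
    std::printf("insert %d -> evicted %d\n", card, evicted);
  }
  return 0;
}

Once the ring fills, every insertion displaces the oldest entry, so a steady stream of hot cards bounds the cache's memory while still deferring each card's refinement by one full trip around the buffer.
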
@@ -144,33 +144,28 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                 &GenMarkSweep::follow_stack_closure,
                                 NULL);

- // Follow system dictionary roots and unload classes
+ // This is the point where the entire marking should have completed.
+ assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");
+
+ // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
- assert(GenMarkSweep::_marking_stack.is_empty(),
-        "stack should be empty by now");

- // Follow code cache roots (has to be done after system dictionary,
- // assumes all live klasses are marked)
+ // Unload nmethods.
  CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class);
- GenMarkSweep::follow_stack();

- // Update subklass/sibling/implementor links of live klasses
+ // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
- assert(GenMarkSweep::_marking_stack.is_empty(),
-        "stack should be empty by now");

- // Visit interned string tables and delete unmarked oops
+ // Delete entries for dead interned strings.
  StringTable::unlink(&GenMarkSweep::is_alive);

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

- assert(GenMarkSweep::_marking_stack.is_empty(),
-        "stack should be empty by now");

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
-   gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
    Universe::heap()->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including

@@ -182,11 +177,13 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
-   Universe::heap()->verify(/* silent */ false,
-                            /* option */ VerifyOption_G1UseMarkWord);
-
-   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-   gclog_or_tty->print_cr("]");
+   if (!VerifySilently) {
+     gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
+   }
+   Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord);
+   if (!VerifySilently) {
+     gclog_or_tty->print_cr("]");
+   }
  }
}

@@ -308,17 +305,16 @@ void G1MarkSweep::mark_sweep_phase3() {
  sh->process_strong_roots(true,  // activate StrongRootsScope
                           false, // not scavenging.
                           SharedHeap::SO_AllClasses,
-                          &GenMarkSweep::adjust_root_pointer_closure,
+                          &GenMarkSweep::adjust_pointer_closure,
                           NULL,  // do not touch code cache here
                           &GenMarkSweep::adjust_klass_closure);

  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
- g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
+ g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
- g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
-                            &GenMarkSweep::adjust_pointer_closure);
+ g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure);

  GenMarkSweep::adjust_marks();

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

|
||||
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
|
||||
#include "gc_implementation/g1/g1HotCardCache.hpp"
|
||||
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
|
||||
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.inline.hpp"
|
||||
@ -169,14 +170,13 @@ public:
|
||||
// _try_claimed || r->claim_iter()
|
||||
// is true: either we're supposed to work on claimed-but-not-complete
|
||||
// regions, or we successfully claimed the region.
|
||||
HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
|
||||
hrrs->init_iterator(iter);
|
||||
HeapRegionRemSetIterator iter(hrrs);
|
||||
size_t card_index;
|
||||
|
||||
// We claim cards in block so as to recude the contention. The block size is determined by
|
||||
// the G1RSetScanBlockSize parameter.
|
||||
size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
|
||||
for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
|
||||
for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
|
||||
if (current_card >= jump_to_card + _block_size) {
|
||||
jump_to_card = hrrs->iter_claimed_next(_block_size);
|
||||
}
|
||||
@@ -248,7 +248,7 @@ public:
    assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
    assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");

-   if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) {
+   if (_g1rs->refine_card(card_ptr, worker_i, true)) {
      // 'card_ptr' contains references that point into the collection
      // set. We need to record the card in the DCQS
      // (G1CollectedHeap::into_cset_dirty_card_queue_set())

@@ -289,9 +289,6 @@ void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
#if CARD_REPEAT_HISTO
  ct_freq_update_histo_and_reset();
#endif
- if (worker_i == 0) {
-   _cg1r->clear_and_record_card_counts();
- }

  // We cache the value of 'oc' closure into the appropriate slot in the
  // _cset_rs_update_cl for this worker

|
||||
// RSet updating,
|
||||
// * the post-write barrier shouldn't be logging updates to young
|
||||
// regions (but there is a situation where this can happen - see
|
||||
// the comment in G1RemSet::concurrentRefineOneCard below -
|
||||
// the comment in G1RemSet::refine_card() below -
|
||||
// that should not be applicable here), and
|
||||
// * during actual RSet updating, the filtering of cards in young
|
||||
// regions in HeapRegion::oops_on_card_seq_iterate_careful is
|
||||
@ -503,8 +500,6 @@ void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
|
||||
claim_val);
|
||||
}
|
||||
|
||||
|
||||
|
||||
G1TriggerClosure::G1TriggerClosure() :
|
||||
_triggered(false) { }
|
||||
|
||||
@@ -525,13 +520,91 @@ G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
  _record_refs_into_cset(record_refs_into_cset),
  _push_ref_cl(push_ref_cl), _worker_i(worker_i) { }

- bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
-                                             bool check_for_refs_into_cset) {
+ // Returns true if the given card contains references that point
+ // into the collection set, if we're checking for such references;
+ // false otherwise.
+
+ bool G1RemSet::refine_card(jbyte* card_ptr, int worker_i,
+                            bool check_for_refs_into_cset) {
+
+   // If the card is no longer dirty, nothing to do.
+   if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
+     // No need to return that this card contains refs that point
+     // into the collection set.
+     return false;
+   }
+
    // Construct the region representing the card.
    HeapWord* start = _ct_bs->addr_for(card_ptr);
    // And find the region containing it.
    HeapRegion* r = _g1->heap_region_containing(start);
-   assert(r != NULL, "unexpected null");
+   if (r == NULL) {
+     // Again no need to return that this card contains refs that
+     // point into the collection set.
+     return false; // Not in the G1 heap (might be in perm, for example.)
+   }
+
+   // Why do we have to check here whether a card is on a young region,
+   // given that we dirty young regions and, as a result, the
+   // post-barrier is supposed to filter them out and never to enqueue
+   // them? When we allocate a new region as the "allocation region" we
+   // actually dirty its cards after we release the lock, since card
+   // dirtying while holding the lock was a performance bottleneck. So,
+   // as a result, it is possible for other threads to actually
+   // allocate objects in the region (after they acquire the lock)
+   // before all the cards on the region are dirtied. This is unlikely,
+   // and it doesn't happen often, but it can happen. So, the extra
+   // check below filters out those cards.
+   if (r->is_young()) {
+     return false;
+   }
+
+   // While we are processing RSet buffers during the collection, we
+   // actually don't want to scan any cards on the collection set,
+   // since we don't want to update remembered sets with entries that
+   // point into the collection set, given that live objects from the
+   // collection set are about to move and such entries will be stale
+   // very soon. This change also deals with a reliability issue which
+   // involves scanning a card in the collection set and coming across
+   // an array that was being chunked and looking malformed. Note,
+   // however, that if evacuation fails, we have to scan any objects
+   // that were not moved and create any missing entries.
+   if (r->in_collection_set()) {
+     return false;
+   }
+
+   // The result from the hot card cache insert call is either:
+   //   * pointer to the current card
+   //     (implying that the current card is not 'hot'),
+   //   * null
+   //     (meaning we had inserted the card ptr into the "hot" card cache,
+   //     which had some headroom),
+   //   * a pointer to a "hot" card that was evicted from the "hot" cache.
+   //
+
+   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
+   if (hot_card_cache->use_cache()) {
+     assert(!check_for_refs_into_cset, "sanity");
+     assert(!SafepointSynchronize::is_at_safepoint(), "sanity");
+
+     card_ptr = hot_card_cache->insert(card_ptr);
+     if (card_ptr == NULL) {
+       // There was no eviction. Nothing to do.
+       return false;
+     }
+
+     start = _ct_bs->addr_for(card_ptr);
+     r = _g1->heap_region_containing(start);
+     if (r == NULL) {
+       // Not in the G1 heap
+       return false;
+     }
+
+     // Checking whether the region we got back from the cache
+     // is young here is inappropriate. The region could have been
+     // freed, reallocated and tagged as young while in the cache.
+     // Hence we could see its young type change at any time.
+   }

    // Don't use addr_for(card_ptr + 1) which can ask for
    // a card beyond the heap. This is not safe without a perm

@ -611,140 +684,17 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
|
||||
_conc_refine_cards++;
|
||||
}
|
||||
|
||||
return trigger_cl.triggered();
|
||||
}
|
||||
// This gets set to true if the card being refined has
|
||||
// references that point into the collection set.
|
||||
bool has_refs_into_cset = trigger_cl.triggered();
|
||||
|
||||
bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
|
||||
bool check_for_refs_into_cset) {
|
||||
// If the card is no longer dirty, nothing to do.
|
||||
if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
|
||||
// No need to return that this card contains refs that point
|
||||
// into the collection set.
|
||||
return false;
|
||||
}
|
||||
|
||||
// Construct the region representing the card.
|
||||
HeapWord* start = _ct_bs->addr_for(card_ptr);
|
||||
// And find the region containing it.
|
||||
HeapRegion* r = _g1->heap_region_containing(start);
|
||||
if (r == NULL) {
|
||||
// Again no need to return that this card contains refs that
|
||||
// point into the collection set.
|
||||
return false; // Not in the G1 heap (might be in perm, for example.)
|
||||
}
|
||||
// Why do we have to check here whether a card is on a young region,
|
||||
// given that we dirty young regions and, as a result, the
|
||||
// post-barrier is supposed to filter them out and never to enqueue
|
||||
// them? When we allocate a new region as the "allocation region" we
|
||||
// actually dirty its cards after we release the lock, since card
|
||||
// dirtying while holding the lock was a performance bottleneck. So,
|
||||
// as a result, it is possible for other threads to actually
|
||||
// allocate objects in the region (after the acquire the lock)
|
||||
// before all the cards on the region are dirtied. This is unlikely,
|
||||
// and it doesn't happen often, but it can happen. So, the extra
|
||||
// check below filters out those cards.
|
||||
if (r->is_young()) {
|
||||
return false;
|
||||
}
|
||||
// While we are processing RSet buffers during the collection, we
|
||||
// actually don't want to scan any cards on the collection set,
|
||||
// since we don't want to update remebered sets with entries that
|
||||
// point into the collection set, given that live objects from the
|
||||
// collection set are about to move and such entries will be stale
|
||||
// very soon. This change also deals with a reliability issue which
|
||||
// involves scanning a card in the collection set and coming across
|
||||
  // an array that was being chunked and looking malformed. Note,
  // however, that if evacuation fails, we have to scan any objects
  // that were not moved and create any missing entries.
  if (r->in_collection_set()) {
    return false;
  }

  // Should we defer processing the card?
  //
  // Previously the result from the insert_cache call would be
  // either card_ptr (implying that card_ptr was currently "cold"),
  // null (meaning we had inserted the card ptr into the "hot"
  // cache, which had some headroom), or a "hot" card ptr
  // extracted from the "hot" cache.
  //
  // Now that the _card_counts cache in the ConcurrentG1Refine
  // instance is an evicting hash table, the result we get back
  // could be from evicting the card ptr in an already occupied
  // bucket (in which case we have replaced the card ptr in the
  // bucket with card_ptr and "defer" is set to false). To avoid
  // having a data structure (updates to which would need a lock)
  // to hold these unprocessed dirty cards, we need to immediately
  // process card_ptr. The actions needed to be taken on return
  // from cache_insert are summarized in the following table:
  //
  // res      defer   action
  // --------------------------------------------------------------
  // null     false   card evicted from _card_counts & replaced with
  //                  card_ptr; evicted ptr added to hot cache.
  //                  No need to process res; immediately process card_ptr
  //
  // null     true    card not evicted from _card_counts; card_ptr added
  //                  to hot cache.
  //                  Nothing to do.
  //
  // non-null false   card evicted from _card_counts & replaced with
  //                  card_ptr; evicted ptr is currently "cold" or
  //                  caused an eviction from the hot cache.
  //                  Immediately process res; process card_ptr.
  //
  // non-null true    card not evicted from _card_counts; card_ptr is
  //                  currently cold, or caused an eviction from hot
  //                  cache.
  //                  Immediately process res; no need to process card_ptr.

  jbyte* res = card_ptr;
  bool defer = false;

  // This gets set to true if the card being refined has references
  // that point into the collection set.
  bool oops_into_cset = false;

  if (_cg1r->use_cache()) {
    jbyte* res = _cg1r->cache_insert(card_ptr, &defer);
    if (res != NULL && (res != card_ptr || defer)) {
      start = _ct_bs->addr_for(res);
      r = _g1->heap_region_containing(start);
      if (r != NULL) {
        // Checking whether the region we got back from the cache
        // is young here is inappropriate. The region could have been
        // freed, reallocated and tagged as young while in the cache.
        // Hence we could see its young type change at any time.
        //
        // Process the card pointer we get back from the hot card cache. This
        // will check whether the region containing the card is young
        // _after_ checking that the region has been allocated from.
        oops_into_cset = concurrentRefineOneCard_impl(res, worker_i,
                                                      false /* check_for_refs_into_cset */);
        // The above call to concurrentRefineOneCard_impl is only
        // performed if the hot card cache is enabled. This cache is
        // disabled during an evacuation pause - which is the only
        // time when we need to know if the card contains references
        // that point into the collection set. Also when the hot card
        // cache is enabled, this code is executed by the concurrent
        // refine threads - rather than the GC worker threads - and
        // concurrentRefineOneCard_impl will return false.
        assert(!oops_into_cset, "should not see true here");
      }
    }
  }

  if (!defer) {
    oops_into_cset =
      concurrentRefineOneCard_impl(card_ptr, worker_i, check_for_refs_into_cset);
    // We should only be detecting that the card contains references
    // that point into the collection set if the current thread is
    // a GC worker thread.
    assert(!oops_into_cset || SafepointSynchronize::is_at_safepoint(),
    assert(!has_refs_into_cset || SafepointSynchronize::is_at_safepoint(),
           "invalid result at non safepoint");
  }
  return oops_into_cset;
  return has_refs_into_cset;
}

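The res/defer table above boils down to two independent checks. The following is a minimal standalone sketch of that dispatch, not HotSpot code: jbyte is redefined locally and process_card stands in for the real per-card refinement work.

#include <cstddef>

typedef signed char jbyte;   // local stand-in for HotSpot's jbyte

// Stand-in for the real per-card refinement work.
static void process_card(jbyte* card) { (void)card; }

// "res" is what cache_insert returned, "card_ptr" is the card we tried to
// insert, and "defer" says whether card_ptr was retained by the cache.
static void dispatch(jbyte* res, jbyte* card_ptr, bool defer) {
  if (res != NULL && (res != card_ptr || defer)) {
    // A card came back out of the cache; it must be refined now.
    process_card(res);
  }
  if (!defer) {
    // card_ptr was not kept in the hot cache; refine it immediately.
    process_card(card_ptr);
  }
  // res == NULL && defer: card_ptr now sits in the hot cache; nothing to do.
}

int main() {
  jbyte cards[2] = {0, 0};
  dispatch(&cards[0], &cards[1], false);  // non-null res, not deferred:
  return 0;                               // both cards get processed
}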
class HRRSStatsIter: public HeapRegionClosure {
@ -847,13 +797,16 @@ void G1RemSet::prepare_for_verify() {
      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
      dcqs.concatenate_logs();
    }
    bool cg1r_use_cache = _cg1r->use_cache();
    _cg1r->set_use_cache(false);

    G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
    bool use_hot_card_cache = hot_card_cache->use_cache();
    hot_card_cache->set_use_cache(false);

    DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
    updateRS(&into_cset_dcq, 0);
    _g1->into_cset_dirty_card_queue_set().clear();
    _cg1r->set_use_cache(cg1r_use_cache);

    hot_card_cache->set_use_cache(use_hot_card_cache);
    assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  }
}
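prepare_for_verify saves the cache-enabled flag, disables the cache around the remembered set update, then restores the flag by hand. The same pattern can be written as an RAII guard; below is a hedged sketch with a toy Cache type (not the VM's G1HotCardCache) that restores the saved setting even on an early return.

struct Cache {
  bool _use_cache;
  bool use_cache() const { return _use_cache; }
  void set_use_cache(bool v) { _use_cache = v; }
};

// Saves the current setting, disables the cache, and restores the old
// setting when the scope ends.
class DisableCacheGuard {
  Cache* _cache;
  bool   _saved;
public:
  explicit DisableCacheGuard(Cache* cache)
    : _cache(cache), _saved(cache->use_cache()) {
    _cache->set_use_cache(false);
  }
  ~DisableCacheGuard() {
    _cache->set_use_cache(_saved);
  }
};

int main() {
  Cache hot_card_cache = { true };
  {
    DisableCacheGuard guard(&hot_card_cache);
    // ... concatenate logs and apply all pending card updates here ...
  } // cache automatically re-enabled
  return hot_card_cache.use_cache() ? 0 : 1;  // 0: setting was restored
}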
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -53,27 +53,19 @@ protected:
    NumSeqTasks = 1
  };

  CardTableModRefBS* _ct_bs;
  SubTasksDone* _seq_task;
  G1CollectorPolicy* _g1p;
  CardTableModRefBS* _ct_bs;
  SubTasksDone* _seq_task;
  G1CollectorPolicy* _g1p;

  ConcurrentG1Refine* _cg1r;
  ConcurrentG1Refine* _cg1r;

  size_t* _cards_scanned;
  size_t _total_cards_scanned;
  size_t* _cards_scanned;
  size_t _total_cards_scanned;

  // Used for caching the closure that is responsible for scanning
  // references into the collection set.
  OopsInHeapRegionClosure** _cset_rs_update_cl;

  // The routine that performs the actual work of refining a dirty
  // card.
  // If check_for_refs_into_cset is true then a true result is returned
  // if the card contains oops that have references into the current
  // collection set.
  bool concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                    bool check_for_refs_into_cset);

public:
  // This is called to reset dual hash tables after the gc pause
  // is finished and the initial hash table is no longer being
@ -90,8 +82,7 @@ public:
  // function can be helpful in partitioning the work to be done. It
  // should be the same as the "i" passed to the calling thread's
  // work(i) function. In the sequential case this param will be ignored.
  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                   int worker_i);
  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, int worker_i);

  // Prepare for and cleanup after an oops_into_collection_set_do
  // call. Must call each of these once before and after (in sequential
@ -124,14 +115,13 @@ public:
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
                 uint worker_num, int claim_val);

  // Refine the card corresponding to "card_ptr". If "sts" is non-NULL,
  // join and leave around parts that must be atomic wrt GC. (NULL means
  // being done at a safepoint.)
  // Refine the card corresponding to "card_ptr".
  // If check_for_refs_into_cset is true, a true result is returned
  // if the given card contains oops that have references into the
  // current collection set.
  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                       bool check_for_refs_into_cset);
  virtual bool refine_card(jbyte* card_ptr,
                           int worker_i,
                           bool check_for_refs_into_cset);

  // Print any relevant summary info.
  virtual void print_summary_info();
@ -163,16 +163,12 @@
          "Select green, yellow and red zones adaptively to meet the "   \
          "pause requirements.")                                         \
                                                                         \
  develop(intx, G1ConcRSLogCacheSize, 10,                                \
  product(uintx, G1ConcRSLogCacheSize, 10,                               \
          "Log base 2 of the length of conc RS hot-card cache.")         \
                                                                         \
  develop(intx, G1ConcRSHotCardLimit, 4,                                 \
  product(uintx, G1ConcRSHotCardLimit, 4,                                \
          "The threshold that defines (>=) a hot card.")                 \
                                                                         \
  develop(intx, G1MaxHotCardCountSizePercent, 25,                        \
          "The maximum size of the hot card count cache as a "           \
          "percentage of the number of cards for the maximum heap.")     \
                                                                         \
  develop(bool, G1PrintOopAppls, false,                                  \
          "When true, print applications of closures to external locs.") \
                                                                         \
@ -247,10 +243,6 @@
          "If non-0 is the number of parallel rem set update threads, "  \
          "otherwise the value is determined ergonomically.")            \
                                                                         \
  develop(intx, G1CardCountCacheExpandThreshold, 16,                     \
          "Expand the card count cache if the number of collisions for " \
          "a particular entry exceeds this value.")                      \
                                                                         \
  develop(bool, G1VerifyCTCleanup, false,                                \
          "Verify card table cleanup.")                                  \
                                                                         \
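The two flags promoted from develop to product above size the hot card cache: its length is two to the power of G1ConcRSLogCacheSize, and a card whose refinement count reaches (>=) G1ConcRSHotCardLimit is treated as hot. A small sketch of that arithmetic using the defaults from the table; plain C++, not the VM's actual initialization code.

#include <cstddef>
#include <cstdio>

int main() {
  const unsigned G1ConcRSLogCacheSize = 10; // default from the flag table
  const unsigned G1ConcRSHotCardLimit = 4;  // default from the flag table

  // "Log base 2 of the length" means the cache holds 1 << 10 = 1024 entries.
  size_t hot_cache_length = (size_t)1 << G1ConcRSLogCacheSize;
  // A card becomes hot once its refinement count reaches the limit.
  unsigned hot_threshold = G1ConcRSHotCardLimit;

  printf("hot cache length: %zu, hot threshold: %u\n",
         hot_cache_length, hot_threshold);
  return 0;
}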
@ -285,7 +285,7 @@ OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
  _fine_grain_regions = new PerRegionTablePtr[_max_fine_entries];

  if (_fine_grain_regions == NULL) {
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries,
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
                          "Failed to allocate _fine_grain_entries.");
  }
@ -877,14 +877,9 @@ bool HeapRegionRemSet::iter_is_complete() {
  return _iter_state == Complete;
}

void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
  iter->initialize(this);
}

#ifndef PRODUCT
void HeapRegionRemSet::print() const {
  HeapRegionRemSetIterator iter;
  init_iterator(&iter);
  HeapRegionRemSetIterator iter(this);
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
@ -928,35 +923,23 @@ void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,

//-------------------- Iteration --------------------

HeapRegionRemSetIterator::
HeapRegionRemSetIterator() :
  _hrrs(NULL),
HeapRegionRemSetIterator::HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) :
  _hrrs(hrrs),
  _g1h(G1CollectedHeap::heap()),
  _bosa(NULL),
  _sparse_iter() { }

void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
  _hrrs = hrrs;
  _coarse_map = &_hrrs->_other_regions._coarse_map;
  _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions;
  _bosa = _hrrs->bosa();

  _is = Sparse;
  _coarse_map(&hrrs->_other_regions._coarse_map),
  _fine_grain_regions(hrrs->_other_regions._fine_grain_regions),
  _bosa(hrrs->bosa()),
  _is(Sparse),
  // Set these values so that we increment to the first region.
  _coarse_cur_region_index = -1;
  _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);

  _cur_region_cur_card = 0;

  _fine_array_index = -1;
  _fine_cur_prt = NULL;

  _n_yielded_coarse = 0;
  _n_yielded_fine = 0;
  _n_yielded_sparse = 0;

  _sparse_iter.init(&hrrs->_other_regions._sparse_table);
}
  _coarse_cur_region_index(-1),
  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
  _cur_region_cur_card(0),
  _fine_array_index(-1),
  _fine_cur_prt(NULL),
  _n_yielded_coarse(0),
  _n_yielded_fine(0),
  _n_yielded_sparse(0),
  _sparse_iter(&hrrs->_other_regions._sparse_table) {}

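The change above replaces a two-phase iterator (default constructor plus an initialize() call) with one that is fully initialized by its constructor. A toy sketch of the resulting shape; RemSet and CardIterator are illustrative stand-ins for HeapRegionRemSet and its iterator, not the real types.

#include <cstddef>
#include <cstdio>

struct RemSet { size_t n_cards; };

// Fully initialized by its constructor, so there is no separate
// init_iterator()/initialize() step and it can live on the stack.
class CardIterator {
  const RemSet* _rs;
  size_t _next;
public:
  explicit CardIterator(const RemSet* rs) : _rs(rs), _next(0) {}
  bool has_next(size_t& card_index) {
    if (_next >= _rs->n_cards) return false;
    card_index = _next++;
    return true;
  }
};

int main() {
  RemSet rs = { 5 };
  CardIterator iter(&rs);      // one-step construction, as in the new code
  size_t card_index, sum = 0;
  while (iter.has_next(card_index)) {
    sum += card_index;         // visits 0..4
  }
  printf("sum = %zu\n", sum);  // prints 10
  return 0;
}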
bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
@ -1209,8 +1192,7 @@ void HeapRegionRemSet::test() {
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

  // Now, does iteration yield these three?
  HeapRegionRemSetIterator iter;
  hrrs->init_iterator(&iter);
  HeapRegionRemSetIterator iter(hrrs);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
@ -281,9 +281,6 @@ public:
    return (_iter_state == Unclaimed) && (_iter_claimed == 0);
  }

  // Initialize the given iterator to iterate over this rem set.
  void init_iterator(HeapRegionRemSetIterator* iter) const;

  // The actual # of bytes this hr_remset takes up.
  size_t mem_size() {
    return _other_regions.mem_size()
@ -345,9 +342,9 @@ public:
#endif
};

class HeapRegionRemSetIterator : public CHeapObj<mtGC> {
class HeapRegionRemSetIterator : public StackObj {

  // The region over which we're iterating.
  // The region RSet over which we're iterating.
  const HeapRegionRemSet* _hrrs;

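Rebasing the iterator from CHeapObj<mtGC> to StackObj above changes its allocation policy: it is now meant to be created on the stack, never with new. A sketch of how such a base class can enforce that; StackObjSketch is an illustrative name, and it is an assumption here that HotSpot's real StackObj takes a similar approach of making operator new unusable.

#include <cstddef>

// A base class that forbids heap allocation for its descendants by
// declaring operator new private and leaving it unimplemented.
class StackObjSketch {
private:
  void* operator new(size_t size);   // not implemented: no heap allocation
  void  operator delete(void* p);    // not implemented
};

class Iter : public StackObjSketch {
public:
  int next() { return 0; }
};

int main() {
  Iter it;                  // fine: lives on the stack
  // Iter* p = new Iter();  // would fail to compile: operator new is private
  (void)it;
  return 0;
}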
  // Local caching of HRRS fields.
@ -362,8 +359,10 @@ class HeapRegionRemSetIterator : public CHeapObj<mtGC> {
  size_t _n_yielded_coarse;
  size_t _n_yielded_sparse;

  // If true we're iterating over the coarse table; if false the fine
  // table.
  // Indicates which granularity of table we're currently iterating over.
  // We start iterating over the sparse table, progress to the fine grain
  // table, and then finish with the coarse table.
  // See HeapRegionRemSetIterator::has_next().
  enum IterState {
    Sparse,
    Fine,
@ -403,9 +402,7 @@ class HeapRegionRemSetIterator : public CHeapObj<mtGC> {
public:
  // We require an iterator to be initialized before use, so the
  // constructor does little.
  HeapRegionRemSetIterator();

  void initialize(const HeapRegionRemSet* hrrs);
  HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs);

  // If there remain one or more cards to be yielded, returns true and
  // sets "card_index" to one of those cards (which is then considered
@ -124,11 +124,11 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
    }
    assert(_regions[index] == NULL, "invariant");
    _regions[index] = new_hr;
    increment_length(&_allocated_length);
    increment_allocated_length();
  }
  // Have to increment the length first, otherwise we will get an
  // assert failure in at(index) below.
  increment_length(&_length);
  increment_length();
  HeapRegion* hr = at(index);
  list->add_as_tail(hr);
@ -201,45 +201,29 @@ void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
  }
}

MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   uint* num_regions_deleted) {
uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
  // Reset this in case it's currently pointing into the regions that
  // we just removed.
  _next_search_index = 0;

  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
  assert(length() > 0, "the region sequence should not be empty");
  assert(length() <= _allocated_length, "invariant");
  assert(_allocated_length > 0, "we should have at least one region committed");
  assert(num_regions_to_remove < length(), "We should never remove all regions");

  // around the loop, i will be the next region to be removed
  uint i = length() - 1;
  assert(i > 0, "we should never remove all regions");
  // [last_start, end) is the MemRegion that covers the regions we will remove.
  HeapWord* end = at(i)->end();
  HeapWord* last_start = end;
  *num_regions_deleted = 0;
  while (shrink_bytes > 0) {
    HeapRegion* cur = at(i);
    // We should leave the humongous regions where they are.
    if (cur->isHumongous()) break;
    // We should stop shrinking if we come across a non-empty region.
    if (!cur->is_empty()) break;
  uint i = 0;
  for (; i < num_regions_to_remove; i++) {
    HeapRegion* cur = at(length() - 1);

    i -= 1;
    *num_regions_deleted += 1;
    shrink_bytes -= cur->capacity();
    last_start = cur->bottom();
    decrement_length(&_length);
    // We will reclaim the HeapRegion. _allocated_length should be
    // covering this index. So, even though we removed the region from
    // the active set by decreasing _length, we still have it
    // available in the future if we need to re-use it.
    assert(i > 0, "we should never remove all regions");
    assert(length() > 0, "we should never remove all regions");
    if (!cur->is_empty()) {
      // We have to give up if the region can not be moved
      break;
    }
    return MemRegion(last_start, end);
    assert(!cur->isHumongous(), "Humongous regions should not be empty");

    decrement_length();
  }
  return i;
}

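shrink_by now takes a region count and reports how many regions actually came off, instead of consuming a byte count and returning a MemRegion. A toy sketch of how a byte-oriented caller might convert; GRAIN_BYTES and the global counter are stand-ins for HeapRegion::GrainBytes and the real sequence state, not actual call sites.

#include <cstdint>
#include <cstdio>

typedef unsigned int uint;

static const uint64_t GRAIN_BYTES = 1024 * 1024;  // assume 1 MB regions
static uint g_length = 64;                        // committed regions (toy state)

// Toy model of the new API: pop up to num_regions_to_remove regions off the
// end of the sequence and report how many actually came off.
static uint shrink_by(uint num_regions_to_remove) {
  uint i = 0;
  for (; i < num_regions_to_remove && g_length > 1; i++) {
    // The real loop also gives up at the first non-empty region.
    g_length--;
  }
  return i;
}

int main() {
  uint64_t shrink_bytes = 8 * GRAIN_BYTES;
  // Old callers passed bytes; new callers convert to whole regions first
  // (shrink_bytes is assumed region aligned, as the removed asserts required).
  uint removed = shrink_by((uint)(shrink_bytes / GRAIN_BYTES));
  printf("removed %u regions, %u remain\n", removed, g_length);
  return 0;
}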
#ifndef PRODUCT
@ -92,14 +92,19 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
  // address is valid.
  inline uintx addr_to_index_biased(HeapWord* addr) const;

  void increment_length(uint* length) {
    assert(*length < _max_length, "pre-condition");
    *length += 1;
  void increment_allocated_length() {
    assert(_allocated_length < _max_length, "pre-condition");
    _allocated_length++;
  }

  void decrement_length(uint* length) {
    assert(*length > 0, "pre-condition");
    *length -= 1;
  void increment_length() {
    assert(_length < _max_length, "pre-condition");
    _length++;
  }

  void decrement_length() {
    assert(_length > 0, "pre-condition");
    _length--;
  }

public:
@ -153,11 +158,9 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
  void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const;

  // Tag as uncommitted as many regions that are completely free as
  // possible, up to shrink_bytes, from the suffix of the committed
  // sequence. Return a MemRegion that corresponds to the address
  // range of the uncommitted regions. Assume shrink_bytes is page and
  // heap region aligned.
  MemRegion shrink_by(size_t shrink_bytes, uint* num_regions_deleted);
  // possible, up to num_regions_to_remove, from the suffix of the committed
  // sequence. Return the actual number of removed regions.
  uint shrink_by(uint num_regions_to_remove);

  // Do some sanity checking.
  void verify_optional() PRODUCT_RETURN;
@ -35,10 +35,6 @@

#define UNROLL_CARD_LOOPS 1

void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) {
  sprt_iter->init(this);
}

void SparsePRTEntry::init(RegionIdx_t region_ind) {
  _region_ind = region_ind;
  _next_index = NullEntry;
@ -192,18 +192,11 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
  size_t compute_card_ind(CardIdx_t ci);

public:
  RSHashTableIter() :
    _tbl_ind(RSHashTable::NullEntry),
  RSHashTableIter(RSHashTable* rsht) :
    _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
    _bl_ind(RSHashTable::NullEntry),
    _card_ind((SparsePRTEntry::cards_num() - 1)),
    _rsht(NULL) {}

  void init(RSHashTable* rsht) {
    _rsht = rsht;
    _tbl_ind = -1; // So that first increment gets to 0.
    _bl_ind = RSHashTable::NullEntry;
    _card_ind = (SparsePRTEntry::cards_num() - 1);
  }
    _rsht(rsht) {}

  bool has_next(size_t& card_index);
};
@ -284,8 +277,6 @@ public:
  static void cleanup_all();
  RSHashTable* cur() const { return _cur; }

  void init_iterator(SparsePRTIter* sprt_iter);

  static void add_to_expanded_list(SparsePRT* sprt);
  static SparsePRT* get_from_expanded_list();
@ -321,9 +312,9 @@ public:

class SparsePRTIter: public RSHashTableIter {
public:
  void init(const SparsePRT* sprt) {
    RSHashTableIter::init(sprt->cur());
  }
  SparsePRTIter(const SparsePRT* sprt) :
    RSHashTableIter(sprt->cur()) {}

  bool has_next(size_t& card_index) {
    return RSHashTableIter::has_next(card_index);
  }
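Both iterator changes above trade an init() member for constructor forwarding: the derived iterator hands its table straight to the base-class constructor. A toy sketch of that shape; Table, BaseIter, and OwnerIter are illustrative names, not the real RSHashTableIter/SparsePRTIter types.

struct Table { int size; };

// Base iterator is fully initialized by its constructor.
class BaseIter {
  const Table* _tbl;
  int _ind;
public:
  explicit BaseIter(const Table* tbl)
    : _tbl(tbl), _ind(-1) {}           // -1: first increment lands on 0
  bool has_next(int& out) {
    if (_ind + 1 >= _tbl->size) return false;
    out = ++_ind;
    return true;
  }
};

struct Owner { Table cur; };

// Derived iterator forwards straight to the base constructor instead of
// calling an init() member after construction.
class OwnerIter : public BaseIter {
public:
  explicit OwnerIter(const Owner* o) : BaseIter(&o->cur) {}
};

int main() {
  Owner owner = { { 3 } };
  OwnerIter it(&owner);
  int idx, count = 0;
  while (it.has_next(idx)) count++;
  return count == 3 ? 0 : 1;  // 0: all three entries were yielded
}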
Some files were not shown because too many files have changed in this diff.