Merge
This commit is contained in:
commit
409e738a07
1
.hgtags
1
.hgtags
@ -209,3 +209,4 @@ d7ad0dfaa41151bd3a9ae46725b0aec3730a9cd0 jdk8-b84
|
|||||||
1872c12529090e1c1dbf567f02ad7ae6231b8f0c jdk8-b85
|
1872c12529090e1c1dbf567f02ad7ae6231b8f0c jdk8-b85
|
||||||
da9a4c9312816451884aa6db6f18be51a07bff13 jdk8-b86
|
da9a4c9312816451884aa6db6f18be51a07bff13 jdk8-b86
|
||||||
5ebf6c63714de2c9dcf831074086d31daec819df jdk8-b87
|
5ebf6c63714de2c9dcf831074086d31daec819df jdk8-b87
|
||||||
|
e517701a4d0e25ae9c7945bca6e1762a8c5d8aa6 jdk8-b88
|
||||||
|
@ -209,3 +209,4 @@ fd1a5574cf68af24bfd52decc37ac6361afb278a jdk8-b78
|
|||||||
7fc358f5943676b82f1dccd3152b1ac07d92e38b jdk8-b85
|
7fc358f5943676b82f1dccd3152b1ac07d92e38b jdk8-b85
|
||||||
df9b5240f0a76c91cfe1a5b39da4d08df56e05be jdk8-b86
|
df9b5240f0a76c91cfe1a5b39da4d08df56e05be jdk8-b86
|
||||||
b9415faa7066a4d3b16d466556d5428446918d95 jdk8-b87
|
b9415faa7066a4d3b16d466556d5428446918d95 jdk8-b87
|
||||||
|
e1a929afcfc492470d50be0b6b0e8dc77d3760b9 jdk8-b88
|
||||||
|
@ -411,6 +411,8 @@ define SetupNativeCompilation
|
|||||||
$1_EXTRA_LDFLAGS+="-implib:$$($1_OBJECT_DIR)/$$($1_LIBRARY).lib"
|
$1_EXTRA_LDFLAGS+="-implib:$$($1_OBJECT_DIR)/$$($1_LIBRARY).lib"
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
$1_EXTRA_LDFLAGS_SUFFIX += $(GLOBAL_LDFLAGS_SUFFIX)
|
||||||
|
|
||||||
ifneq (,$$($1_DEBUG_SYMBOLS))
|
ifneq (,$$($1_DEBUG_SYMBOLS))
|
||||||
ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
|
ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
|
||||||
ifeq ($(OPENJDK_TARGET_OS), windows)
|
ifeq ($(OPENJDK_TARGET_OS), windows)
|
||||||
@ -549,6 +551,8 @@ define SetupNativeCompilation
|
|||||||
endif
|
endif
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
$1_EXTRA_LDFLAGS_SUFFIX += $(GLOBAL_LDFLAGS_SUFFIX)
|
||||||
|
|
||||||
$$($1_TARGET) : $$($1_EXPECTED_OBJS) $$($1_RES) $$($1_GEN_MANIFEST)
|
$$($1_TARGET) : $$($1_EXPECTED_OBJS) $$($1_RES) $$($1_GEN_MANIFEST)
|
||||||
$$(call LINKING_EXE_MSG,$$($1_BASENAME))
|
$$(call LINKING_EXE_MSG,$$($1_BASENAME))
|
||||||
$$($1_LDEXE) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) $(EXE_OUT_OPTION)$$($1_TARGET) \
|
$$($1_LDEXE) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) $(EXE_OUT_OPTION)$$($1_TARGET) \
|
||||||
|
@ -142,6 +142,7 @@ CORE_PKGS = \
|
|||||||
java.util.prefs \
|
java.util.prefs \
|
||||||
java.util.regex \
|
java.util.regex \
|
||||||
java.util.spi \
|
java.util.spi \
|
||||||
|
java.util.stream \
|
||||||
java.util.zip \
|
java.util.zip \
|
||||||
javax.accessibility \
|
javax.accessibility \
|
||||||
javax.activation \
|
javax.activation \
|
||||||
|
@ -390,6 +390,17 @@ $(COREAPI_OPTIONS_FILE): $(COREAPI_OVERVIEW)
|
|||||||
$(call OptionPair,-tag,specdefault:X) ; \
|
$(call OptionPair,-tag,specdefault:X) ; \
|
||||||
$(call OptionPair,-tag,Note:X) ; \
|
$(call OptionPair,-tag,Note:X) ; \
|
||||||
$(call OptionPair,-tag,ToDo:X) ; \
|
$(call OptionPair,-tag,ToDo:X) ; \
|
||||||
|
$(call OptionPair,-tag,apiNote:a:API Note:) ; \
|
||||||
|
$(call OptionPair,-tag,implSpec:a:Implementation Requirements:) ; \
|
||||||
|
$(call OptionPair,-tag,implNote:a:Implementation Note:) ; \
|
||||||
|
$(call OptionPair,-tag,param) ; \
|
||||||
|
$(call OptionPair,-tag,return) ; \
|
||||||
|
$(call OptionPair,-tag,throws) ; \
|
||||||
|
$(call OptionPair,-tag,since) ; \
|
||||||
|
$(call OptionPair,-tag,version) ; \
|
||||||
|
$(call OptionPair,-tag,serialData) ; \
|
||||||
|
$(call OptionPair,-tag,factory) ; \
|
||||||
|
$(call OptionPair,-tag,see) ; \
|
||||||
$(call OptionPair,-tag,$(TAG_JLS)) ; \
|
$(call OptionPair,-tag,$(TAG_JLS)) ; \
|
||||||
$(call OptionOnly,-splitIndex) ; \
|
$(call OptionOnly,-splitIndex) ; \
|
||||||
$(call OptionPair,-overview,$(COREAPI_OVERVIEW)) ; \
|
$(call OptionPair,-overview,$(COREAPI_OVERVIEW)) ; \
|
||||||
|
@ -209,3 +209,4 @@ a45bb25a67c7517b45f00c9682e317f46fecbba9 jdk8-b83
|
|||||||
9583a6431596bac1959d2d8828f5ea217843dd12 jdk8-b85
|
9583a6431596bac1959d2d8828f5ea217843dd12 jdk8-b85
|
||||||
44a8ce4a759f2668ff434661a93ff462ea472478 jdk8-b86
|
44a8ce4a759f2668ff434661a93ff462ea472478 jdk8-b86
|
||||||
f1709874d55a06bc3d5dfa02dbcdfbc59f4cba34 jdk8-b87
|
f1709874d55a06bc3d5dfa02dbcdfbc59f4cba34 jdk8-b87
|
||||||
|
4e3a881ebb1ee96ce0872508b0066d74f310dbfa jdk8-b88
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1999, 2004, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -258,6 +258,19 @@ public class UnionGen implements com.sun.tools.corba.se.idl.UnionGen, JavaGenera
|
|||||||
{
|
{
|
||||||
Vector labels = vectorizeLabels (u.branches (), true);
|
Vector labels = vectorizeLabels (u.branches (), true);
|
||||||
|
|
||||||
|
if (Util.javaName(utype).equals ("boolean")) {
|
||||||
|
stream.println( "" ) ;
|
||||||
|
stream.println( " private void verifyDefault (boolean discriminator)" ) ;
|
||||||
|
stream.println( " {" ) ;
|
||||||
|
if (labels.contains ("true"))
|
||||||
|
stream.println (" if ( discriminator )");
|
||||||
|
else
|
||||||
|
stream.println (" if ( !discriminator )");
|
||||||
|
stream.println( " throw new org.omg.CORBA.BAD_OPERATION();" ) ;
|
||||||
|
stream.println( " }" ) ;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
stream.println( "" ) ;
|
stream.println( "" ) ;
|
||||||
stream.println( " private void verifyDefault( " + Util.javaName(utype) +
|
stream.println( " private void verifyDefault( " + Util.javaName(utype) +
|
||||||
" value )" ) ;
|
" value )" ) ;
|
||||||
@ -763,7 +776,7 @@ public class UnionGen implements com.sun.tools.corba.se.idl.UnionGen, JavaGenera
|
|||||||
stream.println (indent + "if (" + disName + ')');
|
stream.println (indent + "if (" + disName + ')');
|
||||||
|
|
||||||
if (firstBranch == null)
|
if (firstBranch == null)
|
||||||
stream.println (indent + " throw new org.omg.CORBA.BAD_OPERATION ();");
|
stream.println (indent + " value._default(" + disName + ");");
|
||||||
else {
|
else {
|
||||||
stream.println (indent + '{');
|
stream.println (indent + '{');
|
||||||
index = readBranch (index, indent + " ", firstBranch.typedef.name (),
|
index = readBranch (index, indent + " ", firstBranch.typedef.name (),
|
||||||
@ -774,7 +787,7 @@ public class UnionGen implements com.sun.tools.corba.se.idl.UnionGen, JavaGenera
|
|||||||
stream.println (indent + "else");
|
stream.println (indent + "else");
|
||||||
|
|
||||||
if (secondBranch == null)
|
if (secondBranch == null)
|
||||||
stream.println (indent + " throw new org.omg.CORBA.BAD_OPERATION ();");
|
stream.println (indent + " value._default(" + disName + ");");
|
||||||
else {
|
else {
|
||||||
stream.println (indent + '{');
|
stream.println (indent + '{');
|
||||||
index = readBranch (index, indent + " ", secondBranch.typedef.name (),
|
index = readBranch (index, indent + " ", secondBranch.typedef.name (),
|
||||||
@ -924,23 +937,25 @@ public class UnionGen implements com.sun.tools.corba.se.idl.UnionGen, JavaGenera
|
|||||||
firstBranch = secondBranch;
|
firstBranch = secondBranch;
|
||||||
secondBranch = tmp;
|
secondBranch = tmp;
|
||||||
}
|
}
|
||||||
stream.println (indent + "if (" + disName + ')');
|
if (firstBranch != null && secondBranch != null) {
|
||||||
if (firstBranch == null)
|
stream.println (indent + "if (" + disName + ')');
|
||||||
stream.println (indent + " throw new org.omg.CORBA.BAD_OPERATION ();");
|
stream.println (indent + '{');
|
||||||
else
|
index = writeBranch (index, indent + " ", name, firstBranch.typedef, stream);
|
||||||
{
|
stream.println (indent + '}');
|
||||||
stream.println (indent + '{');
|
stream.println (indent + "else");
|
||||||
index = writeBranch (index, indent + " ", name, firstBranch.typedef, stream);
|
stream.println (indent + '{');
|
||||||
stream.println (indent + '}');
|
index = writeBranch (index, indent + " ", name, secondBranch.typedef, stream);
|
||||||
}
|
stream.println (indent + '}');
|
||||||
stream.println (indent + "else");
|
} else if (firstBranch != null) {
|
||||||
if (secondBranch == null)
|
stream.println (indent + "if (" + disName + ')');
|
||||||
stream.println (indent + " throw new org.omg.CORBA.BAD_OPERATION ();");
|
stream.println (indent + '{');
|
||||||
else
|
index = writeBranch (index, indent + " ", name, firstBranch.typedef, stream);
|
||||||
{
|
stream.println (indent + '}');
|
||||||
stream.println (indent + '{');
|
} else {
|
||||||
index = writeBranch (index, indent + " ", name, secondBranch.typedef, stream);
|
stream.println (indent + "if (!" + disName + ')');
|
||||||
stream.println (indent + '}');
|
stream.println (indent + '{');
|
||||||
|
index = writeBranch (index, indent + " ", name, secondBranch.typedef, stream);
|
||||||
|
stream.println (indent + '}');
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return index;
|
return index;
|
||||||
|
@ -337,3 +337,5 @@ a947f40fb536e5b9e0aa210cf26abb430f80887a hs25-b26
|
|||||||
d4c2667846607042370760e23f64c3ab9350e60d jdk8-b87
|
d4c2667846607042370760e23f64c3ab9350e60d jdk8-b87
|
||||||
01d5f04e64dc2d64625b2db2056f5ed4de918a45 hs25-b29
|
01d5f04e64dc2d64625b2db2056f5ed4de918a45 hs25-b29
|
||||||
c4af77d2045476c56fbf3f914b336bb1b7cd18af hs25-b30
|
c4af77d2045476c56fbf3f914b336bb1b7cd18af hs25-b30
|
||||||
|
8482058e74bc8c1a890e6f3be3eff192dba6ce67 jdk8-b88
|
||||||
|
4ec91349972255650f97bedfd07e6423e02428cf hs25-b31
|
||||||
|
@ -1,22 +1,22 @@
|
|||||||
<html>
|
<html>
|
||||||
<head>
|
<head>
|
||||||
<title>
|
<title>
|
||||||
C2 Replay
|
Replay
|
||||||
</title>
|
</title>
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
|
|
||||||
<h1>C2 compiler replay</h1>
|
<h1>Compiler replay</h1>
|
||||||
<p>
|
<p>
|
||||||
The C2 compiler replay is a function to repeat the compiling process from a crashed java process in compiled method<br>
|
The compiler replay is a function to repeat the compiling process from a crashed java process in compiled method<br>
|
||||||
This function only exists in debug version of VM
|
This function only exists in debug version of VM
|
||||||
</p>
|
</p>
|
||||||
<h2>Usage</h2>
|
<h2>Usage</h2>
|
||||||
<pre>
|
<pre>
|
||||||
First, use SA to attach to the core file, if suceeded, do
|
First, use SA to attach to the core file, if succeeded, do
|
||||||
clhsdb>dumpreplaydata <address> | -a | <thread_id> [> replay.txt]
|
hsdb> dumpreplaydata <address> | -a | <thread_id> [> replay.txt]
|
||||||
create file replay.txt, address is address of Method, or nmethod(CodeBlob)
|
create file replay.txt, address is address of Method, or nmethod(CodeBlob)
|
||||||
clhsdb>buildreplayjars [all | boot | app]
|
hsdb> buildreplayjars [all | boot | app]
|
||||||
create files:
|
create files:
|
||||||
all:
|
all:
|
||||||
app.jar, boot.jar
|
app.jar, boot.jar
|
||||||
@ -26,16 +26,16 @@ First, use SA to attach to the core file, if suceeded, do
|
|||||||
app.jar
|
app.jar
|
||||||
exit SA now.
|
exit SA now.
|
||||||
Second, use the obtained replay text file, replay.txt and jar files, app.jar and boot.jar, using debug version of java
|
Second, use the obtained replay text file, replay.txt and jar files, app.jar and boot.jar, using debug version of java
|
||||||
java -Xbootclasspath/p:boot.jar -cp app.jar -XX:ReplayDataFile=<datafile> -XX:+ReplayCompiles ....
|
java -Xbootclasspath/p:boot.jar -cp app.jar -XX:ReplayDataFile=<datafile> -XX:+ReplayCompiles ....
|
||||||
This will replay the compiling process.
|
This will replay the compiling process.
|
||||||
|
|
||||||
With ReplayCompiles, the replay will recompile all the methods in app.jar, and in boot.jar to emulate the process in java app.
|
With ReplayCompiles, the replay will recompile all the methods in app.jar, and in boot.jar to emulate the process in java app.
|
||||||
|
|
||||||
notes:
|
notes:
|
||||||
1) Most time, we don't need the boot.jar which is the classes loaded from JDK. It will be only modified when an agent(JVMDI) is running and modifies the classes.
|
1) Most time, we don't need the boot.jar which is the classes loaded from JDK. It will be only modified when an agent(JVMDI) is running and modifies the classes.
|
||||||
2) If encounter error as "<flag>" not found, that means the SA is using a VMStructs which is different from the one with corefile. In this case, SA has a utility tool vmstructsdump which is located at agent/src/os/<os>/proc/<os_platform>
|
2) If encounter error as "<flag>" not found, that means the SA is using a VMStructs which is different from the one with corefile. In this case, SA has a utility tool vmstructsdump which is located at agent/src/os/<os>/proc/<os_platform>
|
||||||
|
|
||||||
Use this tool to dump VM type library:
|
Use this tool to dump VM type library:
|
||||||
vmstructsdump libjvm.so > <type_name>.db
|
vmstructsdump libjvm.so > <type_name>.db
|
||||||
|
|
||||||
set env SA_TYPEDB=<type_name>.db (refer different shell for set envs)
|
set env SA_TYPEDB=<type_name>.db (refer different shell for set envs)
|
@ -15,7 +15,7 @@ GUI tools. Command line HSDB (CLHSDB) tool is alternative to SA GUI tool HSDB.
|
|||||||
<p>
|
<p>
|
||||||
There is also JavaScript based SA command line interface called <a href="jsdb.html">jsdb</a>.
|
There is also JavaScript based SA command line interface called <a href="jsdb.html">jsdb</a>.
|
||||||
But, CLHSDB supports Unix shell-like (or dbx/gdb-like) command line interface with
|
But, CLHSDB supports Unix shell-like (or dbx/gdb-like) command line interface with
|
||||||
support for output redirection/appending (familiar >, >>), command history and so on.
|
support for output redirection/appending (familiar >, >>), command history and so on.
|
||||||
Each CLHSDB command can have zero or more arguments and optionally end with output redirection
|
Each CLHSDB command can have zero or more arguments and optionally end with output redirection
|
||||||
(or append) to a file. Commands may be stored in a file and run using <b>source</b> command.
|
(or append) to a file. Commands may be stored in a file and run using <b>source</b> command.
|
||||||
<b>help</b> command prints usage message for all supported commands (or a specific command)
|
<b>help</b> command prints usage message for all supported commands (or a specific command)
|
||||||
@ -49,7 +49,7 @@ Available commands:
|
|||||||
dumpheap [ file ] <font color="red">dump heap in hprof binary format</font>
|
dumpheap [ file ] <font color="red">dump heap in hprof binary format</font>
|
||||||
dumpideal -a | id <font color="red">dump ideal graph like debug flag -XX:+PrintIdeal</font>
|
dumpideal -a | id <font color="red">dump ideal graph like debug flag -XX:+PrintIdeal</font>
|
||||||
dumpilt -a | id <font color="red">dump inline tree for C2 compilation</font>
|
dumpilt -a | id <font color="red">dump inline tree for C2 compilation</font>
|
||||||
dumpreplaydata <address> | -a | <thread_id> [>replay.txt] <font color="red">dump replay data into a file</font>
|
dumpreplaydata <address> | -a | <thread_id> [>replay.txt] <font color="red">dump replay data into a file</font>
|
||||||
echo [ true | false ] <font color="red">turn on/off command echo mode</font>
|
echo [ true | false ] <font color="red">turn on/off command echo mode</font>
|
||||||
examine [ address/count ] | [ address,address] <font color="red">show contents of memory from given address</font>
|
examine [ address/count ] | [ address,address] <font color="red">show contents of memory from given address</font>
|
||||||
field [ type [ name fieldtype isStatic offset address ] ] <font color="red">print info about a field of HotSpot type</font>
|
field [ type [ name fieldtype isStatic offset address ] ] <font color="red">print info about a field of HotSpot type</font>
|
||||||
@ -96,11 +96,11 @@ Available commands:
|
|||||||
|
|
||||||
<h3>JavaScript integration</h3>
|
<h3>JavaScript integration</h3>
|
||||||
|
|
||||||
<p>Few CLHSDB commands are already implemented in JavaScript. It is possible to extend CLHSDB command set
|
<p>Few CLHSDB commands are already implemented in JavaScript. It is possible to extend CLHSDB command set
|
||||||
by implementing more commands in a JavaScript file and by loading it by <b>jsload</b> command. <b>jseval</b>
|
by implementing more commands in a JavaScript file and by loading it by <b>jsload</b> command. <b>jseval</b>
|
||||||
command may be used to evaluate arbitrary JavaScript expression from a string. Any JavaScript function
|
command may be used to evaluate arbitrary JavaScript expression from a string. Any JavaScript function
|
||||||
may be exposed as a CLHSDB command by registering it using JavaScript <b><code>registerCommand</code></b>
|
may be exposed as a CLHSDB command by registering it using JavaScript <b><code>registerCommand</code></b>
|
||||||
function. This function accepts command name, usage and name of the JavaScript implementation function
|
function. This function accepts command name, usage and name of the JavaScript implementation function
|
||||||
as arguments.
|
as arguments.
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
@ -127,11 +127,11 @@ hsdb> jsload test.js
|
|||||||
</code>
|
</code>
|
||||||
</pre>
|
</pre>
|
||||||
|
|
||||||
<h3>C2 Compilation Replay</h3>
|
<h3>Compilation Replay</h3>
|
||||||
<p>
|
<p>
|
||||||
When a java process crashes in compiled method, usually a core file is saved.
|
When a java process crashes in compiled method, usually a core file is saved.
|
||||||
The C2 replay function can reproduce the compiling process in the core.
|
The replay function can reproduce the compiling process in the core.
|
||||||
<a href="c2replay.html">c2replay.html</a>
|
<a href="cireplay.html">cireplay.html</a>
|
||||||
|
|
||||||
</body>
|
</body>
|
||||||
</html>
|
</html>
|
||||||
|
@ -93,10 +93,11 @@ public class ciEnv extends VMObject {
|
|||||||
CompileTask task = task();
|
CompileTask task = task();
|
||||||
Method method = task.method();
|
Method method = task.method();
|
||||||
int entryBci = task.osrBci();
|
int entryBci = task.osrBci();
|
||||||
|
int compLevel = task.compLevel();
|
||||||
Klass holder = method.getMethodHolder();
|
Klass holder = method.getMethodHolder();
|
||||||
out.println("compile " + holder.getName().asString() + " " +
|
out.println("compile " + holder.getName().asString() + " " +
|
||||||
OopUtilities.escapeString(method.getName().asString()) + " " +
|
OopUtilities.escapeString(method.getName().asString()) + " " +
|
||||||
method.getSignature().asString() + " " +
|
method.getSignature().asString() + " " +
|
||||||
entryBci);
|
entryBci + " " + compLevel);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -78,6 +78,8 @@ public class NMethod extends CodeBlob {
|
|||||||
current sweep traversal index. */
|
current sweep traversal index. */
|
||||||
private static CIntegerField stackTraversalMarkField;
|
private static CIntegerField stackTraversalMarkField;
|
||||||
|
|
||||||
|
private static CIntegerField compLevelField;
|
||||||
|
|
||||||
static {
|
static {
|
||||||
VM.registerVMInitializedObserver(new Observer() {
|
VM.registerVMInitializedObserver(new Observer() {
|
||||||
public void update(Observable o, Object data) {
|
public void update(Observable o, Object data) {
|
||||||
@ -113,7 +115,7 @@ public class NMethod extends CodeBlob {
|
|||||||
osrEntryPointField = type.getAddressField("_osr_entry_point");
|
osrEntryPointField = type.getAddressField("_osr_entry_point");
|
||||||
lockCountField = type.getJIntField("_lock_count");
|
lockCountField = type.getJIntField("_lock_count");
|
||||||
stackTraversalMarkField = type.getCIntegerField("_stack_traversal_mark");
|
stackTraversalMarkField = type.getCIntegerField("_stack_traversal_mark");
|
||||||
|
compLevelField = type.getCIntegerField("_comp_level");
|
||||||
pcDescSize = db.lookupType("PcDesc").getSize();
|
pcDescSize = db.lookupType("PcDesc").getSize();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -530,7 +532,7 @@ public class NMethod extends CodeBlob {
|
|||||||
out.println("compile " + holder.getName().asString() + " " +
|
out.println("compile " + holder.getName().asString() + " " +
|
||||||
OopUtilities.escapeString(method.getName().asString()) + " " +
|
OopUtilities.escapeString(method.getName().asString()) + " " +
|
||||||
method.getSignature().asString() + " " +
|
method.getSignature().asString() + " " +
|
||||||
getEntryBCI());
|
getEntryBCI() + " " + getCompLevel());
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -551,4 +553,5 @@ public class NMethod extends CodeBlob {
|
|||||||
private int getHandlerTableOffset() { return (int) handlerTableOffsetField.getValue(addr); }
|
private int getHandlerTableOffset() { return (int) handlerTableOffsetField.getValue(addr); }
|
||||||
private int getNulChkTableOffset() { return (int) nulChkTableOffsetField .getValue(addr); }
|
private int getNulChkTableOffset() { return (int) nulChkTableOffsetField .getValue(addr); }
|
||||||
private int getNMethodEndOffset() { return (int) nmethodEndOffsetField .getValue(addr); }
|
private int getNMethodEndOffset() { return (int) nmethodEndOffsetField .getValue(addr); }
|
||||||
|
private int getCompLevel() { return (int) compLevelField .getValue(addr); }
|
||||||
}
|
}
|
||||||
|
@ -46,10 +46,12 @@ public class CompileTask extends VMObject {
|
|||||||
Type type = db.lookupType("CompileTask");
|
Type type = db.lookupType("CompileTask");
|
||||||
methodField = type.getAddressField("_method");
|
methodField = type.getAddressField("_method");
|
||||||
osrBciField = new CIntField(type.getCIntegerField("_osr_bci"), 0);
|
osrBciField = new CIntField(type.getCIntegerField("_osr_bci"), 0);
|
||||||
|
compLevelField = new CIntField(type.getCIntegerField("_comp_level"), 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
private static AddressField methodField;
|
private static AddressField methodField;
|
||||||
private static CIntField osrBciField;
|
private static CIntField osrBciField;
|
||||||
|
private static CIntField compLevelField;
|
||||||
|
|
||||||
public CompileTask(Address addr) {
|
public CompileTask(Address addr) {
|
||||||
super(addr);
|
super(addr);
|
||||||
@ -63,4 +65,8 @@ public class CompileTask extends VMObject {
|
|||||||
public int osrBci() {
|
public int osrBci() {
|
||||||
return (int)osrBciField.getValue(getAddress());
|
return (int)osrBciField.getValue(getAddress());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public int compLevel() {
|
||||||
|
return (int)compLevelField.getValue(getAddress());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -117,8 +117,6 @@ public class JMap extends Tool {
|
|||||||
mode = MODE_HEAP_SUMMARY;
|
mode = MODE_HEAP_SUMMARY;
|
||||||
} else if (modeFlag.equals("-histo")) {
|
} else if (modeFlag.equals("-histo")) {
|
||||||
mode = MODE_HISTOGRAM;
|
mode = MODE_HISTOGRAM;
|
||||||
} else if (modeFlag.equals("-permstat")) {
|
|
||||||
mode = MODE_CLSTATS;
|
|
||||||
} else if (modeFlag.equals("-clstats")) {
|
} else if (modeFlag.equals("-clstats")) {
|
||||||
mode = MODE_CLSTATS;
|
mode = MODE_CLSTATS;
|
||||||
} else if (modeFlag.equals("-finalizerinfo")) {
|
} else if (modeFlag.equals("-finalizerinfo")) {
|
||||||
|
@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
|
|||||||
|
|
||||||
HS_MAJOR_VER=25
|
HS_MAJOR_VER=25
|
||||||
HS_MINOR_VER=0
|
HS_MINOR_VER=0
|
||||||
HS_BUILD_NUMBER=31
|
HS_BUILD_NUMBER=32
|
||||||
|
|
||||||
JDK_MAJOR_VER=1
|
JDK_MAJOR_VER=1
|
||||||
JDK_MINOR_VER=8
|
JDK_MINOR_VER=8
|
||||||
|
@ -52,7 +52,7 @@ CXX=cl.exe
|
|||||||
# improving the quality of crash log stack traces involving jvm.dll.
|
# improving the quality of crash log stack traces involving jvm.dll.
|
||||||
|
|
||||||
# These are always used in all compiles
|
# These are always used in all compiles
|
||||||
CXX_FLAGS=/nologo /W3 /WX
|
CXX_FLAGS=$(EXTRA_CFLAGS) /nologo /W3 /WX
|
||||||
|
|
||||||
# Let's add debug information when Full Debug Symbols is enabled
|
# Let's add debug information when Full Debug Symbols is enabled
|
||||||
!if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
|
!if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
|
||||||
|
@ -193,7 +193,7 @@ ifdef COOKED_BUILD_NUMBER
|
|||||||
MAKE_ARGS += JDK_BUILD_NUMBER=$(COOKED_BUILD_NUMBER)
|
MAKE_ARGS += JDK_BUILD_NUMBER=$(COOKED_BUILD_NUMBER)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
NMAKE= MAKEFLAGS= MFLAGS= nmake -NOLOGO
|
NMAKE= MAKEFLAGS= MFLAGS= EXTRA_CFLAGS="$(EXTRA_CFLAGS)" nmake -NOLOGO
|
||||||
ifndef SYSTEM_UNAME
|
ifndef SYSTEM_UNAME
|
||||||
SYSTEM_UNAME := $(shell uname)
|
SYSTEM_UNAME := $(shell uname)
|
||||||
export SYSTEM_UNAME
|
export SYSTEM_UNAME
|
||||||
|
193
hotspot/src/cpu/sparc/vm/compiledIC_sparc.cpp
Normal file
193
hotspot/src/cpu/sparc/vm/compiledIC_sparc.cpp
Normal file
@ -0,0 +1,193 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "precompiled.hpp"
|
||||||
|
#include "asm/macroAssembler.inline.hpp"
|
||||||
|
#include "code/compiledIC.hpp"
|
||||||
|
#include "code/icBuffer.hpp"
|
||||||
|
#include "code/nmethod.hpp"
|
||||||
|
#include "memory/resourceArea.hpp"
|
||||||
|
#include "runtime/mutexLocker.hpp"
|
||||||
|
#include "runtime/safepoint.hpp"
|
||||||
|
#ifdef COMPILER2
|
||||||
|
#include "opto/matcher.hpp"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Release the CompiledICHolder* associated with this call site is there is one.
|
||||||
|
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
|
||||||
|
// This call site might have become stale so inspect it carefully.
|
||||||
|
NativeCall* call = nativeCall_at(call_site->addr());
|
||||||
|
if (is_icholder_entry(call->destination())) {
|
||||||
|
NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
|
||||||
|
InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
|
||||||
|
// This call site might have become stale so inspect it carefully.
|
||||||
|
NativeCall* call = nativeCall_at(call_site->addr());
|
||||||
|
return is_icholder_entry(call->destination());
|
||||||
|
}
|
||||||
|
|
||||||
|
//-----------------------------------------------------------------------------
|
||||||
|
// High-level access to an inline cache. Guaranteed to be MT-safe.
|
||||||
|
|
||||||
|
CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
|
||||||
|
: _ic_call(call)
|
||||||
|
{
|
||||||
|
address ic_call = call->instruction_address();
|
||||||
|
|
||||||
|
assert(ic_call != NULL, "ic_call address must be set");
|
||||||
|
assert(nm != NULL, "must pass nmethod");
|
||||||
|
assert(nm->contains(ic_call), "must be in nmethod");
|
||||||
|
|
||||||
|
// Search for the ic_call at the given address.
|
||||||
|
RelocIterator iter(nm, ic_call, ic_call+1);
|
||||||
|
bool ret = iter.next();
|
||||||
|
assert(ret == true, "relocInfo must exist at this address");
|
||||||
|
assert(iter.addr() == ic_call, "must find ic_call");
|
||||||
|
if (iter.type() == relocInfo::virtual_call_type) {
|
||||||
|
virtual_call_Relocation* r = iter.virtual_call_reloc();
|
||||||
|
_is_optimized = false;
|
||||||
|
_value = nativeMovConstReg_at(r->cached_value());
|
||||||
|
} else {
|
||||||
|
assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
|
||||||
|
_is_optimized = true;
|
||||||
|
_value = NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
#define __ _masm.
|
||||||
|
void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
|
||||||
|
#ifdef COMPILER2
|
||||||
|
// Stub is fixed up when the corresponding call is converted from calling
|
||||||
|
// compiled code to calling interpreted code.
|
||||||
|
// set (empty), G5
|
||||||
|
// jmp -1
|
||||||
|
|
||||||
|
address mark = cbuf.insts_mark(); // Get mark within main instrs section.
|
||||||
|
|
||||||
|
MacroAssembler _masm(&cbuf);
|
||||||
|
|
||||||
|
address base =
|
||||||
|
__ start_a_stub(to_interp_stub_size()*2);
|
||||||
|
if (base == NULL) return; // CodeBuffer::expand failed.
|
||||||
|
|
||||||
|
// Static stub relocation stores the instruction address of the call.
|
||||||
|
__ relocate(static_stub_Relocation::spec(mark));
|
||||||
|
|
||||||
|
__ set_metadata(NULL, as_Register(Matcher::inline_cache_reg_encode()));
|
||||||
|
|
||||||
|
__ set_inst_mark();
|
||||||
|
AddressLiteral addrlit(-1);
|
||||||
|
__ JUMP(addrlit, G3, 0);
|
||||||
|
|
||||||
|
__ delayed()->nop();
|
||||||
|
|
||||||
|
// Update current stubs pointer and restore code_end.
|
||||||
|
__ end_a_stub();
|
||||||
|
#else
|
||||||
|
ShouldNotReachHere();
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
#undef __
|
||||||
|
|
||||||
|
int CompiledStaticCall::to_interp_stub_size() {
|
||||||
|
// This doesn't need to be accurate but it must be larger or equal to
|
||||||
|
// the real size of the stub.
|
||||||
|
return (NativeMovConstReg::instruction_size + // sethi/setlo;
|
||||||
|
NativeJump::instruction_size + // sethi; jmp; nop
|
||||||
|
(TraceJumps ? 20 * BytesPerInstWord : 0) );
|
||||||
|
}
|
||||||
|
|
||||||
|
// Relocation entries for call stub, compiled java to interpreter.
|
||||||
|
int CompiledStaticCall::reloc_to_interp_stub() {
|
||||||
|
return 10; // 4 in emit_java_to_interp + 1 in Java_Static_Call
|
||||||
|
}
|
||||||
|
|
||||||
|
void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
|
||||||
|
address stub = find_stub();
|
||||||
|
guarantee(stub != NULL, "stub not found");
|
||||||
|
|
||||||
|
if (TraceICs) {
|
||||||
|
ResourceMark rm;
|
||||||
|
tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
|
||||||
|
instruction_address(),
|
||||||
|
callee->name_and_sig_as_C_string());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Creation also verifies the object.
|
||||||
|
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
|
||||||
|
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
|
||||||
|
|
||||||
|
assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(),
|
||||||
|
"a) MT-unsafe modification of inline cache");
|
||||||
|
assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,
|
||||||
|
"b) MT-unsafe modification of inline cache");
|
||||||
|
|
||||||
|
// Update stub.
|
||||||
|
method_holder->set_data((intptr_t)callee());
|
||||||
|
jump->set_jump_destination(entry);
|
||||||
|
|
||||||
|
// Update jump to call.
|
||||||
|
set_destination_mt_safe(stub);
|
||||||
|
}
|
||||||
|
|
||||||
|
void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
|
||||||
|
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
|
||||||
|
// Reset stub.
|
||||||
|
address stub = static_stub->addr();
|
||||||
|
assert(stub != NULL, "stub not found");
|
||||||
|
// Creation also verifies the object.
|
||||||
|
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
|
||||||
|
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
|
||||||
|
method_holder->set_data(0);
|
||||||
|
jump->set_jump_destination((address)-1);
|
||||||
|
}
|
||||||
|
|
||||||
|
//-----------------------------------------------------------------------------
|
||||||
|
// Non-product mode code
|
||||||
|
#ifndef PRODUCT
|
||||||
|
|
||||||
|
void CompiledStaticCall::verify() {
|
||||||
|
// Verify call.
|
||||||
|
NativeCall::verify();
|
||||||
|
if (os::is_MP()) {
|
||||||
|
verify_alignment();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify stub.
|
||||||
|
address stub = find_stub();
|
||||||
|
assert(stub != NULL, "no stub found for static call");
|
||||||
|
// Creation also verifies the object.
|
||||||
|
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
|
||||||
|
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
|
||||||
|
|
||||||
|
// Verify state.
|
||||||
|
assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // !PRODUCT
|
@ -1655,53 +1655,6 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
|
|||||||
return ra_->C->scratch_emit_size(this);
|
return ra_->C->scratch_emit_size(this);
|
||||||
}
|
}
|
||||||
|
|
||||||
//=============================================================================
|
|
||||||
|
|
||||||
// emit call stub, compiled java to interpretor
|
|
||||||
void emit_java_to_interp(CodeBuffer &cbuf ) {
|
|
||||||
|
|
||||||
// Stub is fixed up when the corresponding call is converted from calling
|
|
||||||
// compiled code to calling interpreted code.
|
|
||||||
// set (empty), G5
|
|
||||||
// jmp -1
|
|
||||||
|
|
||||||
address mark = cbuf.insts_mark(); // get mark within main instrs section
|
|
||||||
|
|
||||||
MacroAssembler _masm(&cbuf);
|
|
||||||
|
|
||||||
address base =
|
|
||||||
__ start_a_stub(Compile::MAX_stubs_size);
|
|
||||||
if (base == NULL) return; // CodeBuffer::expand failed
|
|
||||||
|
|
||||||
// static stub relocation stores the instruction address of the call
|
|
||||||
__ relocate(static_stub_Relocation::spec(mark));
|
|
||||||
|
|
||||||
__ set_metadata(NULL, reg_to_register_object(Matcher::inline_cache_reg_encode()));
|
|
||||||
|
|
||||||
__ set_inst_mark();
|
|
||||||
AddressLiteral addrlit(-1);
|
|
||||||
__ JUMP(addrlit, G3, 0);
|
|
||||||
|
|
||||||
__ delayed()->nop();
|
|
||||||
|
|
||||||
// Update current stubs pointer and restore code_end.
|
|
||||||
__ end_a_stub();
|
|
||||||
}
|
|
||||||
|
|
||||||
// size of call stub, compiled java to interpretor
|
|
||||||
uint size_java_to_interp() {
|
|
||||||
// This doesn't need to be accurate but it must be larger or equal to
|
|
||||||
// the real size of the stub.
|
|
||||||
return (NativeMovConstReg::instruction_size + // sethi/setlo;
|
|
||||||
NativeJump::instruction_size + // sethi; jmp; nop
|
|
||||||
(TraceJumps ? 20 * BytesPerInstWord : 0) );
|
|
||||||
}
|
|
||||||
// relocation entries for call stub, compiled java to interpretor
|
|
||||||
uint reloc_java_to_interp() {
|
|
||||||
return 10; // 4 in emit_java_to_interp + 1 in Java_Static_Call
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
//=============================================================================
|
//=============================================================================
|
||||||
#ifndef PRODUCT
|
#ifndef PRODUCT
|
||||||
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
|
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
|
||||||
@ -2576,15 +2529,15 @@ encode %{
|
|||||||
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
|
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
|
||||||
// CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
|
// CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
|
||||||
// who we intended to call.
|
// who we intended to call.
|
||||||
if ( !_method ) {
|
if (!_method) {
|
||||||
emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type);
|
emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type);
|
||||||
} else if (_optimized_virtual) {
|
} else if (_optimized_virtual) {
|
||||||
emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type);
|
emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type);
|
||||||
} else {
|
} else {
|
||||||
emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type);
|
emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type);
|
||||||
}
|
}
|
||||||
if( _method ) { // Emit stub for static call
|
if (_method) { // Emit stub for static call.
|
||||||
emit_java_to_interp(cbuf);
|
CompiledStaticCall::emit_to_interp_stub(cbuf);
|
||||||
}
|
}
|
||||||
%}
|
%}
|
||||||
|
|
||||||
|
180
hotspot/src/cpu/x86/vm/compiledIC_x86.cpp
Normal file
180
hotspot/src/cpu/x86/vm/compiledIC_x86.cpp
Normal file
@ -0,0 +1,180 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "precompiled.hpp"
|
||||||
|
#include "asm/macroAssembler.inline.hpp"
|
||||||
|
#include "code/compiledIC.hpp"
|
||||||
|
#include "code/icBuffer.hpp"
|
||||||
|
#include "code/nmethod.hpp"
|
||||||
|
#include "memory/resourceArea.hpp"
|
||||||
|
#include "runtime/mutexLocker.hpp"
|
||||||
|
#include "runtime/safepoint.hpp"
|
||||||
|
|
||||||
|
// Release the CompiledICHolder* associated with this call site is there is one.
|
||||||
|
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
|
||||||
|
// This call site might have become stale so inspect it carefully.
|
||||||
|
NativeCall* call = nativeCall_at(call_site->addr());
|
||||||
|
if (is_icholder_entry(call->destination())) {
|
||||||
|
NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
|
||||||
|
InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
|
||||||
|
// This call site might have become stale so inspect it carefully.
|
||||||
|
NativeCall* call = nativeCall_at(call_site->addr());
|
||||||
|
return is_icholder_entry(call->destination());
|
||||||
|
}
|
||||||
|
|
||||||
|
//-----------------------------------------------------------------------------
|
||||||
|
// High-level access to an inline cache. Guaranteed to be MT-safe.
|
||||||
|
|
||||||
|
CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
|
||||||
|
: _ic_call(call)
|
||||||
|
{
|
||||||
|
address ic_call = call->instruction_address();
|
||||||
|
|
||||||
|
assert(ic_call != NULL, "ic_call address must be set");
|
||||||
|
assert(nm != NULL, "must pass nmethod");
|
||||||
|
assert(nm->contains(ic_call), "must be in nmethod");
|
||||||
|
|
||||||
|
// Search for the ic_call at the given address.
|
||||||
|
RelocIterator iter(nm, ic_call, ic_call+1);
|
||||||
|
bool ret = iter.next();
|
||||||
|
assert(ret == true, "relocInfo must exist at this address");
|
||||||
|
assert(iter.addr() == ic_call, "must find ic_call");
|
||||||
|
if (iter.type() == relocInfo::virtual_call_type) {
|
||||||
|
virtual_call_Relocation* r = iter.virtual_call_reloc();
|
||||||
|
_is_optimized = false;
|
||||||
|
_value = nativeMovConstReg_at(r->cached_value());
|
||||||
|
} else {
|
||||||
|
assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
|
||||||
|
_is_optimized = true;
|
||||||
|
_value = NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
#define __ _masm.
|
||||||
|
void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
|
||||||
|
// Stub is fixed up when the corresponding call is converted from
|
||||||
|
// calling compiled code to calling interpreted code.
|
||||||
|
// movq rbx, 0
|
||||||
|
// jmp -5 # to self
|
||||||
|
|
||||||
|
address mark = cbuf.insts_mark(); // Get mark within main instrs section.
|
||||||
|
|
||||||
|
// Note that the code buffer's insts_mark is always relative to insts.
|
||||||
|
// That's why we must use the macroassembler to generate a stub.
|
||||||
|
MacroAssembler _masm(&cbuf);
|
||||||
|
|
||||||
|
address base =
|
||||||
|
__ start_a_stub(to_interp_stub_size()*2);
|
||||||
|
if (base == NULL) return; // CodeBuffer::expand failed.
|
||||||
|
// Static stub relocation stores the instruction address of the call.
|
||||||
|
__ relocate(static_stub_Relocation::spec(mark), Assembler::imm_operand);
|
||||||
|
// Static stub relocation also tags the Method* in the code-stream.
|
||||||
|
__ mov_metadata(rbx, (Metadata*) NULL); // Method is zapped till fixup time.
|
||||||
|
// This is recognized as unresolved by relocs/nativeinst/ic code.
|
||||||
|
__ jump(RuntimeAddress(__ pc()));
|
||||||
|
|
||||||
|
// Update current stubs pointer and restore insts_end.
|
||||||
|
__ end_a_stub();
|
||||||
|
}
|
||||||
|
#undef __
|
||||||
|
|
||||||
|
int CompiledStaticCall::to_interp_stub_size() {
|
||||||
|
return NOT_LP64(10) // movl; jmp
|
||||||
|
LP64_ONLY(15); // movq (1+1+8); jmp (1+4)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Relocation entries for call stub, compiled java to interpreter.
|
||||||
|
int CompiledStaticCall::reloc_to_interp_stub() {
|
||||||
|
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
|
||||||
|
}
|
||||||
|
|
||||||
|
void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
|
||||||
|
address stub = find_stub();
|
||||||
|
guarantee(stub != NULL, "stub not found");
|
||||||
|
|
||||||
|
if (TraceICs) {
|
||||||
|
ResourceMark rm;
|
||||||
|
tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
|
||||||
|
instruction_address(),
|
||||||
|
callee->name_and_sig_as_C_string());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Creation also verifies the object.
|
||||||
|
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
|
||||||
|
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
|
||||||
|
|
||||||
|
assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(),
|
||||||
|
"a) MT-unsafe modification of inline cache");
|
||||||
|
assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,
|
||||||
|
"b) MT-unsafe modification of inline cache");
|
||||||
|
|
||||||
|
// Update stub.
|
||||||
|
method_holder->set_data((intptr_t)callee());
|
||||||
|
jump->set_jump_destination(entry);
|
||||||
|
|
||||||
|
// Update jump to call.
|
||||||
|
set_destination_mt_safe(stub);
|
||||||
|
}
|
||||||
|
|
||||||
|
void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
|
||||||
|
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
|
||||||
|
// Reset stub.
|
||||||
|
address stub = static_stub->addr();
|
||||||
|
assert(stub != NULL, "stub not found");
|
||||||
|
// Creation also verifies the object.
|
||||||
|
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
|
||||||
|
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
|
||||||
|
method_holder->set_data(0);
|
||||||
|
jump->set_jump_destination((address)-1);
|
||||||
|
}
|
||||||
|
|
||||||
|
//-----------------------------------------------------------------------------
|
||||||
|
// Non-product mode code
|
||||||
|
#ifndef PRODUCT
|
||||||
|
|
||||||
|
void CompiledStaticCall::verify() {
|
||||||
|
// Verify call.
|
||||||
|
NativeCall::verify();
|
||||||
|
if (os::is_MP()) {
|
||||||
|
verify_alignment();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify stub.
|
||||||
|
address stub = find_stub();
|
||||||
|
assert(stub != NULL, "no stub found for static call");
|
||||||
|
// Creation also verifies the object.
|
||||||
|
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
|
||||||
|
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
|
||||||
|
|
||||||
|
// Verify state.
|
||||||
|
assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // !PRODUCT
|
@ -1256,43 +1256,6 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//=============================================================================
|
|
||||||
|
|
||||||
// emit call stub, compiled java to interpreter
|
|
||||||
void emit_java_to_interp(CodeBuffer &cbuf ) {
|
|
||||||
// Stub is fixed up when the corresponding call is converted from calling
|
|
||||||
// compiled code to calling interpreted code.
|
|
||||||
// mov rbx,0
|
|
||||||
// jmp -1
|
|
||||||
|
|
||||||
address mark = cbuf.insts_mark(); // get mark within main instrs section
|
|
||||||
|
|
||||||
// Note that the code buffer's insts_mark is always relative to insts.
|
|
||||||
// That's why we must use the macroassembler to generate a stub.
|
|
||||||
MacroAssembler _masm(&cbuf);
|
|
||||||
|
|
||||||
address base =
|
|
||||||
__ start_a_stub(Compile::MAX_stubs_size);
|
|
||||||
if (base == NULL) return; // CodeBuffer::expand failed
|
|
||||||
// static stub relocation stores the instruction address of the call
|
|
||||||
__ relocate(static_stub_Relocation::spec(mark), RELOC_IMM32);
|
|
||||||
// static stub relocation also tags the Method* in the code-stream.
|
|
||||||
__ mov_metadata(rbx, (Metadata*)NULL); // method is zapped till fixup time
|
|
||||||
// This is recognized as unresolved by relocs/nativeInst/ic code
|
|
||||||
__ jump(RuntimeAddress(__ pc()));
|
|
||||||
|
|
||||||
__ end_a_stub();
|
|
||||||
// Update current stubs pointer and restore insts_end.
|
|
||||||
}
|
|
||||||
// size of call stub, compiled java to interpretor
|
|
||||||
uint size_java_to_interp() {
|
|
||||||
return 10; // movl; jmp
|
|
||||||
}
|
|
||||||
// relocation entries for call stub, compiled java to interpretor
|
|
||||||
uint reloc_java_to_interp() {
|
|
||||||
return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call
|
|
||||||
}
|
|
||||||
|
|
||||||
//=============================================================================
|
//=============================================================================
|
||||||
#ifndef PRODUCT
|
#ifndef PRODUCT
|
||||||
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
|
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
|
||||||
@ -1909,8 +1872,8 @@ encode %{
|
|||||||
emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
|
emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
|
||||||
static_call_Relocation::spec(), RELOC_IMM32 );
|
static_call_Relocation::spec(), RELOC_IMM32 );
|
||||||
}
|
}
|
||||||
if (_method) { // Emit stub for static call
|
if (_method) { // Emit stub for static call.
|
||||||
emit_java_to_interp(cbuf);
|
CompiledStaticCall::emit_to_interp_stub(cbuf);
|
||||||
}
|
}
|
||||||
%}
|
%}
|
||||||
|
|
||||||
|
@ -1387,48 +1387,6 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const
|
|||||||
return (offset < 0x80) ? 5 : 8; // REX
|
return (offset < 0x80) ? 5 : 8; // REX
|
||||||
}
|
}
|
||||||
|
|
||||||
//=============================================================================
|
|
||||||
|
|
||||||
// emit call stub, compiled java to interpreter
|
|
||||||
void emit_java_to_interp(CodeBuffer& cbuf)
|
|
||||||
{
|
|
||||||
// Stub is fixed up when the corresponding call is converted from
|
|
||||||
// calling compiled code to calling interpreted code.
|
|
||||||
// movq rbx, 0
|
|
||||||
// jmp -5 # to self
|
|
||||||
|
|
||||||
address mark = cbuf.insts_mark(); // get mark within main instrs section
|
|
||||||
|
|
||||||
// Note that the code buffer's insts_mark is always relative to insts.
|
|
||||||
// That's why we must use the macroassembler to generate a stub.
|
|
||||||
MacroAssembler _masm(&cbuf);
|
|
||||||
|
|
||||||
address base =
|
|
||||||
__ start_a_stub(Compile::MAX_stubs_size);
|
|
||||||
if (base == NULL) return; // CodeBuffer::expand failed
|
|
||||||
// static stub relocation stores the instruction address of the call
|
|
||||||
__ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64);
|
|
||||||
// static stub relocation also tags the Method* in the code-stream.
|
|
||||||
__ mov_metadata(rbx, (Metadata*) NULL); // method is zapped till fixup time
|
|
||||||
// This is recognized as unresolved by relocs/nativeinst/ic code
|
|
||||||
__ jump(RuntimeAddress(__ pc()));
|
|
||||||
|
|
||||||
// Update current stubs pointer and restore insts_end.
|
|
||||||
__ end_a_stub();
|
|
||||||
}
|
|
||||||
|
|
||||||
// size of call stub, compiled java to interpretor
|
|
||||||
uint size_java_to_interp()
|
|
||||||
{
|
|
||||||
return 15; // movq (1+1+8); jmp (1+4)
|
|
||||||
}
|
|
||||||
|
|
||||||
// relocation entries for call stub, compiled java to interpretor
|
|
||||||
uint reloc_java_to_interp()
|
|
||||||
{
|
|
||||||
return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call
|
|
||||||
}
|
|
||||||
|
|
||||||
//=============================================================================
|
//=============================================================================
|
||||||
#ifndef PRODUCT
|
#ifndef PRODUCT
|
||||||
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
|
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
|
||||||
@ -2078,8 +2036,8 @@ encode %{
|
|||||||
RELOC_DISP32);
|
RELOC_DISP32);
|
||||||
}
|
}
|
||||||
if (_method) {
|
if (_method) {
|
||||||
// Emit stub for static call
|
// Emit stub for static call.
|
||||||
emit_java_to_interp(cbuf);
|
CompiledStaticCall::emit_to_interp_stub(cbuf);
|
||||||
}
|
}
|
||||||
%}
|
%}
|
||||||
|
|
||||||
|
122
hotspot/src/cpu/zero/vm/compiledIC_zero.cpp
Normal file
122
hotspot/src/cpu/zero/vm/compiledIC_zero.cpp
Normal file
@ -0,0 +1,122 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "precompiled.hpp"
|
||||||
|
#include "classfile/systemDictionary.hpp"
|
||||||
|
#include "code/codeCache.hpp"
|
||||||
|
#include "code/compiledIC.hpp"
|
||||||
|
#include "code/icBuffer.hpp"
|
||||||
|
#include "code/nmethod.hpp"
|
||||||
|
#include "code/vtableStubs.hpp"
|
||||||
|
#include "interpreter/interpreter.hpp"
|
||||||
|
#include "interpreter/linkResolver.hpp"
|
||||||
|
#include "memory/metadataFactory.hpp"
|
||||||
|
#include "memory/oopFactory.hpp"
|
||||||
|
#include "oops/method.hpp"
|
||||||
|
#include "oops/oop.inline.hpp"
|
||||||
|
#include "oops/symbol.hpp"
|
||||||
|
#include "runtime/icache.hpp"
|
||||||
|
#include "runtime/sharedRuntime.hpp"
|
||||||
|
#include "runtime/stubRoutines.hpp"
|
||||||
|
#include "utilities/events.hpp"
|
||||||
|
|
||||||
|
|
||||||
|
// Release the CompiledICHolder* associated with this call site is there is one.
|
||||||
|
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
|
||||||
|
// This call site might have become stale so inspect it carefully.
|
||||||
|
NativeCall* call = nativeCall_at(call_site->addr());
|
||||||
|
if (is_icholder_entry(call->destination())) {
|
||||||
|
NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
|
||||||
|
InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
|
||||||
|
// This call site might have become stale so inspect it carefully.
|
||||||
|
NativeCall* call = nativeCall_at(call_site->addr());
|
||||||
|
return is_icholder_entry(call->destination());
|
||||||
|
}
|
||||||
|
|
||||||
|
//-----------------------------------------------------------------------------
|
||||||
|
// High-level access to an inline cache. Guaranteed to be MT-safe.
|
||||||
|
|
||||||
|
CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
|
||||||
|
: _ic_call(call)
|
||||||
|
{
|
||||||
|
address ic_call = call->instruction_address();
|
||||||
|
|
||||||
|
assert(ic_call != NULL, "ic_call address must be set");
|
||||||
|
assert(nm != NULL, "must pass nmethod");
|
||||||
|
assert(nm->contains(ic_call), "must be in nmethod");
|
||||||
|
|
||||||
|
// Search for the ic_call at the given address.
|
||||||
|
RelocIterator iter(nm, ic_call, ic_call+1);
|
||||||
|
bool ret = iter.next();
|
||||||
|
assert(ret == true, "relocInfo must exist at this address");
|
||||||
|
assert(iter.addr() == ic_call, "must find ic_call");
|
||||||
|
if (iter.type() == relocInfo::virtual_call_type) {
|
||||||
|
virtual_call_Relocation* r = iter.virtual_call_reloc();
|
||||||
|
_is_optimized = false;
|
||||||
|
_value = nativeMovConstReg_at(r->cached_value());
|
||||||
|
} else {
|
||||||
|
assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
|
||||||
|
_is_optimized = true;
|
||||||
|
_value = NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
|
||||||
|
ShouldNotReachHere(); // Only needed for COMPILER2.
|
||||||
|
}
|
||||||
|
|
||||||
|
int CompiledStaticCall::to_interp_stub_size() {
|
||||||
|
ShouldNotReachHere(); // Only needed for COMPILER2.
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Relocation entries for call stub, compiled java to interpreter.
|
||||||
|
int CompiledStaticCall::reloc_to_interp_stub() {
|
||||||
|
ShouldNotReachHere(); // Only needed for COMPILER2.
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
|
||||||
|
ShouldNotReachHere(); // Only needed for COMPILER2.
|
||||||
|
}
|
||||||
|
|
||||||
|
void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
|
||||||
|
ShouldNotReachHere(); // Only needed for COMPILER2.
|
||||||
|
}
|
||||||
|
|
||||||
|
//-----------------------------------------------------------------------------
|
||||||
|
// Non-product mode code.
|
||||||
|
#ifndef PRODUCT
|
||||||
|
|
||||||
|
void CompiledStaticCall::verify() {
|
||||||
|
ShouldNotReachHere(); // Only needed for COMPILER2.
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // !PRODUCT
|
@ -1230,10 +1230,6 @@ bool os::dll_build_name(char* buffer, size_t buflen,
   return retval;
 }
 
-const char* os::get_current_directory(char *buf, int buflen) {
-  return getcwd(buf, buflen);
-}
-
 // check if addr is inside libjvm.so
 bool os::address_is_in_vm(address addr) {
   static address libjvm_base_addr;
@ -1663,10 +1663,6 @@ bool os::dll_build_name(char* buffer, size_t buflen,
   return retval;
 }
 
-const char* os::get_current_directory(char *buf, int buflen) {
-  return getcwd(buf, buflen);
-}
-
 // check if addr is inside libjvm.so
 bool os::address_is_in_vm(address addr) {
   static address libjvm_base_addr;
@ -251,3 +251,11 @@ bool os::has_allocatable_memory_limit(julong* limit) {
   return true;
 #endif
 }
+
+const char* os::get_current_directory(char *buf, size_t buflen) {
+  return getcwd(buf, buflen);
+}
+
+FILE* os::open(int fd, const char* mode) {
+  return ::fdopen(fd, mode);
+}
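The new `FILE* os::open(int fd, ...)` overload wraps an already-open POSIX descriptor in buffered stdio. A minimal usage sketch (illustrative only; the path and parsing loop are hypothetical, not from this patch):

  #include <fcntl.h>
  #include <cstdio>
  // Wrap a raw descriptor so later code can use buffered stdio calls.
  int fd = ::open("/tmp/replay.txt", O_RDONLY);   // POSIX descriptor
  FILE* f = os::open(fd, "r");                    // buffered wrapper via fdopen(3)
  if (f != NULL) {
    char line[256];
    while (fgets(line, sizeof(line), f) != NULL) { /* parse */ }
    fclose(f);                                    // also closes fd
  }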
@ -1916,10 +1916,6 @@ bool os::dll_build_name(char* buffer, size_t buflen,
   return retval;
 }
 
-const char* os::get_current_directory(char *buf, int buflen) {
-  return getcwd(buf, buflen);
-}
-
 // check if addr is inside libjvm.so
 bool os::address_is_in_vm(address addr) {
   static address libjvm_base_addr;
@ -1221,8 +1221,10 @@ bool os::dll_build_name(char *buffer, size_t buflen,
 
 // Needs to be in os specific directory because windows requires another
 // header file <direct.h>
-const char* os::get_current_directory(char *buf, int buflen) {
-  return _getcwd(buf, buflen);
+const char* os::get_current_directory(char *buf, size_t buflen) {
+  int n = static_cast<int>(buflen);
+  if (buflen > INT_MAX) n = INT_MAX;
+  return _getcwd(buf, n);
 }
 
 //-----------------------------------------------------------
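The Windows CRT's `_getcwd` takes an `int` size, so the widened `size_t buflen` must be capped before the narrowing conversion. A minimal standalone sketch of the same clamp-then-narrow idiom (the helper name is hypothetical, not part of the patch):

  #include <climits>
  #include <cstddef>
  // Cap a size_t length at INT_MAX before handing it to an int-only API.
  static int clamp_to_int(size_t len) {
    return (len > INT_MAX) ? INT_MAX : static_cast<int>(len);
  }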
@ -4098,6 +4100,10 @@ int os::open(const char *path, int oflag, int mode) {
   return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
 }
 
+FILE* os::open(int fd, const char* mode) {
+  return ::_fdopen(fd, mode);
+}
+
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   WIN32_FIND_DATA fd;
@ -213,6 +213,7 @@ int main(int argc, char *argv[])
   AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._HPP_file._name));
   AD.addInclude(AD._CPP_file, "memory/allocation.inline.hpp");
   AD.addInclude(AD._CPP_file, "asm/macroAssembler.inline.hpp");
+  AD.addInclude(AD._CPP_file, "code/compiledIC.hpp");
   AD.addInclude(AD._CPP_file, "code/vmreg.hpp");
   AD.addInclude(AD._CPP_file, "gc_interface/collectedHeap.inline.hpp");
   AD.addInclude(AD._CPP_file, "oops/compiledICHolder.hpp");
@ -1150,23 +1150,9 @@ void ciEnv::record_out_of_memory_failure() {
   record_method_not_compilable("out of memory");
 }
 
-fileStream* ciEnv::_replay_data_stream = NULL;
-
-void ciEnv::dump_replay_data() {
+void ciEnv::dump_replay_data(outputStream* out) {
   VM_ENTRY_MARK;
   MutexLocker ml(Compile_lock);
-  if (_replay_data_stream == NULL) {
-    _replay_data_stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(ReplayDataFile);
-    if (_replay_data_stream == NULL) {
-      fatal(err_msg("Can't open %s for replay data", ReplayDataFile));
-    }
-  }
-  dump_replay_data(_replay_data_stream);
-}
-
-
-void ciEnv::dump_replay_data(outputStream* out) {
-  ASSERT_IN_VM;
   ResourceMark rm;
 #if INCLUDE_JVMTI
   out->print_cr("JvmtiExport can_access_local_variables %d", _jvmti_can_access_local_variables);
@ -1179,13 +1165,15 @@ void ciEnv::dump_replay_data(outputStream* out) {
   for (int i = 0; i < objects->length(); i++) {
     objects->at(i)->dump_replay_data(out);
   }
-  Method* method = task()->method();
-  int entry_bci = task()->osr_bci();
+  CompileTask* task = this->task();
+  Method* method = task->method();
+  int entry_bci = task->osr_bci();
+  int comp_level = task->comp_level();
   // Klass holder = method->method_holder();
-  out->print_cr("compile %s %s %s %d",
+  out->print_cr("compile %s %s %s %d %d",
                 method->klass_name()->as_quoted_ascii(),
                 method->name()->as_quoted_ascii(),
                 method->signature()->as_quoted_ascii(),
-                entry_bci);
+                entry_bci, comp_level);
   out->flush();
 }
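For concreteness, a record in the extended format might look like the following (hypothetical method; assuming `InvocationEntryBci` prints as -1 for a non-OSR compile and `CompLevel_full_optimization` prints as 4):

  compile java/lang/String hashCode ()I -1 4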
@ -46,8 +46,6 @@ class ciEnv : StackObj {
   friend class CompileBroker;
   friend class Dependencies;  // for get_object, during logging
 
-  static fileStream* _replay_data_stream;
-
 private:
   Arena* _arena;       // Alias for _ciEnv_arena except in init_shared_objects()
   Arena _ciEnv_arena;
@ -451,10 +449,6 @@ public:
   // RedefineClasses support
   void metadata_do(void f(Metadata*)) { _factory->metadata_do(f); }
 
-  // Dump the compilation replay data for this ciEnv to
-  // ReplayDataFile, creating the file if needed.
-  void dump_replay_data();
-
   // Dump the compilation replay data for the ciEnv to the stream.
   void dump_replay_data(outputStream* out);
 };
@ -196,7 +196,6 @@ class ciMethod : public ciMetadata {
   // Analysis and profiling.
   //
   // Usage note: liveness_at_bci and init_vars should be wrapped in ResourceMarks.
-  bool uses_monitors() const { return _uses_monitors; } // this one should go away, it has a misleading name
   bool has_monitor_bytecodes() const { return _uses_monitors; }
   bool has_balanced_monitors();
 
@ -89,7 +89,7 @@ class CompileReplay : public StackObj {
     loader = Handle(thread, SystemDictionary::java_system_loader());
     stream = fopen(filename, "rt");
     if (stream == NULL) {
-      fprintf(stderr, "Can't open replay file %s\n", filename);
+      fprintf(stderr, "ERROR: Can't open replay file %s\n", filename);
     }
     buffer_length = 32;
     buffer = NEW_RESOURCE_ARRAY(char, buffer_length);
@ -327,7 +327,6 @@ class CompileReplay : public StackObj {
     if (had_error()) {
       tty->print_cr("Error while parsing line %d: %s\n", line_no, _error_message);
       tty->print_cr("%s", buffer);
-      assert(false, "error");
       return;
     }
     pos = 0;
@ -370,11 +369,47 @@ class CompileReplay : public StackObj {
     }
   }
 
-  // compile <klass> <name> <signature> <entry_bci>
+  // validation of comp_level
+  bool is_valid_comp_level(int comp_level) {
+    const int msg_len = 256;
+    char* msg = NULL;
+    if (!is_compile(comp_level)) {
+      msg = NEW_RESOURCE_ARRAY(char, msg_len);
+      jio_snprintf(msg, msg_len, "%d isn't compilation level", comp_level);
+    } else if (!TieredCompilation && (comp_level != CompLevel_highest_tier)) {
+      msg = NEW_RESOURCE_ARRAY(char, msg_len);
+      switch (comp_level) {
+        case CompLevel_simple:
+          jio_snprintf(msg, msg_len, "compilation level %d requires Client VM or TieredCompilation", comp_level);
+          break;
+        case CompLevel_full_optimization:
+          jio_snprintf(msg, msg_len, "compilation level %d requires Server VM", comp_level);
+          break;
+        default:
+          jio_snprintf(msg, msg_len, "compilation level %d requires TieredCompilation", comp_level);
+      }
+    }
+    if (msg != NULL) {
+      report_error(msg);
+      return false;
+    }
+    return true;
+  }
+
+  // compile <klass> <name> <signature> <entry_bci> <comp_level>
   void process_compile(TRAPS) {
     // methodHandle method;
     Method* method = parse_method(CHECK);
     int entry_bci = parse_int("entry_bci");
+    const char* comp_level_label = "comp_level";
+    int comp_level = parse_int(comp_level_label);
+    // old version w/o comp_level
+    if (had_error() && (error_message() == comp_level_label)) {
+      comp_level = CompLevel_full_optimization;
+    }
+    if (!is_valid_comp_level(comp_level)) {
+      return;
+    }
    Klass* k = method->method_holder();
    ((InstanceKlass*)k)->initialize(THREAD);
    if (HAS_PENDING_EXCEPTION) {
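For reference, the compilation levels being validated here follow HotSpot's CompLevel scale. A paraphrased sketch of the enum (stated from memory of the JDK 8 sources, not taken from this patch; treat the exact comments as illustrative):

  enum CompLevel {
    CompLevel_none              = 0,  // interpreter only
    CompLevel_simple            = 1,  // C1
    CompLevel_limited_profile   = 2,  // C1 with basic counters
    CompLevel_full_profile      = 3,  // C1 with full MDO profiling
    CompLevel_full_optimization = 4   // C2
  };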
@ -389,12 +424,12 @@ class CompileReplay : public StackObj {
       }
     }
     // Make sure the existence of a prior compile doesn't stop this one
-    nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, CompLevel_full_optimization, true) : method->code();
+    nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code();
     if (nm != NULL) {
       nm->make_not_entrant();
     }
     replay_state = this;
-    CompileBroker::compile_method(method, entry_bci, CompLevel_full_optimization,
+    CompileBroker::compile_method(method, entry_bci, comp_level,
                                   methodHandle(), 0, "replay", THREAD);
     replay_state = NULL;
     reset();
@ -551,7 +586,7 @@ class CompileReplay : public StackObj {
         if (parsed_two_word == i) continue;
 
       default:
-        ShouldNotReachHere();
+        fatal(err_msg_res("Unexpected tag: %d", cp->tag_at(i).value()));
         break;
       }
 
@ -819,6 +854,11 @@ int ciReplay::replay_impl(TRAPS) {
     ReplaySuppressInitializers = 1;
   }
 
+  if (FLAG_IS_DEFAULT(ReplayDataFile)) {
+    tty->print_cr("ERROR: no compiler replay data file specified (use -XX:ReplayDataFile=replay_pid12345.txt).");
+    return 1;
+  }
+
   // Load and parse the replay data
   CompileReplay rp(ReplayDataFile, THREAD);
   int exit_code = 0;
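An invocation of the replay machinery typically looks along these lines (illustrative; the Replay* flags are diagnostic and, depending on the build, may require a debug VM and/or -XX:+UnlockDiagnosticVMOptions):

  java -XX:+UnlockDiagnosticVMOptions -XX:+ReplayCompiles \
       -XX:ReplayDataFile=replay_pid12345.txt MyApp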
@ -1345,9 +1345,10 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
       tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_class_counter, buffer);
       // Preload all classes to get around uncommon traps
       // Iterate over all methods in class
+      int comp_level = CompilationPolicy::policy()->initial_compile_level();
       for (int n = 0; n < k->methods()->length(); n++) {
         methodHandle m (THREAD, k->methods()->at(n));
-        if (CompilationPolicy::can_be_compiled(m)) {
+        if (CompilationPolicy::can_be_compiled(m, comp_level)) {
 
           if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
             // Give sweeper a chance to keep up with CTW
@ -1356,7 +1357,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
             _codecache_sweep_counter = 0;
           }
           // Force compilation
-          CompileBroker::compile_method(m, InvocationEntryBci, CompilationPolicy::policy()->initial_compile_level(),
+          CompileBroker::compile_method(m, InvocationEntryBci, comp_level,
                                         methodHandle(), 0, "CTW", THREAD);
           if (HAS_PENDING_EXCEPTION) {
             clear_pending_exception_if_not_oom(CHECK);
@ -53,6 +53,7 @@
 #include "classfile/metadataOnStackMark.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
+#include "memory/gcLocker.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
@ -65,17 +66,19 @@
 
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
 
-ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) :
+ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
   _class_loader(h_class_loader()),
   _is_anonymous(is_anonymous), _keep_alive(is_anonymous), // initially
   _metaspace(NULL), _unloading(false), _klasses(NULL),
   _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL),
-  _next(NULL), _dependencies(),
+  _next(NULL), _dependencies(dependencies),
   _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) {
     // empty
 }
 
 void ClassLoaderData::init_dependencies(TRAPS) {
+  assert(!Universe::is_fully_initialized(), "should only be called when initializing");
+  assert(is_the_null_class_loader_data(), "should only call this for the null class loader");
   _dependencies.init(CHECK);
 }
 
@ -429,7 +432,7 @@ void ClassLoaderData::free_deallocate_list() {
 // These anonymous class loaders are to contain classes used for JSR292
 ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) {
   // Add a new class loader data to the graph.
-  return ClassLoaderDataGraph::add(NULL, loader, CHECK_NULL);
+  return ClassLoaderDataGraph::add(loader, true, CHECK_NULL);
 }
 
 const char* ClassLoaderData::loader_name() {
@ -501,19 +504,22 @@ ClassLoaderData* ClassLoaderDataGraph::_head = NULL;
 ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL;
 ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL;
 
 
 // Add a new class loader data node to the list.  Assign the newly created
 // ClassLoaderData into the java/lang/ClassLoader object as a hidden field
-ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle loader, TRAPS) {
-  // Not assigned a class loader data yet.
-  // Create one.
-  ClassLoaderData* *list_head = &_head;
-  ClassLoaderData* next = _head;
-
-  bool is_anonymous = (cld_addr == NULL);
-  ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous);
-
-  if (cld_addr != NULL) {
+ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRAPS) {
+  // We need to allocate all the oops for the ClassLoaderData before allocating the
+  // actual ClassLoaderData object.
+  ClassLoaderData::Dependencies dependencies(CHECK_NULL);
+
+  No_Safepoint_Verifier no_safepoints; // we mustn't GC until we've installed the
+                                       // ClassLoaderData in the graph since the CLD
+                                       // contains unhandled oops
+
+  ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous, dependencies);
+
+  if (!is_anonymous) {
+    ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader());
     // First, Atomically set it
     ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL);
     if (old != NULL) {
@ -525,6 +531,9 @@ ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle lo
 
   // We won the race, and therefore the task of adding the data to the list of
   // class loader data
+  ClassLoaderData** list_head = &_head;
+  ClassLoaderData* next = _head;
+
   do {
     cld->set_next(next);
     ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next);
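The do/cmpxchg loop above is the classic lock-free list prepend. A minimal standalone rendering of the same idiom, using C++11 atomics rather than HotSpot's Atomic::cmpxchg_ptr (illustrative sketch, not HotSpot code):

  #include <atomic>

  struct Node { Node* next; };
  std::atomic<Node*> head{nullptr};

  // Lock-free prepend: publish 'n' as the new head, retrying if another
  // thread changed 'head' underneath us.
  void push(Node* n) {
    Node* expected = head.load();
    do {
      n->next = expected;                               // link to current head
    } while (!head.compare_exchange_weak(expected, n)); // 'expected' is refreshed on failure
  }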
@ -537,10 +546,6 @@ ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle lo
                      cld->loader_name());
         tty->print_cr("]");
       }
-      // Create dependencies after the CLD is added to the list. Otherwise,
-      // the GC GC will not find the CLD and the _class_loader field will
-      // not be updated.
-      cld->init_dependencies(CHECK_NULL);
       return cld;
     }
     next = exchanged;
@ -671,6 +676,8 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
       dead->unload();
       data = data->next();
       // Remove from loader list.
+      // This class loader data will no longer be found
+      // in the ClassLoaderDataGraph.
       if (prev != NULL) {
         prev->set_next(data);
       } else {
@ -692,6 +699,7 @@ void ClassLoaderDataGraph::purge() {
     next = purge_me->next();
     delete purge_me;
   }
+  Metaspace::purge();
 }
 
 // CDS support
@ -62,7 +62,7 @@ class ClassLoaderDataGraph : public AllStatic {
   // CMS support.
   static ClassLoaderData* _saved_head;
 
-  static ClassLoaderData* add(ClassLoaderData** loader_data_addr, Handle class_loader, TRAPS);
+  static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
 public:
   static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
   static void purge();
@ -100,6 +100,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
              Thread* THREAD);
    public:
     Dependencies() : _list_head(NULL) {}
+    Dependencies(TRAPS) : _list_head(NULL) {
+      init(CHECK);
+    }
     void add(Handle dependency, TRAPS);
     void init(TRAPS);
     void oops_do(OopClosure* f);
@ -150,7 +153,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   void set_next(ClassLoaderData* next) { _next = next; }
   ClassLoaderData* next() const        { return _next; }
 
-  ClassLoaderData(Handle h_class_loader, bool is_anonymous);
+  ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies);
   ~ClassLoaderData();
 
   void set_metaspace(Metaspace* m) { _metaspace = m; }
|
|||||||
static void init_null_class_loader_data() {
|
static void init_null_class_loader_data() {
|
||||||
assert(_the_null_class_loader_data == NULL, "cannot initialize twice");
|
assert(_the_null_class_loader_data == NULL, "cannot initialize twice");
|
||||||
assert(ClassLoaderDataGraph::_head == NULL, "cannot initialize twice");
|
assert(ClassLoaderDataGraph::_head == NULL, "cannot initialize twice");
|
||||||
_the_null_class_loader_data = new ClassLoaderData((oop)NULL, false);
|
|
||||||
|
// We explicitly initialize the Dependencies object at a later phase in the initialization
|
||||||
|
_the_null_class_loader_data = new ClassLoaderData((oop)NULL, false, Dependencies());
|
||||||
ClassLoaderDataGraph::_head = _the_null_class_loader_data;
|
ClassLoaderDataGraph::_head = _the_null_class_loader_data;
|
||||||
assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");
|
assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");
|
||||||
if (DumpSharedSpaces) {
|
if (DumpSharedSpaces) {
|
||||||
|
@ -43,10 +43,9 @@ inline ClassLoaderData *ClassLoaderDataGraph::find_or_create(Handle loader, TRAP
   assert(loader() != NULL,"Must be a class loader");
   // Gets the class loader data out of the java/lang/ClassLoader object, if non-null
   // it's already in the loader_data, so no need to add
-  ClassLoaderData** loader_data_addr = java_lang_ClassLoader::loader_data_addr(loader());
-  ClassLoaderData* loader_data_id = *loader_data_addr;
-  if (loader_data_id) {
-    return loader_data_id;
+  ClassLoaderData* loader_data= java_lang_ClassLoader::loader_data(loader());
+  if (loader_data) {
+    return loader_data;
   }
-  return ClassLoaderDataGraph::add(loader_data_addr, loader, THREAD);
+  return ClassLoaderDataGraph::add(loader, false, THREAD);
 }
@ -830,7 +830,7 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
         Klass *kk;
         {
           MutexLocker mu(SystemDictionary_lock, THREAD);
-          kk = find_class(name, ik->class_loader_data());
+          kk = find_class(d_index, d_hash, name, ik->class_loader_data());
         }
         if (kk != NULL) {
           // No clean up is needed if the shared class has been entered
@ -517,13 +517,18 @@
   template(sun_management_ManagementFactory, "sun/management/ManagementFactory") \
   template(sun_management_Sensor, "sun/management/Sensor") \
   template(sun_management_Agent, "sun/management/Agent") \
+  template(sun_management_DiagnosticCommandImpl, "sun/management/DiagnosticCommandImpl") \
   template(sun_management_GarbageCollectorImpl, "sun/management/GarbageCollectorImpl") \
+  template(sun_management_ManagementFactoryHelper, "sun/management/ManagementFactoryHelper") \
+  template(getDiagnosticCommandMBean_name, "getDiagnosticCommandMBean") \
+  template(getDiagnosticCommandMBean_signature, "()Lcom/sun/management/DiagnosticCommandMBean;") \
   template(getGcInfoBuilder_name, "getGcInfoBuilder") \
   template(getGcInfoBuilder_signature, "()Lsun/management/GcInfoBuilder;") \
   template(com_sun_management_GcInfo, "com/sun/management/GcInfo") \
   template(com_sun_management_GcInfo_constructor_signature, "(Lsun/management/GcInfoBuilder;JJJ[Ljava/lang/management/MemoryUsage;[Ljava/lang/management/MemoryUsage;[Ljava/lang/Object;)V") \
   template(createGCNotification_name, "createGCNotification") \
   template(createGCNotification_signature, "(JLjava/lang/String;Ljava/lang/String;Ljava/lang/String;Lcom/sun/management/GcInfo;)V") \
+  template(createDiagnosticFrameworkNotification_name, "createDiagnosticFrameworkNotification") \
   template(createMemoryPoolMBean_name, "createMemoryPoolMBean") \
   template(createMemoryManagerMBean_name, "createMemoryManagerMBean") \
   template(createGarbageCollectorMBean_name, "createGarbageCollectorMBean") \
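Each template entry generates a `vmSymbols::<name>()` accessor returning the interned Symbol*. An illustrative shape of how the new entries get consumed from VM code (the call site is hypothetical; `JavaCalls::call_static` is the standard upcall helper):

  // Resolve the generated symbols and invoke the Java-side factory method.
  Symbol* name = vmSymbols::getDiagnosticCommandMBean_name();
  Symbol* sig  = vmSymbols::getDiagnosticCommandMBean_signature();
  JavaCalls::call_static(&result, klass, name, sig, CHECK);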
@ -463,8 +463,10 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
 }
 #endif //PRODUCT
 
-
-nmethod* CodeCache::find_and_remove_saved_code(Method* m) {
+/**
+ * Remove and return nmethod from the saved code list in order to reanimate it.
+ */
+nmethod* CodeCache::reanimate_saved_code(Method* m) {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   nmethod* saved = _saved_nmethods;
   nmethod* prev = NULL;
|
|||||||
saved->set_speculatively_disconnected(false);
|
saved->set_speculatively_disconnected(false);
|
||||||
saved->set_saved_nmethod_link(NULL);
|
saved->set_saved_nmethod_link(NULL);
|
||||||
if (PrintMethodFlushing) {
|
if (PrintMethodFlushing) {
|
||||||
saved->print_on(tty, " ### nmethod is reconnected\n");
|
saved->print_on(tty, " ### nmethod is reconnected");
|
||||||
}
|
}
|
||||||
if (LogCompilation && (xtty != NULL)) {
|
if (LogCompilation && (xtty != NULL)) {
|
||||||
ttyLocker ttyl;
|
ttyLocker ttyl;
|
||||||
@ -496,6 +498,9 @@ nmethod* CodeCache::find_and_remove_saved_code(Method* m) {
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Remove nmethod from the saved code list in order to discard it permanently
|
||||||
|
*/
|
||||||
void CodeCache::remove_saved_code(nmethod* nm) {
|
void CodeCache::remove_saved_code(nmethod* nm) {
|
||||||
// For conc swpr this will be called with CodeCache_lock taken by caller
|
// For conc swpr this will be called with CodeCache_lock taken by caller
|
||||||
assert_locked_or_safepoint(CodeCache_lock);
|
assert_locked_or_safepoint(CodeCache_lock);
|
||||||
@ -529,7 +534,7 @@ void CodeCache::speculatively_disconnect(nmethod* nm) {
|
|||||||
nm->set_saved_nmethod_link(_saved_nmethods);
|
nm->set_saved_nmethod_link(_saved_nmethods);
|
||||||
_saved_nmethods = nm;
|
_saved_nmethods = nm;
|
||||||
if (PrintMethodFlushing) {
|
if (PrintMethodFlushing) {
|
||||||
nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
|
nm->print_on(tty, " ### nmethod is speculatively disconnected");
|
||||||
}
|
}
|
||||||
if (LogCompilation && (xtty != NULL)) {
|
if (LogCompilation && (xtty != NULL)) {
|
||||||
ttyLocker ttyl;
|
ttyLocker ttyl;
|
||||||
|
@ -57,7 +57,7 @@ class CodeCache : AllStatic {
|
|||||||
static int _number_of_nmethods_with_dependencies;
|
static int _number_of_nmethods_with_dependencies;
|
||||||
static bool _needs_cache_clean;
|
static bool _needs_cache_clean;
|
||||||
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
|
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
|
||||||
static nmethod* _saved_nmethods; // linked via nm->saved_nmethod_look()
|
static nmethod* _saved_nmethods; // Linked list of speculatively disconnected nmethods.
|
||||||
|
|
||||||
static void verify_if_often() PRODUCT_RETURN;
|
static void verify_if_often() PRODUCT_RETURN;
|
||||||
|
|
||||||
@ -168,7 +168,7 @@ class CodeCache : AllStatic {
|
|||||||
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
|
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
|
||||||
static void clear_inline_caches(); // clear all inline caches
|
static void clear_inline_caches(); // clear all inline caches
|
||||||
|
|
||||||
static nmethod* find_and_remove_saved_code(Method* m);
|
static nmethod* reanimate_saved_code(Method* m);
|
||||||
static void remove_saved_code(nmethod* nm);
|
static void remove_saved_code(nmethod* nm);
|
||||||
static void speculatively_disconnect(nmethod* nm);
|
static void speculatively_disconnect(nmethod* nm);
|
||||||
|
|
||||||
|
@ -45,25 +45,6 @@
 // Every time a compiled IC is changed or its type is being accessed,
 // either the CompiledIC_lock must be set or we must be at a safe point.
 
-
-// Release the CompiledICHolder* associated with this call site is there is one.
-void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
-  // This call site might have become stale so inspect it carefully.
-  NativeCall* call = nativeCall_at(call_site->addr());
-  if (is_icholder_entry(call->destination())) {
-    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
-    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
-  }
-}
-
-
-bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
-  // This call site might have become stale so inspect it carefully.
-  NativeCall* call = nativeCall_at(call_site->addr());
-  return is_icholder_entry(call->destination());
-}
-
 //-----------------------------------------------------------------------------
 // Low-level access to an inline cache. Private, since they might not be
 // MT-safe to use.
@ -488,33 +469,6 @@ bool CompiledIC::is_icholder_entry(address entry) {
   return (cb != NULL && cb->is_adapter_blob());
 }
 
-
-CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
-  : _ic_call(call)
-{
-  address ic_call = call->instruction_address();
-
-  assert(ic_call != NULL, "ic_call address must be set");
-  assert(nm != NULL, "must pass nmethod");
-  assert(nm->contains(ic_call), "must be in nmethod");
-
-  // search for the ic_call at the given address
-  RelocIterator iter(nm, ic_call, ic_call+1);
-  bool ret = iter.next();
-  assert(ret == true, "relocInfo must exist at this address");
-  assert(iter.addr() == ic_call, "must find ic_call");
-  if (iter.type() == relocInfo::virtual_call_type) {
-    virtual_call_Relocation* r = iter.virtual_call_reloc();
-    _is_optimized = false;
-    _value = nativeMovConstReg_at(r->cached_value());
-  } else {
-    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
-    _is_optimized = true;
-    _value = NULL;
-  }
-}
-
-
 // ----------------------------------------------------------------------------
 
 void CompiledStaticCall::set_to_clean() {
@ -549,33 +503,6 @@ bool CompiledStaticCall::is_call_to_interpreted() const {
   return nm->stub_contains(destination());
 }
 
-
-void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
-  address stub=find_stub();
-  guarantee(stub != NULL, "stub not found");
-
-  if (TraceICs) {
-    ResourceMark rm;
-    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
-                  instruction_address(),
-                  callee->name_and_sig_as_C_string());
-  }
-
-  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
-  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
-
-  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), "a) MT-unsafe modification of inline cache");
-  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, "b) MT-unsafe modification of inline cache");
-
-  // Update stub
-  method_holder->set_data((intptr_t)callee());
-  jump->set_jump_destination(entry);
-
-  // Update jump to call
-  set_destination_mt_safe(stub);
-}
-
-
 void CompiledStaticCall::set(const StaticCallInfo& info) {
   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
   MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
@ -618,19 +545,6 @@ void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
   }
 }
 
-
-void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
-  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
-  // Reset stub
-  address stub = static_stub->addr();
-  assert(stub!=NULL, "stub not found");
-  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
-  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
-  method_holder->set_data(0);
-  jump->set_jump_destination((address)-1);
-}
-
-
 address CompiledStaticCall::find_stub() {
   // Find reloc. information containing this call-site
   RelocIterator iter((nmethod*)NULL, instruction_address());
@ -668,19 +582,16 @@ void CompiledIC::verify() {
          || is_optimized() || is_megamorphic(), "sanity check");
 }
 
-
 void CompiledIC::print() {
   print_compiled_ic();
   tty->cr();
 }
 
-
 void CompiledIC::print_compiled_ic() {
   tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
              instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination(), is_optimized() ? NULL : cached_value());
 }
 
-
 void CompiledStaticCall::print() {
   tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address());
   if (is_clean()) {
@ -693,21 +604,4 @@ void CompiledStaticCall::print() {
   tty->cr();
 }
 
-void CompiledStaticCall::verify() {
-  // Verify call
-  NativeCall::verify();
-  if (os::is_MP()) {
-    verify_alignment();
-  }
-
-  // Verify stub
-  address stub = find_stub();
-  assert(stub != NULL, "no stub found for static call");
-  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
-  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
-
-  // Verify state
-  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
-}
-
-#endif
+#endif // !PRODUCT
@ -304,6 +304,11 @@ class CompiledStaticCall: public NativeCall {
   friend CompiledStaticCall* compiledStaticCall_at(address native_call);
   friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);
 
+  // Code
+  static void emit_to_interp_stub(CodeBuffer &cbuf);
+  static int to_interp_stub_size();
+  static int reloc_to_interp_stub();
+
   // State
   bool is_clean() const;
   bool is_call_to_compiled() const;
@ -65,7 +65,7 @@ HS_DTRACE_PROBE_DECL8(hotspot, method__compile__begin,
 HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
   char*, intptr_t, char*, intptr_t, char*, intptr_t, char*, intptr_t, bool);
 
-#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name) \
+#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \
   { \
     Symbol* klass_name = (method)->klass_name(); \
     Symbol* name = (method)->name(); \
@ -77,8 +77,7 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
       signature->bytes(), signature->utf8_length()); \
   }
 
-#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, \
-                                        comp_name, success) \
+#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) \
   { \
     Symbol* klass_name = (method)->klass_name(); \
     Symbol* name = (method)->name(); \
@ -92,7 +91,7 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
 
 #else /* USDT2 */
 
-#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name) \
+#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \
   { \
     Symbol* klass_name = (method)->klass_name(); \
     Symbol* name = (method)->name(); \
@ -104,8 +103,7 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
       (char *) signature->bytes(), signature->utf8_length()); \
   }
 
-#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, \
-                                        comp_name, success) \
+#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) \
   { \
     Symbol* klass_name = (method)->klass_name(); \
     Symbol* name = (method)->name(); \
@ -120,8 +118,8 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
 
 #else // ndef DTRACE_ENABLED
 
-#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name)
-#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, comp_name, success)
+#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)
+#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)
 
 #endif // ndef DTRACE_ENABLED
 
@ -1229,7 +1227,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
   if (method->is_not_compilable(comp_level)) return NULL;
 
   if (UseCodeCacheFlushing) {
-    nmethod* saved = CodeCache::find_and_remove_saved_code(method());
+    nmethod* saved = CodeCache::reanimate_saved_code(method());
     if (saved != NULL) {
       method->set_code(method, saved);
       return saved;
@ -1288,9 +1286,9 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
     method->jmethod_id();
   }
 
-  // If the compiler is shut off due to code cache flushing or otherwise,
+  // If the compiler is shut off due to code cache getting full
   // fail out now so blocking compiles dont hang the java thread
-  if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) {
+  if (!should_compile_new_jobs()) {
     CompilationPolicy::policy()->delay_compilation(method());
     return NULL;
   }
@ -1766,8 +1764,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
     // Save information about this method in case of failure.
     set_last_compile(thread, method, is_osr, task_level);
 
-    DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler(task_level), method,
-                                      compiler_name(task_level));
+    DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level));
   }
 
   // Allocate a new set of JNI handles.
@ -1842,13 +1839,14 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
         }
       }
     }
+    // simulate crash during compilation
+    assert(task->compile_id() != CICrashAt, "just as planned");
   }
   pop_jni_handle_block();
 
   methodHandle method(thread, task->method());
 
-  DTRACE_METHOD_COMPILE_END_PROBE(compiler(task_level), method,
-                                  compiler_name(task_level), task->is_success());
+  DTRACE_METHOD_COMPILE_END_PROBE(method, compiler_name(task_level), task->is_success());
 
   collect_statistics(thread, time, task);
 
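The new CICrashAt assert gives the replay tests a deterministic way to abort the VM mid-compilation. An illustrative invocation (assuming CICrashAt is a non-product flag, so this only works on a debug build):

  # Crash the compiler thread once compile id 17 is reached,
  # producing a replay_pid<pid>.txt that ciReplay can consume.
  java -XX:CICrashAt=17 MyApp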
@ -2444,8 +2444,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
       // initial marking in checkpointRootsInitialWork has been completed
       if (VerifyDuringGC &&
           GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-        gclog_or_tty->print("Verify before initial mark: ");
-        Universe::verify();
+        Universe::verify("Verify before initial mark: ");
       }
       {
         bool res = markFromRoots(false);
@ -2456,8 +2455,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
     case FinalMarking:
       if (VerifyDuringGC &&
          GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-        gclog_or_tty->print("Verify before re-mark: ");
-        Universe::verify();
+        Universe::verify("Verify before re-mark: ");
       }
       checkpointRootsFinal(false, clear_all_soft_refs,
                            init_mark_was_synchronous);
@ -2468,8 +2466,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
       // final marking in checkpointRootsFinal has been completed
       if (VerifyDuringGC &&
           GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-        gclog_or_tty->print("Verify before sweep: ");
-        Universe::verify();
+        Universe::verify("Verify before sweep: ");
       }
       sweep(false);
       assert(_collectorState == Resizing, "Incorrect state");
@ -2484,8 +2481,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
       // The heap has been resized.
       if (VerifyDuringGC &&
           GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-        gclog_or_tty->print("Verify before reset: ");
-        Universe::verify();
+        Universe::verify("Verify before reset: ");
       }
       reset(false);
       assert(_collectorState == Idling, "Collector state should "
@ -2853,8 +2849,8 @@ class VerifyMarkedClosure: public BitMapClosure {
   bool failed() { return _failed; }
 };
 
-bool CMSCollector::verify_after_remark() {
-  gclog_or_tty->print(" [Verifying CMS Marking... ");
+bool CMSCollector::verify_after_remark(bool silent) {
+  if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
   static bool init = false;
 
@ -2915,7 +2911,7 @@ bool CMSCollector::verify_after_remark() {
     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
             CMSRemarkVerifyVariant);
   }
-  gclog_or_tty->print(" done] ");
+  if (!silent) gclog_or_tty->print(" done] ");
   return true;
 }
 
@ -3426,8 +3422,9 @@ bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
   assert_locked_or_safepoint(Heap_lock);
   assert_lock_strong(freelistLock());
-  // XXX Fix when compaction is implemented.
-  warning("Shrinking of CMS not yet implemented");
+  if (PrintGCDetails && Verbose) {
+    warning("Shrinking of CMS not yet implemented");
+  }
   return;
 }
 
@ -6010,26 +6007,23 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
                                         &cmsDrainMarkingStackClosure,
                                         NULL);
   }
-  verify_work_stacks_empty();
   }
 
+  // This is the point where the entire marking should have completed.
+  verify_work_stacks_empty();
+
   if (should_unload_classes()) {
     {
       TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
 
-      // Follow SystemDictionary roots and unload classes
+      // Unload classes and purge the SystemDictionary.
       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
 
-      // Follow CodeCache roots and unload any methods marked for unloading
+      // Unload nmethods.
       CodeCache::do_unloading(&_is_alive_closure, purged_class);
 
-      cmsDrainMarkingStackClosure.do_void();
-      verify_work_stacks_empty();
-
-      // Update subklass/sibling/implementor links in KlassKlass descendants
+      // Prune dead klasses from subklass/sibling/implementor lists.
       Klass::clean_weak_klass_links(&_is_alive_closure);
-      // Nothing should have been pushed onto the working stacks.
-      verify_work_stacks_empty();
     }
 
     {
@ -6043,11 +6037,10 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
     // Need to check if we really scanned the StringTable.
     if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
       TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty);
-      // Now clean up stale oops in StringTable
+      // Delete entries for dead interned strings.
       StringTable::unlink(&_is_alive_closure);
     }
 
-  verify_work_stacks_empty();
   // Restore any preserved marks as a result of mark stack or
   // work queue overflow
   restore_preserved_marks_if_any();  // done single-threaded for now
|
@ -990,7 +990,7 @@ class CMSCollector: public CHeapObj<mtGC> {

   // debugging
   void verify();
-  bool verify_after_remark();
+  bool verify_after_remark(bool silent = VerifySilently);
   void verify_ok_to_terminate() const PRODUCT_RETURN;
   void verify_work_stacks_empty() const PRODUCT_RETURN;
   void verify_overflow_empty() const PRODUCT_RETURN;
@ -1273,10 +1273,9 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {

   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
-    gclog_or_tty->print(" VerifyDuringGC:(before)");
     Universe::heap()->prepare_for_verify();
-    Universe::verify(/* silent */ false,
-                     /* option */ VerifyOption_G1UsePrevMarking);
+    Universe::verify(VerifyOption_G1UsePrevMarking,
+                     " VerifyDuringGC:(before)");
   }

   G1CollectorPolicy* g1p = g1h->g1_policy();
@ -1300,10 +1299,9 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
     // Verify the heap w.r.t. the previous marking bitmap.
     if (VerifyDuringGC) {
       HandleMark hm;  // handle scope
-      gclog_or_tty->print(" VerifyDuringGC:(overflow)");
       Universe::heap()->prepare_for_verify();
-      Universe::verify(/* silent */ false,
-                       /* option */ VerifyOption_G1UsePrevMarking);
+      Universe::verify(VerifyOption_G1UsePrevMarking,
+                       " VerifyDuringGC:(overflow)");
     }

     // Clear the marking state because we will be restarting
@ -1323,10 +1321,9 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {

     if (VerifyDuringGC) {
       HandleMark hm;  // handle scope
-      gclog_or_tty->print(" VerifyDuringGC:(after)");
       Universe::heap()->prepare_for_verify();
-      Universe::verify(/* silent */ false,
-                       /* option */ VerifyOption_G1UseNextMarking);
+      Universe::verify(VerifyOption_G1UseNextMarking,
+                       " VerifyDuringGC:(after)");
     }
     assert(!restart_for_overflow(), "sanity");
     // Completely reset the marking state since marking completed
@ -1972,10 +1969,9 @@ void ConcurrentMark::cleanup() {

   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
-    gclog_or_tty->print(" VerifyDuringGC:(before)");
     Universe::heap()->prepare_for_verify();
-    Universe::verify(/* silent */ false,
-                     /* option */ VerifyOption_G1UsePrevMarking);
+    Universe::verify(VerifyOption_G1UsePrevMarking,
+                     " VerifyDuringGC:(before)");
   }

   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
@ -2127,10 +2123,9 @@ void ConcurrentMark::cleanup() {

   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
-    gclog_or_tty->print(" VerifyDuringGC:(after)");
     Universe::heap()->prepare_for_verify();
-    Universe::verify(/* silent */ false,
-                     /* option */ VerifyOption_G1UsePrevMarking);
+    Universe::verify(VerifyOption_G1UsePrevMarking,
+                     " VerifyDuringGC:(after)");
   }

   g1h->verify_region_sets_optional();
@ -1271,9 +1271,8 @@ double G1CollectedHeap::verify(bool guard, const char* msg) {
   if (guard && total_collections() >= VerifyGCStartAt) {
     double verify_start = os::elapsedTime();
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(msg);
     prepare_for_verify();
-    Universe::verify(false /* silent */, VerifyOption_G1UsePrevMarking);
+    Universe::verify(VerifyOption_G1UsePrevMarking, msg);
     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
   }

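Note on the six hunks above: they all apply one reshaping. The caller used to print a label with gclog_or_tty->print() and then call Universe::verify(silent, option); now the label is passed to Universe::verify() together with the VerifyOption, and the callee decides whether to print it. A minimal, self-contained sketch of that shape follows; VerifyOption, VerifySilently, and verify() below are simplified stand-ins, not the real declarations from universe.hpp.

    // Hedged, simplified sketch of the verify() reshaping shown above.
    #include <cstdio>

    enum VerifyOption { VerifyOption_G1UsePrevMarking, VerifyOption_G1UseNextMarking };
    static bool VerifySilently = false;  // stands in for the VerifySilently flag

    // New shape: the label travels with the call, so one flag inside verify()
    // decides whether anything is printed, instead of every call site printing.
    void verify(VerifyOption option, const char* msg) {
      (void)option;  // the real implementation would pick a marking bitmap here
      if (!VerifySilently && msg != NULL) {
        std::printf("%s", msg);  // replaces gclog_or_tty->print(msg) at callers
      }
      // ... heap verification would run here ...
    }

    int main() {
      // Old shape (two statements at each call site):
      //   gclog_or_tty->print(" VerifyDuringGC:(before)");
      //   Universe::verify(/* silent */ false, VerifyOption_G1UsePrevMarking);
      // New shape (one statement):
      verify(VerifyOption_G1UsePrevMarking, " VerifyDuringGC:(before)");
      return 0;
    }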
@ -1304,7 +1303,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,

   print_heap_before_gc();

-  size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
+  size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();

   HRSPhaseSetter x(HRSPhaseFullGC);
   verify_region_sets_optional();
@ -1425,6 +1424,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,

       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
       ClassLoaderDataGraph::purge();
+      MetaspaceAux::verify_metrics();

       // Note: since we've just done a full GC, concurrent
       // marking is no longer active. Therefore we need not
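The hunk above pairs ClassLoaderDataGraph::purge() with a new MetaspaceAux::verify_metrics() call, a debug check that cached usage counters still agree with a slow recount after metaspaces have been freed. A hedged sketch of that bookkeeping idea follows; MetaBookkeeping and its fields are illustrative names, not the MetaspaceAux interface.

    // Sketch: keep a cheap running counter, and after a structural change
    // (like purge()) re-derive the value the slow way and assert agreement.
    #include <cassert>
    #include <cstddef>

    struct MetaBookkeeping {
      size_t allocated_used_bytes;      // cheap cached counter
      size_t chunks[4];                 // stand-in for per-space usage

      size_t used_bytes_slow() const {  // authoritative recount
        size_t sum = 0;
        for (size_t c : chunks) sum += c;
        return sum;
      }
      void verify_metrics() const {
        assert(allocated_used_bytes == used_bytes_slow() &&
               "cached used bytes out of sync after purge");
      }
    };

    int main() {
      MetaBookkeeping m = { 30, { 10, 5, 15, 0 } };
      // ... purge() would free chunks and decrement allocated_used_bytes ...
      m.verify_metrics();
      return 0;
    }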
@ -1955,13 +1955,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
   assert(n_rem_sets > 0, "Invariant.");

-  HeapRegionRemSetIterator** iter_arr =
-    NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC);
-  for (int i = 0; i < n_queues; i++) {
-    iter_arr[i] = new HeapRegionRemSetIterator();
-  }
-  _rem_set_iterator = iter_arr;
-
   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);

@ -5079,10 +5072,9 @@ g1_process_strong_roots(bool is_scavenging,
 }

 void
-G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
-                                       OopClosure* non_root_closure) {
+G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
-  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
+  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
 }

 // Weak Reference Processing support
@ -786,9 +786,6 @@ protected:
   // concurrently after the collection.
   DirtyCardQueueSet _dirty_card_queue_set;

-  // The Heap Region Rem Set Iterator.
-  HeapRegionRemSetIterator** _rem_set_iterator;
-
   // The closure used to refine a single card.
   RefineCardTableEntryClosure* _refine_cte_cl;

@ -827,8 +824,7 @@ protected:
   // Apply "blk" to all the weak roots of the system. These include
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table, and referents of reachable weak refs.
-  void g1_process_weak_roots(OopClosure* root_closure,
-                             OopClosure* non_root_closure);
+  void g1_process_weak_roots(OopClosure* root_closure);

   // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
@ -1114,15 +1110,6 @@ public:
   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
   ModRefBarrierSet* mr_bs() const { return _mr_bs; }

-  // The rem set iterator.
-  HeapRegionRemSetIterator* rem_set_iterator(int i) {
-    return _rem_set_iterator[i];
-  }
-
-  HeapRegionRemSetIterator* rem_set_iterator() {
-    return _rem_set_iterator[0];
-  }
-
   unsigned get_gc_time_stamp() {
     return _gc_time_stamp;
   }
@ -144,33 +144,28 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                 &GenMarkSweep::follow_stack_closure,
                                 NULL);

-  // Follow system dictionary roots and unload classes
+  // This is the point where the entire marking should have completed.
+  assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");

+  // Unload classes and purge the SystemDictionary.
   bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
-  assert(GenMarkSweep::_marking_stack.is_empty(),
-         "stack should be empty by now");

-  // Follow code cache roots (has to be done after system dictionary,
-  // assumes all live klasses are marked)
+  // Unload nmethods.
   CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class);
-  GenMarkSweep::follow_stack();

-  // Update subklass/sibling/implementor links of live klasses
+  // Prune dead klasses from subklass/sibling/implementor lists.
   Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
-  assert(GenMarkSweep::_marking_stack.is_empty(),
-         "stack should be empty by now");

-  // Visit interned string tables and delete unmarked oops
+  // Delete entries for dead interned strings.
   StringTable::unlink(&GenMarkSweep::is_alive);

   // Clean up unreferenced symbols in symbol table.
   SymbolTable::unlink();

-  assert(GenMarkSweep::_marking_stack.is_empty(),
-         "stack should be empty by now");

   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
-    gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
     Universe::heap()->prepare_for_verify();
     // Note: we can verify only the heap here. When an object is
     // marked, the previous value of the mark word (including
@ -182,11 +177,13 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
     // fail. At the end of the GC, the orginal mark word values
     // (including hash values) are restored to the appropriate
     // objects.
-    Universe::heap()->verify(/* silent */ false,
-                             /* option */ VerifyOption_G1UseMarkWord);
-    G1CollectedHeap* g1h = G1CollectedHeap::heap();
-    gclog_or_tty->print_cr("]");
+    if (!VerifySilently) {
+      gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
+    }
+    Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord);
+    if (!VerifySilently) {
+      gclog_or_tty->print_cr("]");
+    }
   }
 }

@ -308,17 +305,16 @@ void G1MarkSweep::mark_sweep_phase3() {
   sh->process_strong_roots(true,  // activate StrongRootsScope
                            false, // not scavenging.
                            SharedHeap::SO_AllClasses,
-                           &GenMarkSweep::adjust_root_pointer_closure,
+                           &GenMarkSweep::adjust_pointer_closure,
                            NULL,  // do not touch code cache here
                            &GenMarkSweep::adjust_klass_closure);

   assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
-  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
+  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
-  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
-                             &GenMarkSweep::adjust_pointer_closure);
+  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure);

   GenMarkSweep::adjust_marks();

@ -169,14 +169,13 @@ public:
     // _try_claimed || r->claim_iter()
     // is true: either we're supposed to work on claimed-but-not-complete
     // regions, or we successfully claimed the region.
-    HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
-    hrrs->init_iterator(iter);
+    HeapRegionRemSetIterator iter(hrrs);
     size_t card_index;

     // We claim cards in block so as to recude the contention. The block size is determined by
     // the G1RSetScanBlockSize parameter.
     size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
-    for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
+    for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
       if (current_card >= jump_to_card + _block_size) {
         jump_to_card = hrrs->iter_claimed_next(_block_size);
       }
@ -53,14 +53,14 @@ protected:
     NumSeqTasks = 1
   };

   CardTableModRefBS* _ct_bs;
   SubTasksDone* _seq_task;
   G1CollectorPolicy* _g1p;

   ConcurrentG1Refine* _cg1r;

   size_t* _cards_scanned;
   size_t _total_cards_scanned;

   // Used for caching the closure that is responsible for scanning
   // references into the collection set.
@ -877,14 +877,9 @@ bool HeapRegionRemSet::iter_is_complete() {
   return _iter_state == Complete;
 }

-void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
-  iter->initialize(this);
-}
-
 #ifndef PRODUCT
 void HeapRegionRemSet::print() const {
-  HeapRegionRemSetIterator iter;
-  init_iterator(&iter);
+  HeapRegionRemSetIterator iter(this);
   size_t card_index;
   while (iter.has_next(card_index)) {
     HeapWord* card_start =
@ -928,35 +923,23 @@ void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,

 //-------------------- Iteration --------------------

-HeapRegionRemSetIterator::
-HeapRegionRemSetIterator() :
-  _hrrs(NULL),
+HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) :
+  _hrrs(hrrs),
   _g1h(G1CollectedHeap::heap()),
-  _bosa(NULL),
-  _sparse_iter() { }
-
-void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
-  _hrrs = hrrs;
-  _coarse_map = &_hrrs->_other_regions._coarse_map;
-  _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions;
-  _bosa = _hrrs->bosa();
-
-  _is = Sparse;
+  _coarse_map(&hrrs->_other_regions._coarse_map),
+  _fine_grain_regions(hrrs->_other_regions._fine_grain_regions),
+  _bosa(hrrs->bosa()),
+  _is(Sparse),
   // Set these values so that we increment to the first region.
-  _coarse_cur_region_index = -1;
-  _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);
-
-  _cur_region_cur_card = 0;
-
-  _fine_array_index = -1;
-  _fine_cur_prt = NULL;
-
-  _n_yielded_coarse = 0;
-  _n_yielded_fine = 0;
-  _n_yielded_sparse = 0;
-
-  _sparse_iter.init(&hrrs->_other_regions._sparse_table);
-}
+  _coarse_cur_region_index(-1),
+  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
+  _cur_region_cur_card(0),
+  _fine_array_index(-1),
+  _fine_cur_prt(NULL),
+  _n_yielded_coarse(0),
+  _n_yielded_fine(0),
+  _n_yielded_sparse(0),
+  _sparse_iter(&hrrs->_other_regions._sparse_table) {}

 bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
   if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
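The two hunks above replace a default-constructed iterator plus a separate initialize()/init_iterator() step with a constructor that takes the remembered set and initializes every field in its initializer list; a later hunk also rebases the class from CHeapObj to StackObj so each iterator lives on the stack for one loop. A small sketch of the same move follows; Table and Iter are illustrative stand-ins, not HeapRegionRemSet and its iterator.

    // After the refactoring: the iterator is never observable half-built,
    // and needs no heap allocation or matching delete.
    #include <cstddef>

    struct Table {                      // stand-in for the remembered set
      static const size_t N = 3;
      size_t cards[N];
    };

    class Iter {
      const Table* _tbl;
      size_t       _pos;
    public:
      explicit Iter(const Table* tbl) : _tbl(tbl), _pos(0) {}
      bool has_next(size_t& card) {
        if (_pos == Table::N) return false;
        card = _tbl->cards[_pos++];
        return true;
      }
    };

    int main() {
      Table t = { { 7, 11, 13 } };
      Iter iter(&t);          // was: Iter iter; t.init_iterator(&iter);
      size_t card;
      while (iter.has_next(card)) { /* process card */ }
      return 0;
    }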
@ -1209,8 +1192,7 @@ void HeapRegionRemSet::test() {
   hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

   // Now, does iteration yield these three?
-  HeapRegionRemSetIterator iter;
-  hrrs->init_iterator(&iter);
+  HeapRegionRemSetIterator iter(hrrs);
   size_t sum = 0;
   size_t card_index;
   while (iter.has_next(card_index)) {
@ -281,9 +281,6 @@ public:
     return (_iter_state == Unclaimed) && (_iter_claimed == 0);
   }

-  // Initialize the given iterator to iterate over this rem set.
-  void init_iterator(HeapRegionRemSetIterator* iter) const;
-
   // The actual # of bytes this hr_remset takes up.
   size_t mem_size() {
     return _other_regions.mem_size()
@ -345,9 +342,9 @@ public:
 #endif
 };

-class HeapRegionRemSetIterator : public CHeapObj<mtGC> {
+class HeapRegionRemSetIterator : public StackObj {

-  // The region over which we're iterating.
+  // The region RSet over which we're iterating.
   const HeapRegionRemSet* _hrrs;

   // Local caching of HRRS fields.
@ -362,8 +359,10 @@ class HeapRegionRemSetIterator : public CHeapObj<mtGC> {
   size_t _n_yielded_coarse;
   size_t _n_yielded_sparse;

-  // If true we're iterating over the coarse table; if false the fine
-  // table.
+  // Indicates what granularity of table that we're currently iterating over.
+  // We start iterating over the sparse table, progress to the fine grain
+  // table, and then finish with the coarse table.
+  // See HeapRegionRemSetIterator::has_next().
   enum IterState {
     Sparse,
     Fine,
@ -403,9 +402,7 @@ class HeapRegionRemSetIterator : public CHeapObj<mtGC> {
 public:
   // We require an iterator to be initialized before use, so the
   // constructor does little.
-  HeapRegionRemSetIterator();
-
-  void initialize(const HeapRegionRemSet* hrrs);
+  HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs);

   // If there remains one or more cards to be yielded, returns true and
   // sets "card_index" to one of those cards (which is then considered
@ -35,10 +35,6 @@

 #define UNROLL_CARD_LOOPS 1

-void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) {
-  sprt_iter->init(this);
-}
-
 void SparsePRTEntry::init(RegionIdx_t region_ind) {
   _region_ind = region_ind;
   _next_index = NullEntry;
@ -192,18 +192,11 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
   size_t compute_card_ind(CardIdx_t ci);

 public:
-  RSHashTableIter() :
-    _tbl_ind(RSHashTable::NullEntry),
+  RSHashTableIter(RSHashTable* rsht) :
+    _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
     _bl_ind(RSHashTable::NullEntry),
     _card_ind((SparsePRTEntry::cards_num() - 1)),
-    _rsht(NULL) {}
-
-  void init(RSHashTable* rsht) {
-    _rsht = rsht;
-    _tbl_ind = -1; // So that first increment gets to 0.
-    _bl_ind = RSHashTable::NullEntry;
-    _card_ind = (SparsePRTEntry::cards_num() - 1);
-  }
+    _rsht(rsht) {}

   bool has_next(size_t& card_index);
 };
@ -284,8 +277,6 @@ public:
   static void cleanup_all();
   RSHashTable* cur() const { return _cur; }

-  void init_iterator(SparsePRTIter* sprt_iter);
-
   static void add_to_expanded_list(SparsePRT* sprt);
   static SparsePRT* get_from_expanded_list();

@ -321,9 +312,9 @@ public:

 class SparsePRTIter: public RSHashTableIter {
 public:
-  void init(const SparsePRT* sprt) {
-    RSHashTableIter::init(sprt->cur());
-  }
+  SparsePRTIter(const SparsePRT* sprt) :
+    RSHashTableIter(sprt->cur()) {}
+
   bool has_next(size_t& card_index) {
     return RSHashTableIter::has_next(card_index);
   }
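The SparsePRTIter hunk above applies the same idea one level down: the derived iterator's init() becomes a constructor that forwards its argument straight to the base-class constructor. A minimal sketch with illustrative BaseIter/DerivedIter names, not the RSHashTableIter/SparsePRTIter types themselves:

    // Forwarding constructor in a derived iterator, replacing two-phase init().
    class BaseIter {
      const int* _data;
    public:
      explicit BaseIter(const int* data) : _data(data) {}
      bool valid() const { return _data != 0; }
    };

    class DerivedIter : public BaseIter {
    public:
      // was: void init(const Source* s) { BaseIter::init(s->cur()); }
      explicit DerivedIter(const int* source) : BaseIter(source) {}
    };

    int main() {
      int x = 42;
      DerivedIter it(&x);     // fully usable immediately after construction
      return it.valid() ? 0 : 1;
    }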
@ -138,8 +138,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {

   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyBeforeGC:");
-    Universe::verify();
+    Universe::verify(" VerifyBeforeGC:");
   }

   // Verify object start arrays
@ -177,7 +176,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
   size_t prev_used = heap->used();

   // Capture metadata size before collection for sizing.
-  size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
+  size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();

   // For PrintGCDetails
   size_t old_gen_prev_used = old_gen->used_in_bytes();
@ -238,6 +237,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {

   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
   ClassLoaderDataGraph::purge();
+  MetaspaceAux::verify_metrics();

   BiasedLocking::restore_marks();
   Threads::gc_epilogue();
@ -340,8 +340,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {

   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyAfterGC:");
-    Universe::verify();
+    Universe::verify(" VerifyAfterGC:");
   }

   // Re-verify object start arrays
@ -518,23 +517,23 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
       is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
   }

-  // Follow system dictionary roots and unload classes
+  // This is the point where the entire marking should have completed.
+  assert(_marking_stack.is_empty(), "Marking should have completed");

+  // Unload classes and purge the SystemDictionary.
   bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

-  // Follow code cache roots
+  // Unload nmethods.
   CodeCache::do_unloading(is_alive_closure(), purged_class);
-  follow_stack(); // Flush marking stack

-  // Update subklass/sibling/implementor links of live klasses
-  Klass::clean_weak_klass_links(&is_alive);
-  assert(_marking_stack.is_empty(), "just drained");
+  // Prune dead klasses from subklass/sibling/implementor lists.
+  Klass::clean_weak_klass_links(is_alive_closure());

-  // Visit interned string tables and delete unmarked oops
+  // Delete entries for dead interned strings.
   StringTable::unlink(is_alive_closure());

   // Clean up unreferenced symbols in symbol table.
   SymbolTable::unlink();

-  assert(_marking_stack.is_empty(), "stack should be empty by now");
 }

@ -583,28 +582,27 @@ void PSMarkSweep::mark_sweep_phase3() {
   ClassLoaderDataGraph::clear_claimed_marks();

   // General strong roots.
-  Universe::oops_do(adjust_root_pointer_closure());
-  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
-  CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure());
-  Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL);
-  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
-  FlatProfiler::oops_do(adjust_root_pointer_closure());
-  Management::oops_do(adjust_root_pointer_closure());
-  JvmtiExport::oops_do(adjust_root_pointer_closure());
+  Universe::oops_do(adjust_pointer_closure());
+  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
+  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
+  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
+  ObjectSynchronizer::oops_do(adjust_pointer_closure());
+  FlatProfiler::oops_do(adjust_pointer_closure());
+  Management::oops_do(adjust_pointer_closure());
+  JvmtiExport::oops_do(adjust_pointer_closure());
   // SO_AllClasses
-  SystemDictionary::oops_do(adjust_root_pointer_closure());
-  ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true);
-  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());
+  SystemDictionary::oops_do(adjust_pointer_closure());
+  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);

   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
-  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
+  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

   CodeCache::oops_do(adjust_pointer_closure());
-  StringTable::oops_do(adjust_root_pointer_closure());
-  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
-  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());
+  StringTable::oops_do(adjust_pointer_closure());
+  ref_processor()->weak_oops_do(adjust_pointer_closure());
+  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

   adjust_marks();

@ -44,7 +44,6 @@ class PSMarkSweep : public MarkSweep {
   static KlassClosure* follow_klass_closure() { return &MarkSweep::follow_klass_closure; }
   static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
   static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
-  static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_root_pointer_closure; }
   static KlassClosure* adjust_klass_closure() { return &MarkSweep::adjust_klass_closure; }
   static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; }

@ -787,12 +787,11 @@ bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap(
 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
 void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }

-PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
-PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
+PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
 PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;

-void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
-void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
+void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p); }
+void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }

 void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }

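The hunks above and below remove adjust_root_pointer_closure throughout: the two AdjustPointerClosure instances differed only in an _is_root flag that adjust_pointer() no longer consults, so a single flagless closure now serves roots and non-roots alike. A compact sketch of the collapse, with stand-in oop/OopClosure types rather than the HotSpot definitions:

    // Before: two closure objects differing only in a constructor flag.
    //   AdjustPointerClosure adjust_root_pointer_closure(true);
    //   AdjustPointerClosure adjust_pointer_closure(false);
    // After the flag became unused, one flagless closure remains.
    typedef int* oop;

    struct OopClosure {
      virtual void do_oop(oop* p) = 0;
      virtual ~OopClosure() {}
    };

    template <class T> inline void adjust_pointer(T* p) {
      (void)p;  // relocation logic; note: no bool is_root parameter anymore
    }

    struct AdjustPointerClosure : public OopClosure {
      virtual void do_oop(oop* p) { adjust_pointer(p); }
    };

    static AdjustPointerClosure adjust_pointer_closure;  // the single instance

    int main() {
      oop slot = 0;
      adjust_pointer_closure.do_oop(&slot);  // roots and non-roots share this
      return 0;
    }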
@ -805,7 +804,7 @@ void PSParallelCompact::FollowKlassClosure::do_klass(Klass* klass) {
   klass->oops_do(_mark_and_push_closure);
 }
 void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
-  klass->oops_do(&PSParallelCompact::_adjust_root_pointer_closure);
+  klass->oops_do(&PSParallelCompact::_adjust_pointer_closure);
 }

 void PSParallelCompact::post_initialize() {
@ -892,7 +891,7 @@ public:
     _heap_used      = heap->used();
     _young_gen_used = heap->young_gen()->used_in_bytes();
     _old_gen_used   = heap->old_gen()->used_in_bytes();
-    _metadata_used  = MetaspaceAux::used_in_bytes();
+    _metadata_used  = MetaspaceAux::allocated_used_bytes();
   };

   size_t heap_used() const      { return _heap_used; }
@ -967,8 +966,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)

   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyBeforeGC:");
-    Universe::verify();
+    Universe::verify(" VerifyBeforeGC:");
   }

   // Verify object start arrays
@ -1027,6 +1025,7 @@ void PSParallelCompact::post_compact()

   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
   ClassLoaderDataGraph::purge();
+  MetaspaceAux::verify_metrics();

   Threads::gc_epilogue();
   CodeCache::gc_epilogue();
@ -2168,8 +2167,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {

   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyAfterGC:");
-    Universe::verify();
+    Universe::verify(" VerifyAfterGC:");
   }

   // Re-verify object start arrays
@ -2356,22 +2354,24 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
   }

   TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);

+  // This is the point where the entire marking should have completed.
+  assert(cm->marking_stacks_empty(), "Marking should have completed");

   // Follow system dictionary roots and unload classes.
   bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

-  // Follow code cache roots.
+  // Unload nmethods.
   CodeCache::do_unloading(is_alive_closure(), purged_class);
-  cm->follow_marking_stacks(); // Flush marking stack.

-  // Update subklass/sibling/implementor links of live klasses
+  // Prune dead klasses from subklass/sibling/implementor lists.
   Klass::clean_weak_klass_links(is_alive_closure());

-  // Visit interned string tables and delete unmarked oops
+  // Delete entries for dead interned strings.
   StringTable::unlink(is_alive_closure());

   // Clean up unreferenced symbols in symbol table.
   SymbolTable::unlink();

-  assert(cm->marking_stacks_empty(), "marking stacks should be empty");
 }

 void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
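The marking hunks above (and the earlier CMS, G1, and PSMarkSweep ones) hoist the stack-emptiness check: instead of flushing and re-asserting after each unlink step, a single assertion marks the point where marking must already be complete, and everything after it may only unlink dead entries. A sketch of that assertion-hoisting pattern, using an illustrative MarkingStack type:

    // Assert emptiness once, where the invariant is established, instead of
    // after every unlink step.
    #include <cassert>
    #include <vector>

    struct MarkingStack {
      std::vector<int> tasks;
      bool is_empty() const { return tasks.empty(); }
    };

    void unload_classes(MarkingStack&)  { /* unlink only; must not push */ }
    void unload_nmethods(MarkingStack&) { /* unlink only; must not push */ }

    void post_marking(MarkingStack& stack) {
      // This is the point where the entire marking should have completed.
      assert(stack.is_empty() && "Marking should have completed");
      unload_classes(stack);   // previously followed by its own empty-assert
      unload_nmethods(stack);  // previously followed by a flush plus assert
    }

    int main() {
      MarkingStack s;
      post_marking(s);
      return 0;
    }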
@ -2398,7 +2398,7 @@ void PSParallelCompact::follow_class_loader(ParCompactionManager* cm,

 void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm,
                                             ClassLoaderData* cld) {
-  cld->oops_do(PSParallelCompact::adjust_root_pointer_closure(),
+  cld->oops_do(PSParallelCompact::adjust_pointer_closure(),
                PSParallelCompact::adjust_klass_closure(),
                true);
 }
@ -2419,32 +2419,31 @@ void PSParallelCompact::adjust_roots() {
   ClassLoaderDataGraph::clear_claimed_marks();

   // General strong roots.
-  Universe::oops_do(adjust_root_pointer_closure());
-  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
-  CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure());
-  Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL);
-  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
-  FlatProfiler::oops_do(adjust_root_pointer_closure());
-  Management::oops_do(adjust_root_pointer_closure());
-  JvmtiExport::oops_do(adjust_root_pointer_closure());
+  Universe::oops_do(adjust_pointer_closure());
+  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
+  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
+  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
+  ObjectSynchronizer::oops_do(adjust_pointer_closure());
+  FlatProfiler::oops_do(adjust_pointer_closure());
+  Management::oops_do(adjust_pointer_closure());
+  JvmtiExport::oops_do(adjust_pointer_closure());
   // SO_AllClasses
-  SystemDictionary::oops_do(adjust_root_pointer_closure());
-  ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true);
+  SystemDictionary::oops_do(adjust_pointer_closure());
+  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);

   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
-  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
+  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

   CodeCache::oops_do(adjust_pointer_closure());
-  StringTable::oops_do(adjust_root_pointer_closure());
-  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
+  StringTable::oops_do(adjust_pointer_closure());
+  ref_processor()->weak_oops_do(adjust_pointer_closure());
   // Roots were visited so references into the young gen in roots
   // may have been scanned.  Process them also.
   // Should the reference processor have a span that excludes
   // young gen objects?
-  PSScavenge::reference_processor()->weak_oops_do(
-    adjust_root_pointer_closure());
+  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
 }

 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
|
@ -799,16 +799,6 @@ class PSParallelCompact : AllStatic {
|
|||||||
virtual void do_oop(narrowOop* p);
|
virtual void do_oop(narrowOop* p);
|
||||||
};
|
};
|
||||||
|
|
||||||
// Current unused
|
|
||||||
class FollowRootClosure: public OopsInGenClosure {
|
|
||||||
private:
|
|
||||||
ParCompactionManager* _compaction_manager;
|
|
||||||
public:
|
|
||||||
FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
|
|
||||||
virtual void do_oop(oop* p);
|
|
||||||
virtual void do_oop(narrowOop* p);
|
|
||||||
};
|
|
||||||
|
|
||||||
class FollowStackClosure: public VoidClosure {
|
class FollowStackClosure: public VoidClosure {
|
||||||
private:
|
private:
|
||||||
ParCompactionManager* _compaction_manager;
|
ParCompactionManager* _compaction_manager;
|
||||||
@ -818,10 +808,7 @@ class PSParallelCompact : AllStatic {
|
|||||||
};
|
};
|
||||||
|
|
||||||
class AdjustPointerClosure: public OopClosure {
|
class AdjustPointerClosure: public OopClosure {
|
||||||
private:
|
|
||||||
bool _is_root;
|
|
||||||
public:
|
public:
|
||||||
AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
|
|
||||||
virtual void do_oop(oop* p);
|
virtual void do_oop(oop* p);
|
||||||
virtual void do_oop(narrowOop* p);
|
virtual void do_oop(narrowOop* p);
|
||||||
// do not walk from thread stacks to the code cache on this phase
|
// do not walk from thread stacks to the code cache on this phase
|
||||||
@ -838,7 +825,6 @@ class PSParallelCompact : AllStatic {
|
|||||||
friend class AdjustPointerClosure;
|
friend class AdjustPointerClosure;
|
||||||
friend class AdjustKlassClosure;
|
friend class AdjustKlassClosure;
|
||||||
friend class FollowKlassClosure;
|
friend class FollowKlassClosure;
|
||||||
friend class FollowRootClosure;
|
|
||||||
friend class InstanceClassLoaderKlass;
|
friend class InstanceClassLoaderKlass;
|
||||||
friend class RefProcTaskProxy;
|
friend class RefProcTaskProxy;
|
||||||
|
|
||||||
@ -853,7 +839,6 @@ class PSParallelCompact : AllStatic {
|
|||||||
static IsAliveClosure _is_alive_closure;
|
static IsAliveClosure _is_alive_closure;
|
||||||
static SpaceInfo _space_info[last_space_id];
|
static SpaceInfo _space_info[last_space_id];
|
||||||
static bool _print_phases;
|
static bool _print_phases;
|
||||||
static AdjustPointerClosure _adjust_root_pointer_closure;
|
|
||||||
static AdjustPointerClosure _adjust_pointer_closure;
|
static AdjustPointerClosure _adjust_pointer_closure;
|
||||||
static AdjustKlassClosure _adjust_klass_closure;
|
static AdjustKlassClosure _adjust_klass_closure;
|
||||||
|
|
||||||
@ -889,9 +874,6 @@ class PSParallelCompact : AllStatic {
|
|||||||
static void marking_phase(ParCompactionManager* cm,
|
static void marking_phase(ParCompactionManager* cm,
|
||||||
bool maximum_heap_compaction);
|
bool maximum_heap_compaction);
|
||||||
|
|
||||||
template <class T> static inline void adjust_pointer(T* p, bool is_root);
|
|
||||||
static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
|
|
||||||
|
|
||||||
template <class T>
|
template <class T>
|
||||||
static inline void follow_root(ParCompactionManager* cm, T* p);
|
static inline void follow_root(ParCompactionManager* cm, T* p);
|
||||||
|
|
||||||
@ -1046,7 +1028,6 @@ class PSParallelCompact : AllStatic {
|
|||||||
|
|
||||||
// Closure accessors
|
// Closure accessors
|
||||||
static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
|
static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
|
||||||
static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; }
|
|
||||||
static KlassClosure* adjust_klass_closure() { return (KlassClosure*)&_adjust_klass_closure; }
|
static KlassClosure* adjust_klass_closure() { return (KlassClosure*)&_adjust_klass_closure; }
|
||||||
static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
|
static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
|
||||||
|
|
||||||
@ -1067,6 +1048,7 @@ class PSParallelCompact : AllStatic {
|
|||||||
// Check mark and maybe push on marking stack
|
// Check mark and maybe push on marking stack
|
||||||
template <class T> static inline void mark_and_push(ParCompactionManager* cm,
|
template <class T> static inline void mark_and_push(ParCompactionManager* cm,
|
||||||
T* p);
|
T* p);
|
||||||
|
template <class T> static inline void adjust_pointer(T* p);
|
||||||
|
|
||||||
static void follow_klass(ParCompactionManager* cm, Klass* klass);
|
static void follow_klass(ParCompactionManager* cm, Klass* klass);
|
||||||
static void adjust_klass(ParCompactionManager* cm, Klass* klass);
|
static void adjust_klass(ParCompactionManager* cm, Klass* klass);
|
||||||
@ -1151,9 +1133,6 @@ class PSParallelCompact : AllStatic {
|
|||||||
static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
|
static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
|
||||||
static ParallelCompactData& summary_data() { return _summary_data; }
|
static ParallelCompactData& summary_data() { return _summary_data; }
|
||||||
|
|
||||||
static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); }
|
|
||||||
static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
|
|
||||||
|
|
||||||
// Reference Processing
|
// Reference Processing
|
||||||
static ReferenceProcessor* const ref_processor() { return _ref_processor; }
|
static ReferenceProcessor* const ref_processor() { return _ref_processor; }
|
||||||
|
|
||||||
@ -1230,7 +1209,7 @@ inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
template <class T>
|
template <class T>
|
||||||
inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) {
|
inline void PSParallelCompact::adjust_pointer(T* p) {
|
||||||
T heap_oop = oopDesc::load_heap_oop(p);
|
T heap_oop = oopDesc::load_heap_oop(p);
|
||||||
if (!oopDesc::is_null(heap_oop)) {
|
if (!oopDesc::is_null(heap_oop)) {
|
||||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||||
|
@ -314,8 +314,7 @@ bool PSScavenge::invoke_no_policy() {

   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyBeforeGC:");
-    Universe::verify();
+    Universe::verify(" VerifyBeforeGC:");
   }

   {
@ -638,8 +637,7 @@ bool PSScavenge::invoke_no_policy() {

   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyAfterGC:");
-    Universe::verify();
+    Universe::verify(" VerifyAfterGC:");
   }

   heap->print_heap_after_gc();
@ -81,7 +81,7 @@ void MarkSweep::follow_class_loader(ClassLoaderData* cld) {
 }

 void MarkSweep::adjust_class_loader(ClassLoaderData* cld) {
-  cld->oops_do(&MarkSweep::adjust_root_pointer_closure, &MarkSweep::adjust_klass_closure, true);
+  cld->oops_do(&MarkSweep::adjust_pointer_closure, &MarkSweep::adjust_klass_closure, true);
 }


@ -121,11 +121,10 @@ void MarkSweep::preserve_mark(oop obj, markOop mark) {
   }
 }

-MarkSweep::AdjustPointerClosure MarkSweep::adjust_root_pointer_closure(true);
-MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure(false);
+MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure;

-void MarkSweep::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
-void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
+void MarkSweep::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p); }
+void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }

 void MarkSweep::adjust_marks() {
   assert( _preserved_oop_stack.size() == _preserved_mark_stack.size(),
@ -80,10 +80,7 @@ class MarkSweep : AllStatic {
   };

   class AdjustPointerClosure: public OopsInGenClosure {
-   private:
-    bool _is_root;
    public:
-    AdjustPointerClosure(bool is_root) : _is_root(is_root) {}
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
   };
@ -146,7 +143,6 @@ class MarkSweep : AllStatic {
   static MarkAndPushClosure   mark_and_push_closure;
   static FollowKlassClosure   follow_klass_closure;
   static FollowStackClosure   follow_stack_closure;
-  static AdjustPointerClosure adjust_root_pointer_closure;
   static AdjustPointerClosure adjust_pointer_closure;
   static AdjustKlassClosure   adjust_klass_closure;

@ -179,12 +175,7 @@ class MarkSweep : AllStatic {
   static void adjust_marks();   // Adjust the pointers in the preserved marks table
   static void restore_marks();  // Restore the marks that we saved in preserve_mark

-  template <class T> static inline void adjust_pointer(T* p, bool isroot);
+  template <class T> static inline void adjust_pointer(T* p);

-  static void adjust_root_pointer(oop* p)  { adjust_pointer(p, true); }
-  static void adjust_pointer(oop* p)       { adjust_pointer(p, false); }
-  static void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
-
 };

 class PreservedMark VALUE_OBJ_CLASS_SPEC {
@ -76,7 +76,7 @@ void MarkSweep::push_objarray(oop obj, size_t index) {
   _objarray_stack.push(task);
 }

-template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) {
+template <class T> inline void MarkSweep::adjust_pointer(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
@@ -225,7 +225,10 @@ void VM_CollectForMetadataAllocation::doit() {
       gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
     }
     heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
-    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+    // After a GC try to allocate without expanding.  Could fail
+    // and expansion will be tried below.
+    _result =
+      _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
   }
   if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) {
     // If still failing, allow the Metaspace to expand.
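The retry sequence introduced here is a common allocation policy: only expand the metaspace if allocation still fails after a collection. A standalone sketch of the same control flow (hypothetical Arena type, not the VM's API):

    #include <cstdio>

    struct Arena {
      int free_units = 0;
      int* allocate(int units) { return free_units >= units ? (free_units -= units, new int[units]) : nullptr; }
      void collect() { free_units += 4; }   // pretend a GC reclaimed some space
      void expand()  { free_units += 16; }  // grow only as a last resort
    };

    int* allocate_with_gc(Arena& a, int units) {
      if (int* p = a.allocate(units)) return p;
      a.collect();
      // After a GC, try to allocate without expanding; expansion is tried below.
      if (int* p = a.allocate(units)) return p;
      a.expand();
      return a.allocate(units);
    }

    int main() {
      Arena a;
      std::printf("%p\n", static_cast<void*>(allocate_with_gc(a, 8)));
    }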
@@ -238,8 +238,8 @@ void FileMapInfo::write_header() {
 
 void FileMapInfo::write_space(int i, Metaspace* space, bool read_only) {
   align_file_position();
-  size_t used = space->used_words(Metaspace::NonClassType) * BytesPerWord;
-  size_t capacity = space->capacity_words(Metaspace::NonClassType) * BytesPerWord;
+  size_t used = space->used_bytes_slow(Metaspace::NonClassType);
+  size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType);
   struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
   write_region(i, (char*)space->bottom(), used, capacity, read_only, false);
 }
@@ -377,7 +377,7 @@ void GenCollectedHeap::do_collection(bool full,
 
   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
 
-  const size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
+  const size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
 
   print_heap_before_gc();
 
@@ -447,8 +447,7 @@ void GenCollectedHeap::do_collection(bool full,
           prepare_for_verify();
           prepared_for_verification = true;
         }
-        gclog_or_tty->print(" VerifyBeforeGC:");
-        Universe::verify();
+        Universe::verify(" VerifyBeforeGC:");
       }
       COMPILER2_PRESENT(DerivedPointerTable::clear());
 
@@ -519,8 +518,7 @@ void GenCollectedHeap::do_collection(bool full,
       if (VerifyAfterGC && i >= VerifyGCLevel &&
           total_collections() >= VerifyGCStartAt) {
         HandleMark hm;  // Discard invalid handles created during verification
-        gclog_or_tty->print(" VerifyAfterGC:");
-        Universe::verify();
+        Universe::verify(" VerifyAfterGC:");
       }
 
       if (PrintGCDetails) {
@@ -556,6 +554,7 @@ void GenCollectedHeap::do_collection(bool full,
   if (complete) {
     // Delete metaspaces for unloaded class loaders and clean up loader_data graph
     ClassLoaderDataGraph::purge();
+    MetaspaceAux::verify_metrics();
     // Resize the metaspace capacity after full collections
     MetaspaceGC::compute_new_size();
     update_full_collections_completed();
@@ -633,9 +632,8 @@ gen_process_strong_roots(int level,
 }
 
 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
-                                              CodeBlobClosure* code_roots,
-                                              OopClosure* non_root_closure) {
-  SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
+                                              CodeBlobClosure* code_roots) {
+  SharedHeap::process_weak_roots(root_closure, code_roots);
   // "Local" "weak" refs
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->ref_processor()->weak_oops_do(root_closure);
@@ -432,8 +432,7 @@ public:
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table, and referents of reachable weak refs.
   void gen_process_weak_roots(OopClosure* root_closure,
-                              CodeBlobClosure* code_roots,
-                              OopClosure* non_root_closure);
+                              CodeBlobClosure* code_roots);
 
   // Set the saved marks of generations, if that makes sense.
   // In particular, if any generation might iterate over the oops
@@ -223,23 +223,23 @@ void GenMarkSweep::mark_sweep_phase1(int level,
       &is_alive, &keep_alive, &follow_stack_closure, NULL);
   }
 
-  // Follow system dictionary roots and unload classes
+  // This is the point where the entire marking should have completed.
+  assert(_marking_stack.is_empty(), "Marking should have completed");
+
+  // Unload classes and purge the SystemDictionary.
   bool purged_class = SystemDictionary::do_unloading(&is_alive);
 
-  // Follow code cache roots
+  // Unload nmethods.
   CodeCache::do_unloading(&is_alive, purged_class);
-  follow_stack(); // Flush marking stack
 
-  // Update subklass/sibling/implementor links of live klasses
+  // Prune dead klasses from subklass/sibling/implementor lists.
   Klass::clean_weak_klass_links(&is_alive);
-  assert(_marking_stack.is_empty(), "just drained");
 
-  // Visit interned string tables and delete unmarked oops
+  // Delete entries for dead interned strings.
   StringTable::unlink(&is_alive);
 
   // Clean up unreferenced symbols in symbol table.
   SymbolTable::unlink();
 
-  assert(_marking_stack.is_empty(), "stack should be empty by now");
 }
 
@@ -282,11 +282,10 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
   // Need new claim bits for the pointer adjustment tracing.
   ClassLoaderDataGraph::clear_claimed_marks();
 
-  // Because the two closures below are created statically, cannot
+  // Because the closure below is created statically, we cannot
   // use OopsInGenClosure constructor which takes a generation,
   // as the Universe has not been created when the static constructors
   // are run.
-  adjust_root_pointer_closure.set_orig_generation(gch->get_gen(level));
   adjust_pointer_closure.set_orig_generation(gch->get_gen(level));
 
   gch->gen_process_strong_roots(level,
@@ -294,18 +293,17 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
                                 true,  // activate StrongRootsScope
                                 false, // not scavenging
                                 SharedHeap::SO_AllClasses,
-                                &adjust_root_pointer_closure,
+                                &adjust_pointer_closure,
                                 false, // do not walk code
-                                &adjust_root_pointer_closure,
+                                &adjust_pointer_closure,
                                 &adjust_klass_closure);
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure,
                                                    /*do_marking=*/ false);
-  gch->gen_process_weak_roots(&adjust_root_pointer_closure,
-                              &adjust_code_pointer_closure,
-                              &adjust_pointer_closure);
+  gch->gen_process_weak_roots(&adjust_pointer_closure,
+                              &adjust_code_pointer_closure);
 
   adjust_marks();
   GenAdjustPointersClosure blk;
@@ -28,6 +28,7 @@
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
 
+class VirtualSpaceNode;
 //
 // Future modification
 //
@@ -45,27 +46,30 @@ size_t Metachunk::_overhead =
 
 // Metachunk methods
 
-Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) {
-  // Set bottom, top, and end.  Allow space for the Metachunk itself
-  Metachunk* chunk = (Metachunk*) ptr;
-
-  MetaWord* chunk_bottom = ptr + _overhead;
-  chunk->set_bottom(ptr);
-  chunk->set_top(chunk_bottom);
-  MetaWord* chunk_end = ptr + word_size;
-  assert(chunk_end > chunk_bottom, "Chunk must be too small");
-  chunk->set_end(chunk_end);
-  chunk->set_next(NULL);
-  chunk->set_prev(NULL);
-  chunk->set_word_size(word_size);
+Metachunk::Metachunk(size_t word_size,
+                     VirtualSpaceNode* container) :
+    _word_size(word_size),
+    _bottom(NULL),
+    _end(NULL),
+    _top(NULL),
+    _next(NULL),
+    _prev(NULL),
+    _container(container)
+{
+  _bottom = (MetaWord*)this;
+  _top = (MetaWord*)this + _overhead;
+  _end = (MetaWord*)this + word_size;
 #ifdef ASSERT
-  size_t data_word_size = pointer_delta(chunk_end, chunk_bottom, sizeof(MetaWord));
-  Copy::fill_to_words((HeapWord*) chunk_bottom, data_word_size, metadata_chunk_initialize);
+  set_is_free(false);
+  size_t data_word_size = pointer_delta(end(),
+                                        top(),
+                                        sizeof(MetaWord));
+  Copy::fill_to_words((HeapWord*) top(),
+                      data_word_size,
+                      metadata_chunk_initialize);
 #endif
-  return chunk;
 }
 
 MetaWord* Metachunk::allocate(size_t word_size) {
   MetaWord* result = NULL;
   // If available, bump the pointer to allocate.
@@ -41,10 +41,13 @@
 // | | | |
 // +--------------+ <- bottom ---+ ---+
 
+class VirtualSpaceNode;
+
 class Metachunk VALUE_OBJ_CLASS_SPEC {
   // link to support lists of chunks
   Metachunk* _next;
   Metachunk* _prev;
+  VirtualSpaceNode* _container;
 
   MetaWord* _bottom;
   MetaWord* _end;
@@ -61,29 +64,20 @@ class Metachunk VALUE_OBJ_CLASS_SPEC {
   // the space.
   static size_t _overhead;
 
-  void set_bottom(MetaWord* v) { _bottom = v; }
-  void set_end(MetaWord* v) { _end = v; }
-  void set_top(MetaWord* v) { _top = v; }
-  void set_word_size(size_t v) { _word_size = v; }
  public:
-#ifdef ASSERT
-  Metachunk() : _bottom(NULL), _end(NULL), _top(NULL), _is_free(false),
-    _next(NULL), _prev(NULL) {}
-#else
-  Metachunk() : _bottom(NULL), _end(NULL), _top(NULL),
-    _next(NULL), _prev(NULL) {}
-#endif
+  Metachunk(size_t word_size , VirtualSpaceNode* container);
 
   // Used to add a Metachunk to a list of Metachunks
   void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");}
   void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");}
+  void set_container(VirtualSpaceNode* v) { _container = v; }
 
   MetaWord* allocate(size_t word_size);
-  static Metachunk* initialize(MetaWord* ptr, size_t word_size);
 
   // Accessors
   Metachunk* next() const { return _next; }
   Metachunk* prev() const { return _prev; }
+  VirtualSpaceNode* container() const { return _container; }
   MetaWord* bottom() const { return _bottom; }
   MetaWord* end() const { return _end; }
   MetaWord* top() const { return _top; }
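With the constructor replacing the static Metachunk::initialize(), a chunk's header now initializes itself at the bottom of the chunk's own memory, so call sites elsewhere in the change presumably construct it in place. A standalone sketch of that header-in-payload pattern (illustrative C++, not HotSpot code):

    #include <cstddef>
    #include <cstdio>
    #include <new>

    struct Chunk {
      size_t word_size;
      void** bottom;
      void** top;
      void** end;
      Chunk(size_t ws) : word_size(ws) {
        bottom = reinterpret_cast<void**>(this);
        top    = bottom + sizeof(Chunk) / sizeof(void*);  // skip the header ("overhead")
        end    = bottom + ws;
      }
    };

    int main() {
      static void* backing[64];                  // raw chunk memory
      Chunk* c = ::new (backing) Chunk(64);      // header constructed in place
      std::printf("payload words: %zu\n", static_cast<size_t>(c->end - c->top));
    }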
[File diff suppressed because it is too large]
@@ -111,6 +111,10 @@ class Metaspace : public CHeapObj<mtClass> {
   SpaceManager* _class_vsm;
   SpaceManager* class_vsm() const { return _class_vsm; }
 
+  // Allocate space for metadata of type mdtype. This is space
+  // within a Metachunk and is used by
+  //   allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS)
+  // which returns a Metablock.
   MetaWord* allocate(size_t word_size, MetadataType mdtype);
 
   // Virtual Space lists for both classes and other metadata
@@ -133,11 +137,14 @@ class Metaspace : public CHeapObj<mtClass> {
   static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
 
   char* bottom() const;
-  size_t used_words(MetadataType mdtype) const;
+  size_t used_words_slow(MetadataType mdtype) const;
   size_t free_words(MetadataType mdtype) const;
-  size_t capacity_words(MetadataType mdtype) const;
+  size_t capacity_words_slow(MetadataType mdtype) const;
   size_t waste_words(MetadataType mdtype) const;
 
+  size_t used_bytes_slow(MetadataType mdtype) const;
+  size_t capacity_bytes_slow(MetadataType mdtype) const;
+
   static Metablock* allocate(ClassLoaderData* loader_data, size_t size,
                              bool read_only, MetadataType mdtype, TRAPS);
   void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
@@ -150,6 +157,9 @@ class Metaspace : public CHeapObj<mtClass> {
   static bool contains(const void *ptr);
   void dump(outputStream* const out) const;
 
+  // Free empty virtualspaces
+  static void purge();
+
   void print_on(outputStream* st) const;
   // Debugging support
   void verify();
@@ -158,28 +168,81 @@ class Metaspace : public CHeapObj<mtClass> {
 class MetaspaceAux : AllStatic {
 
   // Statistics for class space and data space in metaspace.
-  static size_t used_in_bytes(Metaspace::MetadataType mdtype);
+  // These methods iterate over the classloader data graph
+  // for the given Metaspace type.  These are slow.
+  static size_t used_bytes_slow(Metaspace::MetadataType mdtype);
   static size_t free_in_bytes(Metaspace::MetadataType mdtype);
-  static size_t capacity_in_bytes(Metaspace::MetadataType mdtype);
+  static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype);
+
+  // Iterates over the virtual space list.
   static size_t reserved_in_bytes(Metaspace::MetadataType mdtype);
 
   static size_t free_chunks_total(Metaspace::MetadataType mdtype);
   static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
 
  public:
-  // Total of space allocated to metadata in all Metaspaces
-  static size_t used_in_bytes() {
-    return used_in_bytes(Metaspace::ClassType) +
-           used_in_bytes(Metaspace::NonClassType);
+  // Running sum of space in all Metachunks that has been
+  // allocated to a Metaspace.  This is used instead of
+  // iterating over all the classloaders
+  static size_t _allocated_capacity_words;
+  // Running sum of space in all Metachunks that have
+  // are being used for metadata.
+  static size_t _allocated_used_words;
+
+ public:
+  // Decrement and increment _allocated_capacity_words
+  static void dec_capacity(size_t words);
+  static void inc_capacity(size_t words);
+
+  // Decrement and increment _allocated_used_words
+  static void dec_used(size_t words);
+  static void inc_used(size_t words);
+
+  // Total of space allocated to metadata in all Metaspaces.
+  // This sums the space used in each Metachunk by
+  // iterating over the classloader data graph
+  static size_t used_bytes_slow() {
+    return used_bytes_slow(Metaspace::ClassType) +
+           used_bytes_slow(Metaspace::NonClassType);
   }
 
-  // Total of available space in all Metaspaces
-  // Total of capacity allocated to all Metaspaces.  This includes
-  // space in Metachunks not yet allocated and in the Metachunk
-  // freelist.
-  static size_t capacity_in_bytes() {
-    return capacity_in_bytes(Metaspace::ClassType) +
-           capacity_in_bytes(Metaspace::NonClassType);
+  // Used by MetaspaceCounters
+  static size_t free_chunks_total();
+  static size_t free_chunks_total_in_bytes();
+
+  static size_t allocated_capacity_words() {
+    return _allocated_capacity_words;
+  }
+  static size_t allocated_capacity_bytes() {
+    return _allocated_capacity_words * BytesPerWord;
+  }
+
+  static size_t allocated_used_words() {
+    return _allocated_used_words;
+  }
+  static size_t allocated_used_bytes() {
+    return _allocated_used_words * BytesPerWord;
+  }
+
+  static size_t free_bytes();
+
+  // Total capacity in all Metaspaces
+  static size_t capacity_bytes_slow() {
+#ifdef PRODUCT
+    // Use allocated_capacity_bytes() in PRODUCT instead of this function.
+    guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
+#endif
+    size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
+    size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
+    assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
+           err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
+                   " class_capacity + non_class_capacity " SIZE_FORMAT
+                   " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
+                   allocated_capacity_bytes(), class_capacity + non_class_capacity,
+                   class_capacity, non_class_capacity));
+
+    return class_capacity + non_class_capacity;
   }
 
   // Total space reserved in all Metaspaces
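The new _allocated_capacity_words/_allocated_used_words fields trade the old iterate-every-classloader accounting for O(1) running sums, keeping the *_slow() walkers only to cross-check them (the verify_metrics() added below). A standalone sketch of the pattern (illustrative C++, not HotSpot code):

    #include <cassert>
    #include <cstdio>
    #include <vector>

    static size_t allocated_capacity_words = 0;   // running sum, O(1) to read
    static std::vector<size_t> chunks;            // ground truth, O(n) to sum

    void add_chunk(size_t words)    { chunks.push_back(words); allocated_capacity_words += words; }
    void remove_chunk(size_t index) { allocated_capacity_words -= chunks[index]; chunks.erase(chunks.begin() + index); }

    size_t capacity_words_slow() {                // the slow path kept for verification
      size_t sum = 0;
      for (size_t w : chunks) sum += w;
      return sum;
    }

    void verify_metrics() { assert(allocated_capacity_words == capacity_words_slow()); }

    int main() {
      add_chunk(128); add_chunk(512); remove_chunk(0);
      verify_metrics();
      std::printf("capacity words: %zu\n", allocated_capacity_words);
    }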
@@ -198,6 +261,11 @@ class MetaspaceAux : AllStatic {
   static void print_waste(outputStream* out);
   static void dump(outputStream* out);
   static void verify_free_chunks();
+  // Checks that the values returned by allocated_capacity_bytes() and
+  // capacity_bytes_slow() are the same.
+  static void verify_capacity();
+  static void verify_used();
+  static void verify_metrics();
 };
 
 // Metaspace are deallocated when their class loader are GC'ed.
@@ -232,7 +300,6 @@ class MetaspaceGC : AllStatic {
  public:
 
   static size_t capacity_until_GC() { return _capacity_until_GC; }
-  static size_t capacity_until_GC_in_bytes() { return _capacity_until_GC * BytesPerWord; }
   static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; }
   static void dec_capacity_until_GC(size_t v) {
     _capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0;
@@ -29,6 +29,16 @@
 
 MetaspaceCounters* MetaspaceCounters::_metaspace_counters = NULL;
 
+size_t MetaspaceCounters::calc_total_capacity() {
+  // The total capacity is the sum of
+  //   1) capacity of Metachunks in use by all Metaspaces
+  //   2) unused space at the end of each Metachunk
+  //   3) space in the freelist
+  size_t total_capacity = MetaspaceAux::allocated_capacity_bytes()
+    + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes();
+  return total_capacity;
+}
+
 MetaspaceCounters::MetaspaceCounters() :
     _capacity(NULL),
     _used(NULL),
@@ -36,8 +46,8 @@ MetaspaceCounters::MetaspaceCounters() :
   if (UsePerfData) {
     size_t min_capacity = MetaspaceAux::min_chunk_size();
     size_t max_capacity = MetaspaceAux::reserved_in_bytes();
-    size_t curr_capacity = MetaspaceAux::capacity_in_bytes();
-    size_t used = MetaspaceAux::used_in_bytes();
+    size_t curr_capacity = calc_total_capacity();
+    size_t used = MetaspaceAux::allocated_used_bytes();
 
     initialize(min_capacity, max_capacity, curr_capacity, used);
   }
@@ -82,15 +92,13 @@ void MetaspaceCounters::initialize(size_t min_capacity,
 
 void MetaspaceCounters::update_capacity() {
   assert(UsePerfData, "Should not be called unless being used");
-  assert(_capacity != NULL, "Should be initialized");
-  size_t capacity_in_bytes = MetaspaceAux::capacity_in_bytes();
-  _capacity->set_value(capacity_in_bytes);
+  size_t total_capacity = calc_total_capacity();
+  _capacity->set_value(total_capacity);
 }
 
 void MetaspaceCounters::update_used() {
   assert(UsePerfData, "Should not be called unless being used");
-  assert(_used != NULL, "Should be initialized");
-  size_t used_in_bytes = MetaspaceAux::used_in_bytes();
+  size_t used_in_bytes = MetaspaceAux::allocated_used_bytes();
   _used->set_value(used_in_bytes);
 }
 
@@ -37,6 +37,7 @@ class MetaspaceCounters: public CHeapObj<mtClass> {
                   size_t max_capacity,
                   size_t curr_capacity,
                   size_t used);
+  size_t calc_total_capacity();
  public:
   MetaspaceCounters();
   ~MetaspaceCounters();
@@ -376,18 +376,17 @@ void VM_PopulateDumpSharedSpace::doit() {
   const char* fmt = "%s space: %9d [ %4.1f%% of total] out of %9d bytes [%4.1f%% used] at " PTR_FORMAT;
   Metaspace* ro_space = _loader_data->ro_metaspace();
   Metaspace* rw_space = _loader_data->rw_metaspace();
-  const size_t BPW = BytesPerWord;
 
   // Allocated size of each space (may not be all occupied)
-  const size_t ro_alloced = ro_space->capacity_words(Metaspace::NonClassType) * BPW;
-  const size_t rw_alloced = rw_space->capacity_words(Metaspace::NonClassType) * BPW;
+  const size_t ro_alloced = ro_space->capacity_bytes_slow(Metaspace::NonClassType);
+  const size_t rw_alloced = rw_space->capacity_bytes_slow(Metaspace::NonClassType);
   const size_t md_alloced = md_end-md_low;
   const size_t mc_alloced = mc_end-mc_low;
   const size_t total_alloced = ro_alloced + rw_alloced + md_alloced + mc_alloced;
 
   // Occupied size of each space.
-  const size_t ro_bytes = ro_space->used_words(Metaspace::NonClassType) * BPW;
-  const size_t rw_bytes = rw_space->used_words(Metaspace::NonClassType) * BPW;
+  const size_t ro_bytes = ro_space->used_bytes_slow(Metaspace::NonClassType);
+  const size_t rw_bytes = rw_space->used_bytes_slow(Metaspace::NonClassType);
   const size_t md_bytes = size_t(md_top - md_low);
   const size_t mc_bytes = size_t(mc_top - mc_low);
 
@@ -218,14 +218,13 @@ public:
 static AlwaysTrueClosure always_true;
 
 void SharedHeap::process_weak_roots(OopClosure* root_closure,
-                                    CodeBlobClosure* code_roots,
-                                    OopClosure* non_root_closure) {
+                                    CodeBlobClosure* code_roots) {
   // Global (weak) JNI handles
   JNIHandles::weak_oops_do(&always_true, root_closure);
 
   CodeCache::blobs_do(code_roots);
   StringTable::oops_do(root_closure);
 }
 
 void SharedHeap::set_barrier_set(BarrierSet* bs) {
   _barrier_set = bs;
@@ -249,8 +249,7 @@ public:
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table.
   void process_weak_roots(OopClosure* root_closure,
-                          CodeBlobClosure* code_roots,
-                          OopClosure* non_root_closure);
+                          CodeBlobClosure* code_roots);
 
   // The functions below are helper functions that a subclass of
   // "SharedHeap" can use in the implementation of its virtual
@@ -1270,7 +1270,7 @@ void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
   st->print_cr("}");
 }
 
-void Universe::verify(bool silent, VerifyOption option) {
+void Universe::verify(VerifyOption option, const char* prefix, bool silent) {
   // The use of _verify_in_progress is a temporary work around for
   // 6320749.  Don't bother with a creating a class to set and clear
   // it since it is only used in this method and the control flow is
@@ -1287,11 +1287,12 @@ void Universe::verify(bool silent, VerifyOption option) {
   HandleMark hm;  // Handles created during verification can be zapped
   _verify_count++;
 
+  if (!silent) gclog_or_tty->print(prefix);
   if (!silent) gclog_or_tty->print("[Verifying ");
   if (!silent) gclog_or_tty->print("threads ");
   Threads::verify();
+  if (!silent) gclog_or_tty->print("heap ");
   heap()->verify(silent, option);
-
   if (!silent) gclog_or_tty->print("syms ");
   SymbolTable::verify();
   if (!silent) gclog_or_tty->print("strs ");
@@ -445,12 +445,12 @@ class Universe: AllStatic {
 
   // Debugging
   static bool verify_in_progress() { return _verify_in_progress; }
-  static void verify(bool silent, VerifyOption option);
-  static void verify(bool silent) {
-    verify(silent, VerifyOption_Default /* option */);
+  static void verify(VerifyOption option, const char* prefix, bool silent = VerifySilently);
+  static void verify(const char* prefix, bool silent = VerifySilently) {
+    verify(VerifyOption_Default, prefix, silent);
   }
-  static void verify() {
-    verify(false /* silent */);
+  static void verify(bool silent = VerifySilently) {
+    verify("", silent);
   }
 
   static int verify_count() { return _verify_count; }
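The reworked overloads funnel every call into one worker and push the prefix/silent defaults into the declarations, which is what lets the GC call sites above collapse a log line plus a verify call into one statement. A standalone sketch of the same overload chain (illustrative C++; kSilentDefault stands in for the VerifySilently flag):

    #include <cstdio>

    enum VerifyOption { VerifyOption_Default };
    static const bool kSilentDefault = false;   // stand-in for the VerifySilently flag

    void verify(VerifyOption option, const char* prefix, bool silent = kSilentDefault) {
      if (!silent) std::printf("%s[Verifying ...]\n", prefix);
      // ... actual checks would run here ...
    }
    void verify(const char* prefix, bool silent = kSilentDefault) { verify(VerifyOption_Default, prefix, silent); }
    void verify(bool silent = kSilentDefault)                     { verify("", silent); }

    int main() {
      verify(" VerifyBeforeGC:");   // call sites fold their log label into the call
      verify();
    }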
@@ -519,6 +519,9 @@ bool klassVtable::is_miranda_entry_at(int i) {
 // check if a method is a miranda method, given a class's methods table and it's super
 // the caller must make sure that the method belongs to an interface implemented by the class
 bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods, Klass* super) {
+  if (m->is_static()) {
+    return false;
+  }
   Symbol* name = m->name();
   Symbol* signature = m->signature();
   if (InstanceKlass::find_method(class_methods, name, signature) == NULL) {
@@ -877,7 +877,7 @@ address Method::verified_code_entry() {
   debug_only(No_Safepoint_Verifier nsv;)
   nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
   if (code == NULL && UseCodeCacheFlushing) {
-    nmethod *saved_code = CodeCache::find_and_remove_saved_code(this);
+    nmethod *saved_code = CodeCache::reanimate_saved_code(this);
     if (saved_code != NULL) {
       methodHandle method(this);
       assert( ! saved_code->is_osr_method(), "should not get here for osr" );
@@ -3564,7 +3564,8 @@ void GraphKit::g1_write_barrier_pre(bool do_load,
 
   Node* no_ctrl = NULL;
   Node* no_base = __ top();
   Node* zero  = __ ConI(0);
+  Node* zeroX = __ ConX(0);
 
   float likely  = PROB_LIKELY(0.999);
   float unlikely  = PROB_UNLIKELY(0.999);
@@ -3590,7 +3591,9 @@ void GraphKit::g1_write_barrier_pre(bool do_load,
 
   // if (!marking)
   __ if_then(marking, BoolTest::ne, zero); {
-    Node* index = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
+    BasicType index_bt = TypeX_X->basic_type();
+    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
+    Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
 
     if (do_load) {
       // load original value
@@ -3603,22 +3606,16 @@ void GraphKit::g1_write_barrier_pre(bool do_load,
     Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
 
     // is the queue for this thread full?
-    __ if_then(index, BoolTest::ne, zero, likely); {
+    __ if_then(index, BoolTest::ne, zeroX, likely); {
 
       // decrement the index
-      Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t)));
-      Node* next_indexX = next_index;
-#ifdef _LP64
-      // We could refine the type for what it's worth
-      // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
-      next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
-#endif
+      Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
 
       // Now get the buffer location we will log the previous value into and store it
-      Node *log_addr = __ AddP(no_base, buffer, next_indexX);
+      Node *log_addr = __ AddP(no_base, buffer, next_index);
       __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw);
       // update the index
-      __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
+      __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw);
 
     } __ else_(); {
 
@@ -3645,26 +3642,21 @@ void GraphKit::g1_mark_card(IdealKit& ideal,
                             Node* buffer,
                             const TypeFunc* tf) {
 
   Node* zero = __ ConI(0);
+  Node* zeroX = __ ConX(0);
   Node* no_base = __ top();
   BasicType card_bt = T_BYTE;
   // Smash zero into card. MUST BE ORDERED WRT TO STORE
   __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
 
   //  Now do the queue work
-  __ if_then(index, BoolTest::ne, zero); {
+  __ if_then(index, BoolTest::ne, zeroX); {
 
-    Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t)));
-    Node* next_indexX = next_index;
-#ifdef _LP64
-    // We could refine the type for what it's worth
-    // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
-    next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
-#endif // _LP64
-    Node* log_addr = __ AddP(no_base, buffer, next_indexX);
+    Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
+    Node* log_addr = __ AddP(no_base, buffer, next_index);
 
     __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw);
-    __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
+    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw);
 
   } __ else_(); {
     __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
@@ -3725,7 +3717,7 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,
   // Now some values
   // Use ctrl to avoid hoisting these values past a safepoint, which could
   // potentially reset these fields in the JavaThread.
-  Node* index  = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
+  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
   Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
 
   // Convert the store obj pointer to an int prior to doing math on it
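Common thread of the G1 barrier hunks above: PtrQueue::_index is a size_t, so the generated IR must index the queue with machine-word nodes (TypeX_X/ConX/SubX) rather than 32-bit ones (T_INT/ConI/SubI), which also removes the LP64-only ConvI2L widening. A standalone sketch of the word-sized arithmetic the IR now mirrors (illustrative C++, not C2 IR):

    #include <cstdint>
    #include <cstdio>

    struct PtrQueueModel {
      size_t index;           // the real field is word-sized, counted in bytes
      void*  buffer[1024];
    };

    void enqueue_word_sized(PtrQueueModel& q, void* v) {
      if (q.index != 0) {                          // compare against a word-sized zero
        size_t next = q.index - sizeof(intptr_t);  // SubX, not a 32-bit SubI
        q.buffer[next / sizeof(void*)] = v;
        q.index = next;                            // full-width store, not half the field
      }
    }

    int main() {
      PtrQueueModel q;
      q.index = sizeof(void*) * 4;
      enqueue_word_sized(q, nullptr);
      std::printf("index now %zu\n", q.index);
    }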
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "asm/assembler.inline.hpp"
+#include "code/compiledIC.hpp"
 #include "code/debugInfo.hpp"
 #include "code/debugInfoRec.hpp"
 #include "compiler/compileBroker.hpp"
@@ -41,8 +42,6 @@
 #include "runtime/handles.inline.hpp"
 #include "utilities/xmlstream.hpp"
 
-extern uint size_java_to_interp();
-extern uint reloc_java_to_interp();
 extern uint size_exception_handler();
 extern uint size_deopt_handler();
 
@@ -389,15 +388,15 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
       MachNode *mach = nj->as_Mach();
       blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
       reloc_size += mach->reloc();
-      if( mach->is_MachCall() ) {
+      if (mach->is_MachCall()) {
         MachCallNode *mcall = mach->as_MachCall();
         // This destination address is NOT PC-relative
 
         mcall->method_set((intptr_t)mcall->entry_point());
 
-        if( mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method ) {
-          stub_size  += size_java_to_interp();
-          reloc_size += reloc_java_to_interp();
+        if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
+          stub_size  += CompiledStaticCall::to_interp_stub_size();
+          reloc_size += CompiledStaticCall::reloc_to_interp_stub();
         }
       } else if (mach->is_MachSafePoint()) {
         // If call/safepoint are adjacent, account for possible
@@ -513,6 +513,11 @@ void JvmtiClassFileReconstituter::write_method_info(methodHandle method) {
   AnnotationArray* param_anno = method->parameter_annotations();
   AnnotationArray* default_anno = method->annotation_default();
 
+  // skip generated default interface methods
+  if (method->is_overpass()) {
+    return;
+  }
+
   write_u2(access_flags.get_flags() & JVM_RECOGNIZED_METHOD_MODIFIERS);
   write_u2(const_method->name_index());
   write_u2(const_method->signature_index());
@@ -619,8 +624,19 @@ void JvmtiClassFileReconstituter::write_method_infos() {
   HandleMark hm(thread());
   Array<Method*>* methods = ikh()->methods();
   int num_methods = methods->length();
+  int num_overpass = 0;
 
-  write_u2(num_methods);
+  // count the generated default interface methods
+  // these will not be re-created by write_method_info
+  // and should not be included in the total count
+  for (int index = 0; index < num_methods; index++) {
+    Method* method = methods->at(index);
+    if (method->is_overpass()) {
+      num_overpass++;
+    }
+  }
+
+  write_u2(num_methods - num_overpass);
   if (JvmtiExport::can_maintain_original_method_order()) {
     int index;
     int original_index;
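The counting loop above is the usual two-pass fix for length-prefixed output: entries the writer will skip must be subtracted from the count before it is emitted, or the stream becomes inconsistent. A standalone sketch (hypothetical Method type, not HotSpot code):

    #include <cstdio>
    #include <vector>

    struct Method { bool overpass; int id; };

    void write_methods(const std::vector<Method>& methods) {
      int num_overpass = 0;
      for (const Method& m : methods)           // pass 1: count what will be skipped
        if (m.overpass) num_overpass++;
      std::printf("count=%zu\n", methods.size() - num_overpass);
      for (const Method& m : methods) {         // pass 2: emit, honoring the same filter
        if (m.overpass) continue;
        std::printf("method %d\n", m.id);
      }
    }

    int main() {
      write_methods({{false, 1}, {true, 2}, {false, 3}});
    }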
@@ -142,20 +142,20 @@ PERF_ENTRY(jobject, Perf_CreateLong(JNIEnv *env, jobject perf, jstring name,
   }
 
   switch(variability) {
-  case 1:  /* V_Constant */
+  case PerfData::V_Constant:
     pl = PerfDataManager::create_long_constant(NULL_NS, (char *)name_utf,
                                                (PerfData::Units)units, value,
                                                CHECK_NULL);
     break;
 
-  case 2:  /* V_Variable */
-    pl = PerfDataManager::create_long_variable(NULL_NS, (char *)name_utf,
+  case PerfData::V_Monotonic:
+    pl = PerfDataManager::create_long_counter(NULL_NS, (char *)name_utf,
                                                (PerfData::Units)units, value,
                                                CHECK_NULL);
     break;
 
-  case 3:  /* V_Monotonic Counter */
-    pl = PerfDataManager::create_long_counter(NULL_NS, (char *)name_utf,
+  case PerfData::V_Variable:
+    pl = PerfDataManager::create_long_variable(NULL_NS, (char *)name_utf,
                                                (PerfData::Units)units, value,
                                                CHECK_NULL);
     break;
 
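Besides replacing magic numbers, this hunk fixes the routing: PerfData's Variability enum defines V_Constant = 1, V_Monotonic = 2, V_Variable = 3 (perfData.hpp), so the old "case 2" built a plain variable for a monotonic request and "case 3" a counter for a variable one. A standalone sketch of the corrected dispatch over named constants (illustrative C++):

    #include <cstdio>

    struct PerfData { enum Variability { V_Constant = 1, V_Monotonic = 2, V_Variable = 3 }; };

    const char* create_for(int variability) {
      switch (variability) {
      case PerfData::V_Constant:  return "long_constant";
      case PerfData::V_Monotonic: return "long_counter";   // monotonic => counter
      case PerfData::V_Variable:  return "long_variable";
      default:                    return "error";
      }
    }

    int main() { std::printf("%s\n", create_for(2)); }  // prints long_counter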
@@ -439,9 +439,29 @@ JVM_ENTRY(void, JVM_RegisterWhiteBoxMethods(JNIEnv* env, jclass wbclass))
   instanceKlassHandle ikh = instanceKlassHandle(JNIHandles::resolve(wbclass)->klass());
   Handle loader(ikh->class_loader());
   if (loader.is_null()) {
+    ResourceMark rm;
     ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
-    jint result = env->RegisterNatives(wbclass, methods, sizeof(methods)/sizeof(methods[0]));
-    if (result == 0) {
+    bool result = true;
+    //  one by one registration natives for exception catching
+    jclass exceptionKlass = env->FindClass(vmSymbols::java_lang_NoSuchMethodError()->as_C_string());
+    for (int i = 0, n = sizeof(methods) / sizeof(methods[0]); i < n; ++i) {
+      if (env->RegisterNatives(wbclass, methods + i, 1) != 0) {
+        result = false;
+        if (env->ExceptionCheck() && env->IsInstanceOf(env->ExceptionOccurred(), exceptionKlass)) {
+          // j.l.NoSuchMethodError is thrown when a method can't be found or a method is not native
+          // ignoring the exception
+          tty->print_cr("Warning: 'NoSuchMethodError' on register of sun.hotspot.WhiteBox::%s%s", methods[i].name, methods[i].signature);
+          env->ExceptionClear();
+        } else {
+          // register is failed w/o exception or w/ unexpected exception
+          tty->print_cr("Warning: unexpected error on register of sun.hotspot.WhiteBox::%s%s. All methods will be unregistered", methods[i].name, methods[i].signature);
+          env->UnregisterNatives(wbclass);
+          break;
+        }
+      }
+    }
+
+    if (result) {
       WhiteBox::set_used();
     }
   }
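The rewritten registration loop degrades gracefully: a NoSuchMethodError for one WhiteBox method is logged and skipped so the remaining test API stays usable, while any other failure unregisters everything. A standalone sketch of the same strategy (illustrative C++, not JNI):

    #include <cstdio>

    enum RegResult { REG_OK, REG_MISSING, REG_ERROR };

    // Stand-in for env->RegisterNatives on a single JNINativeMethod entry.
    RegResult register_one(int i) { return i == 1 ? REG_MISSING : REG_OK; }
    void unregister_all() { std::puts("rolled back all registrations"); }

    bool register_methods(int n) {
      bool result = true;
      for (int i = 0; i < n; ++i) {
        switch (register_one(i)) {
        case REG_OK:      break;
        case REG_MISSING: // analogous to catching NoSuchMethodError and clearing it
          std::printf("Warning: skipping missing method %d\n", i);
          result = false;
          break;
        case REG_ERROR:   // unexpected failure: undo everything and stop
          unregister_all();
          return false;
        }
      }
      return result;
    }

    int main() { std::printf("usable: %d\n", register_methods(3)); }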
@@ -2224,6 +2224,55 @@ jint Arguments::parse_vm_init_args(const JavaVMInitArgs* args) {
   return JNI_OK;
 }
 
+// Checks if name in command-line argument -agent{lib,path}:name[=options]
+// represents a valid HPROF of JDWP agent.  is_path==true denotes that we
+// are dealing with -agentpath (case where name is a path), otherwise with
+// -agentlib
+bool valid_hprof_or_jdwp_agent(char *name, bool is_path) {
+  char *_name;
+  const char *_hprof = "hprof", *_jdwp = "jdwp";
+  size_t _len_hprof, _len_jdwp, _len_prefix;
+
+  if (is_path) {
+    if ((_name = strrchr(name, (int) *os::file_separator())) == NULL) {
+      return false;
+    }
+
+    _name++;  // skip past last path separator
+    _len_prefix = strlen(JNI_LIB_PREFIX);
+
+    if (strncmp(_name, JNI_LIB_PREFIX, _len_prefix) != 0) {
+      return false;
+    }
+
+    _name += _len_prefix;
+    _len_hprof = strlen(_hprof);
+    _len_jdwp = strlen(_jdwp);
+
+    if (strncmp(_name, _hprof, _len_hprof) == 0) {
+      _name += _len_hprof;
+    }
+    else if (strncmp(_name, _jdwp, _len_jdwp) == 0) {
+      _name += _len_jdwp;
+    }
+    else {
+      return false;
+    }
+
+    if (strcmp(_name, JNI_LIB_SUFFIX) != 0) {
+      return false;
+    }
+
+    return true;
+  }
+
+  if (strcmp(name, _hprof) == 0 || strcmp(name, _jdwp) == 0) {
+    return true;
+  }
+
+  return false;
+}
+
 jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
                                        SysClassPath* scp_p,
                                        bool* scp_assembly_required_p,
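An illustrative reading of the new check (not part of the change): with is_path == true the name is reduced to its final path component, which must then be exactly JNI_LIB_PREFIX + ("hprof" | "jdwp") + JNI_LIB_SUFFIX. On a platform where the prefix is presumably "lib" and the suffix ".so", -agentpath:/tmp/libhprof.so is accepted, while /tmp/libhprof2.so fails the exact-suffix comparison; with is_path == false only the bare names hprof and jdwp match.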
@@ -2322,7 +2371,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
         options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(pos + 1) + 1, mtInternal), pos + 1);
       }
 #if !INCLUDE_JVMTI
-      if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) {
+      if (valid_hprof_or_jdwp_agent(name, is_absolute_path)) {
         jio_fprintf(defaultStream::error_stream(),
           "Profiling and debugging agents are not supported in this VM\n");
         return JNI_ERR;
@@ -109,6 +109,9 @@ bool CompilationPolicy::must_be_compiled(methodHandle m, int comp_level) {
 
 // Returns true if m is allowed to be compiled
 bool CompilationPolicy::can_be_compiled(methodHandle m, int comp_level) {
+  // allow any levels for WhiteBox
+  assert(WhiteBoxAPI || comp_level == CompLevel_all || is_compile(comp_level), "illegal compilation level");
+
   if (m->is_abstract()) return false;
   if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
 
@@ -122,7 +125,13 @@ bool CompilationPolicy::can_be_compiled(methodHandle m, int comp_level) {
     return false;
   }
   if (comp_level == CompLevel_all) {
-    return !m->is_not_compilable(CompLevel_simple) && !m->is_not_compilable(CompLevel_full_optimization);
+    if (TieredCompilation) {
+      // enough to be compilable at any level for tiered
+      return !m->is_not_compilable(CompLevel_simple) || !m->is_not_compilable(CompLevel_full_optimization);
+    } else {
+      // must be compilable at available level for non-tiered
+      return !m->is_not_compilable(CompLevel_highest_tier);
+    }
   } else if (is_compile(comp_level)) {
     return !m->is_not_compilable(comp_level);
   }
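The tiered branch changes the predicate from "compilable at both levels" to "compilable at at least one level", which is the right requirement when either C1 or C2 may take the method. A standalone sketch of the corrected logic (illustrative C++):

    #include <cstdio>

    bool not_compilable_simple = true;   // say C1 has bailed out on the method
    bool not_compilable_full   = false;  // but C2 still accepts it

    bool can_be_compiled_tiered() {
      // old, too strict: !not_compilable_simple && !not_compilable_full  -> false
      return !not_compilable_simple || !not_compilable_full;              // -> true
    }

    int main() { std::printf("%d\n", can_be_compiled_tiered()); }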
@@ -436,7 +445,7 @@ void SimpleCompPolicy::method_invocation_event(methodHandle m, JavaThread* threa
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
 
-  if (is_compilation_enabled() && can_be_compiled(m)) {
+  if (is_compilation_enabled() && can_be_compiled(m, comp_level)) {
     nmethod* nm = m->code();
     if (nm == NULL ) {
       CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, comment, thread);
@@ -449,7 +458,7 @@ void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, JavaThr
   const int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
-  if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m)) {
+  if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m, comp_level)) {
     CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread);
     NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
   }
@@ -467,7 +476,7 @@ void StackWalkCompPolicy::method_invocation_event(methodHandle m, JavaThread* th
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
 
-  if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m)) {
+  if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m, comp_level)) {
     ResourceMark rm(thread);
     frame fr = thread->last_frame();
     assert(fr.is_interpreted_frame(), "must be interpreted");
@@ -505,7 +514,7 @@ void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, Java
   const int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
-  if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m)) {
+  if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m, comp_level)) {
    CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread);
    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
   }
@@ -600,7 +609,7 @@ RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack
 
       // If the caller method is too big or something then we do not want to
       // compile it just to inline a method
-      if (!can_be_compiled(next_m)) {
+      if (!can_be_compiled(next_m, CompLevel_any)) {
        msg = "caller cannot be compiled";
        break;
       }
@ -2123,6 +2123,9 @@ class CommandLineFlags {
 product(intx, PrefetchFieldsAhead, -1, \
 "How many fields ahead to prefetch in oop scan (<= 0 means off)") \
 \
+diagnostic(bool, VerifySilently, false, \
+"Don't print the verification progress") \
+\
 diagnostic(bool, VerifyDuringStartup, false, \
 "Verify memory system before executing any Java code " \
 "during VM initialization") \
@ -3179,6 +3182,9 @@ class CommandLineFlags {
 product(uintx, CodeCacheFlushingMinimumFreeSpace, 1500*K, \
 "When less than X space left, start code cache cleaning") \
 \
+product(uintx, CodeCacheFlushingFraction, 2, \
+"Fraction of the code cache that is flushed when full") \
+\
 /* interpreter debugging */ \
 develop(intx, BinarySwitchThreshold, 5, \
 "Minimal number of lookupswitch entries for rewriting to binary " \
@ -3223,8 +3229,9 @@ class CommandLineFlags {
 develop(bool, ReplayCompiles, false, \
 "Enable replay of compilations from ReplayDataFile") \
 \
-develop(ccstr, ReplayDataFile, "replay.txt", \
-"file containing compilation replay information") \
+product(ccstr, ReplayDataFile, NULL, \
+"File containing compilation replay information" \
+"[default: ./replay_pid%p.log] (%p replaced with pid)") \
 \
 develop(intx, ReplaySuppressInitializers, 2, \
 "Controls handling of class initialization during replay" \
@ -3237,8 +3244,8 @@ class CommandLineFlags {
 develop(bool, ReplayIgnoreInitErrors, false, \
 "Ignore exceptions thrown during initialization for replay") \
 \
-develop(bool, DumpReplayDataOnError, true, \
-"record replay data for crashing compiler threads") \
+product(bool, DumpReplayDataOnError, true, \
+"Record replay data for crashing compiler threads") \
 \
 product(bool, CICompilerCountPerCPU, false, \
 "1 compiler thread for log(N CPUs)") \
@ -3247,7 +3254,9 @@ class CommandLineFlags {
 "Fire OutOfMemoryErrors throughout CI for testing the compiler " \
 "(non-negative value throws OOM after this many CI accesses " \
 "in each compile)") \
-\
+notproduct(intx, CICrashAt, -1, \
+"id of compilation to trigger assert in compiler thread for " \
+"the purpose of testing, e.g. generation of replay data") \
 notproduct(bool, CIObjectFactoryVerify, false, \
 "enable potentially expensive verification in ciObjectFactory") \
 \
@ -454,6 +454,7 @@ class os: AllStatic {
 // File i/o operations
 static const int default_file_open_flags();
 static int open(const char *path, int oflag, int mode);
+static FILE* open(int fd, const char* mode);
 static int close(int fd);
 static jlong lseek(int fd, jlong offset, int whence);
 static char* native_path(char *path);
@ -477,7 +478,7 @@ class os: AllStatic {
 static const char* dll_file_extension();

 static const char* get_temp_directory();
-static const char* get_current_directory(char *buf, int buflen);
+static const char* get_current_directory(char *buf, size_t buflen);

 // Builds a platform-specific full library path given a ld path and lib name
 // Returns true if buffer contains full path to existing file, false otherwise
@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,8 @@
 #include "runtime/mutexLocker.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "services/gcNotifier.hpp"
+#include "services/diagnosticArgument.hpp"
+#include "services/diagnosticFramework.hpp"

 ServiceThread* ServiceThread::_instance = NULL;

@ -83,6 +85,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
 bool sensors_changed = false;
 bool has_jvmti_events = false;
 bool has_gc_notification_event = false;
+bool has_dcmd_notification_event = false;
 JvmtiDeferredEvent jvmti_event;
 {
 // Need state transition ThreadBlockInVM so that this thread
@ -98,7 +101,8 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
 MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
 while (!(sensors_changed = LowMemoryDetector::has_pending_requests()) &&
 !(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) &&
-!(has_gc_notification_event = GCNotifier::has_event())) {
+!(has_gc_notification_event = GCNotifier::has_event()) &&
+!(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification())) {
 // wait until one of the sensors has pending requests, or there is a
 // pending JVMTI event or JMX GC notification to post
 Service_lock->wait(Mutex::_no_safepoint_check_flag);
@ -120,6 +124,10 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
 if(has_gc_notification_event) {
 GCNotifier::sendNotification(CHECK);
 }

+if(has_dcmd_notification_event) {
+DCmdFactory::send_notification(CHECK);
+}
 }
 }

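The wait loop above parks the service thread until any one of several producers (low-memory sensors, JVMTI, GC notifications, and now diagnostic-command notifications) has work. A minimal standalone sketch of the same multi-source wait pattern, using standard C++ primitives rather than HotSpot's Service_lock, with hypothetical stand-in flags:

#include <condition_variable>
#include <mutex>

// Hypothetical stand-ins for LowMemoryDetector, JvmtiDeferredEventQueue,
// GCNotifier and DCmdFactory: each producer sets its flag under the lock
// and calls service_cv.notify_all().
static bool sensors_changed_flag = false;
static bool dcmd_notification_flag = false;
static std::mutex service_lock;
static std::condition_variable service_cv;

void service_loop_iteration() {
  bool sensors_changed = false;
  bool has_dcmd_notification = false;
  {
    std::unique_lock<std::mutex> ml(service_lock);
    // Re-evaluate every source after each wakeup and keep waiting while
    // none has work -- the same shape as the chained
    // !(x = has_x()) && !(y = has_y()) condition in the patch.
    service_cv.wait(ml, [&] {
      sensors_changed = sensors_changed_flag;
      has_dcmd_notification = dcmd_notification_flag;
      return sensors_changed || has_dcmd_notification;
    });
    sensors_changed_flag = dcmd_notification_flag = false;  // consume under the lock
  }
  // Dispatch outside the lock, as service_thread_entry() does.
}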
@ -1316,12 +1316,6 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
 assert(stub_frame.is_runtime_frame(), "sanity check");
 frame caller_frame = stub_frame.sender(&reg_map);

-// MethodHandle invokes don't have a CompiledIC and should always
-// simply redispatch to the callee_target.
-address sender_pc = caller_frame.pc();
-CodeBlob* sender_cb = caller_frame.cb();
-nmethod* sender_nm = sender_cb->as_nmethod_or_null();
-
 if (caller_frame.is_interpreted_frame() ||
 caller_frame.is_entry_frame()) {
 Method* callee = thread->callee_target();
@ -154,9 +154,10 @@ void SimpleThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) {
 // Set carry flags on the counters if necessary
 void SimpleThresholdPolicy::handle_counter_overflow(Method* method) {
 MethodCounters *mcs = method->method_counters();
-assert(mcs != NULL, "");
+if (mcs != NULL) {
 set_carry_if_necessary(mcs->invocation_counter());
 set_carry_if_necessary(mcs->backedge_counter());
+}
 MethodData* mdo = method->method_data();
 if (mdo != NULL) {
 set_carry_if_necessary(mdo->invocation_counter());
@ -136,13 +136,12 @@ volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progre

 jint NMethodSweeper::_locked_seen = 0;
 jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
-bool NMethodSweeper::_rescan = false;
-bool NMethodSweeper::_do_sweep = false;
-bool NMethodSweeper::_was_full = false;
-jint NMethodSweeper::_advise_to_sweep = 0;
-jlong NMethodSweeper::_last_was_full = 0;
-uint NMethodSweeper::_highest_marked = 0;
-long NMethodSweeper::_was_full_traversal = 0;
+bool NMethodSweeper::_resweep = false;
+jint NMethodSweeper::_flush_token = 0;
+jlong NMethodSweeper::_last_full_flush_time = 0;
+int NMethodSweeper::_highest_marked = 0;
+int NMethodSweeper::_dead_compile_ids = 0;
+long NMethodSweeper::_last_flush_traversal_id = 0;

 class MarkActivationClosure: public CodeBlobClosure {
 public:
@ -155,20 +154,16 @@ public:
 };
 static MarkActivationClosure mark_activation_closure;

+bool NMethodSweeper::sweep_in_progress() {
+return (_current != NULL);
+}
+
 void NMethodSweeper::scan_stacks() {
 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
 if (!MethodFlushing) return;
-_do_sweep = true;

 // No need to synchronize access, since this is always executed at a
-// safepoint. If we aren't in the middle of scan and a rescan
-// hasn't been requested then just return. If UseCodeCacheFlushing is on and
-// code cache flushing is in progress, don't skip sweeping to help make progress
-// clearing space in the code cache.
-if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
-_do_sweep = false;
-return;
-}
+// safepoint.

 // Make sure CompiledIC_lock is unlocked, since we might update some
 // inline caches. If it is, we just bail-out and try later.
@ -176,7 +171,7 @@ void NMethodSweeper::scan_stacks() {

 // Check for restart
 assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
-if (_current == NULL) {
+if (!sweep_in_progress() && _resweep) {
 _seen = 0;
 _invocations = NmethodSweepFraction;
 _current = CodeCache::first_nmethod();
@ -187,39 +182,30 @@ void NMethodSweeper::scan_stacks() {
 Threads::nmethods_do(&mark_activation_closure);

 // reset the flags since we started a scan from the beginning.
-_rescan = false;
+_resweep = false;
 _locked_seen = 0;
 _not_entrant_seen_on_stack = 0;
 }

 if (UseCodeCacheFlushing) {
-if (!CodeCache::needs_flushing()) {
-// scan_stacks() runs during a safepoint, no race with setters
-_advise_to_sweep = 0;
+// only allow new flushes after the interval is complete.
+jlong now = os::javaTimeMillis();
+jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
+jlong curr_interval = now - _last_full_flush_time;
+if (curr_interval > max_interval) {
+_flush_token = 0;
 }

-if (was_full()) {
-// There was some progress so attempt to restart the compiler
-jlong now = os::javaTimeMillis();
-jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
-jlong curr_interval = now - _last_was_full;
-if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
-CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
-set_was_full(false);
-
-// Update the _last_was_full time so we can tell how fast the
-// code cache is filling up
-_last_was_full = os::javaTimeMillis();
-
-log_sweep("restart_compiler");
-}
+if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
+CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+log_sweep("restart_compiler");
 }
 }
 }

 void NMethodSweeper::possibly_sweep() {
 assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
-if ((!MethodFlushing) || (!_do_sweep)) return;
+if (!MethodFlushing || !sweep_in_progress()) return;

 if (_invocations > 0) {
 // Only one thread at a time will sweep
@ -253,6 +239,14 @@ void NMethodSweeper::sweep_code_cache() {
 tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
 }

+if (!CompileBroker::should_compile_new_jobs()) {
+// If we have turned off compilations we might as well do full sweeps
+// in order to reach the clean state faster. Otherwise the sleeping compiler
+// threads will slow down sweeping. After a few iterations the cache
+// will be clean and sweeping stops (_resweep will not be set)
+_invocations = 1;
+}
+
 // We want to visit all nmethods after NmethodSweepFraction
 // invocations so divide the remaining number of nmethods by the
 // remaining number of invocations. This is only an estimate since
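The comment above describes how the sweeper spreads one full code-cache pass over NmethodSweepFraction calls; forcing _invocations to 1, as the added block does, makes the next call sweep everything that is left. A toy illustration of that chunk-size estimate, with invented numbers:

#include <cstdio>

int main() {
  int nof_nmethods = 4000;  // nmethods still to visit (invented value)
  int invocations = 4;      // sweep calls left out of NmethodSweepFraction

  // Visit remaining-nmethods / remaining-invocations per call, so the
  // pass finishes exactly when the invocation budget runs out.
  std::printf("sweep %d nmethods this call\n", nof_nmethods / invocations);

  // With compilation stopped, the patch sets _invocations = 1, which
  // turns the next call into a full sweep of the remainder.
  invocations = 1;
  std::printf("full sweep: %d nmethods\n", nof_nmethods / invocations);
  return 0;
}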
@ -296,7 +290,7 @@ void NMethodSweeper::sweep_code_cache() {

 assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");

-if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
+if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
 // we've completed a scan without making progress but there were
 // nmethods we were unable to process either because they were
 // locked or were still on stack. We don't have to aggressively
@ -318,6 +312,13 @@ void NMethodSweeper::sweep_code_cache() {
 if (_invocations == 1) {
 log_sweep("finished");
 }

+// Sweeper is the only case where memory is released,
+// check here if it is time to restart the compiler.
+if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) {
+CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+log_sweep("restart_compiler");
+}
 }

 class NMethodMarker: public StackObj {
@ -392,7 +393,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
 }
 nm->mark_for_reclamation();
-_rescan = true;
+_resweep = true;
 SWEEP(nm);
 }
 } else if (nm->is_not_entrant()) {
@ -403,7 +404,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
 }
 nm->make_zombie();
-_rescan = true;
+_resweep = true;
 SWEEP(nm);
 } else {
 // Still alive, clean up its inline caches
@ -425,16 +426,15 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
 release_nmethod(nm);
 } else {
 nm->make_zombie();
-_rescan = true;
+_resweep = true;
 SWEEP(nm);
 }
 } else {
 assert(nm->is_alive(), "should be alive");

 if (UseCodeCacheFlushing) {
-if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
-(_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
-CodeCache::needs_flushing()) {
+if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
+(_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
 // This method has not been called since the forced cleanup happened
 nm->make_not_entrant();
 }
@ -457,41 +457,27 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
 // _code field is restored and the Method*/nmethod
 // go back to their normal state.
 void NMethodSweeper::handle_full_code_cache(bool is_full) {
-// Only the first one to notice can advise us to start early cleaning
-if (!is_full){
-jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
-if (old != 0) {
-return;
-}
-}

 if (is_full) {
 // Since code cache is full, immediately stop new compiles
-bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
-if (!did_set) {
-// only the first to notice can start the cleaning,
-// others will go back and block
-return;
+if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
+log_sweep("disable_compiler");
 }
-set_was_full(true);
+}

-// If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
-jlong now = os::javaTimeMillis();
-jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
-jlong curr_interval = now - _last_was_full;
-if (curr_interval < max_interval) {
-_rescan = true;
-log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
-curr_interval/1000);
-return;
-}
+// Make sure only one thread can flush
+// The token is reset after CodeCacheMinimumFlushInterval in scan stacks,
+// no need to check the timeout here.
+jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
+if (old != 0) {
+return;
 }

 VM_HandleFullCodeCache op(is_full);
 VMThread::execute(&op);

-// rescan again as soon as possible
-_rescan = true;
+// resweep again as soon as possible
+_resweep = true;
 }

 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
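The Atomic::cmpxchg(1, &_flush_token, 0) above acts as a one-shot admission token: exactly one thread per flushing interval wins the exchange and starts the flush, while everyone else returns at once; scan_stacks() puts the token back to 0 once the flushing interval has elapsed. A rough equivalent with std::atomic, as an illustration only (this is not HotSpot's own Atomic API):

#include <atomic>

static std::atomic<int> flush_token{0};

// Returns true for exactly one caller per cycle. Later callers see the
// token already taken and back off until reset_flush_token() runs (in
// the real code: at a safepoint, after the flushing interval is over).
bool try_begin_flush() {
  int expected = 0;
  return flush_token.compare_exchange_strong(expected, 1);
}

void reset_flush_token() {
  flush_token.store(0);
}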
@ -500,62 +486,64 @@ void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {

 debug_only(jlong start = os::javaTimeMillis();)

-if ((!was_full()) && (is_full)) {
-if (!CodeCache::needs_flushing()) {
-log_sweep("restart_compiler");
-CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
-return;
-}
-}

 // Traverse the code cache trying to dump the oldest nmethods
-uint curr_max_comp_id = CompileBroker::get_compilation_id();
-uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
+int curr_max_comp_id = CompileBroker::get_compilation_id();
+int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;

 log_sweep("start_cleaning");

 nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
 jint disconnected = 0;
 jint made_not_entrant = 0;
+jint nmethod_count = 0;

 while ((nm != NULL)){
-uint curr_comp_id = nm->compile_id();
+int curr_comp_id = nm->compile_id();

 // OSR methods cannot be flushed like this. Also, don't flush native methods
 // since they are part of the JDK in most cases
-if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
-(!nm->is_native_method()) && ((curr_comp_id < flush_target))) {
+if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {

-if ((nm->method()->code() == nm)) {
-// This method has not been previously considered for
-// unloading or it was restored already
-CodeCache::speculatively_disconnect(nm);
-disconnected++;
-} else if (nm->is_speculatively_disconnected()) {
-// This method was previously considered for preemptive unloading and was not called since then
-CompilationPolicy::policy()->delay_compilation(nm->method());
-nm->make_not_entrant();
-made_not_entrant++;
-}
+// only count methods that can be speculatively disconnected
+nmethod_count++;

-if (curr_comp_id > _highest_marked) {
-_highest_marked = curr_comp_id;
+if (nm->is_in_use() && (curr_comp_id < flush_target)) {
+if ((nm->method()->code() == nm)) {
+// This method has not been previously considered for
+// unloading or it was restored already
+CodeCache::speculatively_disconnect(nm);
+disconnected++;
+} else if (nm->is_speculatively_disconnected()) {
+// This method was previously considered for preemptive unloading and was not called since then
+CompilationPolicy::policy()->delay_compilation(nm->method());
+nm->make_not_entrant();
+made_not_entrant++;
+}
+
+if (curr_comp_id > _highest_marked) {
+_highest_marked = curr_comp_id;
+}
 }
 }
 nm = CodeCache::alive_nmethod(CodeCache::next(nm));
 }

+// remember how many compile_ids weren't seen last flush.
+_dead_compile_ids = curr_max_comp_id - nmethod_count;
+
 log_sweep("stop_cleaning",
 "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
 disconnected, made_not_entrant);

 // Shut off compiler. Sweeper will start over with a new stack scan and
 // traversal cycle and turn it back on if it clears enough space.
-if (was_full()) {
-_last_was_full = os::javaTimeMillis();
-CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
+if (is_full) {
+_last_full_flush_time = os::javaTimeMillis();
 }

 // After two more traversals the sweeper will get rid of unrestored nmethods
-_was_full_traversal = _traversals;
+_last_flush_traversal_id = _traversals;
+_resweep = true;
 #ifdef ASSERT
 jlong end = os::javaTimeMillis();
 if(PrintMethodFlushing && Verbose) {
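The reworked flush_target arithmetic selects the oldest 1/CodeCacheFlushingFraction slice of the live compile-id range, rather than the old hard-coded halving against _highest_marked. A worked example with made-up numbers (not taken from a real VM run; the default of 2 comes from the CodeCacheFlushingFraction flag added earlier in this commit):

#include <cstdio>

int main() {
  // Illustrative inputs only.
  int curr_max_comp_id = 1000;  // newest compile id handed out so far
  int dead_compile_ids = 200;   // ids not seen in the cache at last flush
  int flushing_fraction = 2;    // default value of CodeCacheFlushingFraction

  // Same formula as the patch: the oldest (range / fraction) ids among
  // still-live nmethods fall below the target and become flush candidates.
  int flush_target = ((curr_max_comp_id - dead_compile_ids) / flushing_fraction)
                   + dead_compile_ids;

  std::printf("flush_target = %d\n", flush_target);  // prints 600
  return 0;
}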
|
@ -35,26 +35,29 @@ class NMethodSweeper : public AllStatic {
|
|||||||
static nmethod* _current; // Current nmethod
|
static nmethod* _current; // Current nmethod
|
||||||
static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache
|
static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache
|
||||||
|
|
||||||
static volatile int _invocations; // No. of invocations left until we are completed with this pass
|
static volatile int _invocations; // No. of invocations left until we are completed with this pass
|
||||||
static volatile int _sweep_started; // Flag to control conc sweeper
|
static volatile int _sweep_started; // Flag to control conc sweeper
|
||||||
|
|
||||||
static bool _rescan; // Indicates that we should do a full rescan of the
|
//The following are reset in scan_stacks and synchronized by the safepoint
|
||||||
// of the code cache looking for work to do.
|
static bool _resweep; // Indicates that a change has happend and we want another sweep,
|
||||||
static bool _do_sweep; // Flag to skip the conc sweep if no stack scan happened
|
// always checked and reset at a safepoint so memory will be in sync.
|
||||||
static int _locked_seen; // Number of locked nmethods encountered during the scan
|
static int _locked_seen; // Number of locked nmethods encountered during the scan
|
||||||
static int _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack
|
static int _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack
|
||||||
|
static jint _flush_token; // token that guards method flushing, making sure it is executed only once.
|
||||||
|
|
||||||
static bool _was_full; // remember if we did emergency unloading
|
// These are set during a flush, a VM-operation
|
||||||
static jint _advise_to_sweep; // flag to indicate code cache getting full
|
static long _last_flush_traversal_id; // trav number at last flush unloading
|
||||||
static jlong _last_was_full; // timestamp of last emergency unloading
|
static jlong _last_full_flush_time; // timestamp of last emergency unloading
|
||||||
static uint _highest_marked; // highest compile id dumped at last emergency unloading
|
|
||||||
static long _was_full_traversal; // trav number at last emergency unloading
|
// These are synchronized by the _sweep_started token
|
||||||
|
static int _highest_marked; // highest compile id dumped at last emergency unloading
|
||||||
|
static int _dead_compile_ids; // number of compile ids that where not in the cache last flush
|
||||||
|
|
||||||
static void process_nmethod(nmethod *nm);
|
static void process_nmethod(nmethod *nm);
|
||||||
|
|
||||||
static void release_nmethod(nmethod* nm);
|
static void release_nmethod(nmethod* nm);
|
||||||
|
|
||||||
static void log_sweep(const char* msg, const char* format = NULL, ...);
|
static void log_sweep(const char* msg, const char* format = NULL, ...);
|
||||||
|
static bool sweep_in_progress();
|
||||||
|
|
||||||
public:
|
public:
|
||||||
static long traversal_count() { return _traversals; }
|
static long traversal_count() { return _traversals; }
|
||||||
@ -71,17 +74,14 @@ class NMethodSweeper : public AllStatic {
 static void possibly_sweep(); // Compiler threads call this to sweep

 static void notify(nmethod* nm) {
-// Perform a full scan of the code cache from the beginning. No
+// Request a new sweep of the code cache from the beginning. No
 // need to synchronize the setting of this flag since it only
 // changes to false at safepoint so we can never overwrite it with false.
-_rescan = true;
+_resweep = true;
 }

 static void handle_full_code_cache(bool is_full); // Called by compilers who fail to allocate
 static void speculative_disconnect_nmethods(bool was_full); // Called by vm op to deal with alloc failure

-static void set_was_full(bool state) { _was_full = state; }
-static bool was_full() { return _was_full; }
 };

 #endif // SHARE_VM_RUNTIME_SWEEPER_HPP
@ -3447,7 +3447,8 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {

 assert (Universe::is_fully_initialized(), "not initialized");
 if (VerifyDuringStartup) {
-VM_Verify verify_op(false /* silent */); // make sure we're starting with a clean slate
+// Make sure we're starting with a clean slate.
+VM_Verify verify_op;
 VMThread::execute(&verify_op);
 }

@ -60,72 +60,6 @@ ReservedSpace::ReservedSpace(size_t size, size_t alignment,
 initialize(size, alignment, large, NULL, 0, executable);
 }

-char *
-ReservedSpace::align_reserved_region(char* addr, const size_t len,
-const size_t prefix_size,
-const size_t prefix_align,
-const size_t suffix_size,
-const size_t suffix_align)
-{
-assert(addr != NULL, "sanity");
-const size_t required_size = prefix_size + suffix_size;
-assert(len >= required_size, "len too small");
-
-const size_t s = size_t(addr);
-const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
-const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
-
-if (len < beg_delta + required_size) {
-return NULL; // Cannot do proper alignment.
-}
-const size_t end_delta = len - (beg_delta + required_size);
-
-if (beg_delta != 0) {
-os::release_memory(addr, beg_delta);
-}
-
-if (end_delta != 0) {
-char* release_addr = (char*) (s + beg_delta + required_size);
-os::release_memory(release_addr, end_delta);
-}
-
-return (char*) (s + beg_delta);
-}
-
-char* ReservedSpace::reserve_and_align(const size_t reserve_size,
-const size_t prefix_size,
-const size_t prefix_align,
-const size_t suffix_size,
-const size_t suffix_align)
-{
-assert(reserve_size > prefix_size + suffix_size, "should not be here");
-
-char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
-if (raw_addr == NULL) return NULL;
-
-char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
-prefix_align, suffix_size,
-suffix_align);
-if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
-fatal("os::release_memory failed");
-}
-
-#ifdef ASSERT
-if (result != NULL) {
-const size_t raw = size_t(raw_addr);
-const size_t res = size_t(result);
-assert(res >= raw, "alignment decreased start addr");
-assert(res + prefix_size + suffix_size <= raw + reserve_size,
-"alignment increased end addr");
-assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
-assert(((res + prefix_size) & (suffix_align - 1)) == 0,
-"bad alignment of suffix");
-}
-#endif
-
-return result;
-}
-
 // Helper method.
 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
 const size_t size, bool special)
@ -155,92 +89,6 @@ static bool failed_to_reserve_as_requested(char* base, char* requested_address,
 return true;
 }

-ReservedSpace::ReservedSpace(const size_t suffix_size,
-const size_t suffix_align,
-char* requested_address,
-const size_t noaccess_prefix)
-{
-assert(suffix_size != 0, "sanity");
-assert(suffix_align != 0, "sanity");
-assert((suffix_size & (suffix_align - 1)) == 0,
-"suffix_size not divisible by suffix_align");
-
-// Assert that if noaccess_prefix is used, it is the same as prefix_align.
-// Add in noaccess_prefix to prefix
-const size_t adjusted_prefix_size = noaccess_prefix;
-const size_t size = adjusted_prefix_size + suffix_size;
-
-// On systems where the entire region has to be reserved and committed up
-// front, the compound alignment normally done by this method is unnecessary.
-const bool try_reserve_special = UseLargePages &&
-suffix_align == os::large_page_size();
-if (!os::can_commit_large_page_memory() && try_reserve_special) {
-initialize(size, suffix_align, true, requested_address, noaccess_prefix,
-false);
-return;
-}
-
-_base = NULL;
-_size = 0;
-_alignment = 0;
-_special = false;
-_noaccess_prefix = 0;
-_executable = false;
-
-// Optimistically try to reserve the exact size needed.
-char* addr;
-if (requested_address != 0) {
-requested_address -= noaccess_prefix; // adjust address
-assert(requested_address != NULL, "huge noaccess prefix?");
-addr = os::attempt_reserve_memory_at(size, requested_address);
-if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
-// OS ignored requested address. Try different address.
-addr = NULL;
-}
-} else {
-addr = os::reserve_memory(size, NULL, suffix_align);
-}
-if (addr == NULL) return;
-
-// Check whether the result has the needed alignment
-const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
-if (ofs != 0) {
-// Wrong alignment. Release, allocate more space and do manual alignment.
-//
-// On most operating systems, another allocation with a somewhat larger size
-// will return an address "close to" that of the previous allocation. The
-// result is often the same address (if the kernel hands out virtual
-// addresses from low to high), or an address that is offset by the increase
-// in size. Exploit that to minimize the amount of extra space requested.
-if (!os::release_memory(addr, size)) {
-fatal("os::release_memory failed");
-}
-
-const size_t extra = MAX2(ofs, suffix_align - ofs);
-addr = reserve_and_align(size + extra, adjusted_prefix_size, suffix_align,
-suffix_size, suffix_align);
-if (addr == NULL) {
-// Try an even larger region. If this fails, address space is exhausted.
-addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
-suffix_align, suffix_size, suffix_align);
-}
-
-if (requested_address != 0 &&
-failed_to_reserve_as_requested(addr, requested_address, size, false)) {
-// As a result of the alignment constraints, the allocated addr differs
-// from the requested address. Return back to the caller who can
-// take remedial action (like try again without a requested address).
-assert(_base == NULL, "should be");
-return;
-}
-}
-
-_base = addr;
-_size = size;
-_alignment = suffix_align;
-_noaccess_prefix = noaccess_prefix;
-}
-
 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 char* requested_address,
 const size_t noaccess_prefix,
@ -476,20 +324,6 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
 protect_noaccess_prefix(size);
 }

-ReservedHeapSpace::ReservedHeapSpace(const size_t heap_space_size,
-const size_t alignment,
-char* requested_address) :
-ReservedSpace(heap_space_size, alignment,
-requested_address,
-(UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
-Universe::narrow_oop_use_implicit_null_checks()) ?
-lcm(os::vm_page_size(), alignment) : 0) {
-if (base() > 0) {
-MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
-}
-protect_noaccess_prefix(heap_space_size);
-}
-
 // Reserve space for code segment. Same as Java heap only we mark this as
 // executable.
 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
@ -47,28 +47,6 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC {
 const size_t noaccess_prefix,
 bool executable);

-// Release parts of an already-reserved memory region [addr, addr + len) to
-// get a new region that has "compound alignment." Return the start of the
-// resulting region, or NULL on failure.
-//
-// The region is logically divided into a prefix and a suffix. The prefix
-// starts at the result address, which is aligned to prefix_align. The suffix
-// starts at result address + prefix_size, which is aligned to suffix_align.
-// The total size of the result region is size prefix_size + suffix_size.
-char* align_reserved_region(char* addr, const size_t len,
-const size_t prefix_size,
-const size_t prefix_align,
-const size_t suffix_size,
-const size_t suffix_align);
-
-// Reserve memory, call align_reserved_region() to align it and return the
-// result.
-char* reserve_and_align(const size_t reserve_size,
-const size_t prefix_size,
-const size_t prefix_align,
-const size_t suffix_size,
-const size_t suffix_align);
-
 protected:
 // Create protection page at the beginning of the space.
 void protect_noaccess_prefix(const size_t size);
@ -79,9 +57,6 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC {
 ReservedSpace(size_t size, size_t alignment, bool large,
 char* requested_address = NULL,
 const size_t noaccess_prefix = 0);
-ReservedSpace(const size_t suffix_size, const size_t suffix_align,
-char* requested_address,
-const size_t noaccess_prefix = 0);
 ReservedSpace(size_t size, size_t alignment, bool large, bool executable);

 // Accessors
@ -128,8 +103,6 @@ public:
 // Constructor
 ReservedHeapSpace(size_t size, size_t forced_base_alignment,
 bool large, char* requested_address);
-ReservedHeapSpace(const size_t prefix_size, const size_t prefix_align,
-char* requested_address);
 };

 // Class encapsulating behavior specific memory space for Code
@ -828,6 +828,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
 nonstatic_field(nmethod, _lock_count, jint) \
 nonstatic_field(nmethod, _stack_traversal_mark, long) \
 nonstatic_field(nmethod, _compile_id, int) \
+nonstatic_field(nmethod, _comp_level, int) \
 nonstatic_field(nmethod, _exception_cache, ExceptionCache*) \
 nonstatic_field(nmethod, _marked_for_deoptimization, bool) \
 \