Yuri Nesterenko 2009-09-21 01:26:57 -07:00
commit 979ba581fe
744 changed files with 86328 additions and 13316 deletions
View File

@@ -44,3 +44,5 @@ d22867c5f1b295a0a2b3b4bc8999a2676f6e20c3 jdk7-b64
 eb24af1404aec8aa140c4cd4d13d2839b150dd41 jdk7-b67
 bca2225b66d78c4bf4d9801f54cac7715a598650 jdk7-b68
 1b662b1ed14eb4ae31d5138a36c433b13d941dc5 jdk7-b69
+207f694795c448c17753eff1a2f50363106960c2 jdk7-b70
+c5d39b6be65cba0effb5f466ea48fe43764d0e0c jdk7-b71
View File

@@ -44,3 +44,5 @@ e01380cd1de4ce048b87d059d238e5ab5e341947 jdk7-b65
 c4523c6f82048f420bf0d57c4cd47976753b7d2c jdk7-b67
 e1b972ff53cd58f825791f8ed9b2deffd16e768c jdk7-b68
 82e6c820c51ac27882b77755d42efefdbf1dcda0 jdk7-b69
+175cb3fe615998d1004c6d3fd96e6d2e86b6772d jdk7-b70
+4c36e9853dda27bdac5ef4839a610509fbe31d34 jdk7-b71
View File

@@ -68,7 +68,6 @@
     </li>
     <li><a href="#zip">Zip and Unzip</a> </li>
     <li><a href="#freetype">FreeType2 Fonts</a> </li>
-    <li><a href="#jibx">JIBX Libraries</a> </li>
     <li>Linux and Solaris:
     <ul>
         <li><a href="#cups">CUPS Include files</a> </li>
@@ -596,11 +595,6 @@
     Install or upgrade the <a href="#freetype">FreeType development
     package</a>.
     </li>
-    <li>
-        Install the
-        <a href="#jibx">JIBX Libraries</a>, set
-        <tt><a href="#ALT_JIBX_LIBS_PATH">ALT_JIBX_LIBS_PATH</a></tt>.
-    </li>
     <li>
         Install
         <a href="#ant">Ant</a>,
@@ -669,11 +663,6 @@
     <li>
         Install the <a href="#xrender">XRender Include files</a>.
     </li>
-    <li>
-        Install the
-        <a href="#jibx">JIBX Libraries</a>, set
-        <tt><a href="#ALT_JIBX_LIBS_PATH">ALT_JIBX_LIBS_PATH</a></tt>.
-    </li>
     <li>
         Install
         <a href="#ant">Ant</a>,
@@ -769,11 +758,6 @@
     Install
     <a href="#dxsdk">Microsoft DirectX SDK</a>.
     </li>
-    <li>
-        Install the
-        <a href="#jibx">JIBX Libraries</a>, set
-        <tt><a href="#ALT_JIBX_LIBS_PATH">ALT_JIBX_LIBS_PATH</a></tt>.
-    </li>
     <li>
         Install
         <a href="#ant">Ant</a>,
@@ -903,27 +887,6 @@
     fine for most JDK developers.
 </blockquote>
 <!-- ------------------------------------------------------ -->
-<h4><a name="jibx">JIBX</a></h4>
-<blockquote>
-    JIBX libraries version 1.1.5 is required for building the OpenJDK.
-    Namely, the following JAR files from the JIBX distribution package
-    are required:
-    <ul>
-        <li>bcel.jar
-        <li>jibx-bind.jar
-        <li>jibx-run.jar
-        <li>xpp3.jar
-    </ul>
-    <p>
-    You can download the package from the
-    <a href="http://jibx.sourceforge.net" target="_blank">JIBX site</a>.
-    <p>
-    You will need to set the
-    <tt><a href="#ALT_JIBX_LIBS_PATH">ALT_JIBX_LIBS_PATH</a></tt>
-    environment variable to refer to place where the JAR files,
-    above, are located.
-</blockquote>
-<!-- ------------------------------------------------------ -->
 <h4><a name="compilers">Compilers</a></h4>
 <blockquote>
     <strong><a name="gcc">Linux gcc/binutils</a></strong>
@@ -1496,12 +1459,6 @@
     The default will refer to
     <tt>jdk/src/share/lib/security/cacerts</tt>.
 </dd>
-<dt><tt><a name="ALT_JIBX_LIBS_PATH">ALT_JIBX_LIBS_PATH</a></tt></dt>
-<dd>
-    The location of the <a href="#jibx">JIBX libraries</a> file.
-    The default value is
-    <tt>$(ALT_SLASH_JAVA)/devtools/share/jibx/lib</tt>.
-</dd>
 <dt><a name="ALT_CUPS_HEADERS_PATH"><tt>ALT_CUPS_HEADERS_PATH</tt></a> </dt>
 <dd>
     The location of the CUPS header files.
View File

@@ -44,3 +44,5 @@ a821e059a961bcb02830280d51f6dd030425c066 jdk7-b66
 a12ea7c7b497b4ba7830550095ef633bd6f43971 jdk7-b67
 5182bcc9c60cac429d1f7988676cec7320752be3 jdk7-b68
 8120d308ec4e805c5588b8d9372844d781c4112d jdk7-b69
+175bd68779546078dbdb6dacd7f0aced79ed22b1 jdk7-b70
+3f1ef7f899ea2aec189c4fb67e5c8fa374437c50 jdk7-b71
View File

@@ -55,10 +55,21 @@ else
   ADD_CLIENT_VM_OPTION = true
   endif
 endif
-JAVA_JVM_FLAGS =
+
+# Options for hotspot to turn off printing of options with fastdebug version
+# and creating the hotspot.log file.
+JAVA_HOTSPOT_DISABLE_PRINT_VMOPTIONS = \
+   -XX:-PrintVMOptions -XX:+UnlockDiagnosticVMOptions -XX:-LogVMOutput
+
+# JVM options
+JAVA_JVM_FLAGS = $(JAVA_HOTSPOT_DISABLE_PRINT_VMOPTIONS)
+
 ifeq ($(ADD_CLIENT_VM_OPTION), true)
   JAVA_JVM_FLAGS += -client
 endif
+ifdef USE_HOTSPOT_INTERPRETER_MODE
+  JAVA_JVM_FLAGS += -Xint
+endif
 # Various VM flags
 JAVA_TOOLS_FLAGS = $(JAVA_JVM_FLAGS) $(JAVA_MEM_FLAGS)
@@ -100,7 +111,10 @@ JAVACFLAGS =
 ifeq ($(DEBUG_CLASSFILES),true)
   JAVACFLAGS += -g
 endif
-ifeq ($(COMPILER_WARNINGS_FATAL), true)
+ifeq ($(JAVAC_MAX_WARNINGS), true)
+  JAVACFLAGS += -Xlint:all
+endif
+ifeq ($(JAVAC_WARNINGS_FATAL), true)
   JAVACFLAGS += -Werror
 endif
@@ -108,7 +122,8 @@ NO_PROPRIETARY_API_WARNINGS = -XDignore.symbol.file=true
 JAVACFLAGS += $(NO_PROPRIETARY_API_WARNINGS)
 # Add the source level
-LANGUAGE_VERSION = -source 7
+SOURCE_LANGUAGE_VERSION = 7
+LANGUAGE_VERSION = -source $(SOURCE_LANGUAGE_VERSION)
 JAVACFLAGS += $(LANGUAGE_VERSION)
 # Add the class version we want
@@ -176,10 +191,17 @@ endif
 # The javac options supplied to the boot javac is limited. This compiler
 # should only be used to build the 'make/tools' sources, which are not
 # class files that end up in the classes directory.
-ifeq ($(COMPILER_WARNINGS_FATAL), true)
+ifeq ($(JAVAC_MAX_WARNINGS), true)
+  BOOT_JAVACFLAGS += -Xlint:all
+endif
+ifeq ($(JAVAC_WARNINGS_FATAL), true)
   BOOT_JAVACFLAGS += -Werror
 endif
-BOOT_JAVACFLAGS += -encoding ascii
+
+BOOT_SOURCE_LANGUAGE_VERSION = 6
+BOOT_TARGET_CLASS_VERSION = 6
+BOOT_JAVACFLAGS += -encoding ascii -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION)
+BOOT_JAR_JFLAGS += $(JAR_JFLAGS)
 BOOT_JAVA_CMD = $(BOOTDIR)/bin/java $(JAVA_TOOLS_FLAGS)
 BOOT_JAVAC_CMD = $(BOOTDIR)/bin/javac $(JAVAC_JVM_FLAGS) $(BOOT_JAVACFLAGS)
View File

@@ -44,3 +44,5 @@ ba313800759b678979434d6da8ed3bf49eb8bea4 jdk7-b65
 18f526145aea355a9320b724373386fc2170f183 jdk7-b67
 d07e68298d4e17ebf93d8299e43fcc3ded26472a jdk7-b68
 54fd4d9232969ea6cd3d236e5ad276183bb0d423 jdk7-b69
+0632c3e615a315ff11e2ab1d64f4d82ff9853461 jdk7-b70
+50a95aa4a247f0cbbf66df285a8b1d78ffb153d9 jdk7-b71
View File

@ -48,6 +48,8 @@ if [ "$OS" = "Linux" ]; then
CPU=i386 CPU=i386
fi fi
else else
LD_AUDIT_32=$STARTDIR/../src/os/solaris/proc/`uname -p`/libsaproc_audit.so
export LD_AUDIT_32
SA_LIBPATH=$STARTDIR/../src/os/solaris/proc/`uname -p`:$STARTDIR/solaris/`uname -p` SA_LIBPATH=$STARTDIR/../src/os/solaris/proc/`uname -p`:$STARTDIR/solaris/`uname -p`
OPTIONS="-Dsa.library.path=$SA_LIBPATH -Dsun.jvm.hotspot.debugger.useProcDebugger" OPTIONS="-Dsa.library.path=$SA_LIBPATH -Dsun.jvm.hotspot.debugger.useProcDebugger"
CPU=sparc CPU=sparc

View File

@@ -43,6 +43,8 @@ else
   fi
 fi
+LD_AUDIT_64=$STARTDIR/../src/os/solaris/proc/$CPU/libsaproc_audit.so
+export LD_AUDIT_64
 SA_LIBPATH=$STARTDIR/../src/os/solaris/proc/$CPU:$STARTDIR/solaris/$CPU
 OPTIONS="-Dsa.library.path=$SA_LIBPATH -Dsun.jvm.hotspot.debugger.useProcDebugger"
View File

@@ -56,24 +56,28 @@ i386:: javahomecheck
 	@javah -classpath $(CLASSES_DIR) -jni sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal
 	CC -G -KPIC -I${JAVA_HOME}/include -I${JAVA_HOME}/include/solaris saproc.cpp \
 	   -M mapfile -o $@/libsaproc.so -ldemangle
+	CC -o $@/libsaproc_audit.so -G -Kpic -z defs saproc_audit.cpp -lmapmalloc -ldl -lc

 amd64:: javahomecheck
 	$(MKDIRS) $@
 	@javah -classpath $(CLASSES_DIR) -jni sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal
 	CC -G -KPIC -xarch=amd64 -I${JAVA_HOME}/include -I${JAVA_HOME}/include/solaris saproc.cpp \
 	   -M mapfile -o $@/libsaproc.so -ldemangle
+	CC -xarch=amd64 -o $@/libsaproc_audit.so -G -Kpic -z defs saproc_audit.cpp -lmapmalloc -ldl -lc

 sparc:: javahomecheck
 	$(MKDIRS) $@
 	@javah -classpath $(CLASSES_DIR) -jni sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal
 	CC -G -KPIC -xarch=v8 -I${JAVA_HOME}/include -I${JAVA_HOME}/include/solaris saproc.cpp \
 	   -M mapfile -o $@/libsaproc.so -ldemangle
+	CC -xarch=v8 -o $@/libsaproc_audit.so -G -Kpic -z defs saproc_audit.cpp -lmapmalloc -ldl -lc

 sparcv9:: javahomecheck
 	$(MKDIRS) $@
 	@javah -classpath $(CLASSES_DIR) -jni sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal
 	CC -G -KPIC -xarch=v9 -I${JAVA_HOME}/include -I${JAVA_HOME}/include/solaris saproc.cpp \
 	   -M mapfile -o $@/libsaproc.so -ldemangle
+	CC -xarch=v9 -o $@/libsaproc_audit.so -G -Kpic -z defs saproc_audit.cpp -lmapmalloc -ldl -lc

 clean::
 	$(RM) -rf sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal.h
View File

@@ -45,6 +45,8 @@ SUNWprivate_1.1 {
 		Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_resume0;
 		Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_suspend0;
 		Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_writeBytesToProcess0;
+		# this is needed by saproc_audit.c to redirect opens in libproc.so
+		libsaproc_open;
 	local:
 		*;
 };
View File

@@ -214,11 +214,17 @@ static void init_alt_root() {
   }
 }

-static int find_file_hook(const char * name, int elf_checksum) {
+// This function is a complete substitute for the open system call
+// since it's also used to override open calls from libproc to
+// implement as a pathmap style facility for the SA. If libproc
+// starts using other interfaces then this might have to extended to
+// cover other calls.
+extern "C" int libsaproc_open(const char * name, int oflag, ...) {
+  if (oflag == O_RDONLY) {
     init_alt_root();
     if (_libsaproc_debug) {
-      printf("libsaproc DEBUG: find_file_hook %s 0x%x\n", name, elf_checksum);
+      printf("libsaproc DEBUG: libsaproc_open %s\n", name);
     }
     if (alt_root_len > 0) {
@@ -230,7 +236,7 @@ static int find_file_hook(const char * name, int elf_checksum) {
       fd = open(alt_path, O_RDONLY);
       if (fd >= 0) {
         if (_libsaproc_debug) {
-          printf("libsaproc DEBUG: find_file_hook substituted %s\n", alt_path);
+          printf("libsaproc DEBUG: libsaproc_open substituted %s\n", alt_path);
         }
         return fd;
       }
@@ -241,22 +247,25 @@ static int find_file_hook(const char * name, int elf_checksum) {
         fd = open(alt_path, O_RDONLY);
         if (fd >= 0) {
           if (_libsaproc_debug) {
-            printf("libsaproc DEBUG: find_file_hook substituted %s\n", alt_path);
+            printf("libsaproc DEBUG: libsaproc_open substituted %s\n", alt_path);
           }
           return fd;
         }
       }
     }
-  return -1;
+  }
+
+  {
+    mode_t mode;
+    va_list ap;
+    va_start(ap, oflag);
+    mode = va_arg(ap, mode_t);
+    va_end(ap);
+    return open(name, oflag, mode);
+  }
 }

-static int pathmap_open(const char* name) {
-  int fd = open(name, O_RDONLY);
-  if (fd < 0) {
-    fd = find_file_hook(name, 0);
-  }
-  return fd;
-}

 static void * pathmap_dlopen(const char * name, int mode) {
   init_alt_root();
@@ -608,7 +617,7 @@ init_classsharing_workaround(void *cd, const prmap_t* pmap, const char* obj_name
   print_debug("looking for %s\n", classes_jsa);
   // open the classes[_g].jsa
-  int fd = pathmap_open(classes_jsa);
+  int fd = libsaproc_open(classes_jsa, O_RDONLY);
   if (fd < 0) {
     char errMsg[ERR_MSG_SIZE];
     sprintf(errMsg, "can't open shared archive file %s", classes_jsa);
@@ -1209,8 +1218,6 @@ JNIEXPORT jstring JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_d
   return res;
 }

-typedef int (*find_file_hook_t)(const char *, int elf_checksum);
-
 /*
  * Class:     sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal
  * Method:    initIDs
@@ -1230,16 +1237,6 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_init
   if (libproc_handle == 0)
      THROW_NEW_DEBUGGER_EXCEPTION("can't load libproc.so, if you are using Solaris 5.7 or below, copy libproc.so from 5.8!");

-  // If possible, set shared object find file hook.
-  void (*set_hook)(find_file_hook_t) = (void(*)(find_file_hook_t))dlsym(libproc_handle, "Pset_find_file_hook");
-  if (set_hook) {
-    // we found find file hook symbol, set up our hook function.
-    set_hook(find_file_hook);
-  } else if (getenv(SA_ALTROOT)) {
-    printf("libsaproc WARNING: %s set, but can't set file hook. " \
-           "Did you use right version of libproc.so?\n", SA_ALTROOT);
-  }
-
   p_ps_prochandle_ID = env->GetFieldID(clazz, "p_ps_prochandle", "J");
   CHECK_EXCEPTION;
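The new libsaproc_open above forwards anything that is not a plain read-only open straight to the real open(2), pulling the optional mode argument off the varargs list. The same forwarding pattern, written as a small standalone sketch (logging_open is a hypothetical name, not part of this change, and it only reads the mode when the flags say one could have been supplied):

#include <cstdarg>
#include <fcntl.h>
#include <unistd.h>

// Hypothetical varargs wrapper around open(2), mirroring the pattern used by
// libsaproc_open: read the optional mode argument only when it is meaningful,
// then forward everything to the real call.
extern "C" int logging_open(const char* path, int oflag, ...) {
  mode_t mode = 0;
  if (oflag & O_CREAT) {          // a mode is only passed together with O_CREAT
    va_list ap;
    va_start(ap, oflag);
    mode = va_arg(ap, mode_t);    // mode_t is an int-sized type on Solaris/Linux
    va_end(ap);
  }
  return open(path, oflag, mode);
}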
View File

@@ -0,0 +1,98 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
#include <link.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <limits.h>
#include <varargs.h>
// This class sets up an interposer on open calls from libproc.so to
// support a pathmap facility in the SA.
static uintptr_t* libproc_cookie;
static uintptr_t* libc_cookie;
static uintptr_t* libsaproc_cookie;
uint_t
la_version(uint_t version)
{
return (LAV_CURRENT);
}
uint_t
la_objopen(Link_map * lmp, Lmid_t lmid, uintptr_t * cookie)
{
if (strstr(lmp->l_name, "/libproc.so") != NULL) {
libproc_cookie = cookie;
return LA_FLG_BINDFROM;
}
if (strstr(lmp->l_name, "/libc.so") != NULL) {
libc_cookie = cookie;
return LA_FLG_BINDTO;
}
if (strstr(lmp->l_name, "/libsaproc.so") != NULL) {
libsaproc_cookie = cookie;
return LA_FLG_BINDTO | LA_FLG_BINDFROM;
}
return 0;
}
#if defined(_LP64)
uintptr_t
la_symbind64(Elf64_Sym *symp, uint_t symndx, uintptr_t *refcook,
uintptr_t *defcook, uint_t *sb_flags, const char *sym_name)
#else
uintptr_t
la_symbind32(Elf32_Sym *symp, uint_t symndx, uintptr_t *refcook,
uintptr_t *defcook, uint_t *sb_flags)
#endif
{
#if !defined(_LP64)
const char *sym_name = (const char *)symp->st_name;
#endif
if (strcmp(sym_name, "open") == 0 && refcook == libproc_cookie) {
// redirect all open calls from libproc.so through libsaproc_open which will
// try the alternate library locations first.
void* handle = dlmopen(LM_ID_BASE, "libsaproc.so", RTLD_NOLOAD);
if (handle == NULL) {
fprintf(stderr, "libsaproc_audit.so: didn't find libsaproc.so during linking\n");
} else {
uintptr_t libsaproc_open = (uintptr_t)dlsym(handle, "libsaproc_open");
if (libsaproc_open == 0) {
fprintf(stderr, "libsaproc_audit.so: didn't find libsaproc_open during linking\n");
} else {
return libsaproc_open;
}
}
}
return symp->st_value;
}
View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -259,7 +259,7 @@ public class NMethod extends CodeBlob {
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(pd != null, "scope must be present");
     }
-    return new ScopeDesc(this, pd.getScopeDecodeOffset());
+    return new ScopeDesc(this, pd.getScopeDecodeOffset(), pd.getReexecute());
   }

   /** This is only for use by the debugging system, and is only
@@ -291,7 +291,7 @@ public class NMethod extends CodeBlob {
   public ScopeDesc getScopeDescNearDbg(Address pc) {
     PCDesc pd = getPCDescNearDbg(pc);
     if (pd == null) return null;
-    return new ScopeDesc(this, pd.getScopeDecodeOffset());
+    return new ScopeDesc(this, pd.getScopeDecodeOffset(), pd.getReexecute());
   }

   public Map/*<Address, PcDesc>*/ getSafepoints() {
View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2004 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@ import sun.jvm.hotspot.types.*;
 public class PCDesc extends VMObject {
   private static CIntegerField pcOffsetField;
   private static CIntegerField scopeDecodeOffsetField;
+  private static CIntegerField pcFlagsField;

   static {
     VM.registerVMInitializedObserver(new Observer() {
@@ -50,6 +51,7 @@ public class PCDesc extends VMObject {
     pcOffsetField          = type.getCIntegerField("_pc_offset");
     scopeDecodeOffsetField = type.getCIntegerField("_scope_decode_offset");
+    pcFlagsField           = type.getCIntegerField("_flags");
   }

   public PCDesc(Address addr) {
@@ -70,6 +72,12 @@ public class PCDesc extends VMObject {
     return code.instructionsBegin().addOffsetTo(getPCOffset());
   }

+  public boolean getReexecute() {
+    int flags = (int)pcFlagsField.getValue(addr);
+    return ((flags & 0x1)== 1); //first is the reexecute bit
+  }
+
   public void print(NMethod code) {
     printOn(System.out, code);
   }
@@ -82,6 +90,7 @@ public class PCDesc extends VMObject {
       tty.print(" ");
       sd.getMethod().printValueOn(tty);
       tty.print(" @" + sd.getBCI());
+      tty.print(" reexecute=" + sd.getReexecute());
       tty.println();
     }
   }
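The new getReexecute() accessor only looks at bit 0 of the PcDesc _flags word. The same decoding, restated as a trivial C++ helper for illustration (the helper name and the assumption that no other flag bits matter for this purpose are mine):

#include <cstdint>

// Bit 0 of the PcDesc flags word carries the "reexecute" property that the
// ScopeDesc constructors above now receive explicitly.
static inline bool pcdesc_reexecute(uint32_t pc_desc_flags) {
  return (pc_desc_flags & 0x1) != 0;
}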
View File

@@ -41,6 +41,7 @@ public class ScopeDesc {
   private NMethod code;
   private Method  method;
   private int     bci;
+  private boolean reexecute;
   /** Decoding offsets */
   private int decodeOffset;
   private int senderDecodeOffset;
@@ -51,10 +52,11 @@ public class ScopeDesc {
   private List objects; // ArrayList<ScopeValue>

-  public ScopeDesc(NMethod code, int decodeOffset) {
+  public ScopeDesc(NMethod code, int decodeOffset, boolean reexecute) {
     this.code = code;
     this.decodeOffset = decodeOffset;
     this.objects = decodeObjectValues(DebugInformationRecorder.SERIALIZED_NULL);
+    this.reexecute = reexecute;

     // Decode header
     DebugInfoReadStream stream = streamAt(decodeOffset);
@@ -68,10 +70,11 @@ public class ScopeDesc {
       monitorsDecodeOffset = stream.readInt();
   }

-  public ScopeDesc(NMethod code, int decodeOffset, int objectDecodeOffset) {
+  public ScopeDesc(NMethod code, int decodeOffset, int objectDecodeOffset, boolean reexecute) {
     this.code = code;
     this.decodeOffset = decodeOffset;
     this.objects = decodeObjectValues(objectDecodeOffset);
+    this.reexecute = reexecute;

     // Decode header
     DebugInfoReadStream stream = streamAt(decodeOffset);
@@ -88,6 +91,7 @@ public class ScopeDesc {
   public NMethod getNMethod() { return code; }
   public Method getMethod() { return method; }
   public int getBCI() { return bci; }
+  public boolean getReexecute() { return reexecute;}

   /** Returns a List&lt;ScopeValue&gt; */
   public List getLocals() {
@@ -115,7 +119,7 @@ public class ScopeDesc {
       return null;
     }

-    return new ScopeDesc(code, senderDecodeOffset);
+    return new ScopeDesc(code, senderDecodeOffset, false);
   }

   /** Returns where the scope was decoded */
@@ -149,7 +153,8 @@ public class ScopeDesc {
   public void printValueOn(PrintStream tty) {
     tty.print("ScopeDesc for ");
     method.printValueOn(tty);
-    tty.println(" @bci " + bci);
+    tty.print(" @bci " + bci);
+    tty.println(" reexecute=" + reexecute);
   }

   // FIXME: add more accessors
@@ -157,7 +162,6 @@ public class ScopeDesc {
   //--------------------------------------------------------------------------------
   // Internals only below this point
   //
   private DebugInfoReadStream streamAt(int decodeOffset) {
     return new DebugInfoReadStream(code, decodeOffset, objects);
   }
View File

@@ -176,19 +176,6 @@ public class CompactibleFreeListSpace extends CompactibleSpace {
     for (; cur.lessThan(limit);) {
       Address klassOop = cur.getAddressAt(addressSize);
-      // FIXME: need to do a better job here.
-      // can I use bitMap here?
-      if (klassOop == null) {
-        //Find the object size using Printezis bits and skip over
-        System.err.println("Finding object size using Printezis bits and skipping over...");
-        long size = collector().blockSizeUsingPrintezisBits(cur);
-        if (size == -1) {
-          System.err.println("Printezis bits not set...");
-          break;
-        }
-        cur = cur.addOffsetTo(adjustObjectSizeInBytes(size));
-      }
       if (FreeChunk.indicatesFreeChunk(cur)) {
         if (! cur.equals(regionStart)) {
           res.add(new MemRegion(regionStart, cur));
@@ -200,12 +187,21 @@ public class CompactibleFreeListSpace extends CompactibleSpace {
         }
         // note that fc.size() gives chunk size in heap words
         cur = cur.addOffsetTo(chunkSize * addressSize);
-        System.err.println("Free chunk in CMS heap, size="+chunkSize * addressSize);
         regionStart = cur;
       } else if (klassOop != null) {
         Oop obj = heap.newOop(cur.addOffsetToAsOopHandle(0));
         long objectSize = obj.getObjectSize();
         cur = cur.addOffsetTo(adjustObjectSizeInBytes(objectSize));
+      } else {
+        // FIXME: need to do a better job here.
+        // can I use bitMap here?
+        //Find the object size using Printezis bits and skip over
+        long size = collector().blockSizeUsingPrintezisBits(cur);
+        if (size == -1) {
+          System.err.println("Printezis bits not set...");
+          break;
+        }
+        cur = cur.addOffsetTo(adjustObjectSizeInBytes(size));
       }
     }
     return res;
View File

@@ -63,7 +63,7 @@ public class FreeChunk extends VMObject {
   public long size() {
     if (VM.getVM().isCompressedOopsEnabled()) {
-      Mark mark = new Mark(sizeField.getValue(addr));
+      Mark mark = new Mark(addr.addOffsetTo(sizeField.getOffset()));
       return mark.getSize();
     } else {
       Address size = sizeField.getValue(addr);
@@ -83,7 +83,7 @@ public class FreeChunk extends VMObject {
   public boolean isFree() {
     if (VM.getVM().isCompressedOopsEnabled()) {
-      Mark mark = new Mark(sizeField.getValue(addr));
+      Mark mark = new Mark(addr.addOffsetTo(sizeField.getOffset()));
       return mark.isCmsFreeChunk();
     } else {
       Address prev = prevField.getValue(addr);
View File

@@ -33,9 +33,9 @@
 # Don't put quotes (fail windows build).
 HOTSPOT_VM_COPYRIGHT=Copyright 2009

-HS_MAJOR_VER=16
+HS_MAJOR_VER=17
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=07
+HS_BUILD_NUMBER=01

 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
View File

@@ -40,6 +40,10 @@ jprt.need.sibling.build=false
 jprt.tools.default.release=${jprt.submit.release}

+# Disable syncing the source after builds and tests are done.
+jprt.sync.push=false
+
 # Define the Solaris platforms we want for the various releases
 jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
@@ -306,7 +310,6 @@ jprt.my.windows.x64.test.targets = \
     ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
     ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
     ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_G1, \
     ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
     ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_default, \
     ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@ enum {
 // registers
 enum {
   pd_nof_cpu_regs_frame_map = 32, // number of registers used during code emission
-  pd_nof_caller_save_cpu_regs_frame_map = 6, // number of cpu registers killed by calls
+  pd_nof_caller_save_cpu_regs_frame_map = 10, // number of cpu registers killed by calls
   pd_nof_cpu_regs_reg_alloc = 20, // number of registers that are visible to register allocator
   pd_nof_cpu_regs_linearscan = 32,// number of registers visible linear scan
   pd_first_cpu_reg = 0,
View File

@@ -320,6 +320,10 @@ void FrameMap::init () {
   _caller_save_cpu_regs[3] = FrameMap::O3_opr;
   _caller_save_cpu_regs[4] = FrameMap::O4_opr;
   _caller_save_cpu_regs[5] = FrameMap::O5_opr;
+  _caller_save_cpu_regs[6] = FrameMap::G1_opr;
+  _caller_save_cpu_regs[7] = FrameMap::G3_opr;
+  _caller_save_cpu_regs[8] = FrameMap::G4_opr;
+  _caller_save_cpu_regs[9] = FrameMap::G5_opr;
   for (int i = 0; i < nof_caller_save_fpu_regs; i++) {
     _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
   }
View File

@@ -749,6 +749,10 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
 void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
   assert(x->number_of_arguments() == 5, "wrong type");

+  // Make all state_for calls early since they can emit code
+  CodeEmitInfo* info = state_for(x, x->state());
+
   // Note: spill caller save before setting the item
   LIRItem src     (x->argument_at(0), this);
   LIRItem src_pos (x->argument_at(1), this);
@@ -767,7 +771,6 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
   ciArrayKlass* expected_type;
   arraycopy_helper(x, &flags, &expected_type);

-  CodeEmitInfo* info = state_for(x, x->state());
   __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
                length.result(), rlock_callee_saved(T_INT),
                expected_type, flags, info);
@@ -878,6 +881,9 @@ void LIRGenerator::do_NewInstance(NewInstance* x) {

 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
+  // Evaluate state_for early since it may emit code
+  CodeEmitInfo* info = state_for(x, x->state());
+
   LIRItem length(x->length(), this);
   length.load_item();

@@ -892,7 +898,6 @@ void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
   __ oop2reg(ciTypeArrayKlass::make(elem_type)->encoding(), klass_reg);

-  CodeEmitInfo* info = state_for(x, x->state());
   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

@@ -902,7 +907,8 @@ void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {

 void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
-  LIRItem length(x->length(), this);
+  // Evaluate state_for early since it may emit code.
+  CodeEmitInfo* info = state_for(x, x->state());

   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
   // and therefore provide the state before the parameters have been consumed
   CodeEmitInfo* patching_info = NULL;
@@ -910,6 +916,7 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
     patching_info = state_for(x, x->state_before());
   }

+  LIRItem length(x->length(), this);
   length.load_item();

   const LIR_Opr reg = result_register_for(x->type());
@@ -919,7 +926,6 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
   LIR_Opr tmp4 = FrameMap::O1_oop_opr;
   LIR_Opr klass_reg = FrameMap::G5_oop_opr;
   LIR_Opr len = length.result();
-  CodeEmitInfo* info = state_for(x, x->state());

   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
   ciObject* obj = (ciObject*) ciObjArrayKlass::make(x->klass());
@@ -943,25 +949,22 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
     items->at_put(i, size);
   }

-  // need to get the info before, as the items may become invalid through item_free
+  // Evaluate state_for early since it may emit code.
   CodeEmitInfo* patching_info = NULL;
   if (!x->klass()->is_loaded() || PatchALot) {
     patching_info = state_for(x, x->state_before());

     // cannot re-use same xhandlers for multiple CodeEmitInfos, so
-    // clone all handlers
+    // clone all handlers.  This is handled transparently in other
+    // places by the CodeEmitInfo cloning logic but is handled
+    // specially here because a stub isn't being used.
     x->set_exception_handlers(new XHandlers(x->exception_handlers()));
   }
+  CodeEmitInfo* info = state_for(x, x->state());

   i = dims->length();
   while (i-- > 0) {
     LIRItem* size = items->at(i);
-    // if a patching_info was generated above then debug information for the state before
-    // the call is going to be emitted.  The LIRGenerator calls above may have left some values
-    // in registers and that's been recorded in the CodeEmitInfo.  In that case the items
-    // for those values can't simply be freed if they are registers because the values
-    // might be destroyed by store_stack_parameter.  So in the case of patching, delay the
-    // freeing of the items that already were in registers
     size->load_item();
     store_stack_parameter (size->result(),
                            in_ByteSize(STACK_BIAS +
@@ -972,8 +975,6 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   // This instruction can be deoptimized in the slow path : use
   // O0 as result register.
   const LIR_Opr reg = result_register_for(x->type());
-  CodeEmitInfo* info = state_for(x, x->state());

   jobject2reg_with_patching(reg, x->klass(), patching_info);
   LIR_Opr rank = FrameMap::O1_opr;
   __ move(LIR_OprFact::intConst(x->rank()), rank);
View File

@@ -8335,15 +8335,13 @@ void MacroAssembler::decode_heap_oop_not_null(Register r) {
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
-  if (Universe::narrow_oop_base() == NULL) {
   if (Universe::narrow_oop_shift() != 0) {
-    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
-    shlq(r, LogMinObjAlignmentInBytes);
-  }
-  } else {
     assert (Address::times_8 == LogMinObjAlignmentInBytes &&
             Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
+    // Don't use Shift since it modifies flags.
     leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
+  } else {
+    assert (Universe::narrow_oop_base() == NULL, "sanity");
   }
 }
@@ -8358,6 +8356,7 @@ void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
             Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
     leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
   } else if (dst != src) {
+    assert (Universe::narrow_oop_base() == NULL, "sanity");
     movq(dst, src);
   }
 }
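For reference, the arithmetic the rewritten decode_heap_oop_not_null emits with leaq is the standard compressed-oop widening. A hedged C++ restatement, where base and shift stand in for Universe::narrow_oop_base() and Universe::narrow_oop_shift() (the real code does this in generated machine code, not in C++):

#include <cstdint>

// Widen a 32-bit compressed oop to a full pointer: a pure shift when the heap
// base is zero, otherwise base + (narrow << shift) with shift == 3 in practice.
static inline uintptr_t decode_narrow_oop(uint32_t narrow, uintptr_t base, unsigned shift) {
  if (narrow == 0) {
    return 0;                 // a compressed NULL stays NULL
  }
  return base + (static_cast<uintptr_t>(narrow) << shift);
}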
View File

@@ -1047,16 +1047,17 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
     items->at_put(i, size);
   }

-  // need to get the info before, as the items may become invalid through item_free
+  // Evaluate state_for early since it may emit code.
   CodeEmitInfo* patching_info = NULL;
   if (!x->klass()->is_loaded() || PatchALot) {
     patching_info = state_for(x, x->state_before());

     // cannot re-use same xhandlers for multiple CodeEmitInfos, so
-    // clone all handlers.
+    // clone all handlers.  This is handled transparently in other
+    // places by the CodeEmitInfo cloning logic but is handled
+    // specially here because a stub isn't being used.
     x->set_exception_handlers(new XHandlers(x->exception_handlers()));
   }
   CodeEmitInfo* info = state_for(x, x->state());

   i = dims->length();
View File

@@ -2381,7 +2381,7 @@ void SharedRuntime::generate_deopt_blob() {
   // Save everything in sight.
-  map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
+  map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);

   // Normal deoptimization
   __ push(Deoptimization::Unpack_deopt);
   __ jmp(cont);
@@ -2392,7 +2392,7 @@ void SharedRuntime::generate_deopt_blob() {
   // return address is the pc describes what bci to do re-execute at

   // No need to update map as each call to save_live_registers will produce identical oopmap
-  (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
+  (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);

   __ push(Deoptimization::Unpack_reexecute);
   __ jmp(cont);
@@ -2428,7 +2428,7 @@ void SharedRuntime::generate_deopt_blob() {
   // Save everything in sight.

   // No need to update map as each call to save_live_registers will produce identical oopmap
-  (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
+  (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);

   // Now it is safe to overwrite any register
@@ -2515,6 +2515,11 @@ void SharedRuntime::generate_deopt_blob() {
   RegisterSaver::restore_result_registers(masm);

+  // Non standard control word may be leaked out through a safepoint blob, and we can
+  // deopt at a poll point with the non standard control word. However, we should make
+  // sure the control word is correct after restore_result_registers.
+  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
+
   // All of the register save area has been popped of the stack. Only the
   // return address remains.
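The added fldcw reloads the x87 control word from StubRoutines::addr_fpu_cntrl_wrd_std(), so a non-default rounding or precision mode that leaked through a safepoint blob cannot survive into the deoptimized frame. A small GCC-style sketch of reading and writing that control word (the helper names are mine; the actual "standard" value lives in the VM's stub routines):

#include <cstdint>

// Read and write the x87 FPU control word, which holds the rounding mode,
// precision control and exception masks.
static inline uint16_t read_x87_control_word() {
  uint16_t cw;
  __asm__ __volatile__("fnstcw %0" : "=m"(cw));
  return cw;
}

static inline void write_x87_control_word(uint16_t cw) {
  __asm__ __volatile__("fldcw %0" : : "m"(cw));
}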
View File

@@ -1643,7 +1643,8 @@ inline hrtime_t oldgetTimeNanos() {
 inline hrtime_t getTimeNanos() {
   if (VM_Version::supports_cx8()) {
     const hrtime_t now = gethrtime();
-    const hrtime_t prev = max_hrtime;
+    // Use atomic long load since 32-bit x86 uses 2 registers to keep long.
+    const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime);
     if (now <= prev)  return prev;   // same or retrograde time;
     const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
     assert(obsv >= prev, "invariant");   // Monotonicity
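The pattern above, an atomic 64-bit load of the last returned value followed by a compare-and-exchange to advance it, is what keeps getTimeNanos() monotonic without a lock; the plain load it replaces could tear on 32-bit x86. A rough equivalent using std::atomic (HotSpot uses its own Atomic::load/cmpxchg, and raw_time stands in for gethrtime):

#include <atomic>
#include <cstdint>

static std::atomic<int64_t> max_time{0};   // last value handed out

// Return a time that never goes backwards even if the raw clock does.
int64_t monotonic_time(int64_t (*raw_time)()) {
  const int64_t now = raw_time();
  int64_t prev = max_time.load();          // atomic 64-bit read, even on 32-bit targets
  while (now > prev) {
    // Try to publish 'now'; on failure 'prev' is refreshed and we re-check.
    if (max_time.compare_exchange_weak(prev, now)) {
      return now;
    }
  }
  return prev;                             // same or retrograde raw time
}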
View File

@@ -616,12 +616,13 @@ julong os::available_memory() {
 }

 julong os::win32::available_memory() {
-  // FIXME: GlobalMemoryStatus() may return incorrect value if total memory
-  // is larger than 4GB
-  MEMORYSTATUS ms;
-  GlobalMemoryStatus(&ms);
+  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
+  // value if total memory is larger than 4GB
+  MEMORYSTATUSEX ms;
+  ms.dwLength = sizeof(ms);
+  GlobalMemoryStatusEx(&ms);

-  return (julong)ms.dwAvailPhys;
+  return (julong)ms.ullAvailPhys;
 }

 julong os::physical_memory() {
@@ -1579,16 +1580,17 @@ void os::print_memory_info(outputStream* st) {
   st->print("Memory:");
   st->print(" %dk page", os::vm_page_size()>>10);

-  // FIXME: GlobalMemoryStatus() may return incorrect value if total memory
-  // is larger than 4GB
-  MEMORYSTATUS ms;
-  GlobalMemoryStatus(&ms);
+  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
+  // value if total memory is larger than 4GB
+  MEMORYSTATUSEX ms;
+  ms.dwLength = sizeof(ms);
+  GlobalMemoryStatusEx(&ms);

   st->print(", physical %uk", os::physical_memory() >> 10);
   st->print("(%uk free)", os::available_memory() >> 10);

-  st->print(", swap %uk", ms.dwTotalPageFile >> 10);
-  st->print("(%uk free)", ms.dwAvailPageFile >> 10);
+  st->print(", swap %uk", ms.ullTotalPageFile >> 10);
+  st->print("(%uk free)", ms.ullAvailPageFile >> 10);
   st->cr();
 }
@@ -3135,11 +3137,13 @@ void os::win32::initialize_system_info() {
   _processor_level = si.wProcessorLevel;
   _processor_count = si.dwNumberOfProcessors;

-  MEMORYSTATUS ms;
+  MEMORYSTATUSEX ms;
+  ms.dwLength = sizeof(ms);
   // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
   // dwMemoryLoad (% of memory in use)
-  GlobalMemoryStatus(&ms);
-  _physical_memory = ms.dwTotalPhys;
+  GlobalMemoryStatusEx(&ms);
+  _physical_memory = ms.ullTotalPhys;

   OSVERSIONINFO oi;
   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
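A minimal standalone use of GlobalMemoryStatusEx matching the change above; dwLength must be filled in before the call, and the 64-bit ull* fields avoid the 4GB truncation of the older MEMORYSTATUS fields:

#include <windows.h>
#include <cstdio>

int main() {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);                 // required before the call
  if (GlobalMemoryStatusEx(&ms)) {
    std::printf("physical %lluk (%lluk free), swap %lluk (%lluk free)\n",
                (unsigned long long)(ms.ullTotalPhys >> 10),
                (unsigned long long)(ms.ullAvailPhys >> 10),
                (unsigned long long)(ms.ullTotalPageFile >> 10),
                (unsigned long long)(ms.ullAvailPageFile >> 10));
  }
  return 0;
}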
View File

@@ -46,6 +46,8 @@ inline void Atomic::dec (volatile jint* dest) { (void)add (-1, dest); }
 inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
 inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest); }

+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
 #ifdef _GNU_SOURCE

 inline jint Atomic::add (jint add_value, volatile jint* dest) {
View File

@@ -99,6 +99,8 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void*
   return (void*)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, (int) os::is_MP());
 }

+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
 #else // !AMD64

 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
@@ -131,6 +133,15 @@ inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t*
 inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
   return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
 }
+
+extern "C" void _Atomic_load_long(volatile jlong* src, volatile jlong* dst);
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  _Atomic_load_long(src, &dest);
+  return dest;
+}
+
 #endif // AMD64

 #ifdef _GNU_SOURCE
View File

@@ -97,6 +97,15 @@
       popl     %ebx
       .end

+  // Support for void Atomic::load(volatile jlong* src, volatile jlong* dest).
+  .inline _Atomic_load_long,2
+      movl     0(%esp), %eax   // src
+      fildll   (%eax)
+      movl     4(%esp), %eax   // dest
+      fistpll  (%eax)
+  .end
+
   // Support for OrderAccess::acquire()
   .inline _OrderAccess_acquire,0
       movl     0(%esp), %eax
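The .inline fragment above is Sun Studio inline-template syntax. For comparison, a GCC inline-assembly version of the same idea is sketched below: an aligned 8-byte load or store issued through the x87 unit is a single memory access on 32-bit x86, which is what lets fildll/fistpll stand in for an atomic 64-bit copy (this GCC variant is illustrative only and not part of the change):

#include <cstdint>

// Atomically copy an aligned 64-bit value on 32-bit x86 by round-tripping it
// through the x87 stack (fildll loads 8 bytes, fistpll stores them back).
static inline int64_t atomic_load64(const volatile int64_t* src) {
  int64_t dst;
  __asm__ __volatile__("fildll %1\n\t"
                       "fistpll %0"
                       : "=m"(dst)
                       : "m"(*src)
                       : "memory");
  return dst;
}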
View File

@@ -208,6 +208,15 @@ int IRScope::top_scope_bci() const {
   return scope->caller_bci();
 }

+bool IRScopeDebugInfo::should_reexecute() {
+  ciMethod* cur_method = scope()->method();
+  int cur_bci = bci();
+  if (cur_method != NULL && cur_bci != SynchronizationEntryBCI) {
+    Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
+    return Interpreter::bytecode_should_reexecute(code);
+  } else
+    return false;
+}

 // Implementation of CodeEmitInfo
@@ -253,7 +262,7 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, bool lock_stack_only)
 void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
   // record the safepoint before recording the debug info for enclosing scopes
   recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
-  _scope_debug_info->record_debug_info(recorder, pc_offset);
+  _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/);
   recorder->end_safepoint(pc_offset);
 }
View File

@@ -239,15 +239,20 @@ class IRScopeDebugInfo: public CompilationResourceObj {
   GrowableArray<MonitorValue*>* monitors() { return _monitors; }
   IRScopeDebugInfo*             caller()   { return _caller;   }

-  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
+  //Whether we should reexecute this bytecode for deopt
+  bool should_reexecute();
+
+  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool topmost) {
     if (caller() != NULL) {
       // Order is significant:  Must record caller first.
-      caller()->record_debug_info(recorder, pc_offset);
+      caller()->record_debug_info(recorder, pc_offset, false/*topmost*/);
     }
     DebugToken* locvals = recorder->create_scope_values(locals());
     DebugToken* expvals = recorder->create_scope_values(expressions());
     DebugToken* monvals = recorder->create_monitor_values(monitors());
-    recorder->describe_scope(pc_offset, scope()->method(), bci(), locvals, expvals, monvals);
+    // reexecute allowed only for the topmost frame
+    bool reexecute = topmost ? should_reexecute() : false;
+    recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, locvals, expvals, monvals);
   }
 };
View File

@@ -379,7 +379,8 @@ void LIR_Assembler::record_non_safepoint_debug_info() {
     ValueStack* s = nth_oldest(vstack, n, s_bci);
     if (s == NULL)  break;
     IRScope* scope = s->scope();
-    debug_info->describe_scope(pc_offset, scope->method(), s_bci);
+    //Always pass false for reexecute since these ScopeDescs are never used for deopt
+    debug_info->describe_scope(pc_offset, scope->method(), s_bci, false/*reexecute*/);
   }

   debug_info->end_non_safepoint(pc_offset);
View File

@@ -219,24 +219,27 @@ ciObject* ciObjectFactory::get(oop key) {
   ASSERT_IN_VM;

 #ifdef ASSERT
+  if (CIObjectFactoryVerify) {
     oop last = NULL;
     for (int j = 0; j< _ci_objects->length(); j++) {
       oop o = _ci_objects->at(j)->get_oop();
       assert(last < o, "out of order");
       last = o;
     }
+  }
 #endif // ASSERT
   int len = _ci_objects->length();
   int index = find(key, _ci_objects);
 #ifdef ASSERT
+  if (CIObjectFactoryVerify) {
     for (int i=0; i<_ci_objects->length(); i++) {
       if (_ci_objects->at(i)->get_oop() == key) {
         assert(index == i, " bad lookup");
       }
     }
+  }
 #endif
   if (!is_found_at(index, key, _ci_objects)) {
     // Check in the non-perm area before putting it in the list.
     NonPermObject* &bucket = find_non_perm(key);
     if (bucket != NULL) {
@@ -539,12 +542,14 @@ void ciObjectFactory::insert(int index, ciObject* obj, GrowableArray<ciObject*>*
     objects->at_put(index, obj);
   }

 #ifdef ASSERT
+  if (CIObjectFactoryVerify) {
     oop last = NULL;
     for (int j = 0; j< objects->length(); j++) {
       oop o = objects->at(j)->get_oop();
       assert(last < o, "out of order");
       last = o;
     }
+  }
 #endif // ASSERT
 }
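The change above keeps the O(n) ordering checks in debug builds but gates them behind the CIObjectFactoryVerify flag, so assert-enabled VMs no longer pay for a full table walk on every lookup. A generic sketch of the same guard pattern, with placeholder names standing in for the HotSpot flag and table:

#include <cassert>
#include <vector>

static bool verify_enabled = false;   // stands in for the CIObjectFactoryVerify develop flag

// Expensive consistency check: compiled only into debug builds, and even then
// executed only when explicitly requested at runtime.
static void verify_sorted(const std::vector<int>& table) {
#ifndef NDEBUG
  if (verify_enabled) {
    for (std::size_t j = 1; j < table.size(); j++) {
      assert(table[j - 1] < table[j] && "out of order");
    }
  }
#endif
  (void)table;
}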
View File

@@ -547,7 +547,6 @@ objArrayHandle ClassFileParser::parse_interfaces(constantPoolHandle cp,
                                                  int length,
                                                  Handle class_loader,
                                                  Handle protection_domain,
-                                                 PerfTraceTime* vmtimer,
                                                  symbolHandle class_name,
                                                  TRAPS) {
   ClassFileStream* cfs = stream();
@@ -575,13 +574,11 @@ objArrayHandle ClassFileParser::parse_interfaces(constantPoolHandle cp,
     guarantee_property(unresolved_klass->byte_at(0) != JVM_SIGNATURE_ARRAY,
                        "Bad interface name in class file %s", CHECK_(nullHandle));

-    vmtimer->suspend();  // do not count recursive loading twice
     // Call resolve_super so classcircularity is checked
     klassOop k = SystemDictionary::resolve_super_or_fail(class_name,
                   unresolved_klass, class_loader, protection_domain,
                   false, CHECK_(nullHandle));
     interf = KlassHandle(THREAD, k);
-    vmtimer->resume();

     if (LinkWellKnownClasses)  // my super type is well known to me
       cp->klass_at_put(interface_index, interf()); // eagerly resolve
@@ -769,16 +766,16 @@ enum FieldAllocationType {

 struct FieldAllocationCount {
-  int static_oop_count;
-  int static_byte_count;
-  int static_short_count;
-  int static_word_count;
-  int static_double_count;
-  int nonstatic_oop_count;
-  int nonstatic_byte_count;
-  int nonstatic_short_count;
-  int nonstatic_word_count;
-  int nonstatic_double_count;
+  unsigned int static_oop_count;
+  unsigned int static_byte_count;
+  unsigned int static_short_count;
+  unsigned int static_word_count;
+  unsigned int static_double_count;
+  unsigned int nonstatic_oop_count;
+  unsigned int nonstatic_byte_count;
+  unsigned int nonstatic_short_count;
+  unsigned int nonstatic_word_count;
+  unsigned int nonstatic_double_count;
 };

 typeArrayHandle ClassFileParser::parse_fields(constantPoolHandle cp, bool is_interface,
@@ -2558,7 +2555,15 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
   ClassFileStream* cfs = stream();
   // Timing
-  PerfTraceTime vmtimer(ClassLoader::perf_accumulated_time());
+  assert(THREAD->is_Java_thread(), "must be a JavaThread");
+  JavaThread* jt = (JavaThread*) THREAD;
+
+  PerfClassTraceTime ctimer(ClassLoader::perf_class_parse_time(),
+                            ClassLoader::perf_class_parse_selftime(),
+                            NULL,
+                            jt->get_thread_stat()->perf_recursion_counts_addr(),
+                            jt->get_thread_stat()->perf_timers_addr(),
+                            PerfClassTraceTime::PARSE_CLASS);

   _has_finalizer = _has_empty_finalizer = _has_vanilla_constructor = false;
@@ -2738,7 +2743,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
     if (itfs_len == 0) {
       local_interfaces = objArrayHandle(THREAD, Universe::the_empty_system_obj_array());
     } else {
-      local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, &vmtimer, _class_name, CHECK_(nullHandle));
+      local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, CHECK_(nullHandle));
     }

     // Fields (offsets are filled in later)
@@ -2782,6 +2787,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
                                                            protection_domain,
                                                            true,
                                                            CHECK_(nullHandle));
+
     KlassHandle kh (THREAD, k);
     super_klass = instanceKlassHandle(THREAD, kh());
     if (LinkWellKnownClasses)  // my super class is well known to me
@@ -2902,11 +2908,11 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
     }
     // end of "discovered" field compactibility fix

-    int nonstatic_double_count = fac.nonstatic_double_count;
-    int nonstatic_word_count   = fac.nonstatic_word_count;
-    int nonstatic_short_count  = fac.nonstatic_short_count;
-    int nonstatic_byte_count   = fac.nonstatic_byte_count;
-    int nonstatic_oop_count    = fac.nonstatic_oop_count;
+    unsigned int nonstatic_double_count = fac.nonstatic_double_count;
+    unsigned int nonstatic_word_count   = fac.nonstatic_word_count;
+    unsigned int nonstatic_short_count  = fac.nonstatic_short_count;
+    unsigned int nonstatic_byte_count   = fac.nonstatic_byte_count;
+    unsigned int nonstatic_oop_count    = fac.nonstatic_oop_count;

     bool super_has_nonstatic_fields =
       (super_klass() != NULL && super_klass->has_nonstatic_fields());
@@ -2916,24 +2922,24 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
                                  nonstatic_oop_count) != 0);

-    // Prepare list of oops for oop maps generation.
-    u2* nonstatic_oop_offsets;
-    u2* nonstatic_oop_length;
-    int nonstatic_oop_map_count = 0;
+    // Prepare list of oops for oop map generation.
+    int* nonstatic_oop_offsets;
+    unsigned int* nonstatic_oop_counts;
+    unsigned int nonstatic_oop_map_count = 0;
nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD( nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD(
THREAD, u2, nonstatic_oop_count+1); THREAD, int, nonstatic_oop_count + 1);
nonstatic_oop_length = NEW_RESOURCE_ARRAY_IN_THREAD( nonstatic_oop_counts = NEW_RESOURCE_ARRAY_IN_THREAD(
THREAD, u2, nonstatic_oop_count+1); THREAD, unsigned int, nonstatic_oop_count + 1);
// Add fake fields for java.lang.Class instances (also see above). // Add fake fields for java.lang.Class instances (also see above).
// FieldsAllocationStyle and CompactFields values will be reset to default. // FieldsAllocationStyle and CompactFields values will be reset to default.
if(class_name() == vmSymbols::java_lang_Class() && class_loader.is_null()) { if(class_name() == vmSymbols::java_lang_Class() && class_loader.is_null()) {
java_lang_Class_fix_post(&next_nonstatic_field_offset); java_lang_Class_fix_post(&next_nonstatic_field_offset);
nonstatic_oop_offsets[0] = (u2)first_nonstatic_field_offset; nonstatic_oop_offsets[0] = first_nonstatic_field_offset;
int fake_oop_count = (( next_nonstatic_field_offset - const uint fake_oop_count = (next_nonstatic_field_offset -
first_nonstatic_field_offset ) / heapOopSize); first_nonstatic_field_offset) / heapOopSize;
nonstatic_oop_length [0] = (u2)fake_oop_count; nonstatic_oop_counts[0] = fake_oop_count;
nonstatic_oop_map_count = 1; nonstatic_oop_map_count = 1;
nonstatic_oop_count -= fake_oop_count; nonstatic_oop_count -= fake_oop_count;
first_nonstatic_oop_offset = first_nonstatic_field_offset; first_nonstatic_oop_offset = first_nonstatic_field_offset;
@ -3113,13 +3119,15 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
// Update oop maps // Update oop maps
if( nonstatic_oop_map_count > 0 && if( nonstatic_oop_map_count > 0 &&
nonstatic_oop_offsets[nonstatic_oop_map_count - 1] == nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
(u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * heapOopSize) ) { real_offset -
int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) *
heapOopSize ) {
// Extend current oop map // Extend current oop map
nonstatic_oop_length[nonstatic_oop_map_count - 1] += 1; nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1;
} else { } else {
// Create new oop map // Create new oop map
nonstatic_oop_offsets[nonstatic_oop_map_count] = (u2)real_offset; nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
nonstatic_oop_length [nonstatic_oop_map_count] = 1; nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
nonstatic_oop_map_count += 1; nonstatic_oop_map_count += 1;
if( first_nonstatic_oop_offset == 0 ) { // Undefined if( first_nonstatic_oop_offset == 0 ) { // Undefined
first_nonstatic_oop_offset = real_offset; first_nonstatic_oop_offset = real_offset;
@ -3176,8 +3184,10 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
assert(instance_size == align_object_size(align_size_up((instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize), wordSize) / wordSize), "consistent layout helper value"); assert(instance_size == align_object_size(align_size_up((instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize), wordSize) / wordSize), "consistent layout helper value");
// Size of non-static oop map blocks (in words) allocated at end of klass // Number of non-static oop map blocks allocated at end of klass.
int nonstatic_oop_map_size = compute_oop_map_size(super_klass, nonstatic_oop_map_count, first_nonstatic_oop_offset); const unsigned int total_oop_map_count =
compute_oop_map_count(super_klass, nonstatic_oop_map_count,
first_nonstatic_oop_offset);
// Compute reference type // Compute reference type
ReferenceType rt; ReferenceType rt;
@ -3188,14 +3198,15 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
} }
// We can now create the basic klassOop for this klass // We can now create the basic klassOop for this klass
klassOop ik = oopFactory::new_instanceKlass( klassOop ik = oopFactory::new_instanceKlass(vtable_size, itable_size,
vtable_size, itable_size, static_field_size,
static_field_size, nonstatic_oop_map_size, total_oop_map_count,
rt, CHECK_(nullHandle)); rt, CHECK_(nullHandle));
instanceKlassHandle this_klass (THREAD, ik); instanceKlassHandle this_klass (THREAD, ik);
assert(this_klass->static_field_size() == static_field_size && assert(this_klass->static_field_size() == static_field_size, "sanity");
this_klass->nonstatic_oop_map_size() == nonstatic_oop_map_size, "sanity check"); assert(this_klass->nonstatic_oop_map_count() == total_oop_map_count,
"sanity");
// Fill in information already parsed // Fill in information already parsed
this_klass->set_access_flags(access_flags); this_klass->set_access_flags(access_flags);
@ -3276,7 +3287,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
klassItable::setup_itable_offset_table(this_klass); klassItable::setup_itable_offset_table(this_klass);
// Do final class setup // Do final class setup
fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_length); fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_counts);
set_precomputed_flags(this_klass); set_precomputed_flags(this_klass);
@ -3369,66 +3380,73 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
} }
int ClassFileParser::compute_oop_map_size(instanceKlassHandle super, int nonstatic_oop_map_count, int first_nonstatic_oop_offset) { unsigned int
int map_size = super.is_null() ? 0 : super->nonstatic_oop_map_size(); ClassFileParser::compute_oop_map_count(instanceKlassHandle super,
unsigned int nonstatic_oop_map_count,
int first_nonstatic_oop_offset) {
unsigned int map_count =
super.is_null() ? 0 : super->nonstatic_oop_map_count();
if (nonstatic_oop_map_count > 0) { if (nonstatic_oop_map_count > 0) {
// We have oops to add to map // We have oops to add to map
if (map_size == 0) { if (map_count == 0) {
map_size = nonstatic_oop_map_count; map_count = nonstatic_oop_map_count;
} else { } else {
// Check whether we should add a new map block or whether the last one can be extended // Check whether we should add a new map block or whether the last one can
OopMapBlock* first_map = super->start_of_nonstatic_oop_maps(); // be extended
OopMapBlock* last_map = first_map + map_size - 1; OopMapBlock* const first_map = super->start_of_nonstatic_oop_maps();
OopMapBlock* const last_map = first_map + map_count - 1;
int next_offset = last_map->offset() + (last_map->length() * heapOopSize); int next_offset = last_map->offset() + last_map->count() * heapOopSize;
if (next_offset == first_nonstatic_oop_offset) { if (next_offset == first_nonstatic_oop_offset) {
// There is no gap between superklass's last oop field and first // There is no gap between superklass's last oop field and first
// local oop field, merge maps. // local oop field, merge maps.
nonstatic_oop_map_count -= 1; nonstatic_oop_map_count -= 1;
} else { } else {
// Superklass didn't end with an oop field, add extra maps // Superklass didn't end with an oop field, add extra maps
assert(next_offset<first_nonstatic_oop_offset, "just checking"); assert(next_offset < first_nonstatic_oop_offset, "just checking");
} }
map_size += nonstatic_oop_map_count; map_count += nonstatic_oop_map_count;
} }
} }
return map_size; return map_count;
} }
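
The merge test above reduces to one offset comparison: if the subclass's first oop field begins exactly where the superclass's last run of oops ends, the last inherited map block can simply be extended. A minimal standalone sketch of that rule, with invented names and an assumed 8-byte oop size rather than the real HotSpot types:

#include <cassert>
#include <cstdio>

struct SimpleOopMap { int offset; unsigned int count; };

static const int kHeapOopSize = 8;          // assumed oop size for the example

// True when the subclass's first oop field starts exactly where the
// superclass's last run of oops ends, so one map block can describe both.
bool can_merge_with_super(const SimpleOopMap& super_last, int first_sub_oop_offset) {
  int next_offset = super_last.offset + int(super_last.count) * kHeapOopSize;
  assert(next_offset <= first_sub_oop_offset);
  return next_offset == first_sub_oop_offset;
}

int main() {
  SimpleOopMap super_last = { 16, 3 };                   // oops at offsets 16, 24, 32
  printf("%d\n", can_merge_with_super(super_last, 40));  // contiguous -> extend last map
  printf("%d\n", can_merge_with_super(super_last, 48));  // gap -> new map block needed
  return 0;
}
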
void ClassFileParser::fill_oop_maps(instanceKlassHandle k, void ClassFileParser::fill_oop_maps(instanceKlassHandle k,
int nonstatic_oop_map_count, unsigned int nonstatic_oop_map_count,
u2* nonstatic_oop_offsets, u2* nonstatic_oop_length) { int* nonstatic_oop_offsets,
unsigned int* nonstatic_oop_counts) {
OopMapBlock* this_oop_map = k->start_of_nonstatic_oop_maps(); OopMapBlock* this_oop_map = k->start_of_nonstatic_oop_maps();
OopMapBlock* last_oop_map = this_oop_map + k->nonstatic_oop_map_size(); const instanceKlass* const super = k->superklass();
instanceKlass* super = k->superklass(); const unsigned int super_count = super ? super->nonstatic_oop_map_count() : 0;
if (super != NULL) { if (super_count > 0) {
int super_oop_map_size = super->nonstatic_oop_map_size();
OopMapBlock* super_oop_map = super->start_of_nonstatic_oop_maps();
// Copy maps from superklass // Copy maps from superklass
while (super_oop_map_size-- > 0) { OopMapBlock* super_oop_map = super->start_of_nonstatic_oop_maps();
for (unsigned int i = 0; i < super_count; ++i) {
*this_oop_map++ = *super_oop_map++; *this_oop_map++ = *super_oop_map++;
} }
} }
if (nonstatic_oop_map_count > 0) { if (nonstatic_oop_map_count > 0) {
if (this_oop_map + nonstatic_oop_map_count > last_oop_map) { if (super_count + nonstatic_oop_map_count > k->nonstatic_oop_map_count()) {
// Calculated in compute_oop_map_size() number of oop maps is less then // The counts differ because there is no gap between superklass's last oop
// collected oop maps since there is no gap between superklass's last oop // field and the first local oop field. Extend the last oop map copied
// field and first local oop field. Extend the last oop map copied
// from the superklass instead of creating new one. // from the superklass instead of creating new one.
nonstatic_oop_map_count--; nonstatic_oop_map_count--;
nonstatic_oop_offsets++; nonstatic_oop_offsets++;
this_oop_map--; this_oop_map--;
this_oop_map->set_length(this_oop_map->length() + *nonstatic_oop_length++); this_oop_map->set_count(this_oop_map->count() + *nonstatic_oop_counts++);
this_oop_map++; this_oop_map++;
} }
assert((this_oop_map + nonstatic_oop_map_count) == last_oop_map, "just checking");
// Add new map blocks, fill them // Add new map blocks, fill them
while (nonstatic_oop_map_count-- > 0) { while (nonstatic_oop_map_count-- > 0) {
this_oop_map->set_offset(*nonstatic_oop_offsets++); this_oop_map->set_offset(*nonstatic_oop_offsets++);
this_oop_map->set_length(*nonstatic_oop_length++); this_oop_map->set_count(*nonstatic_oop_counts++);
this_oop_map++; this_oop_map++;
} }
assert(k->start_of_nonstatic_oop_maps() + k->nonstatic_oop_map_count() ==
this_oop_map, "sanity");
} }
} }


@ -61,7 +61,6 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
int length, int length,
Handle class_loader, Handle class_loader,
Handle protection_domain, Handle protection_domain,
PerfTraceTime* vmtimer,
symbolHandle class_name, symbolHandle class_name,
TRAPS); TRAPS);
@ -126,10 +125,13 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
int runtime_invisible_annotations_length, TRAPS); int runtime_invisible_annotations_length, TRAPS);
// Final setup // Final setup
int compute_oop_map_size(instanceKlassHandle super, int nonstatic_oop_count, unsigned int compute_oop_map_count(instanceKlassHandle super,
unsigned int nonstatic_oop_count,
int first_nonstatic_oop_offset); int first_nonstatic_oop_offset);
void fill_oop_maps(instanceKlassHandle k, int nonstatic_oop_map_count, void fill_oop_maps(instanceKlassHandle k,
u2* nonstatic_oop_offsets, u2* nonstatic_oop_length); unsigned int nonstatic_oop_map_count,
int* nonstatic_oop_offsets,
unsigned int* nonstatic_oop_counts);
void set_precomputed_flags(instanceKlassHandle k); void set_precomputed_flags(instanceKlassHandle k);
objArrayHandle compute_transitive_interfaces(instanceKlassHandle super, objArrayHandle compute_transitive_interfaces(instanceKlassHandle super,
objArrayHandle local_ifs, TRAPS); objArrayHandle local_ifs, TRAPS);


@ -48,9 +48,26 @@ static canonicalize_fn_t CanonicalizeEntry = NULL;
PerfCounter* ClassLoader::_perf_accumulated_time = NULL; PerfCounter* ClassLoader::_perf_accumulated_time = NULL;
PerfCounter* ClassLoader::_perf_classes_inited = NULL; PerfCounter* ClassLoader::_perf_classes_inited = NULL;
PerfCounter* ClassLoader::_perf_class_init_time = NULL; PerfCounter* ClassLoader::_perf_class_init_time = NULL;
PerfCounter* ClassLoader::_perf_class_init_selftime = NULL;
PerfCounter* ClassLoader::_perf_classes_verified = NULL;
PerfCounter* ClassLoader::_perf_class_verify_time = NULL; PerfCounter* ClassLoader::_perf_class_verify_time = NULL;
PerfCounter* ClassLoader::_perf_class_verify_selftime = NULL;
PerfCounter* ClassLoader::_perf_classes_linked = NULL; PerfCounter* ClassLoader::_perf_classes_linked = NULL;
PerfCounter* ClassLoader::_perf_class_link_time = NULL; PerfCounter* ClassLoader::_perf_class_link_time = NULL;
PerfCounter* ClassLoader::_perf_class_link_selftime = NULL;
PerfCounter* ClassLoader::_perf_class_parse_time = NULL;
PerfCounter* ClassLoader::_perf_class_parse_selftime = NULL;
PerfCounter* ClassLoader::_perf_sys_class_lookup_time = NULL;
PerfCounter* ClassLoader::_perf_shared_classload_time = NULL;
PerfCounter* ClassLoader::_perf_sys_classload_time = NULL;
PerfCounter* ClassLoader::_perf_app_classload_time = NULL;
PerfCounter* ClassLoader::_perf_app_classload_selftime = NULL;
PerfCounter* ClassLoader::_perf_app_classload_count = NULL;
PerfCounter* ClassLoader::_perf_define_appclasses = NULL;
PerfCounter* ClassLoader::_perf_define_appclass_time = NULL;
PerfCounter* ClassLoader::_perf_define_appclass_selftime = NULL;
PerfCounter* ClassLoader::_perf_app_classfile_bytes_read = NULL;
PerfCounter* ClassLoader::_perf_sys_classfile_bytes_read = NULL;
PerfCounter* ClassLoader::_sync_systemLoaderLockContentionRate = NULL; PerfCounter* ClassLoader::_sync_systemLoaderLockContentionRate = NULL;
PerfCounter* ClassLoader::_sync_nonSystemLoaderLockContentionRate = NULL; PerfCounter* ClassLoader::_sync_nonSystemLoaderLockContentionRate = NULL;
PerfCounter* ClassLoader::_sync_JVMFindLoadedClassLockFreeCounter = NULL; PerfCounter* ClassLoader::_sync_JVMFindLoadedClassLockFreeCounter = NULL;
@ -152,6 +169,9 @@ ClassFileStream* ClassPathDirEntry::open_stream(const char* name) {
hpi::close(file_handle); hpi::close(file_handle);
// construct ClassFileStream // construct ClassFileStream
if (num_read == (size_t)st.st_size) { if (num_read == (size_t)st.st_size) {
if (UsePerfData) {
ClassLoader::perf_sys_classfile_bytes_read()->inc(num_read);
}
return new ClassFileStream(buffer, st.st_size, _dir); // Resource allocated return new ClassFileStream(buffer, st.st_size, _dir); // Resource allocated
} }
} }
@ -198,6 +218,9 @@ ClassFileStream* ClassPathZipEntry::open_stream(const char* name) {
buffer = NEW_RESOURCE_ARRAY(u1, filesize); buffer = NEW_RESOURCE_ARRAY(u1, filesize);
if (!(*ReadEntry)(_zip, entry, buffer, filename)) return NULL; if (!(*ReadEntry)(_zip, entry, buffer, filename)) return NULL;
} }
if (UsePerfData) {
ClassLoader::perf_sys_classfile_bytes_read()->inc(filesize);
}
// return result // return result
return new ClassFileStream(buffer, filesize, _zip_name); // Resource allocated return new ClassFileStream(buffer, filesize, _zip_name); // Resource allocated
} }
@ -825,7 +848,9 @@ instanceKlassHandle ClassLoader::load_classfile(symbolHandle h_name, TRAPS) {
ClassFileStream* stream = NULL; ClassFileStream* stream = NULL;
int classpath_index = 0; int classpath_index = 0;
{ {
PerfTraceTime vmtimer(perf_accumulated_time()); PerfClassTraceTime vmtimer(perf_sys_class_lookup_time(),
((JavaThread*) THREAD)->get_thread_stat()->perf_timers_addr(),
PerfClassTraceTime::CLASS_LOAD);
ClassPathEntry* e = _first_entry; ClassPathEntry* e = _first_entry;
while (e != NULL) { while (e != NULL) {
stream = e->open_stream(name); stream = e->open_stream(name);
@ -890,11 +915,29 @@ void ClassLoader::initialize() {
// jvmstat performance counters // jvmstat performance counters
NEWPERFTICKCOUNTER(_perf_accumulated_time, SUN_CLS, "time"); NEWPERFTICKCOUNTER(_perf_accumulated_time, SUN_CLS, "time");
NEWPERFTICKCOUNTER(_perf_class_init_time, SUN_CLS, "classInitTime"); NEWPERFTICKCOUNTER(_perf_class_init_time, SUN_CLS, "classInitTime");
NEWPERFTICKCOUNTER(_perf_class_init_selftime, SUN_CLS, "classInitTime.self");
NEWPERFTICKCOUNTER(_perf_class_verify_time, SUN_CLS, "classVerifyTime"); NEWPERFTICKCOUNTER(_perf_class_verify_time, SUN_CLS, "classVerifyTime");
NEWPERFTICKCOUNTER(_perf_class_verify_selftime, SUN_CLS, "classVerifyTime.self");
NEWPERFTICKCOUNTER(_perf_class_link_time, SUN_CLS, "classLinkedTime"); NEWPERFTICKCOUNTER(_perf_class_link_time, SUN_CLS, "classLinkedTime");
NEWPERFTICKCOUNTER(_perf_class_link_selftime, SUN_CLS, "classLinkedTime.self");
NEWPERFEVENTCOUNTER(_perf_classes_inited, SUN_CLS, "initializedClasses"); NEWPERFEVENTCOUNTER(_perf_classes_inited, SUN_CLS, "initializedClasses");
NEWPERFEVENTCOUNTER(_perf_classes_linked, SUN_CLS, "linkedClasses"); NEWPERFEVENTCOUNTER(_perf_classes_linked, SUN_CLS, "linkedClasses");
NEWPERFEVENTCOUNTER(_perf_classes_verified, SUN_CLS, "verifiedClasses");
NEWPERFTICKCOUNTER(_perf_class_parse_time, SUN_CLS, "parseClassTime");
NEWPERFTICKCOUNTER(_perf_class_parse_selftime, SUN_CLS, "parseClassTime.self");
NEWPERFTICKCOUNTER(_perf_sys_class_lookup_time, SUN_CLS, "lookupSysClassTime");
NEWPERFTICKCOUNTER(_perf_shared_classload_time, SUN_CLS, "sharedClassLoadTime");
NEWPERFTICKCOUNTER(_perf_sys_classload_time, SUN_CLS, "sysClassLoadTime");
NEWPERFTICKCOUNTER(_perf_app_classload_time, SUN_CLS, "appClassLoadTime");
NEWPERFTICKCOUNTER(_perf_app_classload_selftime, SUN_CLS, "appClassLoadTime.self");
NEWPERFEVENTCOUNTER(_perf_app_classload_count, SUN_CLS, "appClassLoadCount");
NEWPERFTICKCOUNTER(_perf_define_appclasses, SUN_CLS, "defineAppClasses");
NEWPERFTICKCOUNTER(_perf_define_appclass_time, SUN_CLS, "defineAppClassTime");
NEWPERFTICKCOUNTER(_perf_define_appclass_selftime, SUN_CLS, "defineAppClassTime.self");
NEWPERFBYTECOUNTER(_perf_app_classfile_bytes_read, SUN_CLS, "appClassBytes");
NEWPERFBYTECOUNTER(_perf_sys_classfile_bytes_read, SUN_CLS, "sysClassBytes");
// The following performance counters are added for measuring the impact // The following performance counters are added for measuring the impact
// of the bug fix of 6365597. They are mainly focused on finding out // of the bug fix of 6365597. They are mainly focused on finding out


@ -149,9 +149,26 @@ class ClassLoader: AllStatic {
static PerfCounter* _perf_accumulated_time; static PerfCounter* _perf_accumulated_time;
static PerfCounter* _perf_classes_inited; static PerfCounter* _perf_classes_inited;
static PerfCounter* _perf_class_init_time; static PerfCounter* _perf_class_init_time;
static PerfCounter* _perf_class_init_selftime;
static PerfCounter* _perf_classes_verified;
static PerfCounter* _perf_class_verify_time; static PerfCounter* _perf_class_verify_time;
static PerfCounter* _perf_class_verify_selftime;
static PerfCounter* _perf_classes_linked; static PerfCounter* _perf_classes_linked;
static PerfCounter* _perf_class_link_time; static PerfCounter* _perf_class_link_time;
static PerfCounter* _perf_class_link_selftime;
static PerfCounter* _perf_class_parse_time;
static PerfCounter* _perf_class_parse_selftime;
static PerfCounter* _perf_sys_class_lookup_time;
static PerfCounter* _perf_shared_classload_time;
static PerfCounter* _perf_sys_classload_time;
static PerfCounter* _perf_app_classload_time;
static PerfCounter* _perf_app_classload_selftime;
static PerfCounter* _perf_app_classload_count;
static PerfCounter* _perf_define_appclasses;
static PerfCounter* _perf_define_appclass_time;
static PerfCounter* _perf_define_appclass_selftime;
static PerfCounter* _perf_app_classfile_bytes_read;
static PerfCounter* _perf_sys_classfile_bytes_read;
static PerfCounter* _sync_systemLoaderLockContentionRate; static PerfCounter* _sync_systemLoaderLockContentionRate;
static PerfCounter* _sync_nonSystemLoaderLockContentionRate; static PerfCounter* _sync_nonSystemLoaderLockContentionRate;
@ -199,9 +216,26 @@ class ClassLoader: AllStatic {
static PerfCounter* perf_accumulated_time() { return _perf_accumulated_time; } static PerfCounter* perf_accumulated_time() { return _perf_accumulated_time; }
static PerfCounter* perf_classes_inited() { return _perf_classes_inited; } static PerfCounter* perf_classes_inited() { return _perf_classes_inited; }
static PerfCounter* perf_class_init_time() { return _perf_class_init_time; } static PerfCounter* perf_class_init_time() { return _perf_class_init_time; }
static PerfCounter* perf_class_init_selftime() { return _perf_class_init_selftime; }
static PerfCounter* perf_classes_verified() { return _perf_classes_verified; }
static PerfCounter* perf_class_verify_time() { return _perf_class_verify_time; } static PerfCounter* perf_class_verify_time() { return _perf_class_verify_time; }
static PerfCounter* perf_class_verify_selftime() { return _perf_class_verify_selftime; }
static PerfCounter* perf_classes_linked() { return _perf_classes_linked; } static PerfCounter* perf_classes_linked() { return _perf_classes_linked; }
static PerfCounter* perf_class_link_time() { return _perf_class_link_time; } static PerfCounter* perf_class_link_time() { return _perf_class_link_time; }
static PerfCounter* perf_class_link_selftime() { return _perf_class_link_selftime; }
static PerfCounter* perf_class_parse_time() { return _perf_class_parse_time; }
static PerfCounter* perf_class_parse_selftime() { return _perf_class_parse_selftime; }
static PerfCounter* perf_sys_class_lookup_time() { return _perf_sys_class_lookup_time; }
static PerfCounter* perf_shared_classload_time() { return _perf_shared_classload_time; }
static PerfCounter* perf_sys_classload_time() { return _perf_sys_classload_time; }
static PerfCounter* perf_app_classload_time() { return _perf_app_classload_time; }
static PerfCounter* perf_app_classload_selftime() { return _perf_app_classload_selftime; }
static PerfCounter* perf_app_classload_count() { return _perf_app_classload_count; }
static PerfCounter* perf_define_appclasses() { return _perf_define_appclasses; }
static PerfCounter* perf_define_appclass_time() { return _perf_define_appclass_time; }
static PerfCounter* perf_define_appclass_selftime() { return _perf_define_appclass_selftime; }
static PerfCounter* perf_app_classfile_bytes_read() { return _perf_app_classfile_bytes_read; }
static PerfCounter* perf_sys_classfile_bytes_read() { return _perf_sys_classfile_bytes_read; }
// Record how often system loader lock object is contended // Record how often system loader lock object is contended
static PerfCounter* sync_systemLoaderLockContentionRate() { static PerfCounter* sync_systemLoaderLockContentionRate() {
@ -307,3 +341,118 @@ class ClassLoader: AllStatic {
static int compile_the_world_counter() { return _compile_the_world_counter; } static int compile_the_world_counter() { return _compile_the_world_counter; }
#endif //PRODUCT #endif //PRODUCT
}; };
// PerfClassTraceTime is used to measure time for class loading related events.
// This class tracks cumulative time and exclusive time for specific event types.
// During the execution of one event, other event types (e.g. class loading and
// resolution) as well as recursive calls of the same event type could happen.
// Only one elapsed timer (cumulative) and one thread-local self timer (exclusive)
// (i.e. only one event type) are active at a time even if multiple PerfClassTraceTime
// instances have been created as multiple events are happening.
class PerfClassTraceTime {
public:
enum {
CLASS_LOAD = 0,
PARSE_CLASS = 1,
CLASS_LINK = 2,
CLASS_VERIFY = 3,
CLASS_CLINIT = 4,
DEFINE_CLASS = 5,
EVENT_TYPE_COUNT = 6
};
protected:
// _t tracks time from initialization to destruction of this timer instance
// including time for all other event types, and recursive calls of this type.
// When a timer is called recursively, the elapsedTimer _t would not be used.
elapsedTimer _t;
PerfLongCounter* _timep;
PerfLongCounter* _selftimep;
PerfLongCounter* _eventp;
// pointer to thread-local recursion counter and timer array
// The thread_local timers track cumulative time for specific event types
// exclusive of time for other event types, but including recursive calls
// of the same type.
int* _recursion_counters;
elapsedTimer* _timers;
int _event_type;
int _prev_active_event;
public:
inline PerfClassTraceTime(PerfLongCounter* timep, /* counter incremented with inclusive time */
PerfLongCounter* selftimep, /* counter incremented with exclusive time */
PerfLongCounter* eventp, /* event counter */
int* recursion_counters, /* thread-local recursion counter array */
elapsedTimer* timers, /* thread-local timer array */
int type /* event type */ ) :
_timep(timep), _selftimep(selftimep), _eventp(eventp), _recursion_counters(recursion_counters), _timers(timers), _event_type(type) {
initialize();
}
inline PerfClassTraceTime(PerfLongCounter* timep, /* counter incremented with inclusive time */
elapsedTimer* timers, /* thread-local timer array */
int type /* event type */ ) :
_timep(timep), _selftimep(NULL), _eventp(NULL), _recursion_counters(NULL), _timers(timers), _event_type(type) {
initialize();
}
void initialize() {
if (!UsePerfData) return;
if (_eventp != NULL) {
// increment the event counter
_eventp->inc();
}
// stop the current active thread-local timer to measure inclusive time
_prev_active_event = -1;
for (int i=0; i < EVENT_TYPE_COUNT; i++) {
if (_timers[i].is_active()) {
assert(_prev_active_event == -1, "should have only one active timer");
_prev_active_event = i;
_timers[i].stop();
}
}
if (_recursion_counters == NULL || (_recursion_counters[_event_type])++ == 0) {
// start the inclusive timer if not recursively called
_t.start();
}
// start thread-local timer of the given event type
if (!_timers[_event_type].is_active()) {
_timers[_event_type].start();
}
}
inline void suspend() { _t.stop(); _timers[_event_type].stop(); }
inline void resume() { _t.start(); _timers[_event_type].start(); }
~PerfClassTraceTime() {
if (!UsePerfData) return;
// stop the thread-local timer as the event completes
// and resume the thread-local timer of the event next on the stack
_timers[_event_type].stop();
jlong selftime = _timers[_event_type].ticks();
if (_prev_active_event >= 0) {
_timers[_prev_active_event].start();
}
if (_recursion_counters != NULL && --(_recursion_counters[_event_type]) > 0) return;
// increment the counters only on the leaf call
_t.stop();
_timep->inc(_t.ticks());
if (_selftimep != NULL) {
_selftimep->inc(selftime);
}
// add all class loading related event selftime to the accumulated time counter
ClassLoader::perf_accumulated_time()->inc(selftime);
// reset the timer
_timers[_event_type].reset();
}
};
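
The comment block above distinguishes the inclusive counter (_timep) from the exclusive one (_selftimep). Stripped of the counter and recursion machinery, the bookkeeping amounts to "subtract the nested event's total from the enclosing event's total". A standalone illustration using plain std::chrono and made-up sleep times (none of this is HotSpot code):

#include <chrono>
#include <cstdio>
#include <thread>

using Clock = std::chrono::steady_clock;
using Ms = std::chrono::duration<double, std::milli>;

static void work(int ms) { std::this_thread::sleep_for(std::chrono::milliseconds(ms)); }

int main() {
  // A "parse" event that triggers a nested "load" event, as when parsing a
  // class recursively loads its super type.
  Clock::time_point parse_begin = Clock::now();
  work(30);                              // parsing before the nested event

  Clock::time_point load_begin = Clock::now();
  work(20);                              // nested class-load work
  Ms load_total = Clock::now() - load_begin;

  work(10);                              // parsing after the nested event
  Ms parse_total = Clock::now() - parse_begin;

  // The inclusive counter would grow by parse_total; the .self counter only
  // by parse_total minus the nested event's time.
  Ms parse_self = parse_total - load_total;
  printf("parse: %.0f ms inclusive, %.0f ms self; load: %.0f ms\n",
         parse_total.count(), parse_self.count(), load_total.count());
  return 0;
}
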


@ -1306,13 +1306,18 @@ static instanceKlassHandle download_and_retry_class_load(
instanceKlassHandle SystemDictionary::load_instance_class(symbolHandle class_name, Handle class_loader, TRAPS) { instanceKlassHandle SystemDictionary::load_instance_class(symbolHandle class_name, Handle class_loader, TRAPS) {
instanceKlassHandle nh = instanceKlassHandle(); // null Handle instanceKlassHandle nh = instanceKlassHandle(); // null Handle
if (class_loader.is_null()) { if (class_loader.is_null()) {
// Search the shared system dictionary for classes preloaded into the // Search the shared system dictionary for classes preloaded into the
// shared spaces. // shared spaces.
instanceKlassHandle k; instanceKlassHandle k;
{
PerfTraceTime vmtimer(ClassLoader::perf_shared_classload_time());
k = load_shared_class(class_name, class_loader, THREAD); k = load_shared_class(class_name, class_loader, THREAD);
}
if (k.is_null()) { if (k.is_null()) {
// Use VM class loader // Use VM class loader
PerfTraceTime vmtimer(ClassLoader::perf_sys_classload_time());
k = ClassLoader::load_classfile(class_name, CHECK_(nh)); k = ClassLoader::load_classfile(class_name, CHECK_(nh));
} }
@ -1334,6 +1339,16 @@ instanceKlassHandle SystemDictionary::load_instance_class(symbolHandle class_nam
// Use user specified class loader to load class. Call loadClass operation on class_loader. // Use user specified class loader to load class. Call loadClass operation on class_loader.
ResourceMark rm(THREAD); ResourceMark rm(THREAD);
assert(THREAD->is_Java_thread(), "must be a JavaThread");
JavaThread* jt = (JavaThread*) THREAD;
PerfClassTraceTime vmtimer(ClassLoader::perf_app_classload_time(),
ClassLoader::perf_app_classload_selftime(),
ClassLoader::perf_app_classload_count(),
jt->get_thread_stat()->perf_recursion_counts_addr(),
jt->get_thread_stat()->perf_timers_addr(),
PerfClassTraceTime::CLASS_LOAD);
Handle s = java_lang_String::create_from_symbol(class_name, CHECK_(nh)); Handle s = java_lang_String::create_from_symbol(class_name, CHECK_(nh));
// Translate to external class name format, i.e., convert '/' chars to '.' // Translate to external class name format, i.e., convert '/' chars to '.'
Handle string = java_lang_String::externalize_classname(s, CHECK_(nh)); Handle string = java_lang_String::externalize_classname(s, CHECK_(nh));


@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it


@ -1,5 +1,5 @@
/* /*
* Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -280,6 +280,7 @@ int DebugInformationRecorder::find_sharable_decode_offset(int stream_offset) {
void DebugInformationRecorder::describe_scope(int pc_offset, void DebugInformationRecorder::describe_scope(int pc_offset,
ciMethod* method, ciMethod* method,
int bci, int bci,
bool reexecute,
DebugToken* locals, DebugToken* locals,
DebugToken* expressions, DebugToken* expressions,
DebugToken* monitors) { DebugToken* monitors) {
@ -291,6 +292,9 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
int stream_offset = stream()->position(); int stream_offset = stream()->position();
last_pd->set_scope_decode_offset(stream_offset); last_pd->set_scope_decode_offset(stream_offset);
// Record reexecute bit into pcDesc
last_pd->set_should_reexecute(reexecute);
// serialize sender stream offset // serialize sender stream offset
stream()->write_int(sender_stream_offset); stream()->write_int(sender_stream_offset);


@ -87,6 +87,7 @@ class DebugInformationRecorder: public ResourceObj {
void describe_scope(int pc_offset, void describe_scope(int pc_offset,
ciMethod* method, ciMethod* method,
int bci, int bci,
bool reexecute,
DebugToken* locals = NULL, DebugToken* locals = NULL,
DebugToken* expressions = NULL, DebugToken* expressions = NULL,
DebugToken* monitors = NULL); DebugToken* monitors = NULL);


@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -966,7 +966,7 @@ ScopeDesc* nmethod::scope_desc_at(address pc) {
PcDesc* pd = pc_desc_at(pc); PcDesc* pd = pc_desc_at(pc);
guarantee(pd != NULL, "scope must be present"); guarantee(pd != NULL, "scope must be present");
return new ScopeDesc(this, pd->scope_decode_offset(), return new ScopeDesc(this, pd->scope_decode_offset(),
pd->obj_decode_offset()); pd->obj_decode_offset(), pd->should_reexecute());
} }
@ -1079,6 +1079,10 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
this, (address)_method, (address)cause); this, (address)_method, (address)cause);
cause->klass()->print(); cause->klass()->print();
} }
// Unlink the osr method, so we do not look this up again
if (is_osr_method()) {
invalidate_osr_method();
}
// If _method is already NULL the methodOop is about to be unloaded, // If _method is already NULL the methodOop is about to be unloaded,
// so we don't have to break the cycle. Note that it is possible to // so we don't have to break the cycle. Note that it is possible to
// have the methodOop live here, in case we unload the nmethod because // have the methodOop live here, in case we unload the nmethod because
@ -1148,7 +1152,7 @@ void nmethod::make_not_entrant_or_zombie(int state) {
// will never be used anymore. That the nmethods only get removed when class unloading // will never be used anymore. That the nmethods only get removed when class unloading
// happens makes life much simpler, since the nmethods are not just going to disappear // happens makes life much simpler, since the nmethods are not just going to disappear
// out of the blue. // out of the blue.
if (is_osr_only_method()) { if (is_osr_method()) {
if (osr_entry_bci() != InvalidOSREntryBci) { if (osr_entry_bci() != InvalidOSREntryBci) {
// only log this once // only log this once
log_state_change(state); log_state_change(state);
@ -1520,6 +1524,17 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive,
#endif // !PRODUCT #endif // !PRODUCT
} }
// This method is called twice during GC -- once while
// tracing the "active" nmethods on thread stacks during
// the (strong) marking phase, and then again when walking
// the code cache contents during the weak roots processing
// phase. The two uses are distinguished by means of the
// do_nmethods() method in the closure "f" below -- which
// answers "yes" in the first case, and "no" in the second
// case. We want to walk the weak roots in the nmethod
// only in the second case. The weak roots in the nmethod
// are the oops in the ExceptionCache and the InlineCache
// oops.
void nmethod::oops_do(OopClosure* f) { void nmethod::oops_do(OopClosure* f) {
// make sure the oops ready to receive visitors // make sure the oops ready to receive visitors
assert(!is_zombie() && !is_unloaded(), assert(!is_zombie() && !is_unloaded(),
@ -1538,19 +1553,25 @@ void nmethod::oops_do(OopClosure* f) {
// Compiled code // Compiled code
f->do_oop((oop*) &_method); f->do_oop((oop*) &_method);
if (!f->do_nmethods()) {
// weak roots processing phase -- update ExceptionCache oops
ExceptionCache* ec = exception_cache(); ExceptionCache* ec = exception_cache();
while(ec != NULL) { while(ec != NULL) {
f->do_oop((oop*)ec->exception_type_addr()); f->do_oop((oop*)ec->exception_type_addr());
ec = ec->next(); ec = ec->next();
} }
} // Else strong roots phase -- skip oops in ExceptionCache
RelocIterator iter(this, low_boundary); RelocIterator iter(this, low_boundary);
while (iter.next()) { while (iter.next()) {
if (iter.type() == relocInfo::oop_type ) { if (iter.type() == relocInfo::oop_type ) {
oop_Relocation* r = iter.oop_reloc(); oop_Relocation* r = iter.oop_reloc();
// In this loop, we must only follow those oops directly embedded in // In this loop, we must only follow those oops directly embedded in
// the code. Other oops (oop_index>0) are seen as part of scopes_oops. // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
assert(1 == (r->oop_is_immediate()) + (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()), "oop must be found in exactly one place"); assert(1 == (r->oop_is_immediate()) +
(r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
"oop must be found in exactly one place");
if (r->oop_is_immediate() && r->oop_value() != NULL) { if (r->oop_is_immediate() && r->oop_value() != NULL) {
f->do_oop(r->oop_addr()); f->do_oop(r->oop_addr());
} }
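
The comment above says the same closure visits an nmethod twice per GC and that do_nmethods() tells the two passes apart. A small sketch of that dispatch pattern with invented types (OopVisitor and FakeNMethod are placeholders, not HotSpot classes):

#include <cstdio>
#include <vector>

struct OopVisitor {
  bool strong_phase;                          // plays the role of do_nmethods()
  bool do_nmethods() const { return strong_phase; }
  void do_oop(int* p) const { printf("visit %d\n", *p); }
};

struct FakeNMethod {
  std::vector<int> strong_oops;   // embedded oops: visited in both passes
  std::vector<int> weak_oops;     // e.g. exception-cache oops: weak pass only

  void oops_do(const OopVisitor& f) {
    for (size_t i = 0; i < strong_oops.size(); ++i) f.do_oop(&strong_oops[i]);
    if (!f.do_nmethods()) {                   // weak-roots pass only
      for (size_t i = 0; i < weak_oops.size(); ++i) f.do_oop(&weak_oops[i]);
    }
  }
};

int main() {
  FakeNMethod nm;
  nm.strong_oops.push_back(1);
  nm.weak_oops.push_back(99);
  OopVisitor strong = { true  };  nm.oops_do(strong);   // skips 99
  OopVisitor weak   = { false };  nm.oops_do(weak);     // also visits 99
  return 0;
}
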
@ -1932,7 +1953,7 @@ void nmethod::verify_interrupt_point(address call_site) {
PcDesc* pd = pc_desc_at(ic->end_of_call()); PcDesc* pd = pc_desc_at(ic->end_of_call());
assert(pd != NULL, "PcDesc must exist"); assert(pd != NULL, "PcDesc must exist");
for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(), for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
pd->obj_decode_offset()); pd->obj_decode_offset(), pd->should_reexecute());
!sd->is_top(); sd = sd->sender()) { !sd->is_top(); sd = sd->sender()) {
sd->verify(); sd->verify();
} }
@ -2181,7 +2202,7 @@ ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
PcDesc* p = pc_desc_near(begin+1); PcDesc* p = pc_desc_near(begin+1);
if (p != NULL && p->real_pc(this) <= end) { if (p != NULL && p->real_pc(this) <= end) {
return new ScopeDesc(this, p->scope_decode_offset(), return new ScopeDesc(this, p->scope_decode_offset(),
p->obj_decode_offset()); p->obj_decode_offset(), p->should_reexecute());
} }
return NULL; return NULL;
} }


@ -314,7 +314,6 @@ class nmethod : public CodeBlob {
bool is_java_method() const { return !method()->is_native(); } bool is_java_method() const { return !method()->is_native(); }
bool is_native_method() const { return method()->is_native(); } bool is_native_method() const { return method()->is_native(); }
bool is_osr_method() const { return _entry_bci != InvocationEntryBci; } bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
bool is_osr_only_method() const { return is_osr_method(); }
bool is_compiled_by_c1() const; bool is_compiled_by_c1() const;
bool is_compiled_by_c2() const; bool is_compiled_by_c2() const;


@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -26,9 +26,11 @@
# include "incls/_pcDesc.cpp.incl" # include "incls/_pcDesc.cpp.incl"
PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset) { PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset) {
assert(sizeof(PcDescFlags) <= 4, "occupies more than a word");
_pc_offset = pc_offset; _pc_offset = pc_offset;
_scope_decode_offset = scope_decode_offset; _scope_decode_offset = scope_decode_offset;
_obj_decode_offset = obj_decode_offset; _obj_decode_offset = obj_decode_offset;
_flags.word = 0;
} }
address PcDesc::real_pc(const nmethod* code) const { address PcDesc::real_pc(const nmethod* code) const {
@ -50,6 +52,7 @@ void PcDesc::print(nmethod* code) {
tty->print(" "); tty->print(" ");
sd->method()->print_short_name(tty); sd->method()->print_short_name(tty);
tty->print(" @%d", sd->bci()); tty->print(" @%d", sd->bci());
tty->print(" reexecute=%s", sd->should_reexecute()?"true":"false");
tty->cr(); tty->cr();
} }
#endif #endif

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -34,6 +34,13 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
int _scope_decode_offset; // offset for scope in nmethod int _scope_decode_offset; // offset for scope in nmethod
int _obj_decode_offset; int _obj_decode_offset;
union PcDescFlags {
int word;
struct {
unsigned int reexecute: 1;
} bits;
} _flags;
public: public:
int pc_offset() const { return _pc_offset; } int pc_offset() const { return _pc_offset; }
int scope_decode_offset() const { return _scope_decode_offset; } int scope_decode_offset() const { return _scope_decode_offset; }
@ -53,6 +60,10 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
upper_offset_limit = (unsigned int)-1 >> 1 upper_offset_limit = (unsigned int)-1 >> 1
}; };
// Flags
bool should_reexecute() const { return _flags.bits.reexecute; }
void set_should_reexecute(bool z) { _flags.bits.reexecute = z; }
// Returns the real pc // Returns the real pc
address real_pc(const nmethod* code) const; address real_pc(const nmethod* code) const;
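
The PcDescFlags union above packs boolean flags into a single word so a PcDesc stays compact and every flag can be cleared with one store (_flags.word = 0). A standalone illustration of the same union-of-bitfields pattern, with a hypothetical spare field added to show room for future flags:

#include <cstdio>

union Flags {
  int word;                                // whole-word view: one store clears everything
  struct {
    unsigned int reexecute : 1;            // bit 0
    unsigned int spare     : 31;           // hypothetical room for future flags
  } bits;
};
static_assert(sizeof(Flags) <= sizeof(int), "flags must stay within one word");

int main() {
  Flags f;
  f.word = 0;                              // clear all flags at once
  f.bits.reexecute = 1;
  printf("reexecute=%u\n", f.bits.reexecute);
  return 0;
}
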


@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -26,17 +26,19 @@
# include "incls/_scopeDesc.cpp.incl" # include "incls/_scopeDesc.cpp.incl"
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset) { ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute) {
_code = code; _code = code;
_decode_offset = decode_offset; _decode_offset = decode_offset;
_objects = decode_object_values(obj_decode_offset); _objects = decode_object_values(obj_decode_offset);
_reexecute = reexecute;
decode_body(); decode_body();
} }
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset) { ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, bool reexecute) {
_code = code; _code = code;
_decode_offset = decode_offset; _decode_offset = decode_offset;
_objects = decode_object_values(DebugInformationRecorder::serialized_null); _objects = decode_object_values(DebugInformationRecorder::serialized_null);
_reexecute = reexecute;
decode_body(); decode_body();
} }
@ -45,6 +47,7 @@ ScopeDesc::ScopeDesc(const ScopeDesc* parent) {
_code = parent->_code; _code = parent->_code;
_decode_offset = parent->_sender_decode_offset; _decode_offset = parent->_sender_decode_offset;
_objects = parent->_objects; _objects = parent->_objects;
_reexecute = false; //reexecute only applies to the first scope
decode_body(); decode_body();
} }
@ -66,6 +69,7 @@ void ScopeDesc::decode_body() {
_sender_decode_offset = stream->read_int(); _sender_decode_offset = stream->read_int();
_method = methodHandle((methodOop) stream->read_oop()); _method = methodHandle((methodOop) stream->read_oop());
_bci = stream->read_bci(); _bci = stream->read_bci();
// decode offsets for body and sender // decode offsets for body and sender
_locals_decode_offset = stream->read_int(); _locals_decode_offset = stream->read_int();
_expressions_decode_offset = stream->read_int(); _expressions_decode_offset = stream->read_int();
@ -170,6 +174,7 @@ void ScopeDesc::print_on(outputStream* st, PcDesc* pd) const {
st->print("ScopeDesc[%d]@" PTR_FORMAT " ", _decode_offset, _code->instructions_begin()); st->print("ScopeDesc[%d]@" PTR_FORMAT " ", _decode_offset, _code->instructions_begin());
st->print_cr(" offset: %d", _decode_offset); st->print_cr(" offset: %d", _decode_offset);
st->print_cr(" bci: %d", bci()); st->print_cr(" bci: %d", bci());
st->print_cr(" reexecute: %s", should_reexecute() ? "true" : "false");
st->print_cr(" locals: %d", _locals_decode_offset); st->print_cr(" locals: %d", _locals_decode_offset);
st->print_cr(" stack: %d", _expressions_decode_offset); st->print_cr(" stack: %d", _expressions_decode_offset);
st->print_cr(" monitor: %d", _monitors_decode_offset); st->print_cr(" monitor: %d", _monitors_decode_offset);


@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -52,16 +52,17 @@ class SimpleScopeDesc : public StackObj {
class ScopeDesc : public ResourceObj { class ScopeDesc : public ResourceObj {
public: public:
// Constructor // Constructor
ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset); ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute);
// Calls above, giving default value of "serialized_null" to the // Calls above, giving default value of "serialized_null" to the
// "obj_decode_offset" argument. (We don't use a default argument to // "obj_decode_offset" argument. (We don't use a default argument to
// avoid a .hpp-.hpp dependency.) // avoid a .hpp-.hpp dependency.)
ScopeDesc(const nmethod* code, int decode_offset); ScopeDesc(const nmethod* code, int decode_offset, bool reexecute);
// JVM state // JVM state
methodHandle method() const { return _method; } methodHandle method() const { return _method; }
int bci() const { return _bci; } int bci() const { return _bci; }
bool should_reexecute() const { return _reexecute; }
GrowableArray<ScopeValue*>* locals(); GrowableArray<ScopeValue*>* locals();
GrowableArray<ScopeValue*>* expressions(); GrowableArray<ScopeValue*>* expressions();
@ -86,6 +87,7 @@ class ScopeDesc : public ResourceObj {
// JVM state // JVM state
methodHandle _method; methodHandle _method;
int _bci; int _bci;
bool _reexecute;
// Decoding offsets // Decoding offsets
int _decode_offset; int _decode_offset;


@ -92,17 +92,50 @@ class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
} }
}; };
// The non-parallel version (the parallel version appears further below). // KlassRememberingOopClosure is used when marking of the permanent generation
class PushAndMarkClosure: public OopClosure { // is being done. It adds fields to support revisiting of klasses
private: // for class unloading. _should_remember_klasses should be set to
// indicate if klasses should be remembered. Currently that is whenever
// CMS class unloading is turned on. The _revisit_stack is used
// to save the klasses for later processing.
class KlassRememberingOopClosure : public OopClosure {
protected:
CMSCollector* _collector; CMSCollector* _collector;
CMSMarkStack* _revisit_stack;
bool const _should_remember_klasses;
public:
void check_remember_klasses() const PRODUCT_RETURN;
virtual const bool should_remember_klasses() const {
check_remember_klasses();
return _should_remember_klasses;
}
virtual void remember_klass(Klass* k);
KlassRememberingOopClosure(CMSCollector* collector,
ReferenceProcessor* rp,
CMSMarkStack* revisit_stack);
};
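
The comment above describes the new base closure: while scanning oops it optionally records each object's klass on a side stack so a later class-unloading pass can revisit them, gated by a flag fixed at construction. A toy sketch of that shape with invented types (Klass, Obj and RememberingVisitor are placeholders, not the CMS classes):

#include <cstdio>
#include <vector>

struct Klass { const char* name; };
struct Obj   { Klass* klass; int payload; };

struct RememberingVisitor {
  std::vector<Klass*>* revisit_stack;      // side stack shared with the collector
  bool remember_klasses;                   // fixed for the whole collection cycle

  void do_obj(Obj* o) {
    // ... normal marking/scanning of o would happen here ...
    if (remember_klasses) revisit_stack->push_back(o->klass);
  }
};

int main() {
  std::vector<Klass*> revisit;
  Klass k = { "Foo" };
  Obj o = { &k, 42 };
  RememberingVisitor v = { &revisit, true };
  v.do_obj(&o);
  printf("klasses queued for revisit: %zu\n", revisit.size());
  return 0;
}
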
// Similar to KlassRememberingOopClosure for use when multiple
// GC threads will execute the closure.
class Par_KlassRememberingOopClosure : public KlassRememberingOopClosure {
public:
Par_KlassRememberingOopClosure(CMSCollector* collector,
ReferenceProcessor* rp,
CMSMarkStack* revisit_stack):
KlassRememberingOopClosure(collector, rp, revisit_stack) {}
virtual void remember_klass(Klass* k);
};
// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public KlassRememberingOopClosure {
private:
MemRegion _span; MemRegion _span;
CMSBitMap* _bit_map; CMSBitMap* _bit_map;
CMSBitMap* _mod_union_table; CMSBitMap* _mod_union_table;
CMSMarkStack* _mark_stack; CMSMarkStack* _mark_stack;
CMSMarkStack* _revisit_stack;
bool _concurrent_precleaning; bool _concurrent_precleaning;
bool const _should_remember_klasses;
protected: protected:
DO_OOP_WORK_DEFN DO_OOP_WORK_DEFN
public: public:
@ -122,10 +155,12 @@ class PushAndMarkClosure: public OopClosure {
Prefetch::style prefetch_style() { Prefetch::style prefetch_style() {
return Prefetch::do_read; return Prefetch::do_read;
} }
virtual const bool should_remember_klasses() const { // In support of class unloading
return _should_remember_klasses; virtual const bool should_remember_mdo() const {
return false;
// return _should_remember_klasses;
} }
virtual void remember_klass(Klass* k); virtual void remember_mdo(DataLayout* v);
}; };
// In the parallel case, the revisit stack, the bit map and the // In the parallel case, the revisit stack, the bit map and the
@ -134,14 +169,11 @@ class PushAndMarkClosure: public OopClosure {
// synchronization (for instance, via CAS). The marking stack // synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with // used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing. // an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public OopClosure { class Par_PushAndMarkClosure: public Par_KlassRememberingOopClosure {
private: private:
CMSCollector* _collector;
MemRegion _span; MemRegion _span;
CMSBitMap* _bit_map; CMSBitMap* _bit_map;
OopTaskQueue* _work_queue; OopTaskQueue* _work_queue;
CMSMarkStack* _revisit_stack;
bool const _should_remember_klasses;
protected: protected:
DO_OOP_WORK_DEFN DO_OOP_WORK_DEFN
public: public:
@ -159,10 +191,12 @@ class Par_PushAndMarkClosure: public OopClosure {
Prefetch::style prefetch_style() { Prefetch::style prefetch_style() {
return Prefetch::do_read; return Prefetch::do_read;
} }
virtual const bool should_remember_klasses() const { // In support of class unloading
return _should_remember_klasses; virtual const bool should_remember_mdo() const {
return false;
// return _should_remember_klasses;
} }
virtual void remember_klass(Klass* k); virtual void remember_mdo(DataLayout* v);
}; };
// The non-parallel version (the parallel version appears further below). // The non-parallel version (the parallel version appears further below).
@ -201,6 +235,12 @@ class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
void set_freelistLock(Mutex* m) { void set_freelistLock(Mutex* m) {
_freelistLock = m; _freelistLock = m;
} }
virtual const bool should_remember_klasses() const {
return _pushAndMarkClosure.should_remember_klasses();
}
virtual void remember_klass(Klass* k) {
_pushAndMarkClosure.remember_klass(k);
}
private: private:
inline void do_yield_check(); inline void do_yield_check();
@ -234,6 +274,16 @@ class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); } inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
bool do_header() { return true; } bool do_header() { return true; }
virtual const bool do_nmethods() const { return true; } virtual const bool do_nmethods() const { return true; }
// When ScanMarkedObjectsAgainClosure is used,
// it passes [Par_]MarkRefsIntoAndScanClosure to oop_oop_iterate(),
// and this delegation is used.
virtual const bool should_remember_klasses() const {
return _par_pushAndMarkClosure.should_remember_klasses();
}
// See comment on should_remember_klasses() above.
virtual void remember_klass(Klass* k) {
_par_pushAndMarkClosure.remember_klass(k);
}
Prefetch::style prefetch_style() { Prefetch::style prefetch_style() {
return Prefetch::do_read; return Prefetch::do_read;
} }
@ -243,17 +293,14 @@ class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
// This closure is used during the concurrent marking phase // This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in // following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure. // the closure MarkFromRootsClosure.
class PushOrMarkClosure: public OopClosure { class PushOrMarkClosure: public KlassRememberingOopClosure {
private: private:
CMSCollector* _collector;
MemRegion _span; MemRegion _span;
CMSBitMap* _bitMap; CMSBitMap* _bitMap;
CMSMarkStack* _markStack; CMSMarkStack* _markStack;
CMSMarkStack* _revisitStack;
HeapWord* const _finger; HeapWord* const _finger;
MarkFromRootsClosure* const MarkFromRootsClosure* const
_parent; _parent;
bool const _should_remember_klasses;
protected: protected:
DO_OOP_WORK_DEFN DO_OOP_WORK_DEFN
public: public:
@ -268,10 +315,13 @@ class PushOrMarkClosure: public OopClosure {
virtual void do_oop(narrowOop* p); virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); } inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); } inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
virtual const bool should_remember_klasses() const { // In support of class unloading
return _should_remember_klasses; virtual const bool should_remember_mdo() const {
return false;
// return _should_remember_klasses;
} }
virtual void remember_klass(Klass* k); virtual void remember_mdo(DataLayout* v);
// Deal with a stack overflow condition // Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost); void handle_stack_overflow(HeapWord* lost);
private: private:
@ -282,20 +332,17 @@ class PushOrMarkClosure: public OopClosure {
// This closure is used during the concurrent marking phase // This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in // following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure. // the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public OopClosure { class Par_PushOrMarkClosure: public Par_KlassRememberingOopClosure {
private: private:
CMSCollector* _collector;
MemRegion _whole_span; MemRegion _whole_span;
MemRegion _span; // local chunk MemRegion _span; // local chunk
CMSBitMap* _bit_map; CMSBitMap* _bit_map;
OopTaskQueue* _work_queue; OopTaskQueue* _work_queue;
CMSMarkStack* _overflow_stack; CMSMarkStack* _overflow_stack;
CMSMarkStack* _revisit_stack;
HeapWord* const _finger; HeapWord* const _finger;
HeapWord** const _global_finger_addr; HeapWord** const _global_finger_addr;
Par_MarkFromRootsClosure* const Par_MarkFromRootsClosure* const
_parent; _parent;
bool const _should_remember_klasses;
protected: protected:
DO_OOP_WORK_DEFN DO_OOP_WORK_DEFN
public: public:
@ -312,10 +359,13 @@ class Par_PushOrMarkClosure: public OopClosure {
virtual void do_oop(narrowOop* p); virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); } inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); } inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
virtual const bool should_remember_klasses() const { // In support of class unloading
return _should_remember_klasses; virtual const bool should_remember_mdo() const {
return false;
// return _should_remember_klasses;
} }
virtual void remember_klass(Klass* k); virtual void remember_mdo(DataLayout* v);
// Deal with a stack overflow condition // Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost); void handle_stack_overflow(HeapWord* lost);
private: private:
@ -328,9 +378,8 @@ class Par_PushOrMarkClosure: public OopClosure {
// processing phase of the CMS final checkpoint step, as // processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered // well as during the concurrent precleaning of the discovered
// reference lists. // reference lists.
class CMSKeepAliveClosure: public OopClosure { class CMSKeepAliveClosure: public KlassRememberingOopClosure {
private: private:
CMSCollector* _collector;
const MemRegion _span; const MemRegion _span;
CMSMarkStack* _mark_stack; CMSMarkStack* _mark_stack;
CMSBitMap* _bit_map; CMSBitMap* _bit_map;
@ -340,14 +389,7 @@ class CMSKeepAliveClosure: public OopClosure {
public: public:
CMSKeepAliveClosure(CMSCollector* collector, MemRegion span, CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
CMSBitMap* bit_map, CMSMarkStack* mark_stack, CMSBitMap* bit_map, CMSMarkStack* mark_stack,
bool cpc): CMSMarkStack* revisit_stack, bool cpc);
_collector(collector),
_span(span),
_bit_map(bit_map),
_mark_stack(mark_stack),
_concurrent_precleaning(cpc) {
assert(!_span.is_empty(), "Empty span could spell trouble");
}
bool concurrent_precleaning() const { return _concurrent_precleaning; } bool concurrent_precleaning() const { return _concurrent_precleaning; }
virtual void do_oop(oop* p); virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p); virtual void do_oop(narrowOop* p);
@ -355,9 +397,8 @@ class CMSKeepAliveClosure: public OopClosure {
inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); } inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
}; };
class CMSInnerParMarkAndPushClosure: public OopClosure { class CMSInnerParMarkAndPushClosure: public Par_KlassRememberingOopClosure {
private: private:
CMSCollector* _collector;
MemRegion _span; MemRegion _span;
OopTaskQueue* _work_queue; OopTaskQueue* _work_queue;
CMSBitMap* _bit_map; CMSBitMap* _bit_map;
@ -366,11 +407,8 @@ class CMSInnerParMarkAndPushClosure: public OopClosure {
public: public:
CMSInnerParMarkAndPushClosure(CMSCollector* collector, CMSInnerParMarkAndPushClosure(CMSCollector* collector,
MemRegion span, CMSBitMap* bit_map, MemRegion span, CMSBitMap* bit_map,
OopTaskQueue* work_queue): CMSMarkStack* revisit_stack,
_collector(collector), OopTaskQueue* work_queue);
_span(span),
_bit_map(bit_map),
_work_queue(work_queue) { }
virtual void do_oop(oop* p); virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p); virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
@ -380,9 +418,8 @@ class CMSInnerParMarkAndPushClosure: public OopClosure {
// A parallel (MT) version of the above, used when // A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference // reference processing is parallel; the only difference
// is in the do_oop method. // is in the do_oop method.
class CMSParKeepAliveClosure: public OopClosure { class CMSParKeepAliveClosure: public Par_KlassRememberingOopClosure {
private: private:
CMSCollector* _collector;
MemRegion _span; MemRegion _span;
OopTaskQueue* _work_queue; OopTaskQueue* _work_queue;
CMSBitMap* _bit_map; CMSBitMap* _bit_map;
@ -394,7 +431,8 @@ class CMSParKeepAliveClosure: public OopClosure {
DO_OOP_WORK_DEFN DO_OOP_WORK_DEFN
public: public:
CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span, CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
CMSBitMap* bit_map, OopTaskQueue* work_queue); CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
OopTaskQueue* work_queue);
virtual void do_oop(oop* p); virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p); virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); } inline void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
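Editorial sketch (not part of the changeset): the closure changes above all follow one refactoring pattern: the collector pointer, the revisit stack, and the should_remember_klasses flag move out of the individual closures and into the shared KlassRememberingOopClosure / Par_KlassRememberingOopClosure bases. The standalone C++ below illustrates only that pattern; every type and member name in it (RevisitStack, KlassRememberingClosure, PushOrMarkLikeClosure) is invented for the example and is not HotSpot code.

#include <cassert>
#include <iostream>
#include <vector>

// Invented stand-ins for the HotSpot types; only the shape matters here.
struct Klass {};

struct RevisitStack {
  std::vector<Klass*> items;
  bool push(Klass* k) { items.push_back(k); return true; }  // never overflows in this toy
};

// Base closure owning the bookkeeping that the diff hoists out of each closure.
class KlassRememberingClosure {
 protected:
  RevisitStack* _revisit_stack;
  bool _should_remember_klasses;
 public:
  KlassRememberingClosure(RevisitStack* rs, bool remember)
      : _revisit_stack(rs), _should_remember_klasses(remember) {}
  bool should_remember_klasses() const { return _should_remember_klasses; }
  void remember_klass(Klass* k) {
    if (!_revisit_stack->push(k)) {
      assert(false && "revisit stack overflow");  // the real code calls fatal()
    }
  }
};

// A concrete closure now only adds what is specific to its own traversal.
class PushOrMarkLikeClosure : public KlassRememberingClosure {
 public:
  PushOrMarkLikeClosure(RevisitStack* rs, bool remember)
      : KlassRememberingClosure(rs, remember) {}
  void do_klass(Klass* k) {
    if (should_remember_klasses()) remember_klass(k);
  }
};

int main() {
  RevisitStack rs;
  Klass k;
  PushOrMarkLikeClosure cl(&rs, /*remember=*/true);
  cl.do_klass(&k);
  std::cout << "revisit stack size: " << rs.items.size() << "\n";  // prints 1
  return 0;
}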

View File

@ -37,16 +37,34 @@ inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) {
} }
} }
inline void PushOrMarkClosure::remember_klass(Klass* k) { #ifndef PRODUCT
if (!_revisitStack->push(oop(k))) { void KlassRememberingOopClosure::check_remember_klasses() const {
assert(_should_remember_klasses == must_remember_klasses(),
"Should remember klasses in this context.");
}
#endif
void KlassRememberingOopClosure::remember_klass(Klass* k) {
if (!_revisit_stack->push(oop(k))) {
fatal("Revisit stack overflow in PushOrMarkClosure"); fatal("Revisit stack overflow in PushOrMarkClosure");
} }
check_remember_klasses();
} }
inline void Par_PushOrMarkClosure::remember_klass(Klass* k) { inline void PushOrMarkClosure::remember_mdo(DataLayout* v) {
// TBD
}
void Par_KlassRememberingOopClosure::remember_klass(Klass* k) {
if (!_revisit_stack->par_push(oop(k))) { if (!_revisit_stack->par_push(oop(k))) {
fatal("Revisit stack overflow in PushOrMarkClosure"); fatal("Revisit stack overflow in Par_KlassRememberingOopClosure");
} }
check_remember_klasses();
}
inline void Par_PushOrMarkClosure::remember_mdo(DataLayout* v) {
// TBD
} }
inline void PushOrMarkClosure::do_yield_check() { inline void PushOrMarkClosure::do_yield_check() {
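Editorial sketch (not part of the changeset): remember_klass() above pushes onto the revisit stack with push(), while the Par_ variant uses par_push() because several GC worker threads may record klasses concurrently. The standalone sketch below shows one way a bounded stack can offer both a serial push and a CAS-based parallel push; the BoundedStack type is invented for illustration and is not CMSMarkStack.

#include <atomic>
#include <cstdio>
#include <cstdlib>

// Invented bounded stack; overflow maps onto the fatal() paths seen above.
template <typename T, int N>
struct BoundedStack {
  T _data[N];
  std::atomic<int> _top;

  BoundedStack() : _top(0) {}

  // Serial push, for single-threaded users (PushOrMarkClosure-style callers).
  bool push(T v) {
    int i = _top.load(std::memory_order_relaxed);
    if (i >= N) return false;                    // caller treats this as fatal
    _data[i] = v;
    _top.store(i + 1, std::memory_order_relaxed);
    return true;
  }

  // Parallel push: claim a slot with compare-and-swap so several GC worker
  // threads can record entries concurrently (Par_* closure-style callers).
  bool par_push(T v) {
    int i = _top.load(std::memory_order_relaxed);
    while (i < N) {
      if (_top.compare_exchange_weak(i, i + 1)) {  // i is reloaded on failure
        _data[i] = v;
        return true;
      }
    }
    return false;
  }
};

int main() {
  BoundedStack<int, 4> s;
  if (!s.push(1) || !s.par_push(2)) {
    std::fprintf(stderr, "revisit stack overflow\n");  // analogue of fatal()
    return EXIT_FAILURE;
  }
  std::printf("entries pushed: %d\n", s._top.load());  // prints 2
  return 0;
}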

View File

@ -3499,6 +3499,7 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
ref_processor()->set_enqueuing_is_done(false); ref_processor()->set_enqueuing_is_done(false);
{ {
// This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
gch->gen_process_strong_roots(_cmsGen->level(), gch->gen_process_strong_roots(_cmsGen->level(),
@ -3623,6 +3624,8 @@ bool CMSCollector::markFromRootsWork(bool asynch) {
verify_overflow_empty(); verify_overflow_empty();
assert(_revisitStack.isEmpty(), "tabula rasa"); assert(_revisitStack.isEmpty(), "tabula rasa");
DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
bool result = false; bool result = false;
if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) { if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
result = do_marking_mt(asynch); result = do_marking_mt(asynch);
@ -3958,24 +3961,24 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
pst->all_tasks_completed(); pst->all_tasks_completed();
} }
class Par_ConcMarkingClosure: public OopClosure { class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure {
private: private:
CMSCollector* _collector;
MemRegion _span; MemRegion _span;
CMSBitMap* _bit_map; CMSBitMap* _bit_map;
CMSMarkStack* _overflow_stack; CMSMarkStack* _overflow_stack;
CMSMarkStack* _revisit_stack; // XXXXXX Check proper use
OopTaskQueue* _work_queue; OopTaskQueue* _work_queue;
protected: protected:
DO_OOP_WORK_DEFN DO_OOP_WORK_DEFN
public: public:
Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue, Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
CMSBitMap* bit_map, CMSMarkStack* overflow_stack): CMSBitMap* bit_map, CMSMarkStack* overflow_stack,
_collector(collector), CMSMarkStack* revisit_stack):
Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
_span(_collector->_span), _span(_collector->_span),
_work_queue(work_queue), _work_queue(work_queue),
_bit_map(bit_map), _bit_map(bit_map),
_overflow_stack(overflow_stack) { } // need to initialize revisit stack etc. _overflow_stack(overflow_stack)
{ }
virtual void do_oop(oop* p); virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p); virtual void do_oop(narrowOop* p);
void trim_queue(size_t max); void trim_queue(size_t max);
@ -4063,8 +4066,9 @@ void CMSConcMarkingTask::do_work_steal(int i) {
oop obj_to_scan; oop obj_to_scan;
CMSBitMap* bm = &(_collector->_markBitMap); CMSBitMap* bm = &(_collector->_markBitMap);
CMSMarkStack* ovflw = &(_collector->_markStack); CMSMarkStack* ovflw = &(_collector->_markStack);
CMSMarkStack* revisit = &(_collector->_revisitStack);
int* seed = _collector->hash_seed(i); int* seed = _collector->hash_seed(i);
Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw); Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw, revisit);
while (true) { while (true) {
cl.trim_queue(0); cl.trim_queue(0);
assert(work_q->size() == 0, "Should have been emptied above"); assert(work_q->size() == 0, "Should have been emptied above");
@ -4089,6 +4093,7 @@ void CMSConcMarkingTask::coordinator_yield() {
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
"CMS thread should hold CMS token"); "CMS thread should hold CMS token");
DEBUG_ONLY(RememberKlassesChecker mux(false);)
// First give up the locks, then yield, then re-lock // First give up the locks, then yield, then re-lock
// We should probably use a constructor/destructor idiom to // We should probably use a constructor/destructor idiom to
// do this unlock/lock or modify the MutexUnlocker class to // do this unlock/lock or modify the MutexUnlocker class to
@ -4165,6 +4170,8 @@ bool CMSCollector::do_marking_mt(bool asynch) {
// multi-threaded marking phase. // multi-threaded marking phase.
ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1); ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
conc_workers()->start_task(&tsk); conc_workers()->start_task(&tsk);
while (tsk.yielded()) { while (tsk.yielded()) {
tsk.coordinator_yield(); tsk.coordinator_yield();
@ -4404,7 +4411,8 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
CMSPrecleanRefsYieldClosure yield_cl(this); CMSPrecleanRefsYieldClosure yield_cl(this);
assert(rp->span().equals(_span), "Spans should be equal"); assert(rp->span().equals(_span), "Spans should be equal");
CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap, CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
&_markStack, true /* preclean */); &_markStack, &_revisitStack,
true /* preclean */);
CMSDrainMarkingStackClosure complete_trace(this, CMSDrainMarkingStackClosure complete_trace(this,
_span, &_markBitMap, &_markStack, _span, &_markBitMap, &_markStack,
&keep_alive, true /* preclean */); &keep_alive, true /* preclean */);
@ -4424,6 +4432,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
bitMapLock()); bitMapLock());
startTimer(); startTimer();
sample_eden(); sample_eden();
// The following will yield to allow foreground // The following will yield to allow foreground
// collection to proceed promptly. XXX YSR: // collection to proceed promptly. XXX YSR:
// The code in this method may need further // The code in this method may need further
@ -4453,6 +4462,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
SurvivorSpacePrecleanClosure SurvivorSpacePrecleanClosure
sss_cl(this, _span, &_markBitMap, &_markStack, sss_cl(this, _span, &_markBitMap, &_markStack,
&pam_cl, before_count, CMSYield); &pam_cl, before_count, CMSYield);
DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
dng->from()->object_iterate_careful(&sss_cl); dng->from()->object_iterate_careful(&sss_cl);
dng->to()->object_iterate_careful(&sss_cl); dng->to()->object_iterate_careful(&sss_cl);
} }
@ -4554,6 +4564,13 @@ size_t CMSCollector::preclean_mod_union_table(
verify_work_stacks_empty(); verify_work_stacks_empty();
verify_overflow_empty(); verify_overflow_empty();
// Turn off checking for this method but turn it back on
// selectively. There are yield points in this method
// but it is difficult to turn the checking off just around
// the yield points. It is simpler to selectively turn
// it on.
DEBUG_ONLY(RememberKlassesChecker mux(false);)
// strategy: starting with the first card, accumulate contiguous // strategy: starting with the first card, accumulate contiguous
// ranges of dirty cards; clear these cards, then scan the region // ranges of dirty cards; clear these cards, then scan the region
// covered by these cards. // covered by these cards.
@ -4582,6 +4599,7 @@ size_t CMSCollector::preclean_mod_union_table(
MemRegion dirtyRegion; MemRegion dirtyRegion;
{ {
stopTimer(); stopTimer();
// Potential yield point
CMSTokenSync ts(true); CMSTokenSync ts(true);
startTimer(); startTimer();
sample_eden(); sample_eden();
@ -4607,6 +4625,7 @@ size_t CMSCollector::preclean_mod_union_table(
assert(numDirtyCards > 0, "consistency check"); assert(numDirtyCards > 0, "consistency check");
HeapWord* stop_point = NULL; HeapWord* stop_point = NULL;
stopTimer(); stopTimer();
// Potential yield point
CMSTokenSyncWithLocks ts(true, gen->freelistLock(), CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
bitMapLock()); bitMapLock());
startTimer(); startTimer();
@ -4614,6 +4633,7 @@ size_t CMSCollector::preclean_mod_union_table(
verify_work_stacks_empty(); verify_work_stacks_empty();
verify_overflow_empty(); verify_overflow_empty();
sample_eden(); sample_eden();
DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
stop_point = stop_point =
gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
} }
@ -4701,6 +4721,7 @@ size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
sample_eden(); sample_eden();
verify_work_stacks_empty(); verify_work_stacks_empty();
verify_overflow_empty(); verify_overflow_empty();
DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
HeapWord* stop_point = HeapWord* stop_point =
gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
if (stop_point != NULL) { if (stop_point != NULL) {
@ -4800,6 +4821,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
assert(haveFreelistLocks(), "must have free list locks"); assert(haveFreelistLocks(), "must have free list locks");
assert_lock_strong(bitMapLock()); assert_lock_strong(bitMapLock());
DEBUG_ONLY(RememberKlassesChecker fmx(CMSClassUnloadingEnabled);)
if (!init_mark_was_synchronous) { if (!init_mark_was_synchronous) {
// We might assume that we need not fill TLAB's when // We might assume that we need not fill TLAB's when
// CMSScavengeBeforeRemark is set, because we may have just done // CMSScavengeBeforeRemark is set, because we may have just done
@ -4903,6 +4925,9 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
_markStack._hit_limit = 0; _markStack._hit_limit = 0;
_markStack._failed_double = 0; _markStack._failed_double = 0;
// Check that all the klasses have been checked
assert(_revisitStack.isEmpty(), "Not all klasses revisited");
if ((VerifyAfterGC || VerifyDuringGC) && if ((VerifyAfterGC || VerifyDuringGC) &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
verify_after_remark(); verify_after_remark();
@ -5574,9 +5599,13 @@ public:
void CMSRefProcTaskProxy::work(int i) { void CMSRefProcTaskProxy::work(int i) {
assert(_collector->_span.equals(_span), "Inconsistency in _span"); assert(_collector->_span.equals(_span), "Inconsistency in _span");
CMSParKeepAliveClosure par_keep_alive(_collector, _span, CMSParKeepAliveClosure par_keep_alive(_collector, _span,
_mark_bit_map, work_queue(i)); _mark_bit_map,
&_collector->_revisitStack,
work_queue(i));
CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span, CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
_mark_bit_map, work_queue(i)); _mark_bit_map,
&_collector->_revisitStack,
work_queue(i));
CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map); CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
_task.work(i, is_alive_closure, par_keep_alive, par_drain_stack); _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
if (_task.marks_oops_alive()) { if (_task.marks_oops_alive()) {
@ -5604,12 +5633,13 @@ public:
}; };
CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector, CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue): MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
_collector(collector), OopTaskQueue* work_queue):
Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
_span(span), _span(span),
_bit_map(bit_map), _bit_map(bit_map),
_work_queue(work_queue), _work_queue(work_queue),
_mark_and_push(collector, span, bit_map, work_queue), _mark_and_push(collector, span, bit_map, revisit_stack, work_queue),
_low_water_mark(MIN2((uint)(work_queue->max_elems()/4), _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
(uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))) (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
{ } { }
@ -5696,7 +5726,8 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
verify_work_stacks_empty(); verify_work_stacks_empty();
CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap, CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
&_markStack, false /* !preclean */); &_markStack, &_revisitStack,
false /* !preclean */);
CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
_span, &_markBitMap, &_markStack, _span, &_markBitMap, &_markStack,
&cmsKeepAliveClosure, false /* !preclean */); &cmsKeepAliveClosure, false /* !preclean */);
@ -6531,6 +6562,7 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
assert_lock_strong(_freelistLock); assert_lock_strong(_freelistLock);
assert_lock_strong(_bit_map->lock()); assert_lock_strong(_bit_map->lock());
// relinquish the free_list_lock and bitMaplock() // relinquish the free_list_lock and bitMaplock()
DEBUG_ONLY(RememberKlassesChecker mux(false);)
_bit_map->lock()->unlock(); _bit_map->lock()->unlock();
_freelistLock->unlock(); _freelistLock->unlock();
ConcurrentMarkSweepThread::desynchronize(true); ConcurrentMarkSweepThread::desynchronize(true);
@ -6703,6 +6735,7 @@ void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
"CMS thread should hold CMS token"); "CMS thread should hold CMS token");
assert_lock_strong(_freelistLock); assert_lock_strong(_freelistLock);
assert_lock_strong(_bitMap->lock()); assert_lock_strong(_bitMap->lock());
DEBUG_ONLY(RememberKlassesChecker mux(false);)
// relinquish the free_list_lock and bitMaplock() // relinquish the free_list_lock and bitMaplock()
_bitMap->lock()->unlock(); _bitMap->lock()->unlock();
_freelistLock->unlock(); _freelistLock->unlock();
@ -6779,6 +6812,7 @@ void SurvivorSpacePrecleanClosure::do_yield_work() {
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
"CMS thread should hold CMS token"); "CMS thread should hold CMS token");
assert_lock_strong(_bit_map->lock()); assert_lock_strong(_bit_map->lock());
DEBUG_ONLY(RememberKlassesChecker smx(false);)
// Relinquish the bit map lock // Relinquish the bit map lock
_bit_map->lock()->unlock(); _bit_map->lock()->unlock();
ConcurrentMarkSweepThread::desynchronize(true); ConcurrentMarkSweepThread::desynchronize(true);
@ -6941,6 +6975,7 @@ void MarkFromRootsClosure::do_yield_work() {
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
"CMS thread should hold CMS token"); "CMS thread should hold CMS token");
assert_lock_strong(_bitMap->lock()); assert_lock_strong(_bitMap->lock());
DEBUG_ONLY(RememberKlassesChecker mux(false);)
_bitMap->lock()->unlock(); _bitMap->lock()->unlock();
ConcurrentMarkSweepThread::desynchronize(true); ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request(); ConcurrentMarkSweepThread::acknowledge_yield_request();
@ -7295,15 +7330,12 @@ PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
CMSBitMap* bitMap, CMSMarkStack* markStack, CMSBitMap* bitMap, CMSMarkStack* markStack,
CMSMarkStack* revisitStack, CMSMarkStack* revisitStack,
HeapWord* finger, MarkFromRootsClosure* parent) : HeapWord* finger, MarkFromRootsClosure* parent) :
OopClosure(collector->ref_processor()), KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack),
_collector(collector),
_span(span), _span(span),
_bitMap(bitMap), _bitMap(bitMap),
_markStack(markStack), _markStack(markStack),
_revisitStack(revisitStack),
_finger(finger), _finger(finger),
_parent(parent), _parent(parent)
_should_remember_klasses(collector->should_unload_classes())
{ } { }
Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector, Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
@ -7315,18 +7347,17 @@ Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
HeapWord* finger, HeapWord* finger,
HeapWord** global_finger_addr, HeapWord** global_finger_addr,
Par_MarkFromRootsClosure* parent) : Par_MarkFromRootsClosure* parent) :
OopClosure(collector->ref_processor()), Par_KlassRememberingOopClosure(collector,
_collector(collector), collector->ref_processor(),
revisit_stack),
_whole_span(collector->_span), _whole_span(collector->_span),
_span(span), _span(span),
_bit_map(bit_map), _bit_map(bit_map),
_work_queue(work_queue), _work_queue(work_queue),
_overflow_stack(overflow_stack), _overflow_stack(overflow_stack),
_revisit_stack(revisit_stack),
_finger(finger), _finger(finger),
_global_finger_addr(global_finger_addr), _global_finger_addr(global_finger_addr),
_parent(parent), _parent(parent)
_should_remember_klasses(collector->should_unload_classes())
{ } { }
// Assumes thread-safe access by callers, who are // Assumes thread-safe access by callers, who are
@ -7456,6 +7487,14 @@ void Par_PushOrMarkClosure::do_oop(oop obj) {
void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); } void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); } void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
ReferenceProcessor* rp,
CMSMarkStack* revisit_stack) :
OopClosure(rp),
_collector(collector),
_revisit_stack(revisit_stack),
_should_remember_klasses(collector->should_unload_classes()) {}
PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector, PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
MemRegion span, MemRegion span,
ReferenceProcessor* rp, ReferenceProcessor* rp,
@ -7464,15 +7503,12 @@ PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
CMSMarkStack* mark_stack, CMSMarkStack* mark_stack,
CMSMarkStack* revisit_stack, CMSMarkStack* revisit_stack,
bool concurrent_precleaning): bool concurrent_precleaning):
OopClosure(rp), KlassRememberingOopClosure(collector, rp, revisit_stack),
_collector(collector),
_span(span), _span(span),
_bit_map(bit_map), _bit_map(bit_map),
_mod_union_table(mod_union_table), _mod_union_table(mod_union_table),
_mark_stack(mark_stack), _mark_stack(mark_stack),
_revisit_stack(revisit_stack), _concurrent_precleaning(concurrent_precleaning)
_concurrent_precleaning(concurrent_precleaning),
_should_remember_klasses(collector->should_unload_classes())
{ {
assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
} }
@ -7540,13 +7576,10 @@ Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
CMSBitMap* bit_map, CMSBitMap* bit_map,
OopTaskQueue* work_queue, OopTaskQueue* work_queue,
CMSMarkStack* revisit_stack): CMSMarkStack* revisit_stack):
OopClosure(rp), Par_KlassRememberingOopClosure(collector, rp, revisit_stack),
_collector(collector),
_span(span), _span(span),
_bit_map(bit_map), _bit_map(bit_map),
_work_queue(work_queue), _work_queue(work_queue)
_revisit_stack(revisit_stack),
_should_remember_klasses(collector->should_unload_classes())
{ {
assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
} }
@ -7599,19 +7632,16 @@ void Par_PushAndMarkClosure::do_oop(oop obj) {
void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); } void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); } void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
void PushAndMarkClosure::remember_klass(Klass* k) { void PushAndMarkClosure::remember_mdo(DataLayout* v) {
if (!_revisit_stack->push(oop(k))) { // TBD
fatal("Revisit stack overflowed in PushAndMarkClosure");
}
} }
void Par_PushAndMarkClosure::remember_klass(Klass* k) { void Par_PushAndMarkClosure::remember_mdo(DataLayout* v) {
if (!_revisit_stack->par_push(oop(k))) { // TBD
fatal("Revist stack overflowed in Par_PushAndMarkClosure");
}
} }
void CMSPrecleanRefsYieldClosure::do_yield_work() { void CMSPrecleanRefsYieldClosure::do_yield_work() {
DEBUG_ONLY(RememberKlassesChecker mux(false);)
Mutex* bml = _collector->bitMapLock(); Mutex* bml = _collector->bitMapLock();
assert_lock_strong(bml); assert_lock_strong(bml);
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
@ -8302,6 +8332,19 @@ bool CMSIsAliveClosure::do_object_b(oop obj) {
(!_span.contains(addr) || _bit_map->isMarked(addr)); (!_span.contains(addr) || _bit_map->isMarked(addr));
} }
CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
MemRegion span,
CMSBitMap* bit_map, CMSMarkStack* mark_stack,
CMSMarkStack* revisit_stack, bool cpc):
KlassRememberingOopClosure(collector, NULL, revisit_stack),
_span(span),
_bit_map(bit_map),
_mark_stack(mark_stack),
_concurrent_precleaning(cpc) {
assert(!_span.is_empty(), "Empty span could spell trouble");
}
// CMSKeepAliveClosure: the serial version // CMSKeepAliveClosure: the serial version
void CMSKeepAliveClosure::do_oop(oop obj) { void CMSKeepAliveClosure::do_oop(oop obj) {
HeapWord* addr = (HeapWord*)obj; HeapWord* addr = (HeapWord*)obj;
@ -8385,6 +8428,16 @@ void CMSParKeepAliveClosure::trim_queue(uint max) {
} }
} }
CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
CMSCollector* collector,
MemRegion span, CMSBitMap* bit_map,
CMSMarkStack* revisit_stack,
OopTaskQueue* work_queue):
Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
_span(span),
_bit_map(bit_map),
_work_queue(work_queue) { }
void CMSInnerParMarkAndPushClosure::do_oop(oop obj) { void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
HeapWord* addr = (HeapWord*)obj; HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) && if (_span.contains(addr) &&
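Editorial sketch (not part of the changeset): the DEBUG_ONLY(RememberKlassesChecker ...) statements threaded through the file above bracket marking work and yield points with a scoped expectation of whether klasses must currently be remembered. The standalone sketch below shows only the general RAII set-and-restore pattern those statements rely on; the class name and the global flag are simplified stand-ins, not the actual RememberKlassesChecker implementation.

#include <cassert>
#include <iostream>

// Hypothetical global expectation flag; the real checker is a debug-only
// helper, which this sketch keeps unconditional for brevity.
static bool g_must_remember_klasses = false;

// RAII guard: set the expectation for a scope, restore it on exit. This is
// the role played by statements such as
// DEBUG_ONLY(RememberKlassesChecker mux(false);) around yield points above.
class ScopedRememberKlasses {
  bool _saved;
 public:
  explicit ScopedRememberKlasses(bool expected) : _saved(g_must_remember_klasses) {
    g_must_remember_klasses = expected;
  }
  ~ScopedRememberKlasses() { g_must_remember_klasses = _saved; }
};

// The kind of consistency check a closure can make against the current scope.
void check_remember_klasses(bool closure_remembers) {
  assert(closure_remembers == g_must_remember_klasses && "closure/scope mismatch");
  (void)closure_remembers;
}

int main() {
  {
    ScopedRememberKlasses marking_scope(true);   // e.g. remark with class unloading on
    check_remember_klasses(true);
    {
      ScopedRememberKlasses yield_scope(false);  // checking off across a yield
      check_remember_klasses(false);
    }
    check_remember_klasses(true);                // expectation restored after the yield
  }
  std::cout << "scoped checks passed\n";
  return 0;
}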

View File

@ -1790,12 +1790,13 @@ class CMSParDrainMarkingStackClosure: public VoidClosure {
public: public:
CMSParDrainMarkingStackClosure(CMSCollector* collector, CMSParDrainMarkingStackClosure(CMSCollector* collector,
MemRegion span, CMSBitMap* bit_map, MemRegion span, CMSBitMap* bit_map,
CMSMarkStack* revisit_stack,
OopTaskQueue* work_queue): OopTaskQueue* work_queue):
_collector(collector), _collector(collector),
_span(span), _span(span),
_bit_map(bit_map), _bit_map(bit_map),
_work_queue(work_queue), _work_queue(work_queue),
_mark_and_push(collector, span, bit_map, work_queue) { } _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { }
public: public:
void trim_queue(uint max); void trim_queue(uint max);

View File

@ -25,11 +25,21 @@
#include "incls/_precompiled.incl" #include "incls/_precompiled.incl"
#include "incls/_concurrentG1Refine.cpp.incl" #include "incls/_concurrentG1Refine.cpp.incl"
// Possible sizes for the card counts cache: odd primes that roughly double in size.
// (See jvmtiTagMap.cpp).
int ConcurrentG1Refine::_cc_cache_sizes[] = {
16381, 32771, 76831, 150001, 307261,
614563, 1228891, 2457733, 4915219, 9830479,
19660831, 39321619, 78643219, 157286461, -1
};
ConcurrentG1Refine::ConcurrentG1Refine() : ConcurrentG1Refine::ConcurrentG1Refine() :
_card_counts(NULL), _cur_card_count_histo(NULL), _cum_card_count_histo(NULL), _card_counts(NULL), _card_epochs(NULL),
_n_card_counts(0), _max_n_card_counts(0),
_cache_size_index(0), _expand_card_counts(false),
_hot_cache(NULL), _hot_cache(NULL),
_def_use_cache(false), _use_cache(false), _def_use_cache(false), _use_cache(false),
_n_periods(0), _total_cards(0), _total_travs(0), _n_periods(0),
_threads(NULL), _n_threads(0) _threads(NULL), _n_threads(0)
{ {
if (G1ConcRefine) { if (G1ConcRefine) {
@ -57,32 +67,51 @@ size_t ConcurrentG1Refine::thread_num() {
} }
void ConcurrentG1Refine::init() { void ConcurrentG1Refine::init() {
if (G1ConcRSLogCacheSize > 0 || G1ConcRSCountTraversals) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
_n_card_counts =
(unsigned) (g1h->g1_reserved_obj_bytes() >> CardTableModRefBS::card_shift);
_card_counts = NEW_C_HEAP_ARRAY(unsigned char, _n_card_counts);
for (size_t i = 0; i < _n_card_counts; i++) _card_counts[i] = 0;
ModRefBarrierSet* bs = g1h->mr_bs();
guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
CardTableModRefBS* ctbs = (CardTableModRefBS*)bs;
_ct_bot = ctbs->byte_for_const(g1h->reserved_region().start());
if (G1ConcRSCountTraversals) {
_cur_card_count_histo = NEW_C_HEAP_ARRAY(unsigned, 256);
_cum_card_count_histo = NEW_C_HEAP_ARRAY(unsigned, 256);
for (int i = 0; i < 256; i++) {
_cur_card_count_histo[i] = 0;
_cum_card_count_histo[i] = 0;
}
}
}
if (G1ConcRSLogCacheSize > 0) { if (G1ConcRSLogCacheSize > 0) {
_g1h = G1CollectedHeap::heap();
_max_n_card_counts =
(unsigned) (_g1h->g1_reserved_obj_bytes() >> CardTableModRefBS::card_shift);
size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
guarantee(_max_n_card_counts < max_card_num, "card_num representation");
int desired = _max_n_card_counts / InitialCacheFraction;
for (_cache_size_index = 0;
_cc_cache_sizes[_cache_size_index] >= 0; _cache_size_index++) {
if (_cc_cache_sizes[_cache_size_index] >= desired) break;
}
_cache_size_index = MAX2(0, (_cache_size_index - 1));
int initial_size = _cc_cache_sizes[_cache_size_index];
if (initial_size < 0) initial_size = _max_n_card_counts;
// Make sure we don't go bigger than we will ever need
_n_card_counts = MIN2((unsigned) initial_size, _max_n_card_counts);
_card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
_card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);
Copy::fill_to_bytes(&_card_counts[0],
_n_card_counts * sizeof(CardCountCacheEntry));
Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
ModRefBarrierSet* bs = _g1h->mr_bs();
guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
_ct_bs = (CardTableModRefBS*)bs;
_ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
_def_use_cache = true; _def_use_cache = true;
_use_cache = true; _use_cache = true;
_hot_cache_size = (1 << G1ConcRSLogCacheSize); _hot_cache_size = (1 << G1ConcRSLogCacheSize);
_hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size); _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size);
_n_hot = 0; _n_hot = 0;
_hot_cache_idx = 0; _hot_cache_idx = 0;
// For refining the cards in the hot cache in parallel
int n_workers = (ParallelGCThreads > 0 ?
_g1h->workers()->total_workers() : 1);
_hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
_hot_cache_par_claimed_idx = 0;
} }
} }
@ -95,15 +124,11 @@ void ConcurrentG1Refine::stop() {
} }
ConcurrentG1Refine::~ConcurrentG1Refine() { ConcurrentG1Refine::~ConcurrentG1Refine() {
if (G1ConcRSLogCacheSize > 0 || G1ConcRSCountTraversals) {
assert(_card_counts != NULL, "Logic");
FREE_C_HEAP_ARRAY(unsigned char, _card_counts);
assert(_cur_card_count_histo != NULL, "Logic");
FREE_C_HEAP_ARRAY(unsigned, _cur_card_count_histo);
assert(_cum_card_count_histo != NULL, "Logic");
FREE_C_HEAP_ARRAY(unsigned, _cum_card_count_histo);
}
if (G1ConcRSLogCacheSize > 0) { if (G1ConcRSLogCacheSize > 0) {
assert(_card_counts != NULL, "Logic");
FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
assert(_card_epochs != NULL, "Logic");
FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);
assert(_hot_cache != NULL, "Logic"); assert(_hot_cache != NULL, "Logic");
FREE_C_HEAP_ARRAY(jbyte*, _hot_cache); FREE_C_HEAP_ARRAY(jbyte*, _hot_cache);
} }
@ -123,165 +148,232 @@ void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
} }
} }
bool ConcurrentG1Refine::is_young_card(jbyte* card_ptr) {
int ConcurrentG1Refine::add_card_count(jbyte* card_ptr) { HeapWord* start = _ct_bs->addr_for(card_ptr);
size_t card_num = (card_ptr - _ct_bot); HeapRegion* r = _g1h->heap_region_containing(start);
guarantee(0 <= card_num && card_num < _n_card_counts, "Bounds"); if (r != NULL && r->is_young()) {
unsigned char cnt = _card_counts[card_num]; return true;
if (cnt < 255) _card_counts[card_num]++; }
return cnt; // This card is not associated with a heap region
_total_travs++; // so can't be young.
return false;
} }
jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr) { jbyte* ConcurrentG1Refine::add_card_count(jbyte* card_ptr, int* count, bool* defer) {
int count = add_card_count(card_ptr); unsigned new_card_num = ptr_2_card_num(card_ptr);
// Count previously unvisited cards. unsigned bucket = hash(new_card_num);
if (count == 0) _total_cards++; assert(0 <= bucket && bucket < _n_card_counts, "Bounds");
// We'll assume a traversal unless we store it in the cache.
if (count < G1ConcRSHotCardLimit) { CardCountCacheEntry* count_ptr = &_card_counts[bucket];
_total_travs++; CardEpochCacheEntry* epoch_ptr = &_card_epochs[bucket];
// We have to construct a new entry if we haven't updated the counts
// during the current period, or if the count was updated for a
// different card number.
unsigned int new_epoch = (unsigned int) _n_periods;
julong new_epoch_entry = make_epoch_entry(new_card_num, new_epoch);
while (true) {
// Fetch the previous epoch value
julong prev_epoch_entry = epoch_ptr->_value;
julong cas_res;
if (extract_epoch(prev_epoch_entry) != new_epoch) {
// This entry has not yet been updated during this period.
// Note: we update the epoch value atomically to ensure
// that there is only one winner that updates the cached
// card_ptr value even though all the refine threads share
// the same epoch value.
cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
(volatile jlong*)&epoch_ptr->_value,
(jlong) prev_epoch_entry);
if (cas_res == prev_epoch_entry) {
// We have successfully won the race to update the
// epoch and card_num value. Make it look like the
// count and eviction count were previously cleared.
count_ptr->_count = 1;
count_ptr->_evict_count = 0;
*count = 0;
// We can defer the processing of card_ptr
*defer = true;
return card_ptr; return card_ptr;
} }
// Otherwise, it's hot. // We did not win the race to update the epoch field, so some other
// thread must have done it. The value that gets returned by CAS
// should be the new epoch value.
assert(extract_epoch(cas_res) == new_epoch, "unexpected epoch");
// We could 'continue' here or just re-read the previous epoch value
prev_epoch_entry = epoch_ptr->_value;
}
// The epoch entry for card_ptr has been updated during this period.
unsigned old_card_num = extract_card_num(prev_epoch_entry);
// The card count that will be returned to caller
*count = count_ptr->_count;
// Are we updating the count for the same card?
if (new_card_num == old_card_num) {
// Same card - just update the count. We could have more than one
// thread racing to update count for the current card. It should be
// OK not to use a CAS as the only penalty should be some missed
// increments of the count which delays identifying the card as "hot".
if (*count < max_jubyte) count_ptr->_count++;
// We can defer the processing of card_ptr
*defer = true;
return card_ptr;
}
// Different card - evict old card info
if (count_ptr->_evict_count < max_jubyte) count_ptr->_evict_count++;
if (count_ptr->_evict_count > G1CardCountCacheExpandThreshold) {
// Trigger a resize the next time we clear
_expand_card_counts = true;
}
cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
(volatile jlong*)&epoch_ptr->_value,
(jlong) prev_epoch_entry);
if (cas_res == prev_epoch_entry) {
// We successfully updated the card num value in the epoch entry
count_ptr->_count = 0; // initialize counter for new card num
// Even though the region containing the card at old_card_num was not
// in the young list when old_card_num was recorded in the epoch
// cache it could have been added to the free list and subsequently
// added to the young list in the intervening time. If the evicted
// card is in a young region just return the card_ptr and the evicted
// card will not be cleaned. See CR 6817995.
jbyte* old_card_ptr = card_num_2_ptr(old_card_num);
if (is_young_card(old_card_ptr)) {
*count = 0;
// We can defer the processing of card_ptr
*defer = true;
return card_ptr;
}
// We do not want to defer processing of card_ptr in this case
// (we need to refine old_card_ptr and card_ptr)
*defer = false;
return old_card_ptr;
}
// Someone else beat us - try again.
}
}
jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
int count;
jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
assert(cached_ptr != NULL, "bad cached card ptr");
assert(!is_young_card(cached_ptr), "shouldn't get a card in young region");
// The card pointer we obtained from card count cache is not hot
// so do not store it in the cache; return it for immediate
// refining.
if (count < G1ConcRSHotCardLimit) {
return cached_ptr;
}
// Otherwise, the pointer we got from the _card_counts is hot.
jbyte* res = NULL; jbyte* res = NULL;
MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
if (_n_hot == _hot_cache_size) { if (_n_hot == _hot_cache_size) {
_total_travs++;
res = _hot_cache[_hot_cache_idx]; res = _hot_cache[_hot_cache_idx];
_n_hot--; _n_hot--;
} }
// Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx. // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
_hot_cache[_hot_cache_idx] = card_ptr; _hot_cache[_hot_cache_idx] = cached_ptr;
_hot_cache_idx++; _hot_cache_idx++;
if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0; if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0;
_n_hot++; _n_hot++;
if (res != NULL) {
// Even though the region containing res was not in the young list
// when it was recorded in the hot cache it could have been added
// to the free list and subsequently added to the young list in
// the intervening time. If res is in a young region, return NULL
// so that res is not cleaned. See CR 6817995.
if (is_young_card(res)) {
res = NULL;
}
}
return res; return res;
} }
void ConcurrentG1Refine::clean_up_cache(int worker_i, G1RemSet* g1rs) { void ConcurrentG1Refine::clean_up_cache(int worker_i, G1RemSet* g1rs) {
assert(!use_cache(), "cache should be disabled"); assert(!use_cache(), "cache should be disabled");
int start_ind = _hot_cache_idx-1; int start_idx;
for (int i = 0; i < _n_hot; i++) {
int ind = start_ind - i; while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
if (ind < 0) ind = ind + _hot_cache_size; int end_idx = start_idx + _hot_cache_par_chunk_size;
jbyte* entry = _hot_cache[ind];
if (start_idx ==
Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
// The current worker has successfully claimed the chunk [start_idx..end_idx)
end_idx = MIN2(end_idx, _n_hot);
for (int i = start_idx; i < end_idx; i++) {
jbyte* entry = _hot_cache[i];
if (entry != NULL) { if (entry != NULL) {
g1rs->concurrentRefineOneCard(entry, worker_i); g1rs->concurrentRefineOneCard(entry, worker_i);
} }
} }
_n_hot = 0; }
_hot_cache_idx = 0; }
}
void ConcurrentG1Refine::expand_card_count_cache() {
if (_n_card_counts < _max_n_card_counts) {
int new_idx = _cache_size_index+1;
int new_size = _cc_cache_sizes[new_idx];
if (new_size < 0) new_size = _max_n_card_counts;
// Make sure we don't go bigger than we will ever need
new_size = MIN2((unsigned) new_size, _max_n_card_counts);
// Expand the card count and card epoch tables
if (new_size > (int)_n_card_counts) {
// We can just free and allocate a new array as we're
// not interested in preserving the contents
assert(_card_counts != NULL, "Logic!");
assert(_card_epochs != NULL, "Logic!");
FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);
_n_card_counts = new_size;
_card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
_card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);
_cache_size_index = new_idx;
}
}
} }
void ConcurrentG1Refine::clear_and_record_card_counts() { void ConcurrentG1Refine::clear_and_record_card_counts() {
if (G1ConcRSLogCacheSize == 0 && !G1ConcRSCountTraversals) return; if (G1ConcRSLogCacheSize == 0) return;
#ifndef PRODUCT
double start = os::elapsedTime();
#endif
if (_expand_card_counts) {
expand_card_count_cache();
_expand_card_counts = false;
// Only need to clear the epochs.
Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
}
int this_epoch = (int) _n_periods;
assert((this_epoch+1) <= max_jint, "too many periods");
// Update epoch
_n_periods++; _n_periods++;
if (G1ConcRSCountTraversals) {
for (size_t i = 0; i < _n_card_counts; i++) { #ifndef PRODUCT
unsigned char bucket = _card_counts[i]; double elapsed = os::elapsedTime() - start;
_cur_card_count_histo[bucket]++; _g1h->g1_policy()->record_cc_clear_time(elapsed * 1000.0);
_card_counts[i] = 0; #endif
}
gclog_or_tty->print_cr("Card counts:");
for (int i = 0; i < 256; i++) {
if (_cur_card_count_histo[i] > 0) {
gclog_or_tty->print_cr(" %3d: %9d", i, _cur_card_count_histo[i]);
_cum_card_count_histo[i] += _cur_card_count_histo[i];
_cur_card_count_histo[i] = 0;
}
}
} else {
assert(G1ConcRSLogCacheSize > 0, "Logic");
Copy::fill_to_words((HeapWord*)(&_card_counts[0]),
_n_card_counts / HeapWordSize);
}
}
void
ConcurrentG1Refine::
print_card_count_histo_range(unsigned* histo, int from, int to,
float& cum_card_pct,
float& cum_travs_pct) {
unsigned cards = 0;
unsigned travs = 0;
guarantee(to <= 256, "Precondition");
for (int i = from; i < to-1; i++) {
cards += histo[i];
travs += histo[i] * i;
}
if (to == 256) {
unsigned histo_card_sum = 0;
unsigned histo_trav_sum = 0;
for (int i = 1; i < 255; i++) {
histo_trav_sum += histo[i] * i;
}
cards += histo[255];
// correct traversals for the last one.
unsigned travs_255 = (unsigned) (_total_travs - histo_trav_sum);
travs += travs_255;
} else {
cards += histo[to-1];
travs += histo[to-1] * (to-1);
}
float fperiods = (float)_n_periods;
float f_tot_cards = (float)_total_cards/fperiods;
float f_tot_travs = (float)_total_travs/fperiods;
if (cards > 0) {
float fcards = (float)cards/fperiods;
float ftravs = (float)travs/fperiods;
if (to == 256) {
gclog_or_tty->print(" %4d- %10.2f%10.2f", from, fcards, ftravs);
} else {
gclog_or_tty->print(" %4d-%4d %10.2f%10.2f", from, to-1, fcards, ftravs);
}
float pct_cards = fcards*100.0/f_tot_cards;
cum_card_pct += pct_cards;
float pct_travs = ftravs*100.0/f_tot_travs;
cum_travs_pct += pct_travs;
gclog_or_tty->print_cr("%10.2f%10.2f%10.2f%10.2f",
pct_cards, cum_card_pct,
pct_travs, cum_travs_pct);
}
}
void ConcurrentG1Refine::print_final_card_counts() {
if (!G1ConcRSCountTraversals) return;
gclog_or_tty->print_cr("Did %d total traversals of %d distinct cards.",
_total_travs, _total_cards);
float fperiods = (float)_n_periods;
gclog_or_tty->print_cr(" This is an average of %8.2f traversals, %8.2f cards, "
"per collection.", (float)_total_travs/fperiods,
(float)_total_cards/fperiods);
gclog_or_tty->print_cr(" This is an average of %8.2f traversals/distinct "
"dirty card.\n",
_total_cards > 0 ?
(float)_total_travs/(float)_total_cards : 0.0);
gclog_or_tty->print_cr("Histogram:\n\n%10s %10s%10s%10s%10s%10s%10s",
"range", "# cards", "# travs", "% cards", "(cum)",
"% travs", "(cum)");
gclog_or_tty->print_cr("------------------------------------------------------------"
"-------------");
float cum_cards_pct = 0.0;
float cum_travs_pct = 0.0;
for (int i = 1; i < 10; i++) {
print_card_count_histo_range(_cum_card_count_histo, i, i+1,
cum_cards_pct, cum_travs_pct);
}
for (int i = 10; i < 100; i += 10) {
print_card_count_histo_range(_cum_card_count_histo, i, i+10,
cum_cards_pct, cum_travs_pct);
}
print_card_count_histo_range(_cum_card_count_histo, 100, 150,
cum_cards_pct, cum_travs_pct);
print_card_count_histo_range(_cum_card_count_histo, 150, 200,
cum_cards_pct, cum_travs_pct);
print_card_count_histo_range(_cum_card_count_histo, 150, 255,
cum_cards_pct, cum_travs_pct);
print_card_count_histo_range(_cum_card_count_histo, 255, 256,
cum_cards_pct, cum_travs_pct);
} }
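Editorial sketch (not part of the changeset): clean_up_cache() above lets several workers drain the hot card cache by claiming fixed-size chunks with a compare-and-swap on a shared claimed index. The standalone sketch below demonstrates that claiming scheme in isolation; the entry type, the chunk size, and the "refinement" work are placeholders, not G1 code.

#include <algorithm>
#include <atomic>
#include <iostream>
#include <thread>
#include <vector>

// Invented stand-ins for _hot_cache, _hot_cache_par_claimed_idx and
// _hot_cache_par_chunk_size; the values are made up.
static std::vector<int> g_entries;
static std::atomic<int> g_claimed_idx(0);
static const int kChunk = 4;
static std::atomic<int> g_processed(0);

void worker() {
  int start;
  while ((start = g_claimed_idx.load()) < (int)g_entries.size()) {
    int end = start + kChunk;
    // Try to claim [start, end); on failure another worker got there first.
    if (g_claimed_idx.compare_exchange_strong(start, end)) {
      end = std::min(end, (int)g_entries.size());
      for (int i = start; i < end; i++) {
        g_processed.fetch_add(g_entries[i]);   // stands in for "refine one card"
      }
    }
  }
}

int main() {
  for (int i = 0; i < 10; i++) g_entries.push_back(1);
  std::thread t1(worker), t2(worker);
  t1.join();
  t2.join();
  std::cout << "processed " << g_processed.load() << " of "
            << g_entries.size() << " entries\n";  // prints 10
  return 0;
}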

View File

@ -29,29 +29,117 @@ class G1RemSet;
class ConcurrentG1Refine: public CHeapObj { class ConcurrentG1Refine: public CHeapObj {
ConcurrentG1RefineThread** _threads; ConcurrentG1RefineThread** _threads;
int _n_threads; int _n_threads;
// The cache for card refinement. // The cache for card refinement.
bool _use_cache; bool _use_cache;
bool _def_use_cache; bool _def_use_cache;
size_t _n_periods;
size_t _total_cards;
size_t _total_travs;
unsigned char* _card_counts; size_t _n_periods; // Used as clearing epoch
// An evicting cache of the number of times each card
// is accessed. Reduces, but does not eliminate, the amount
// of duplicated processing of dirty cards.
enum SomePrivateConstants {
epoch_bits = 32,
card_num_shift = epoch_bits,
epoch_mask = AllBits,
card_num_mask = AllBits,
// The initial cache size is approximately this fraction
// of a maximal cache (i.e. the size needed for all cards
// in the heap)
InitialCacheFraction = 512
};
const static julong card_num_mask_in_place =
(julong) card_num_mask << card_num_shift;
typedef struct {
julong _value; // | card_num | epoch |
} CardEpochCacheEntry;
julong make_epoch_entry(unsigned int card_num, unsigned int epoch) {
assert(0 <= card_num && card_num < _max_n_card_counts, "Bounds");
assert(0 <= epoch && epoch <= _n_periods, "must be");
return ((julong) card_num << card_num_shift) | epoch;
}
unsigned int extract_epoch(julong v) {
return (v & epoch_mask);
}
unsigned int extract_card_num(julong v) {
return (v & card_num_mask_in_place) >> card_num_shift;
}
typedef struct {
unsigned char _count;
unsigned char _evict_count;
} CardCountCacheEntry;
CardCountCacheEntry* _card_counts;
CardEpochCacheEntry* _card_epochs;
// The current number of buckets in the card count cache
unsigned _n_card_counts; unsigned _n_card_counts;
// The max number of buckets required for the number of
// cards for the entire reserved heap
unsigned _max_n_card_counts;
// Possible sizes of the cache: odd primes that roughly double in size.
// (See jvmtiTagMap.cpp).
static int _cc_cache_sizes[];
// The index in _cc_cache_sizes corresponding to the size of
// _card_counts.
int _cache_size_index;
bool _expand_card_counts;
const jbyte* _ct_bot; const jbyte* _ct_bot;
unsigned* _cur_card_count_histo;
unsigned* _cum_card_count_histo;
jbyte** _hot_cache; jbyte** _hot_cache;
int _hot_cache_size; int _hot_cache_size;
int _n_hot; int _n_hot;
int _hot_cache_idx; int _hot_cache_idx;
// Returns the count of this card after incrementing it. int _hot_cache_par_chunk_size;
int add_card_count(jbyte* card_ptr); volatile int _hot_cache_par_claimed_idx;
// Needed to work around 6817995
CardTableModRefBS* _ct_bs;
G1CollectedHeap* _g1h;
// Expands the array that holds the card counts to the next size up
void expand_card_count_cache();
// hash a given key (index of card_ptr) with the specified size
static unsigned int hash(size_t key, int size) {
return (unsigned int) key % size;
}
// hash a given key (index of card_ptr)
unsigned int hash(size_t key) {
return hash(key, _n_card_counts);
}
unsigned ptr_2_card_num(jbyte* card_ptr) {
return (unsigned) (card_ptr - _ct_bot);
}
jbyte* card_num_2_ptr(unsigned card_num) {
return (jbyte*) (_ct_bot + card_num);
}
// Returns the count of this card after incrementing it.
jbyte* add_card_count(jbyte* card_ptr, int* count, bool* defer);
// Returns true if this card is in a young region
bool is_young_card(jbyte* card_ptr);
void print_card_count_histo_range(unsigned* histo, int from, int to,
float& cum_card_pct,
float& cum_travs_pct);
public: public:
ConcurrentG1Refine(); ConcurrentG1Refine();
~ConcurrentG1Refine(); ~ConcurrentG1Refine();
@ -65,11 +153,16 @@ class ConcurrentG1Refine: public CHeapObj {
// If this is the first entry for the slot, writes into the cache and // If this is the first entry for the slot, writes into the cache and
// returns NULL. If it causes an eviction, returns the evicted pointer. // returns NULL. If it causes an eviction, returns the evicted pointer.
// Otherwise, it's a cache hit, and returns NULL. // Otherwise, it's a cache hit, and returns NULL.
jbyte* cache_insert(jbyte* card_ptr); jbyte* cache_insert(jbyte* card_ptr, bool* defer);
// Process the cached entries. // Process the cached entries.
void clean_up_cache(int worker_i, G1RemSet* g1rs); void clean_up_cache(int worker_i, G1RemSet* g1rs);
// Set up for parallel processing of the cards in the hot cache
void clear_hot_cache_claimed_index() {
_hot_cache_par_claimed_idx = 0;
}
// Discard entries in the hot cache. // Discard entries in the hot cache.
void clear_hot_cache() { void clear_hot_cache() {
_hot_cache_idx = 0; _n_hot = 0; _hot_cache_idx = 0; _n_hot = 0;
@ -84,7 +177,6 @@ class ConcurrentG1Refine: public CHeapObj {
} }
void clear_and_record_card_counts(); void clear_and_record_card_counts();
void print_final_card_counts();
static size_t thread_num(); static size_t thread_num();
}; };
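Editorial sketch (not part of the changeset): the card count cache above packs a card number and a clearing epoch into a single 64-bit word (| card_num | epoch |) so that both can be replaced with one atomic compare-and-swap. The standalone helpers below show only the packing arithmetic; the constants are simplified stand-ins and the code is not the HotSpot implementation.

#include <cassert>
#include <cstdint>
#include <cstdio>

// 64-bit entry layout: card number in the upper 32 bits, epoch in the lower 32.
static const int      kEpochBits   = 32;
static const uint64_t kEpochMask   = 0xffffffffULL;
static const int      kCardNumShift = kEpochBits;

static uint64_t make_epoch_entry(uint32_t card_num, uint32_t epoch) {
  return ((uint64_t)card_num << kCardNumShift) | (uint64_t)epoch;
}
static uint32_t extract_epoch(uint64_t v)    { return (uint32_t)(v & kEpochMask); }
static uint32_t extract_card_num(uint64_t v) { return (uint32_t)(v >> kCardNumShift); }

int main() {
  uint64_t e = make_epoch_entry(/*card_num=*/123456u, /*epoch=*/7u);
  assert(extract_card_num(e) == 123456u);
  assert(extract_epoch(e) == 7u);
  std::printf("entry = 0x%016llx (card %u, epoch %u)\n",
              (unsigned long long)e, extract_card_num(e), extract_epoch(e));
  return 0;
}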

View File

@ -39,7 +39,6 @@ ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *nex
_next(next), _next(next),
_cg1r(cg1r), _cg1r(cg1r),
_vtime_accum(0.0), _vtime_accum(0.0),
_co_tracker(G1CRGroup),
_interval_ms(5.0) _interval_ms(5.0)
{ {
create_and_start(); create_and_start();
@ -76,9 +75,6 @@ void ConcurrentG1RefineThread::run() {
_vtime_start = os::elapsedVTime(); _vtime_start = os::elapsedVTime();
wait_for_universe_init(); wait_for_universe_init();
_co_tracker.enable();
_co_tracker.start();
while (!_should_terminate) { while (!_should_terminate) {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
// Wait for completed log buffers to exist. // Wait for completed log buffers to exist.
@ -104,17 +100,17 @@ void ConcurrentG1RefineThread::run() {
double start_vtime_sec; // only used when G1SmoothConcRefine is on double start_vtime_sec; // only used when G1SmoothConcRefine is on
int prev_buffer_num; // only used when G1SmoothConcRefine is on int prev_buffer_num; // only used when G1SmoothConcRefine is on
// This thread activation threshold // This thread activation threshold
int threshold = DCQBarrierProcessCompletedThreshold * _worker_id; int threshold = G1UpdateBufferQueueProcessingThreshold * _worker_id;
// Next thread activation threshold // Next thread activation threshold
int next_threshold = threshold + DCQBarrierProcessCompletedThreshold; int next_threshold = threshold + G1UpdateBufferQueueProcessingThreshold;
int deactivation_threshold = MAX2<int>(threshold - DCQBarrierProcessCompletedThreshold / 2, 0); int deactivation_threshold = MAX2<int>(threshold - G1UpdateBufferQueueProcessingThreshold / 2, 0);
if (G1SmoothConcRefine) { if (G1SmoothConcRefine) {
lower_limit = 0; lower_limit = 0;
start_vtime_sec = os::elapsedVTime(); start_vtime_sec = os::elapsedVTime();
prev_buffer_num = (int) dcqs.completed_buffers_num(); prev_buffer_num = (int) dcqs.completed_buffers_num();
} else { } else {
lower_limit = DCQBarrierProcessCompletedThreshold / 4; // For now. lower_limit = G1UpdateBufferQueueProcessingThreshold / 4; // For now.
} }
while (dcqs.apply_closure_to_completed_buffer(_worker_id + _worker_id_offset, lower_limit)) { while (dcqs.apply_closure_to_completed_buffer(_worker_id + _worker_id_offset, lower_limit)) {
double end_vtime_sec; double end_vtime_sec;
@ -147,7 +143,6 @@ void ConcurrentG1RefineThread::run() {
} }
break; break;
} }
_co_tracker.update(false);
// Check if we need to activate the next thread. // Check if we need to activate the next thread.
if (curr_buffer_num > next_threshold && _next != NULL && !_next->is_active()) { if (curr_buffer_num > next_threshold && _next != NULL && !_next->is_active()) {
@ -168,7 +163,6 @@ void ConcurrentG1RefineThread::run() {
} }
n_logs++; n_logs++;
} }
_co_tracker.update(false);
_sts.leave(); _sts.leave();
if (os::supports_vtime()) { if (os::supports_vtime()) {
@ -177,9 +171,6 @@ void ConcurrentG1RefineThread::run() {
_vtime_accum = 0.0; _vtime_accum = 0.0;
} }
} }
_sts.join();
_co_tracker.update(true);
_sts.leave();
assert(_should_terminate, "just checking"); assert(_should_terminate, "just checking");
terminate(); terminate();
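Editorial sketch (not part of the changeset): the refinement thread loop above staggers workers by giving worker i an activation threshold of i * G1UpdateBufferQueueProcessingThreshold completed buffers, a hand-off point one threshold higher, and a deactivation point half a threshold lower (clamped at zero). The small program below only prints those derived values for a made-up threshold to make the arithmetic concrete.

#include <cstdio>

int main() {
  const int T = 8;  // hypothetical stand-in for G1UpdateBufferQueueProcessingThreshold
  for (int worker_id = 0; worker_id < 3; worker_id++) {
    int threshold              = T * worker_id;
    int next_threshold         = threshold + T;
    int deactivation_threshold = threshold - T / 2;
    if (deactivation_threshold < 0) deactivation_threshold = 0;
    std::printf("worker %d: activate above %d, wake next above %d, deactivate below %d\n",
                worker_id, threshold, next_threshold, deactivation_threshold);
  }
  return 0;
}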

View File

@ -51,7 +51,6 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
private: private:
ConcurrentG1Refine* _cg1r; ConcurrentG1Refine* _cg1r;
COTracker _co_tracker;
double _interval_ms; double _interval_ms;
void decreaseInterval(int processing_time_ms) { void decreaseInterval(int processing_time_ms) {

View File

@ -433,8 +433,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
_total_counting_time(0.0), _total_counting_time(0.0),
_total_rs_scrub_time(0.0), _total_rs_scrub_time(0.0),
_parallel_workers(NULL), _parallel_workers(NULL)
_cleanup_co_tracker(G1CLGroup)
{ {
CMVerboseLevel verbose_level = CMVerboseLevel verbose_level =
(CMVerboseLevel) G1MarkingVerboseLevel; (CMVerboseLevel) G1MarkingVerboseLevel;
@ -823,18 +822,6 @@ void ConcurrentMark::checkpointRootsInitialPost() {
// when marking is on. So, it's also called at the end of the // when marking is on. So, it's also called at the end of the
// initial-mark pause to update the heap end, if the heap expands // initial-mark pause to update the heap end, if the heap expands
// during it. No need to call it here. // during it. No need to call it here.
guarantee( !_cleanup_co_tracker.enabled(), "invariant" );
size_t max_marking_threads =
MAX2((size_t) 1, parallel_marking_threads());
for (int i = 0; i < (int)_max_task_num; ++i) {
_tasks[i]->enable_co_tracker();
if (i < (int) max_marking_threads)
_tasks[i]->reset_co_tracker(marking_task_overhead());
else
_tasks[i]->reset_co_tracker(0.0);
}
} }
// Checkpoint the roots into this generation from outside // Checkpoint the roots into this generation from outside
@ -845,7 +832,6 @@ void ConcurrentMark::checkpointRootsInitial() {
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
double start = os::elapsedTime(); double start = os::elapsedTime();
GCOverheadReporter::recordSTWStart(start);
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
g1p->record_concurrent_mark_init_start(); g1p->record_concurrent_mark_init_start();
@ -876,7 +862,6 @@ void ConcurrentMark::checkpointRootsInitial() {
// Statistics. // Statistics.
double end = os::elapsedTime(); double end = os::elapsedTime();
_init_times.add((end - start) * 1000.0); _init_times.add((end - start) * 1000.0);
GCOverheadReporter::recordSTWEnd(end);
g1p->record_concurrent_mark_init_end(); g1p->record_concurrent_mark_init_end();
} }
@ -1035,7 +1020,6 @@ public:
guarantee( (size_t)worker_i < _cm->active_tasks(), "invariant" ); guarantee( (size_t)worker_i < _cm->active_tasks(), "invariant" );
CMTask* the_task = _cm->task(worker_i); CMTask* the_task = _cm->task(worker_i);
the_task->start_co_tracker();
the_task->record_start_time(); the_task->record_start_time();
if (!_cm->has_aborted()) { if (!_cm->has_aborted()) {
do { do {
@ -1061,8 +1045,6 @@ public:
double end_time2_sec = os::elapsedTime(); double end_time2_sec = os::elapsedTime();
double elapsed_time2_sec = end_time2_sec - start_time_sec; double elapsed_time2_sec = end_time2_sec - start_time_sec;
the_task->update_co_tracker();
#if 0 #if 0
gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, " gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
"overhead %1.4lf", "overhead %1.4lf",
@ -1079,7 +1061,6 @@ public:
ConcurrentGCThread::stsLeave(); ConcurrentGCThread::stsLeave();
double end_vtime = os::elapsedVTime(); double end_vtime = os::elapsedVTime();
the_task->update_co_tracker(true);
_cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime); _cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime);
} }
@ -1133,7 +1114,6 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
g1p->record_concurrent_mark_remark_start(); g1p->record_concurrent_mark_remark_start();
double start = os::elapsedTime(); double start = os::elapsedTime();
GCOverheadReporter::recordSTWStart(start);
checkpointRootsFinalWork(); checkpointRootsFinalWork();
@ -1173,11 +1153,6 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
_remark_weak_ref_times.add((now - mark_work_end) * 1000.0); _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
_remark_times.add((now - start) * 1000.0); _remark_times.add((now - start) * 1000.0);
GCOverheadReporter::recordSTWEnd(now);
for (int i = 0; i < (int)_max_task_num; ++i)
_tasks[i]->disable_co_tracker();
_cleanup_co_tracker.enable();
_cleanup_co_tracker.reset(cleanup_task_overhead());
g1p->record_concurrent_mark_remark_end(); g1p->record_concurrent_mark_remark_end();
} }
@ -1188,7 +1163,6 @@ class CalcLiveObjectsClosure: public HeapRegionClosure {
CMBitMapRO* _bm; CMBitMapRO* _bm;
ConcurrentMark* _cm; ConcurrentMark* _cm;
COTracker* _co_tracker;
bool _changed; bool _changed;
bool _yield; bool _yield;
size_t _words_done; size_t _words_done;
@ -1216,12 +1190,10 @@ class CalcLiveObjectsClosure: public HeapRegionClosure {
public: public:
CalcLiveObjectsClosure(bool final, CalcLiveObjectsClosure(bool final,
CMBitMapRO *bm, ConcurrentMark *cm, CMBitMapRO *bm, ConcurrentMark *cm,
BitMap* region_bm, BitMap* card_bm, BitMap* region_bm, BitMap* card_bm) :
COTracker* co_tracker) :
_bm(bm), _cm(cm), _changed(false), _yield(true), _bm(bm), _cm(cm), _changed(false), _yield(true),
_words_done(0), _tot_live(0), _tot_used(0), _words_done(0), _tot_live(0), _tot_used(0),
_region_bm(region_bm), _card_bm(card_bm), _region_bm(region_bm), _card_bm(card_bm),_final(final),
_final(final), _co_tracker(co_tracker),
_regions_done(0), _start_vtime_sec(0.0) _regions_done(0), _start_vtime_sec(0.0)
{ {
_bottom_card_num = _bottom_card_num =
@ -1265,9 +1237,6 @@ public:
} }
bool doHeapRegion(HeapRegion* hr) { bool doHeapRegion(HeapRegion* hr) {
if (_co_tracker != NULL)
_co_tracker->update();
if (!_final && _regions_done == 0) if (!_final && _regions_done == 0)
_start_vtime_sec = os::elapsedVTime(); _start_vtime_sec = os::elapsedVTime();
@ -1396,12 +1365,6 @@ public:
if (elapsed_vtime_sec > (10.0 / 1000.0)) { if (elapsed_vtime_sec > (10.0 / 1000.0)) {
jlong sleep_time_ms = jlong sleep_time_ms =
(jlong) (elapsed_vtime_sec * _cm->cleanup_sleep_factor() * 1000.0); (jlong) (elapsed_vtime_sec * _cm->cleanup_sleep_factor() * 1000.0);
#if 0
gclog_or_tty->print_cr("CL: elapsed %1.4lf ms, sleep %1.4lf ms, "
"overhead %1.4lf",
elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
_co_tracker->concOverhead(os::elapsedTime()));
#endif
os::sleep(Thread::current(), sleep_time_ms, false); os::sleep(Thread::current(), sleep_time_ms, false);
_start_vtime_sec = end_vtime_sec; _start_vtime_sec = end_vtime_sec;
} }
@ -1421,15 +1384,11 @@ public:
void ConcurrentMark::calcDesiredRegions() { void ConcurrentMark::calcDesiredRegions() {
guarantee( _cleanup_co_tracker.enabled(), "invariant" );
_cleanup_co_tracker.start();
_region_bm.clear(); _region_bm.clear();
_card_bm.clear(); _card_bm.clear();
CalcLiveObjectsClosure calccl(false /*final*/, CalcLiveObjectsClosure calccl(false /*final*/,
nextMarkBitMap(), this, nextMarkBitMap(), this,
&_region_bm, &_card_bm, &_region_bm, &_card_bm);
&_cleanup_co_tracker);
G1CollectedHeap *g1h = G1CollectedHeap::heap(); G1CollectedHeap *g1h = G1CollectedHeap::heap();
g1h->heap_region_iterate(&calccl); g1h->heap_region_iterate(&calccl);
@ -1437,8 +1396,6 @@ void ConcurrentMark::calcDesiredRegions() {
calccl.reset(); calccl.reset();
g1h->heap_region_iterate(&calccl); g1h->heap_region_iterate(&calccl);
} while (calccl.changed()); } while (calccl.changed());
_cleanup_co_tracker.update(true);
} }
class G1ParFinalCountTask: public AbstractGangTask { class G1ParFinalCountTask: public AbstractGangTask {
@ -1472,8 +1429,7 @@ public:
void work(int i) { void work(int i) {
CalcLiveObjectsClosure calccl(true /*final*/, CalcLiveObjectsClosure calccl(true /*final*/,
_bm, _g1h->concurrent_mark(), _bm, _g1h->concurrent_mark(),
_region_bm, _card_bm, _region_bm, _card_bm);
NULL /* CO tracker */);
calccl.no_yield(); calccl.no_yield();
if (ParallelGCThreads > 0) { if (ParallelGCThreads > 0) {
_g1h->heap_region_par_iterate_chunked(&calccl, i, _g1h->heap_region_par_iterate_chunked(&calccl, i,
@ -1663,13 +1619,10 @@ void ConcurrentMark::cleanup() {
/* prev marking */ true); /* prev marking */ true);
} }
_cleanup_co_tracker.disable();
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
g1p->record_concurrent_mark_cleanup_start(); g1p->record_concurrent_mark_cleanup_start();
double start = os::elapsedTime(); double start = os::elapsedTime();
GCOverheadReporter::recordSTWStart(start);
// Do counting once more with the world stopped for good measure. // Do counting once more with the world stopped for good measure.
G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(), G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
@ -1774,7 +1727,6 @@ void ConcurrentMark::cleanup() {
// Statistics. // Statistics.
double end = os::elapsedTime(); double end = os::elapsedTime();
_cleanup_times.add((end - start) * 1000.0); _cleanup_times.add((end - start) * 1000.0);
GCOverheadReporter::recordSTWEnd(end);
// G1CollectedHeap::heap()->print(); // G1CollectedHeap::heap()->print();
// gclog_or_tty->print_cr("HEAP GC TIME STAMP : %d", // gclog_or_tty->print_cr("HEAP GC TIME STAMP : %d",
@ -2401,7 +2353,7 @@ class CSMarkOopClosure: public OopClosure {
// Now process this portion of this one. // Now process this portion of this one.
int lim = MIN2(next_arr_ind, len); int lim = MIN2(next_arr_ind, len);
for (int j = arr_ind; j < lim; j++) { for (int j = arr_ind; j < lim; j++) {
do_oop(aobj->obj_at_addr<T>(j)); do_oop(aobj->objArrayOopDesc::obj_at_addr<T>(j));
} }
} else { } else {
@ -2625,24 +2577,6 @@ void ConcurrentMark::registerCSetRegion(HeapRegion* hr) {
_should_gray_objects = true; _should_gray_objects = true;
} }
void ConcurrentMark::disable_co_trackers() {
if (has_aborted()) {
if (_cleanup_co_tracker.enabled())
_cleanup_co_tracker.disable();
for (int i = 0; i < (int)_max_task_num; ++i) {
CMTask* task = _tasks[i];
if (task->co_tracker_enabled())
task->disable_co_tracker();
}
} else {
guarantee( !_cleanup_co_tracker.enabled(), "invariant" );
for (int i = 0; i < (int)_max_task_num; ++i) {
CMTask* task = _tasks[i];
guarantee( !task->co_tracker_enabled(), "invariant" );
}
}
}
// abandon current marking iteration due to a Full GC // abandon current marking iteration due to a Full GC
void ConcurrentMark::abort() { void ConcurrentMark::abort() {
// Clear all marks to force marking thread to do nothing // Clear all marks to force marking thread to do nothing
@ -4018,7 +3952,6 @@ CMTask::CMTask(int task_id,
CMTaskQueue* task_queue, CMTaskQueue* task_queue,
CMTaskQueueSet* task_queues) CMTaskQueueSet* task_queues)
: _g1h(G1CollectedHeap::heap()), : _g1h(G1CollectedHeap::heap()),
_co_tracker(G1CMGroup),
_task_id(task_id), _cm(cm), _task_id(task_id), _cm(cm),
_claimed(false), _claimed(false),
_nextMarkBitMap(NULL), _hash_seed(17), _nextMarkBitMap(NULL), _hash_seed(17),

View File

@ -407,8 +407,6 @@ protected:
// verbose level // verbose level
CMVerboseLevel _verbose_level; CMVerboseLevel _verbose_level;
COTracker _cleanup_co_tracker;
// These two fields are used to implement the optimisation that // These two fields are used to implement the optimisation that
// avoids pushing objects on the global/region stack if there are // avoids pushing objects on the global/region stack if there are
// no collection set regions above the lowest finger. // no collection set regions above the lowest finger.
@ -720,8 +718,6 @@ public:
// Called to abort the marking cycle after a Full GC takes place. // Called to abort the marking cycle after a Full GC takes place.
void abort(); void abort();
void disable_co_trackers();
// This prints the global/local fingers. It is used for debugging. // This prints the global/local fingers. It is used for debugging.
NOT_PRODUCT(void print_finger();) NOT_PRODUCT(void print_finger();)
@ -773,9 +769,6 @@ private:
// number of calls to this task // number of calls to this task
int _calls; int _calls;
// concurrent overhead over a single CPU for this task
COTracker _co_tracker;
// when the virtual timer reaches this time, the marking step should // when the virtual timer reaches this time, the marking step should
// exit // exit
double _time_target_ms; double _time_target_ms;
@ -928,27 +921,6 @@ public:
void set_concurrent(bool concurrent) { _concurrent = concurrent; } void set_concurrent(bool concurrent) { _concurrent = concurrent; }
void enable_co_tracker() {
guarantee( !_co_tracker.enabled(), "invariant" );
_co_tracker.enable();
}
void disable_co_tracker() {
guarantee( _co_tracker.enabled(), "invariant" );
_co_tracker.disable();
}
bool co_tracker_enabled() {
return _co_tracker.enabled();
}
void reset_co_tracker(double starting_conc_overhead = 0.0) {
_co_tracker.reset(starting_conc_overhead);
}
void start_co_tracker() {
_co_tracker.start();
}
void update_co_tracker(bool force_end = false) {
_co_tracker.update(force_end);
}
// The main method of this class which performs a marking step // The main method of this class which performs a marking step
// trying not to exceed the given duration. However, it might exit // trying not to exceed the given duration. However, it might exit
// prematurely, according to some conditions (i.e. SATB buffers are // prematurely, according to some conditions (i.e. SATB buffers are

View File

@ -260,10 +260,6 @@ void ConcurrentMarkThread::run() {
} }
} }
_sts.join();
_cm->disable_co_trackers();
_sts.leave();
// we now want to allow clearing of the marking bitmap to be // we now want to allow clearing of the marking bitmap to be
// suspended by a collection pause. // suspended by a collection pause.
_sts.join(); _sts.join();

View File

@ -35,8 +35,7 @@ int ConcurrentZFThread::_zf_waits = 0;
int ConcurrentZFThread::_regions_filled = 0; int ConcurrentZFThread::_regions_filled = 0;
ConcurrentZFThread::ConcurrentZFThread() : ConcurrentZFThread::ConcurrentZFThread() :
ConcurrentGCThread(), ConcurrentGCThread()
_co_tracker(G1ZFGroup)
{ {
create_and_start(); create_and_start();
} }
@ -71,8 +70,6 @@ void ConcurrentZFThread::run() {
Thread* thr_self = Thread::current(); Thread* thr_self = Thread::current();
_vtime_start = os::elapsedVTime(); _vtime_start = os::elapsedVTime();
wait_for_universe_init(); wait_for_universe_init();
_co_tracker.enable();
_co_tracker.start();
G1CollectedHeap* g1 = G1CollectedHeap::heap(); G1CollectedHeap* g1 = G1CollectedHeap::heap();
_sts.join(); _sts.join();
@ -135,10 +132,7 @@ void ConcurrentZFThread::run() {
} }
_vtime_accum = (os::elapsedVTime() - _vtime_start); _vtime_accum = (os::elapsedVTime() - _vtime_start);
_sts.join(); _sts.join();
_co_tracker.update();
} }
_co_tracker.update(false);
_sts.leave(); _sts.leave();
assert(_should_terminate, "just checking"); assert(_should_terminate, "just checking");

View File

@ -42,8 +42,6 @@ class ConcurrentZFThread: public ConcurrentGCThread {
// Number of regions CFZ thread fills. // Number of regions CFZ thread fills.
static int _regions_filled; static int _regions_filled;
COTracker _co_tracker;
double _vtime_start; // Initial virtual time. double _vtime_start; // Initial virtual time.
// These are static because the "print_summary_info" method is, and // These are static because the "print_summary_info" method is, and

View File

@ -80,8 +80,8 @@ void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
int max_completed_queue, int max_completed_queue,
Mutex* lock, PtrQueueSet* fl_owner) { Mutex* lock, PtrQueueSet* fl_owner) {
PtrQueueSet::initialize(cbl_mon, fl_lock, max_completed_queue, fl_owner); PtrQueueSet::initialize(cbl_mon, fl_lock, max_completed_queue, fl_owner);
set_buffer_size(DCQBarrierQueueBufferSize); set_buffer_size(G1UpdateBufferSize);
set_process_completed_threshold(DCQBarrierProcessCompletedThreshold); set_process_completed_threshold(G1UpdateBufferQueueProcessingThreshold);
_shared_dirty_card_queue.set_lock(lock); _shared_dirty_card_queue.set_lock(lock);
_free_ids = new FreeIdSet((int) num_par_ids(), _cbl_mon); _free_ids = new FreeIdSet((int) num_par_ids(), _cbl_mon);

View File

@ -25,6 +25,8 @@
#include "incls/_precompiled.incl" #include "incls/_precompiled.incl"
#include "incls/_g1CollectedHeap.cpp.incl" #include "incls/_g1CollectedHeap.cpp.incl"
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
// turn it on so that the contents of the young list (scan-only / // turn it on so that the contents of the young list (scan-only /
// to-be-collected) are printed at "strategic" points before / during // to-be-collected) are printed at "strategic" points before / during
// / after the collection --- this is useful for debugging // / after the collection --- this is useful for debugging
@ -927,7 +929,6 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty); TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
double start = os::elapsedTime(); double start = os::elapsedTime();
GCOverheadReporter::recordSTWStart(start);
g1_policy()->record_full_collection_start(); g1_policy()->record_full_collection_start();
gc_prologue(true); gc_prologue(true);
@ -1049,7 +1050,6 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
} }
double end = os::elapsedTime(); double end = os::elapsedTime();
GCOverheadReporter::recordSTWEnd(end);
g1_policy()->record_full_collection_end(); g1_policy()->record_full_collection_end();
#ifdef TRACESPINNING #ifdef TRACESPINNING
@ -1396,6 +1396,9 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
vm_exit_during_initialization("Failed necessary allocation."); vm_exit_during_initialization("Failed necessary allocation.");
} }
_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
int n_queues = MAX2((int)ParallelGCThreads, 1); int n_queues = MAX2((int)ParallelGCThreads, 1);
_task_queues = new RefToScanQueueSet(n_queues); _task_queues = new RefToScanQueueSet(n_queues);
@ -1548,9 +1551,10 @@ jint G1CollectedHeap::initialize() {
const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
const size_t cards_per_region = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift;
size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
guarantee(cards_per_region < max_cards_per_region, "too many cards per region"); guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region,
"too many cards per region");
_bot_shared = new G1BlockOffsetSharedArray(_reserved, _bot_shared = new G1BlockOffsetSharedArray(_reserved,
heap_word_size(init_byte_size)); heap_word_size(init_byte_size));
@ -1591,7 +1595,7 @@ jint G1CollectedHeap::initialize() {
JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
DirtyCardQ_FL_lock, DirtyCardQ_FL_lock,
G1DirtyCardQueueMax, G1UpdateBufferQueueMaxLength,
Shared_DirtyCardQ_lock); Shared_DirtyCardQ_lock);
if (G1DeferredRSUpdate) { if (G1DeferredRSUpdate) {
@ -1610,9 +1614,6 @@ jint G1CollectedHeap::initialize() {
// Do later initialization work for concurrent refinement. // Do later initialization work for concurrent refinement.
_cg1r->init(); _cg1r->init();
const char* group_names[] = { "CR", "ZF", "CM", "CL" };
GCOverheadReporter::initGCOverheadReporter(4, group_names);
return JNI_OK; return JNI_OK;
} }
@ -1637,6 +1638,9 @@ size_t G1CollectedHeap::capacity() const {
void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent, void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent,
int worker_i) { int worker_i) {
// Clean cards in the hot card cache
concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set());
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
int n_completed_buffers = 0; int n_completed_buffers = 0;
while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) { while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) {
@ -1645,9 +1649,6 @@ void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent,
g1_policy()->record_update_rs_processed_buffers(worker_i, g1_policy()->record_update_rs_processed_buffers(worker_i,
(double) n_completed_buffers); (double) n_completed_buffers);
dcqs.clear_n_completed_buffers(); dcqs.clear_n_completed_buffers();
// Finish up the queue...
if (worker_i == 0) concurrent_g1_refine()->clean_up_cache(worker_i,
g1_rem_set());
assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
} }
@ -2414,8 +2415,6 @@ void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
} }
void G1CollectedHeap::print_tracing_info() const { void G1CollectedHeap::print_tracing_info() const {
concurrent_g1_refine()->print_final_card_counts();
// We'll overload this to mean "trace GC pause statistics." // We'll overload this to mean "trace GC pause statistics."
if (TraceGen0Time || TraceGen1Time) { if (TraceGen0Time || TraceGen1Time) {
// The "G1CollectorPolicy" is keeping track of these stats, so delegate // The "G1CollectorPolicy" is keeping track of these stats, so delegate
@ -2433,8 +2432,6 @@ void G1CollectedHeap::print_tracing_info() const {
} }
g1_policy()->print_yg_surv_rate_info(); g1_policy()->print_yg_surv_rate_info();
GCOverheadReporter::printGCOverhead();
SpecializationStats::print(); SpecializationStats::print();
} }
@ -2671,7 +2668,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
// The elapsed time induced by the start time below deliberately elides // The elapsed time induced by the start time below deliberately elides
// the possible verification above. // the possible verification above.
double start_time_sec = os::elapsedTime(); double start_time_sec = os::elapsedTime();
GCOverheadReporter::recordSTWStart(start_time_sec);
size_t start_used_bytes = used(); size_t start_used_bytes = used();
g1_policy()->record_collection_pause_start(start_time_sec, g1_policy()->record_collection_pause_start(start_time_sec,
@ -2749,8 +2745,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
_in_cset_fast_test = NULL; _in_cset_fast_test = NULL;
_in_cset_fast_test_base = NULL; _in_cset_fast_test_base = NULL;
release_gc_alloc_regions(false /* totally */);
cleanup_surviving_young_words(); cleanup_surviving_young_words();
if (g1_policy()->in_young_gc_mode()) { if (g1_policy()->in_young_gc_mode()) {
@ -2800,7 +2794,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
double end_time_sec = os::elapsedTime(); double end_time_sec = os::elapsedTime();
double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
g1_policy()->record_pause_time_ms(pause_time_ms); g1_policy()->record_pause_time_ms(pause_time_ms);
GCOverheadReporter::recordSTWEnd(end_time_sec);
g1_policy()->record_collection_pause_end(abandoned); g1_policy()->record_collection_pause_end(abandoned);
assert(regions_accounted_for(), "Region leakage."); assert(regions_accounted_for(), "Region leakage.");
@ -2845,6 +2838,11 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
if (PrintHeapAtGC) { if (PrintHeapAtGC) {
Universe::print_heap_after_gc(); Universe::print_heap_after_gc();
} }
if (G1SummarizeRSetStats &&
(G1SummarizeRSetStatsPeriod > 0) &&
(total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
g1_rem_set()->print_summary_info();
}
} }
void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
@ -4106,6 +4104,8 @@ void G1CollectedHeap::evacuate_collection_set() {
g1_rem_set()->prepare_for_oops_into_collection_set_do(); g1_rem_set()->prepare_for_oops_into_collection_set_do();
concurrent_g1_refine()->set_use_cache(false); concurrent_g1_refine()->set_use_cache(false);
concurrent_g1_refine()->clear_hot_cache_claimed_index();
int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
set_par_threads(n_workers); set_par_threads(n_workers);
G1ParTask g1_par_task(this, n_workers, _task_queues); G1ParTask g1_par_task(this, n_workers, _task_queues);
@ -4136,8 +4136,10 @@ void G1CollectedHeap::evacuate_collection_set() {
G1KeepAliveClosure keep_alive(this); G1KeepAliveClosure keep_alive(this);
JNIHandles::weak_oops_do(&is_alive, &keep_alive); JNIHandles::weak_oops_do(&is_alive, &keep_alive);
} }
release_gc_alloc_regions(false /* totally */);
g1_rem_set()->cleanup_after_oops_into_collection_set_do(); g1_rem_set()->cleanup_after_oops_into_collection_set_do();
concurrent_g1_refine()->clear_hot_cache();
concurrent_g1_refine()->set_use_cache(true); concurrent_g1_refine()->set_use_cache(true);
finalize_for_evac_failure(); finalize_for_evac_failure();
@ -4268,12 +4270,18 @@ void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRe
class G1ParCleanupCTTask : public AbstractGangTask { class G1ParCleanupCTTask : public AbstractGangTask {
CardTableModRefBS* _ct_bs; CardTableModRefBS* _ct_bs;
G1CollectedHeap* _g1h; G1CollectedHeap* _g1h;
HeapRegion* volatile _so_head;
HeapRegion* volatile _su_head;
public: public:
G1ParCleanupCTTask(CardTableModRefBS* ct_bs, G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
G1CollectedHeap* g1h) : G1CollectedHeap* g1h,
HeapRegion* scan_only_list,
HeapRegion* survivor_list) :
AbstractGangTask("G1 Par Cleanup CT Task"), AbstractGangTask("G1 Par Cleanup CT Task"),
_ct_bs(ct_bs), _ct_bs(ct_bs),
_g1h(g1h) _g1h(g1h),
_so_head(scan_only_list),
_su_head(survivor_list)
{ } { }
void work(int i) { void work(int i) {
@ -4281,22 +4289,64 @@ public:
while (r = _g1h->pop_dirty_cards_region()) { while (r = _g1h->pop_dirty_cards_region()) {
clear_cards(r); clear_cards(r);
} }
// Redirty the cards of the scan-only and survivor regions.
dirty_list(&this->_so_head);
dirty_list(&this->_su_head);
} }
void clear_cards(HeapRegion* r) { void clear_cards(HeapRegion* r) {
// Cards for Survivor and Scan-Only regions will be dirtied later. // Cards for Survivor and Scan-Only regions will be dirtied later.
if (!r->is_scan_only() && !r->is_survivor()) { if (!r->is_scan_only() && !r->is_survivor()) {
_ct_bs->clear(MemRegion(r->bottom(), r->end())); _ct_bs->clear(MemRegion(r->bottom(), r->end()));
} }
} }
void dirty_list(HeapRegion* volatile * head_ptr) {
HeapRegion* head;
do {
// Pop region off the list.
head = *head_ptr;
if (head != NULL) {
HeapRegion* r = (HeapRegion*)
Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head);
if (r == head) {
assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list");
_ct_bs->dirty(MemRegion(r->bottom(), r->end()));
}
}
} while (*head_ptr != NULL);
}
}; };
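The dirty_list() helper above pops regions off a shared singly-linked list with a compare-and-swap loop rather than a lock. A minimal standalone sketch of the same pattern, using std::atomic in place of HotSpot's Atomic::cmpxchg_ptr (Node and the names below are illustrative only, not part of the patch):

#include <atomic>
#include <cstdio>

struct Node { int id; Node* next; };

// Drain a shared list head: read it, try to swing it to head->next, and only
// the thread whose CAS succeeds processes that node -- the same shape as
// dirty_list() re-dirtying one region per successful exchange.
void drain(std::atomic<Node*>& head_ptr) {
  Node* head;
  do {
    head = head_ptr.load();
    if (head != nullptr) {
      Node* expected = head;
      if (head_ptr.compare_exchange_strong(expected, head->next)) {
        std::printf("processed node %d\n", head->id);  // re-dirty cards here
      }
    }
  } while (head_ptr.load() != nullptr);
}

int main() {
  Node c = {2, nullptr}, b = {1, &c}, a = {0, &b};
  std::atomic<Node*> head(&a);
  drain(head);
  return 0;
}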
#ifndef PRODUCT
class G1VerifyCardTableCleanup: public HeapRegionClosure {
CardTableModRefBS* _ct_bs;
public:
G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs)
: _ct_bs(ct_bs)
{ }
virtual bool doHeapRegion(HeapRegion* r)
{
MemRegion mr(r->bottom(), r->end());
if (r->is_scan_only() || r->is_survivor()) {
_ct_bs->verify_dirty_region(mr);
} else {
_ct_bs->verify_clean_region(mr);
}
return false;
}
};
#endif
void G1CollectedHeap::cleanUpCardTable() { void G1CollectedHeap::cleanUpCardTable() {
CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
double start = os::elapsedTime(); double start = os::elapsedTime();
// Iterate over the dirty cards region list. // Iterate over the dirty cards region list.
G1ParCleanupCTTask cleanup_task(ct_bs, this); G1ParCleanupCTTask cleanup_task(ct_bs, this,
_young_list->first_scan_only_region(),
_young_list->first_survivor_region());
if (ParallelGCThreads > 0) { if (ParallelGCThreads > 0) {
set_par_threads(workers()->total_workers()); set_par_threads(workers()->total_workers());
workers()->run_task(&cleanup_task); workers()->run_task(&cleanup_task);
@ -4312,18 +4362,22 @@ void G1CollectedHeap::cleanUpCardTable() {
} }
r->set_next_dirty_cards_region(NULL); r->set_next_dirty_cards_region(NULL);
} }
}
// now, redirty the cards of the scan-only and survivor regions // now, redirty the cards of the scan-only and survivor regions
// (it seemed faster to do it this way, instead of iterating over // (it seemed faster to do it this way, instead of iterating over
// all regions and then clearing / dirtying as appropriate) // all regions and then clearing / dirtying as appropriate)
dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region()); dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
}
double elapsed = os::elapsedTime() - start; double elapsed = os::elapsedTime() - start;
g1_policy()->record_clear_ct_time( elapsed * 1000.0); g1_policy()->record_clear_ct_time( elapsed * 1000.0);
#ifndef PRODUCT
if (G1VerifyCTCleanup || VerifyAfterGC) {
G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
heap_region_iterate(&cleanup_verifier);
}
#endif
} }
void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) { void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
if (g1_policy()->should_do_collection_pause(word_size)) { if (g1_policy()->should_do_collection_pause(word_size)) {
do_collection_pause(); do_collection_pause();
@ -5016,7 +5070,7 @@ bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
return hr->is_in(p); return hr->is_in(p);
} }
} }
#endif // PRODUCT #endif // !PRODUCT
void G1CollectedHeap::g1_unimplemented() { void G1CollectedHeap::g1_unimplemented() {
// Unimplemented(); // Unimplemented();

View File

@ -167,16 +167,11 @@ class G1CollectedHeap : public SharedHeap {
friend class G1MarkSweep; friend class G1MarkSweep;
private: private:
enum SomePrivateConstants {
VeryLargeInBytes = HeapRegion::GrainBytes/2,
VeryLargeInWords = VeryLargeInBytes/HeapWordSize,
MinHeapDeltaBytes = 10 * HeapRegion::GrainBytes, // FIXME
NumAPIs = HeapRegion::MaxAge
};
// The one and only G1CollectedHeap, so static functions can find it. // The one and only G1CollectedHeap, so static functions can find it.
static G1CollectedHeap* _g1h; static G1CollectedHeap* _g1h;
static size_t _humongous_object_threshold_in_words;
// Storage for the G1 heap (excludes the permanent generation). // Storage for the G1 heap (excludes the permanent generation).
VirtualSpace _g1_storage; VirtualSpace _g1_storage;
MemRegion _g1_reserved; MemRegion _g1_reserved;
@ -859,7 +854,7 @@ public:
return _g1_committed; return _g1_committed;
} }
NOT_PRODUCT( bool is_in_closed_subset(const void* p) const; ) NOT_PRODUCT(bool is_in_closed_subset(const void* p) const;)
// Dirty card table entries covering a list of young regions. // Dirty card table entries covering a list of young regions.
void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list); void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list);
@ -1021,7 +1016,7 @@ public:
// Returns "true" iff the given word_size is "very large". // Returns "true" iff the given word_size is "very large".
static bool isHumongous(size_t word_size) { static bool isHumongous(size_t word_size) {
return word_size >= VeryLargeInWords; return word_size >= _humongous_object_threshold_in_words;
} }
// Update mod union table with the set of dirty cards. // Update mod union table with the set of dirty cards.
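One consequence of the switch above: the humongous threshold is now derived from the run-time region size instead of a compile-time constant. With the 1 MB regions this patch sets up by default and 8-byte heap words (a 64-bit build), _humongous_object_threshold_in_words is GrainWords / 2 = (1 MB / 8) / 2 = 65536 words, so any allocation of 512 KB or more is treated as humongous; with 4 MB regions the cut-off rises to 2 MB.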

View File

@ -94,7 +94,14 @@ G1CollectorPolicy::G1CollectorPolicy() :
_summary(new Summary()), _summary(new Summary()),
_abandoned_summary(new AbandonedSummary()), _abandoned_summary(new AbandonedSummary()),
#ifndef PRODUCT
_cur_clear_ct_time_ms(0.0), _cur_clear_ct_time_ms(0.0),
_min_clear_cc_time_ms(-1.0),
_max_clear_cc_time_ms(-1.0),
_cur_clear_cc_time_ms(0.0),
_cum_clear_cc_time_ms(0.0),
_num_cc_clears(0L),
#endif
_region_num_young(0), _region_num_young(0),
_region_num_tenured(0), _region_num_tenured(0),
@ -194,6 +201,11 @@ G1CollectorPolicy::G1CollectorPolicy() :
_survivors_age_table(true) _survivors_age_table(true)
{ {
// Set up the region size and associated fields. Given that the
// policy is created before the heap, we have to set this up here,
// so it's done as soon as possible.
HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
_recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
_prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
@ -986,8 +998,6 @@ void G1CollectorPolicy::record_full_collection_end() {
double full_gc_time_sec = end_sec - _cur_collection_start_sec; double full_gc_time_sec = end_sec - _cur_collection_start_sec;
double full_gc_time_ms = full_gc_time_sec * 1000.0; double full_gc_time_ms = full_gc_time_sec * 1000.0;
checkpoint_conc_overhead();
_all_full_gc_times_ms->add(full_gc_time_ms); _all_full_gc_times_ms->add(full_gc_time_ms);
update_recent_gc_times(end_sec, full_gc_time_ms); update_recent_gc_times(end_sec, full_gc_time_ms);
@ -1157,7 +1167,6 @@ void G1CollectorPolicy::record_concurrent_mark_init_end() {
double end_time_sec = os::elapsedTime(); double end_time_sec = os::elapsedTime();
double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0; double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
_concurrent_mark_init_times_ms->add(elapsed_time_ms); _concurrent_mark_init_times_ms->add(elapsed_time_ms);
checkpoint_conc_overhead();
record_concurrent_mark_init_end_pre(elapsed_time_ms); record_concurrent_mark_init_end_pre(elapsed_time_ms);
_mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true); _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
@ -1171,7 +1180,6 @@ void G1CollectorPolicy::record_concurrent_mark_remark_start() {
void G1CollectorPolicy::record_concurrent_mark_remark_end() { void G1CollectorPolicy::record_concurrent_mark_remark_end() {
double end_time_sec = os::elapsedTime(); double end_time_sec = os::elapsedTime();
double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0; double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
checkpoint_conc_overhead();
_concurrent_mark_remark_times_ms->add(elapsed_time_ms); _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
_cur_mark_stop_world_time_ms += elapsed_time_ms; _cur_mark_stop_world_time_ms += elapsed_time_ms;
_prev_collection_pause_end_ms += elapsed_time_ms; _prev_collection_pause_end_ms += elapsed_time_ms;
@ -1203,7 +1211,6 @@ record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
// The important thing about this is that it includes "os::elapsedTime". // The important thing about this is that it includes "os::elapsedTime".
void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() { void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
checkpoint_conc_overhead();
double end_time_sec = os::elapsedTime(); double end_time_sec = os::elapsedTime();
double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0; double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
_concurrent_mark_cleanup_times_ms->add(elapsed_time_ms); _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
@ -1418,8 +1425,6 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
} }
#endif // PRODUCT #endif // PRODUCT
checkpoint_conc_overhead();
if (in_young_gc_mode()) { if (in_young_gc_mode()) {
last_pause_included_initial_mark = _should_initiate_conc_mark; last_pause_included_initial_mark = _should_initiate_conc_mark;
if (last_pause_included_initial_mark) if (last_pause_included_initial_mark)
@ -1648,6 +1653,15 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
print_stats(1, "Object Copying", obj_copy_time); print_stats(1, "Object Copying", obj_copy_time);
} }
} }
#ifndef PRODUCT
print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
if (_num_cc_clears > 0) {
print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
}
#endif
print_stats(1, "Other", other_time_ms); print_stats(1, "Other", other_time_ms);
for (int i = 0; i < _aux_num; ++i) { for (int i = 0; i < _aux_num; ++i) {
if (_cur_aux_times_set[i]) { if (_cur_aux_times_set[i]) {
@ -2509,19 +2523,6 @@ region_num_to_mbs(int length) {
} }
#endif // PRODUCT #endif // PRODUCT
void
G1CollectorPolicy::checkpoint_conc_overhead() {
double conc_overhead = 0.0;
if (G1AccountConcurrentOverhead)
conc_overhead = COTracker::totalPredConcOverhead();
_mmu_tracker->update_conc_overhead(conc_overhead);
#if 0
gclog_or_tty->print(" CO %1.4lf TARGET %1.4lf",
conc_overhead, _mmu_tracker->max_gc_time());
#endif
}
size_t G1CollectorPolicy::max_regions(int purpose) { size_t G1CollectorPolicy::max_regions(int purpose) {
switch (purpose) { switch (purpose) {
case GCAllocForSurvived: case GCAllocForSurvived:

View File

@ -92,9 +92,7 @@ protected:
int _parallel_gc_threads; int _parallel_gc_threads;
enum SomePrivateConstants { enum SomePrivateConstants {
NumPrevPausesForHeuristics = 10, NumPrevPausesForHeuristics = 10
NumPrevGCsForHeuristics = 10,
NumAPIs = HeapRegion::MaxAge
}; };
G1MMUTracker* _mmu_tracker; G1MMUTracker* _mmu_tracker;
@ -112,7 +110,6 @@ protected:
return 8*M; return 8*M;
} }
double _cur_collection_start_sec; double _cur_collection_start_sec;
size_t _cur_collection_pause_used_at_start_bytes; size_t _cur_collection_pause_used_at_start_bytes;
size_t _cur_collection_pause_used_regions_at_start; size_t _cur_collection_pause_used_regions_at_start;
@ -122,6 +119,15 @@ protected:
double _cur_clear_ct_time_ms; double _cur_clear_ct_time_ms;
bool _satb_drain_time_set; bool _satb_drain_time_set;
#ifndef PRODUCT
// Card Table Count Cache stats
double _min_clear_cc_time_ms; // min
double _max_clear_cc_time_ms; // max
double _cur_clear_cc_time_ms; // clearing time during current pause
double _cum_clear_cc_time_ms; // cumulative clearing time
jlong _num_cc_clears; // number of times the card count cache has been cleared
#endif
double _cur_CH_strong_roots_end_sec; double _cur_CH_strong_roots_end_sec;
double _cur_CH_strong_roots_dur_ms; double _cur_CH_strong_roots_dur_ms;
double _cur_G1_strong_roots_end_sec; double _cur_G1_strong_roots_end_sec;
@ -931,6 +937,18 @@ public:
_cur_aux_times_ms[i] += ms; _cur_aux_times_ms[i] += ms;
} }
#ifndef PRODUCT
void record_cc_clear_time(double ms) {
if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
_min_clear_cc_time_ms = ms;
if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
_max_clear_cc_time_ms = ms;
_cur_clear_cc_time_ms = ms;
_cum_clear_cc_time_ms += ms;
_num_cc_clears++;
}
#endif
// Record the fact that "bytes" bytes allocated in a region. // Record the fact that "bytes" bytes allocated in a region.
void record_before_bytes(size_t bytes); void record_before_bytes(size_t bytes);
void record_after_bytes(size_t bytes); void record_after_bytes(size_t bytes);
@ -961,8 +979,6 @@ public:
void set_should_initiate_conc_mark() { _should_initiate_conc_mark = true; } void set_should_initiate_conc_mark() { _should_initiate_conc_mark = true; }
void unset_should_initiate_conc_mark(){ _should_initiate_conc_mark = false; } void unset_should_initiate_conc_mark(){ _should_initiate_conc_mark = false; }
void checkpoint_conc_overhead();
// If an expansion would be appropriate, because recent GC overhead had // If an expansion would be appropriate, because recent GC overhead had
// exceeded the desired limit, return an amount to expand by. // exceeded the desired limit, return an amount to expand by.
virtual size_t expansion_amount(); virtual size_t expansion_amount();

View File

@ -37,21 +37,7 @@
G1MMUTracker::G1MMUTracker(double time_slice, double max_gc_time) : G1MMUTracker::G1MMUTracker(double time_slice, double max_gc_time) :
_time_slice(time_slice), _time_slice(time_slice),
_max_gc_time(max_gc_time), _max_gc_time(max_gc_time) { }
_conc_overhead_time_sec(0.0) { }
void
G1MMUTracker::update_conc_overhead(double conc_overhead) {
double conc_overhead_time_sec = _time_slice * conc_overhead;
if (conc_overhead_time_sec > 0.9 * _max_gc_time) {
// We are screwed, as we only seem to have <10% of the soft
// real-time goal available for pauses. Let's admit defeat and
// allow something more generous as a pause target.
conc_overhead_time_sec = 0.75 * _max_gc_time;
}
_conc_overhead_time_sec = conc_overhead_time_sec;
}
G1MMUTrackerQueue::G1MMUTrackerQueue(double time_slice, double max_gc_time) : G1MMUTrackerQueue::G1MMUTrackerQueue(double time_slice, double max_gc_time) :
G1MMUTracker(time_slice, max_gc_time), G1MMUTracker(time_slice, max_gc_time),
@ -128,7 +114,7 @@ double G1MMUTrackerQueue::longest_pause_internal(double current_time) {
while( 1 ) { while( 1 ) {
double gc_time = double gc_time =
calculate_gc_time(current_time + target_time) + _conc_overhead_time_sec; calculate_gc_time(current_time + target_time);
double diff = target_time + gc_time - _max_gc_time; double diff = target_time + gc_time - _max_gc_time;
if (!is_double_leq_0(diff)) { if (!is_double_leq_0(diff)) {
target_time -= diff; target_time -= diff;

View File

@ -33,19 +33,15 @@ protected:
double _time_slice; double _time_slice;
double _max_gc_time; // this is per time slice double _max_gc_time; // this is per time slice
double _conc_overhead_time_sec;
public: public:
G1MMUTracker(double time_slice, double max_gc_time); G1MMUTracker(double time_slice, double max_gc_time);
void update_conc_overhead(double conc_overhead);
virtual void add_pause(double start, double end, bool gc_thread) = 0; virtual void add_pause(double start, double end, bool gc_thread) = 0;
virtual double longest_pause(double current_time) = 0; virtual double longest_pause(double current_time) = 0;
virtual double when_sec(double current_time, double pause_time) = 0; virtual double when_sec(double current_time, double pause_time) = 0;
double max_gc_time() { double max_gc_time() {
return _max_gc_time - _conc_overhead_time_sec; return _max_gc_time;
} }
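With the concurrent-overhead term gone, max_gc_time() is again just the fixed per-slice pause budget. A compact sketch of the budget idea the tracker enforces -- how long the next pause may be so that pause time inside the trailing window stays within max_gc_time -- looking only at one window ending "now" (a deliberate simplification for illustration, not the G1MMUTrackerQueue algorithm):

#include <algorithm>
#include <utility>
#include <vector>

// pauses: (start, end) times in seconds. Returns the largest pause that could
// start right now without pushing pause time in the trailing 'time_slice'
// window above 'max_gc_time'.
double allowed_pause(const std::vector<std::pair<double, double> >& pauses,
                     double now, double time_slice, double max_gc_time) {
  double window_start = now - time_slice;
  double used = 0.0;
  for (size_t i = 0; i < pauses.size(); ++i) {
    double s = std::max(pauses[i].first, window_start);
    double e = std::min(pauses[i].second, now);
    if (e > s) used += e - s;
  }
  return std::max(0.0, max_gc_time - used);
}

int main() {
  std::vector<std::pair<double, double> > p;
  p.push_back(std::make_pair(9.95, 9.98));  // a 30 ms pause just before now
  // 200 ms slice, 50 ms budget: roughly 20 ms remain for the next pause.
  return allowed_pause(p, 10.0, 0.2, 0.05) > 0.019 ? 0 : 1;
}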
inline bool now_max_gc(double current_time) { inline bool now_max_gc(double current_time) {

View File

@ -102,9 +102,14 @@ void G1MarkSweep::allocate_stacks() {
GenMarkSweep::_marking_stack = GenMarkSweep::_marking_stack =
new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true); new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
size_t size = SystemDictionary::number_of_classes() * 2; int size = SystemDictionary::number_of_classes() * 2;
GenMarkSweep::_revisit_klass_stack = GenMarkSweep::_revisit_klass_stack =
new (ResourceObj::C_HEAP) GrowableArray<Klass*>((int)size, true); new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
// (#klass/k)^2 for k ~ 10 appears a better fit, but this will have to do
// for now until we have a chance to work out a more optimal setting.
GenMarkSweep::_revisit_mdo_stack =
new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
} }
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading, void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
@ -146,6 +151,11 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
assert(GenMarkSweep::_marking_stack->is_empty(), assert(GenMarkSweep::_marking_stack->is_empty(),
"stack should be empty by now"); "stack should be empty by now");
// Visit memoized MDO's and clear any unmarked weak refs
GenMarkSweep::follow_mdo_weak_refs();
assert(GenMarkSweep::_marking_stack->is_empty(), "just drained");
// Visit symbol and interned string tables and delete unmarked oops // Visit symbol and interned string tables and delete unmarked oops
SymbolTable::unlink(&GenMarkSweep::is_alive); SymbolTable::unlink(&GenMarkSweep::is_alive);
StringTable::unlink(&GenMarkSweep::is_alive); StringTable::unlink(&GenMarkSweep::is_alive);

View File

@ -676,61 +676,12 @@ void HRInto_G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
static IntHistogram out_of_histo(50, 50); static IntHistogram out_of_histo(50, 50);
void HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i) { void HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i) {
// If the card is no longer dirty, nothing to do.
if (*card_ptr != CardTableModRefBS::dirty_card_val()) return;
// Construct the region representing the card. // Construct the region representing the card.
HeapWord* start = _ct_bs->addr_for(card_ptr); HeapWord* start = _ct_bs->addr_for(card_ptr);
// And find the region containing it. // And find the region containing it.
HeapRegion* r = _g1->heap_region_containing(start); HeapRegion* r = _g1->heap_region_containing(start);
if (r == NULL) { assert(r != NULL, "unexpected null");
guarantee(_g1->is_in_permanent(start), "Or else where?");
return; // Not in the G1 heap (might be in perm, for example.)
}
// Why do we have to check here whether a card is on a young region,
// given that we dirty young regions and, as a result, the
// post-barrier is supposed to filter them out and never to enqueue
// them? When we allocate a new region as the "allocation region" we
// actually dirty its cards after we release the lock, since card
// dirtying while holding the lock was a performance bottleneck. So,
// as a result, it is possible for other threads to actually
// allocate objects in the region (after they acquire the lock)
// before all the cards on the region are dirtied. This is unlikely,
// and it doesn't happen often, but it can happen. So, the extra
// check below filters out those cards.
if (r->is_young()) {
return;
}
// While we are processing RSet buffers during the collection, we
// actually don't want to scan any cards on the collection set,
// since we don't want to update remembered sets with entries that
// point into the collection set, given that live objects from the
// collection set are about to move and such entries will be stale
// very soon. This change also deals with a reliability issue which
// involves scanning a card in the collection set and coming across
// an array that was being chunked and looking malformed. Note,
// however, that if evacuation fails, we have to scan any objects
// that were not moved and create any missing entries.
if (r->in_collection_set()) {
return;
}
// Should we defer it?
if (_cg1r->use_cache()) {
card_ptr = _cg1r->cache_insert(card_ptr);
// If it was not an eviction, nothing to do.
if (card_ptr == NULL) return;
// OK, we have to reset the card start, region, etc.
start = _ct_bs->addr_for(card_ptr);
r = _g1->heap_region_containing(start);
if (r == NULL) {
guarantee(_g1->is_in_permanent(start), "Or else where?");
return; // Not in the G1 heap (might be in perm, for example.)
}
guarantee(!r->is_young(), "It was evicted in the current minor cycle.");
}
HeapWord* end = _ct_bs->addr_for(card_ptr + 1); HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
MemRegion dirtyRegion(start, end); MemRegion dirtyRegion(start, end);
@ -774,6 +725,106 @@ void HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {
} }
} }
void HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {
// If the card is no longer dirty, nothing to do.
if (*card_ptr != CardTableModRefBS::dirty_card_val()) return;
// Construct the region representing the card.
HeapWord* start = _ct_bs->addr_for(card_ptr);
// And find the region containing it.
HeapRegion* r = _g1->heap_region_containing(start);
if (r == NULL) {
guarantee(_g1->is_in_permanent(start), "Or else where?");
return; // Not in the G1 heap (might be in perm, for example.)
}
// Why do we have to check here whether a card is on a young region,
// given that we dirty young regions and, as a result, the
// post-barrier is supposed to filter them out and never to enqueue
// them? When we allocate a new region as the "allocation region" we
// actually dirty its cards after we release the lock, since card
// dirtying while holding the lock was a performance bottleneck. So,
// as a result, it is possible for other threads to actually
// allocate objects in the region (after they acquire the lock)
// before all the cards on the region are dirtied. This is unlikely,
// and it doesn't happen often, but it can happen. So, the extra
// check below filters out those cards.
if (r->is_young()) {
return;
}
// While we are processing RSet buffers during the collection, we
// actually don't want to scan any cards on the collection set,
// since we don't want to update remebered sets with entries that
// point into the collection set, given that live objects from the
// collection set are about to move and such entries will be stale
// very soon. This change also deals with a reliability issue which
// involves scanning a card in the collection set and coming across
// an array that was being chunked and looking malformed. Note,
// however, that if evacuation fails, we have to scan any objects
// that were not moved and create any missing entries.
if (r->in_collection_set()) {
return;
}
// Should we defer processing the card?
//
// Previously the result from the insert_cache call would be
// either card_ptr (implying that card_ptr was currently "cold"),
// null (meaning we had inserted the card ptr into the "hot"
// cache, which had some headroom), or a "hot" card ptr
// extracted from the "hot" cache.
//
// Now that the _card_counts cache in the ConcurrentG1Refine
// instance is an evicting hash table, the result we get back
// could be from evicting the card ptr in an already occupied
// bucket (in which case we have replaced the card ptr in the
// bucket with card_ptr and "defer" is set to false). To avoid
// having a data structure (updates to which would need a lock)
// to hold these unprocessed dirty cards, we need to immediately
// process card_ptr. The actions needed to be taken on return
// from cache_insert are summarized in the following table:
//
// res defer action
// --------------------------------------------------------------
// null false card evicted from _card_counts & replaced with
// card_ptr; evicted ptr added to hot cache.
// No need to process res; immediately process card_ptr
//
// null true card not evicted from _card_counts; card_ptr added
// to hot cache.
// Nothing to do.
//
// non-null false card evicted from _card_counts & replaced with
// card_ptr; evicted ptr is currently "cold" or
// caused an eviction from the hot cache.
// Immediately process res; process card_ptr.
//
// non-null true card not evicted from _card_counts; card_ptr is
// currently cold, or caused an eviction from hot
// cache.
// Immediately process res; no need to process card_ptr.
jbyte* res = card_ptr;
bool defer = false;
if (_cg1r->use_cache()) {
jbyte* res = _cg1r->cache_insert(card_ptr, &defer);
if (res != NULL && (res != card_ptr || defer)) {
start = _ct_bs->addr_for(res);
r = _g1->heap_region_containing(start);
if (r == NULL) {
assert(_g1->is_in_permanent(start), "Or else where?");
} else {
guarantee(!r->is_young(), "It was evicted in the current minor cycle.");
// Process card pointer we get back from the hot card cache
concurrentRefineOneCard_impl(res, worker_i);
}
}
}
if (!defer) {
concurrentRefineOneCard_impl(card_ptr, worker_i);
}
}
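The comment table above fully describes the contract of cache_insert, which is easy to lose among the caller's branches. A toy standalone version with the same return/defer contract (the hashing, counting and eviction policy here is invented for illustration and is not ConcurrentG1Refine's):

#include <cstddef>
#include <cstdint>

struct ToyHotCardCache {
  static const int N = 64;     // one count bucket and one hot-ring slot each
  static const int HOT = 4;    // seen this many times => "hot"
  const uint8_t* keys[N];
  int counts[N];
  const uint8_t* ring[N];
  int ring_pos;

  ToyHotCardCache() : ring_pos(0) {
    for (int i = 0; i < N; ++i) { keys[i] = 0; counts[i] = 0; ring[i] = 0; }
  }

  // Returns a card the caller must refine now (or null); sets *defer when
  // card_ptr itself has been parked and needs no immediate refinement.
  const uint8_t* cache_insert(const uint8_t* card_ptr, bool* defer) {
    *defer = false;
    size_t slot = (reinterpret_cast<uintptr_t>(card_ptr) >> 9) % N;
    if (keys[slot] != card_ptr) {
      // Count-table collision: the old occupant falls out and the caller
      // refines it right away ("non-null, defer == false"), then card_ptr.
      const uint8_t* evicted = keys[slot];
      keys[slot] = card_ptr;
      counts[slot] = 1;
      return evicted;          // null if the slot was empty
    }
    if (++counts[slot] < HOT) {
      return 0;                // still cold: "null, defer == false"
    }
    // Hot: park card_ptr and hand back whatever it displaced from the ring.
    const uint8_t* displaced = ring[ring_pos];
    ring[ring_pos] = card_ptr;
    ring_pos = (ring_pos + 1) % N;
    *defer = true;             // "defer == true": caller skips card_ptr
    return displaced;          // non-null only on ring overflow
  }
};

int main() {
  static uint8_t dummy_card[16];
  ToyHotCardCache c;
  bool defer = false;
  for (int i = 0; i < 5; ++i) c.cache_insert(dummy_card, &defer);
  return defer ? 0 : 1;        // repeated inserts of the same card park it
}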
class HRRSStatsIter: public HeapRegionClosure { class HRRSStatsIter: public HeapRegionClosure {
size_t _occupied; size_t _occupied;
size_t _total_mem_sz; size_t _total_mem_sz;

View File

@ -157,6 +157,10 @@ protected:
} }
} }
// The routine that performs the actual work of refining a dirty
// card.
void concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i);
protected: protected:
template <class T> void write_ref_nv(HeapRegion* from, T* p); template <class T> void write_ref_nv(HeapRegion* from, T* p);
template <class T> void par_write_ref_nv(HeapRegion* from, T* p, int tid); template <class T> void par_write_ref_nv(HeapRegion* from, T* p, int tid);

View File

@ -37,11 +37,7 @@
develop(intx, G1MarkingOverheadPercent, 0, \ develop(intx, G1MarkingOverheadPercent, 0, \
"Overhead of concurrent marking") \ "Overhead of concurrent marking") \
\ \
develop(bool, G1AccountConcurrentOverhead, false, \ product(uintx, G1YoungGenSize, 0, \
"Whether soft real-time compliance in G1 will take into account" \
"concurrent overhead") \
\
product(intx, G1YoungGenSize, 0, \
"Size of the G1 young generation, 0 is the adaptive policy") \ "Size of the G1 young generation, 0 is the adaptive policy") \
\ \
develop(bool, G1Gen, true, \ develop(bool, G1Gen, true, \
@ -74,6 +70,12 @@
diagnostic(bool, G1SummarizeRSetStats, false, \ diagnostic(bool, G1SummarizeRSetStats, false, \
"Summarize remembered set processing info") \ "Summarize remembered set processing info") \
\ \
diagnostic(intx, G1SummarizeRSetStatsPeriod, 0, \
"The period (in number of GCs) at which we will generate " \
"update buffer processing info " \
"(0 means do not periodically generate this info); " \
"it also requires -XX:+G1SummarizeRSetStats") \
\
diagnostic(bool, G1SummarizeZFStats, false, \ diagnostic(bool, G1SummarizeZFStats, false, \
"Summarize zero-filling info") \ "Summarize zero-filling info") \
\ \
@ -167,17 +169,20 @@
develop(bool, G1DisablePostBarrier, false, \ develop(bool, G1DisablePostBarrier, false, \
"Disable generation of post-barrier (i.e., RS barrier) ") \ "Disable generation of post-barrier (i.e., RS barrier) ") \
\ \
product(intx, G1DirtyCardQueueMax, 30, \ product(intx, G1UpdateBufferSize, 256, \
"Maximum number of completed RS buffers before mutator threads " \ "Size of an update buffer") \
"start processing them.") \ \
product(intx, G1UpdateBufferQueueProcessingThreshold, 5, \
"Number of enqueued update buffers that will " \
"trigger concurrent processing") \
\
product(intx, G1UpdateBufferQueueMaxLength, 30, \
"Maximum number of enqueued update buffers before mutator " \
"threads start processing new ones instead of enqueueing them") \
\ \
develop(intx, G1ConcRSLogCacheSize, 10, \ develop(intx, G1ConcRSLogCacheSize, 10, \
"Log base 2 of the length of conc RS hot-card cache.") \ "Log base 2 of the length of conc RS hot-card cache.") \
\ \
develop(bool, G1ConcRSCountTraversals, false, \
"If true, gather data about the number of times CR traverses " \
"cards ") \
\
develop(intx, G1ConcRSHotCardLimit, 4, \ develop(intx, G1ConcRSHotCardLimit, 4, \
"The threshold that defines (>=) a hot card.") \ "The threshold that defines (>=) a hot card.") \
\ \
@ -241,6 +246,9 @@
"If non-0 is the size of the G1 survivor space, " \ "If non-0 is the size of the G1 survivor space, " \
"otherwise SurvivorRatio is used to determine the size") \ "otherwise SurvivorRatio is used to determine the size") \
\ \
product(uintx, G1HeapRegionSize, 0, \
"Size of the G1 regions.") \
\
experimental(bool, G1ParallelRSetUpdatingEnabled, false, \ experimental(bool, G1ParallelRSetUpdatingEnabled, false, \
"Enables the parallelization of remembered set updating " \ "Enables the parallelization of remembered set updating " \
"during evacuation pauses") \ "during evacuation pauses") \
@ -251,6 +259,13 @@
\ \
product(uintx, G1ParallelRSetThreads, 0, \ product(uintx, G1ParallelRSetThreads, 0, \
"If non-0 is the number of parallel rem set update threads, " \ "If non-0 is the number of parallel rem set update threads, " \
"otherwise the value is determined ergonomically.") "otherwise the value is determined ergonomically.") \
\
develop(intx, G1CardCountCacheExpandThreshold, 16, \
"Expand the card count cache if the number of collisions for " \
"a particular entry exceeds this value.") \
\
develop(bool, G1VerifyCTCleanup, false, \
"Verify card table cleanup.")
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG) G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
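For orientation, a hypothetical command line exercising the renamed update-buffer flags and the new periodic remembered-set summary (the Unlock* switches and the k/m/g suffix handling are assumed from JVMs of this era and are not part of the patch):

java -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC \
     -XX:G1UpdateBufferSize=512 \
     -XX:G1UpdateBufferQueueProcessingThreshold=8 \
     -XX:G1UpdateBufferQueueMaxLength=60 \
     -XX:G1HeapRegionSize=2m \
     -XX:+UnlockDiagnosticVMOptions \
     -XX:+G1SummarizeRSetStats -XX:G1SummarizeRSetStatsPeriod=10 \
     MyApp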

View File

@ -25,6 +25,12 @@
#include "incls/_precompiled.incl" #include "incls/_precompiled.incl"
#include "incls/_heapRegion.cpp.incl" #include "incls/_heapRegion.cpp.incl"
int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
int HeapRegion::GrainBytes = 0;
int HeapRegion::GrainWords = 0;
int HeapRegion::CardsPerRegion = 0;
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1, HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
HeapRegion* hr, OopClosure* cl, HeapRegion* hr, OopClosure* cl,
CardTableModRefBS::PrecisionStyle precision, CardTableModRefBS::PrecisionStyle precision,
@ -231,6 +237,73 @@ void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
} }
} }
// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE ( 1024 * 1024 )
// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE ( 32 * 1024 * 1024 )
// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER 2048
void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
// region_size in bytes
uintx region_size = G1HeapRegionSize;
if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
// We base the automatic calculation on the min heap size. This
// can be problematic if the spread between min and max is quite
// wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
// the max size, the region size might be way too large for the
// min size. Either way, some users might have to set the region
// size manually for some -Xms / -Xmx combos.
region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
(uintx) MIN_REGION_SIZE);
}
int region_size_log = log2_long((jlong) region_size);
// Recalculate the region size to make sure it's a power of
// 2. This means that region_size is the largest power of 2 that's
// <= what we've calculated so far.
region_size = 1 << region_size_log;
// Now make sure that we don't go over or under our limits.
if (region_size < MIN_REGION_SIZE) {
region_size = MIN_REGION_SIZE;
} else if (region_size > MAX_REGION_SIZE) {
region_size = MAX_REGION_SIZE;
}
// And recalculate the log.
region_size_log = log2_long((jlong) region_size);
// Now, set up the globals.
guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
LogOfHRGrainBytes = region_size_log;
guarantee(LogOfHRGrainWords == 0, "we should only set it once");
LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;
guarantee(GrainBytes == 0, "we should only set it once");
// The cast to int is safe, given that we've bounded region_size by
// MIN_REGION_SIZE and MAX_REGION_SIZE.
GrainBytes = (int) region_size;
guarantee(GrainWords == 0, "we should only set it once");
GrainWords = GrainBytes >> LogHeapWordSize;
guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity");
guarantee(CardsPerRegion == 0, "we should only set it once");
CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
void HeapRegion::reset_after_compaction() { void HeapRegion::reset_after_compaction() {
G1OffsetTableContigSpace::reset_after_compaction(); G1OffsetTableContigSpace::reset_after_compaction();
// After a compaction the mark bitmap is invalid, so we must // After a compaction the mark bitmap is invalid, so we must
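As a quick cross-check of setup_heap_region_size() above, here is a small standalone sketch (plain C++, not HotSpot code) of the same calculation: aim for roughly TARGET_REGION_NUMBER regions based on the minimum heap size, round down to a power of two, and clamp to the 1 MB to 32 MB bounds.

#include <cstdint>
#include <cstdio>

static const uint64_t MIN_REGION_SIZE      = 1024 * 1024;        // 1 MB
static const uint64_t MAX_REGION_SIZE      = 32 * 1024 * 1024;   // 32 MB
static const uint64_t TARGET_REGION_NUMBER = 2048;

// Mirrors setup_heap_region_size(): derive a power-of-two region size
// from the minimum heap size, clamped to [MIN_REGION_SIZE, MAX_REGION_SIZE].
static uint64_t region_size_for(uint64_t min_heap_size) {
  uint64_t region_size = min_heap_size / TARGET_REGION_NUMBER;
  if (region_size < MIN_REGION_SIZE) region_size = MIN_REGION_SIZE;

  // Largest power of two <= region_size.
  uint64_t pow2 = MIN_REGION_SIZE;
  while (pow2 * 2 <= region_size) pow2 *= 2;
  region_size = pow2;

  if (region_size > MAX_REGION_SIZE) region_size = MAX_REGION_SIZE;
  return region_size;
}

int main() {
  // -Xms128m lands on the 1 MB floor; -Xms8g picks 4 MB regions.
  std::printf("Xms 128m -> region %llu bytes\n",
              (unsigned long long) region_size_for(128ull << 20));
  std::printf("Xms 8g   -> region %llu bytes\n",
              (unsigned long long) region_size_for(8ull << 30));
  return 0;
}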
View File
@ -297,15 +297,24 @@ class HeapRegion: public G1OffsetTableContigSpace {
HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray, HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr, bool is_zeroed); MemRegion mr, bool is_zeroed);
enum SomePublicConstants { static int LogOfHRGrainBytes;
// HeapRegions are GrainBytes-aligned static int LogOfHRGrainWords;
// and have sizes that are multiples of GrainBytes. // The normal type of these should be size_t. However, they used to
LogOfHRGrainBytes = 20, // be members of an enum before and they are assumed by the
LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize, // compilers to be ints. To avoid going and fixing all their uses,
GrainBytes = 1 << LogOfHRGrainBytes, // I'm declaring them as ints. I'm not anticipating heap region
GrainWords = 1 <<LogOfHRGrainWords, // sizes to reach anywhere near 2g, so using an int here is safe.
MaxAge = 2, NoOfAges = MaxAge+1 static int GrainBytes;
}; static int GrainWords;
static int CardsPerRegion;
// It sets up the heap region size (GrainBytes / GrainWords), as
// well as other related fields that are based on the heap region
// size (LogOfHRGrainBytes / LogOfHRGrainWords /
// CardsPerRegion). All those fields are considered constant
// throughout the JVM's execution, therefore they should only be set
// up once during initialization time.
static void setup_heap_region_size(uintx min_heap_size);
enum ClaimValues { enum ClaimValues {
InitialClaimValue = 0, InitialClaimValue = 0,
View File
@ -57,10 +57,6 @@ class PerRegionTable: public CHeapObj {
#endif // _MSC_VER #endif // _MSC_VER
enum SomePrivateConstants {
CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift
};
protected: protected:
// We need access in order to union things into the base table. // We need access in order to union things into the base table.
BitMap* bm() { return &_bm; } BitMap* bm() { return &_bm; }
@ -76,7 +72,7 @@ protected:
#if PRT_COUNT_OCCUPIED #if PRT_COUNT_OCCUPIED
_occupied(0), _occupied(0),
#endif #endif
_bm(CardsPerRegion, false /* in-resource-area */) _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */)
{} {}
static void free(PerRegionTable* prt) { static void free(PerRegionTable* prt) {
@ -144,7 +140,8 @@ protected:
CardIdx_t from_card = (CardIdx_t) CardIdx_t from_card = (CardIdx_t)
hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize); hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
assert(0 <= from_card && from_card < CardsPerRegion, "Must be in range."); assert(0 <= from_card && from_card < HeapRegion::CardsPerRegion,
"Must be in range.");
add_card_work(from_card, par); add_card_work(from_card, par);
} }
} }
@ -631,7 +628,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
uintptr_t(from_hr->bottom()) uintptr_t(from_hr->bottom())
>> CardTableModRefBS::card_shift; >> CardTableModRefBS::card_shift;
CardIdx_t card_index = from_card - from_hr_bot_card_index; CardIdx_t card_index = from_card - from_hr_bot_card_index;
assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion, assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
"Must be in range."); "Must be in range.");
if (G1HRRSUseSparseTable && if (G1HRRSUseSparseTable &&
_sparse_table.add_card(from_hrs_ind, card_index)) { _sparse_table.add_card(from_hrs_ind, card_index)) {
@ -922,7 +919,7 @@ size_t OtherRegionsTable::occ_fine() const {
} }
size_t OtherRegionsTable::occ_coarse() const { size_t OtherRegionsTable::occ_coarse() const {
return (_n_coarse_entries * PosParPRT::CardsPerRegion); return (_n_coarse_entries * HeapRegion::CardsPerRegion);
} }
size_t OtherRegionsTable::occ_sparse() const { size_t OtherRegionsTable::occ_sparse() const {
@ -1049,7 +1046,8 @@ bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const
uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift; uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
assert(from_card >= hr_bot_card_index, "Inv"); assert(from_card >= hr_bot_card_index, "Inv");
CardIdx_t card_index = from_card - hr_bot_card_index; CardIdx_t card_index = from_card - hr_bot_card_index;
assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion, "Must be in range."); assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
"Must be in range.");
return _sparse_table.contains_card(hr_ind, card_index); return _sparse_table.contains_card(hr_ind, card_index);
} }
@ -1176,7 +1174,7 @@ void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
_is = Sparse; _is = Sparse;
// Set these values so that we increment to the first region. // Set these values so that we increment to the first region.
_coarse_cur_region_index = -1; _coarse_cur_region_index = -1;
_coarse_cur_region_cur_card = (PosParPRT::CardsPerRegion-1);; _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);;
_cur_region_cur_card = 0; _cur_region_cur_card = 0;
@ -1195,7 +1193,7 @@ bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
// Go to the next card. // Go to the next card.
_coarse_cur_region_cur_card++; _coarse_cur_region_cur_card++;
// Was the last the last card in the current region? // Was the last the last card in the current region?
if (_coarse_cur_region_cur_card == PosParPRT::CardsPerRegion) { if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
// Yes: find the next region. This may leave _coarse_cur_region_index // Yes: find the next region. This may leave _coarse_cur_region_index
// Set to the last index, in which case there are no more coarse // Set to the last index, in which case there are no more coarse
// regions. // regions.
@ -1232,7 +1230,7 @@ bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
_fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1); _fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1);
} }
while (!fine_has_next()) { while (!fine_has_next()) {
if (_cur_region_cur_card == PosParPRT::CardsPerRegion) { if (_cur_region_cur_card == (size_t) HeapRegion::CardsPerRegion) {
_cur_region_cur_card = 0; _cur_region_cur_card = 0;
_fine_cur_prt = _fine_cur_prt->next(); _fine_cur_prt = _fine_cur_prt->next();
} }
@ -1255,7 +1253,7 @@ bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
bool HeapRegionRemSetIterator::fine_has_next() { bool HeapRegionRemSetIterator::fine_has_next() {
return return
_fine_cur_prt != NULL && _fine_cur_prt != NULL &&
_cur_region_cur_card < PosParPRT::CardsPerRegion; _cur_region_cur_card < (size_t) HeapRegion::CardsPerRegion;
} }
bool HeapRegionRemSetIterator::has_next(size_t& card_index) { bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
View File
@ -347,7 +347,7 @@ CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) { size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
return return
_heap_bot_card_ind _heap_bot_card_ind
+ (_rsht->entry(_bl_ind)->r_ind() * CardsPerRegion) + (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion)
+ ci; + ci;
} }
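Both files above rely on the same card arithmetic, now expressed through HeapRegion::CardsPerRegion instead of private enum copies. A small standalone sketch of that arithmetic, assuming 512-byte cards (card_shift = 9) and a 1 MB region; neither constant is stated in this diff:

#include <cassert>
#include <cstdint>
#include <cstdio>

static const int      CARD_SHIFT       = 9;                          // assumed 512-byte cards
static const uint64_t GRAIN_BYTES      = 1024 * 1024;                // assumed 1 MB region
static const uint64_t CARDS_PER_REGION = GRAIN_BYTES >> CARD_SHIFT;  // 2048 cards per region

// Card index of 'addr' relative to the bottom of its region, as in
// OtherRegionsTable::add_reference() above.
static uint64_t card_in_region(uint64_t region_bottom, uint64_t addr) {
  uint64_t card_index = (addr >> CARD_SHIFT) - (region_bottom >> CARD_SHIFT);
  assert(card_index < CARDS_PER_REGION && "Must be in range.");
  return card_index;
}

// Flattened heap-wide card index, as in RSHashTableIter::compute_card_ind() above:
// start of the heap's card range + region index * cards-per-region + card-in-region.
static uint64_t flattened_card_ind(uint64_t heap_bot_card_ind,
                                   uint64_t region_index,
                                   uint64_t card_in_region_ind) {
  return heap_bot_card_ind + region_index * CARDS_PER_REGION + card_in_region_ind;
}

int main() {
  uint64_t bottom = 0x40000000ull;                   // hypothetical region base
  uint64_t ci     = card_in_region(bottom, bottom + 4096);
  std::printf("cards/region = %llu, card in region = %llu, flattened = %llu\n",
              (unsigned long long) CARDS_PER_REGION,
              (unsigned long long) ci,
              (unsigned long long) flattened_card_ind(0, 3, ci));
  return 0;
}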
View File
@ -172,10 +172,6 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
RSHashTable* _rsht; RSHashTable* _rsht;
size_t _heap_bot_card_ind; size_t _heap_bot_card_ind;
enum SomePrivateConstants {
CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift
};
// If the bucket list pointed to by _bl_ind contains a card, sets // If the bucket list pointed to by _bl_ind contains a card, sets
// _bl_ind to the index of that entry, and returns the card. // _bl_ind to the index of that entry, and returns the card.
// Otherwise, returns SparseEntry::NullEntry. // Otherwise, returns SparseEntry::NullEntry.
View File
@ -145,6 +145,7 @@ concurrentMarkSweepGeneration.cpp genOopClosures.inline.hpp
concurrentMarkSweepGeneration.cpp globals_extension.hpp concurrentMarkSweepGeneration.cpp globals_extension.hpp
concurrentMarkSweepGeneration.cpp handles.inline.hpp concurrentMarkSweepGeneration.cpp handles.inline.hpp
concurrentMarkSweepGeneration.cpp isGCActiveMark.hpp concurrentMarkSweepGeneration.cpp isGCActiveMark.hpp
concurrentMarkSweepGeneration.cpp iterator.hpp
concurrentMarkSweepGeneration.cpp java.hpp concurrentMarkSweepGeneration.cpp java.hpp
concurrentMarkSweepGeneration.cpp jvmtiExport.hpp concurrentMarkSweepGeneration.cpp jvmtiExport.hpp
concurrentMarkSweepGeneration.cpp oop.inline.hpp concurrentMarkSweepGeneration.cpp oop.inline.hpp
View File
@ -45,11 +45,14 @@ concurrentG1Refine.cpp concurrentG1Refine.hpp
concurrentG1Refine.cpp concurrentG1RefineThread.hpp concurrentG1Refine.cpp concurrentG1RefineThread.hpp
concurrentG1Refine.cpp copy.hpp concurrentG1Refine.cpp copy.hpp
concurrentG1Refine.cpp g1CollectedHeap.inline.hpp concurrentG1Refine.cpp g1CollectedHeap.inline.hpp
concurrentG1Refine.cpp g1CollectorPolicy.hpp
concurrentG1Refine.cpp g1RemSet.hpp concurrentG1Refine.cpp g1RemSet.hpp
concurrentG1Refine.cpp space.inline.hpp concurrentG1Refine.cpp space.inline.hpp
concurrentG1Refine.cpp heapRegionSeq.inline.hpp
concurrentG1Refine.hpp globalDefinitions.hpp concurrentG1Refine.hpp globalDefinitions.hpp
concurrentG1Refine.hpp allocation.hpp concurrentG1Refine.hpp allocation.hpp
concurrentG1Refine.hpp cardTableModRefBS.hpp
concurrentG1Refine.hpp thread.hpp concurrentG1Refine.hpp thread.hpp
concurrentG1RefineThread.cpp concurrentG1Refine.hpp concurrentG1RefineThread.cpp concurrentG1Refine.hpp
@ -61,14 +64,12 @@ concurrentG1RefineThread.cpp mutexLocker.hpp
concurrentG1RefineThread.cpp resourceArea.hpp concurrentG1RefineThread.cpp resourceArea.hpp
concurrentG1RefineThread.hpp concurrentGCThread.hpp concurrentG1RefineThread.hpp concurrentGCThread.hpp
concurrentG1RefineThread.hpp coTracker.hpp
concurrentMark.cpp concurrentMark.hpp concurrentMark.cpp concurrentMark.hpp
concurrentMark.cpp concurrentMarkThread.inline.hpp concurrentMark.cpp concurrentMarkThread.inline.hpp
concurrentMark.cpp g1CollectedHeap.inline.hpp concurrentMark.cpp g1CollectedHeap.inline.hpp
concurrentMark.cpp g1CollectorPolicy.hpp concurrentMark.cpp g1CollectorPolicy.hpp
concurrentMark.cpp g1RemSet.hpp concurrentMark.cpp g1RemSet.hpp
concurrentMark.cpp gcOverheadReporter.hpp
concurrentMark.cpp genOopClosures.inline.hpp concurrentMark.cpp genOopClosures.inline.hpp
concurrentMark.cpp heapRegionRemSet.hpp concurrentMark.cpp heapRegionRemSet.hpp
concurrentMark.cpp heapRegionSeq.inline.hpp concurrentMark.cpp heapRegionSeq.inline.hpp
@ -79,7 +80,6 @@ concurrentMark.cpp referencePolicy.hpp
concurrentMark.cpp resourceArea.hpp concurrentMark.cpp resourceArea.hpp
concurrentMark.cpp symbolTable.hpp concurrentMark.cpp symbolTable.hpp
concurrentMark.hpp coTracker.hpp
concurrentMark.hpp heapRegion.hpp concurrentMark.hpp heapRegion.hpp
concurrentMark.hpp taskqueue.hpp concurrentMark.hpp taskqueue.hpp
@ -104,7 +104,6 @@ concurrentZFThread.cpp mutexLocker.hpp
concurrentZFThread.cpp space.inline.hpp concurrentZFThread.cpp space.inline.hpp
concurrentZFThread.hpp concurrentGCThread.hpp concurrentZFThread.hpp concurrentGCThread.hpp
concurrentZFThread.hpp coTracker.hpp
dirtyCardQueue.cpp atomic.hpp dirtyCardQueue.cpp atomic.hpp
dirtyCardQueue.cpp dirtyCardQueue.hpp dirtyCardQueue.cpp dirtyCardQueue.hpp
@ -144,7 +143,6 @@ g1CollectedHeap.cpp g1RemSet.inline.hpp
g1CollectedHeap.cpp g1OopClosures.inline.hpp g1CollectedHeap.cpp g1OopClosures.inline.hpp
g1CollectedHeap.cpp genOopClosures.inline.hpp g1CollectedHeap.cpp genOopClosures.inline.hpp
g1CollectedHeap.cpp gcLocker.inline.hpp g1CollectedHeap.cpp gcLocker.inline.hpp
g1CollectedHeap.cpp gcOverheadReporter.hpp
g1CollectedHeap.cpp generationSpec.hpp g1CollectedHeap.cpp generationSpec.hpp
g1CollectedHeap.cpp heapRegionRemSet.hpp g1CollectedHeap.cpp heapRegionRemSet.hpp
g1CollectedHeap.cpp heapRegionSeq.inline.hpp g1CollectedHeap.cpp heapRegionSeq.inline.hpp
@ -167,6 +165,7 @@ g1CollectedHeap.inline.hpp g1CollectedHeap.hpp
g1CollectedHeap.inline.hpp heapRegionSeq.hpp g1CollectedHeap.inline.hpp heapRegionSeq.hpp
g1CollectedHeap.inline.hpp taskqueue.hpp g1CollectedHeap.inline.hpp taskqueue.hpp
g1CollectorPolicy.cpp arguments.hpp
g1CollectorPolicy.cpp concurrentG1Refine.hpp g1CollectorPolicy.cpp concurrentG1Refine.hpp
g1CollectorPolicy.cpp concurrentMark.hpp g1CollectorPolicy.cpp concurrentMark.hpp
g1CollectorPolicy.cpp concurrentMarkThread.inline.hpp g1CollectorPolicy.cpp concurrentMarkThread.inline.hpp
View File
@ -253,10 +253,11 @@ psParallelCompact.cpp gcCause.hpp
psParallelCompact.cpp gcLocker.inline.hpp psParallelCompact.cpp gcLocker.inline.hpp
psParallelCompact.cpp gcTaskManager.hpp psParallelCompact.cpp gcTaskManager.hpp
psParallelCompact.cpp isGCActiveMark.hpp psParallelCompact.cpp isGCActiveMark.hpp
psParallelCompact.cpp management.hpp
psParallelCompact.cpp memoryService.hpp
psParallelCompact.cpp methodDataOop.hpp
psParallelCompact.cpp oop.inline.hpp psParallelCompact.cpp oop.inline.hpp
psParallelCompact.cpp oop.pcgc.inline.hpp psParallelCompact.cpp oop.pcgc.inline.hpp
psParallelCompact.cpp memoryService.hpp
psParallelCompact.cpp management.hpp
psParallelCompact.cpp parallelScavengeHeap.inline.hpp psParallelCompact.cpp parallelScavengeHeap.inline.hpp
psParallelCompact.cpp pcTasks.hpp psParallelCompact.cpp pcTasks.hpp
psParallelCompact.cpp psMarkSweep.hpp psParallelCompact.cpp psMarkSweep.hpp
View File
@ -35,12 +35,6 @@ concurrentGCThread.cpp systemDictionary.hpp
concurrentGCThread.hpp thread.hpp concurrentGCThread.hpp thread.hpp
coTracker.hpp globalDefinitions.hpp
coTracker.hpp numberSeq.hpp
coTracker.cpp coTracker.hpp
coTracker.cpp os.hpp
allocationStats.cpp allocationStats.hpp allocationStats.cpp allocationStats.hpp
allocationStats.cpp ostream.hpp allocationStats.cpp ostream.hpp
@ -54,13 +48,6 @@ gcAdaptivePolicyCounters.hpp gcPolicyCounters.hpp
gcAdaptivePolicyCounters.cpp resourceArea.hpp gcAdaptivePolicyCounters.cpp resourceArea.hpp
gcAdaptivePolicyCounters.cpp gcAdaptivePolicyCounters.hpp gcAdaptivePolicyCounters.cpp gcAdaptivePolicyCounters.hpp
gcOverheadReporter.cpp allocation.inline.hpp
gcOverheadReporter.cpp concurrentGCThread.hpp
gcOverheadReporter.cpp coTracker.hpp
gcOverheadReporter.cpp gcOverheadReporter.hpp
gcOverheadReporter.cpp ostream.hpp
gcOverheadReporter.cpp thread_<os_family>.inline.hpp
gSpaceCounters.cpp generation.hpp gSpaceCounters.cpp generation.hpp
gSpaceCounters.cpp resourceArea.hpp gSpaceCounters.cpp resourceArea.hpp
gSpaceCounters.cpp gSpaceCounters.hpp gSpaceCounters.cpp gSpaceCounters.hpp
View File
@ -58,7 +58,6 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
ParCompactionManager* cm = ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which); ParCompactionManager::gc_thread_compaction_manager(which);
// cm->allocate_stacks();
assert(cm->stacks_have_been_allocated(), assert(cm->stacks_have_been_allocated(),
"Stack space has not been allocated"); "Stack space has not been allocated");
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@ -129,7 +128,6 @@ void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
ParCompactionManager* cm = ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which); ParCompactionManager::gc_thread_compaction_manager(which);
// cm->allocate_stacks();
assert(cm->stacks_have_been_allocated(), assert(cm->stacks_have_been_allocated(),
"Stack space has not been allocated"); "Stack space has not been allocated");
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
View File
@ -61,12 +61,16 @@ ParCompactionManager::ParCompactionManager() :
int size = int size =
(SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads; (SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
_revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true); _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
// From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will
// have to do for now until we are able to investigate a more optimal setting.
_revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
} }
ParCompactionManager::~ParCompactionManager() { ParCompactionManager::~ParCompactionManager() {
delete _overflow_stack; delete _overflow_stack;
delete _revisit_klass_stack; delete _revisit_klass_stack;
delete _revisit_mdo_stack;
// _manager_array and _stack_array are statics // _manager_array and _stack_array are statics
// shared with all instances of ParCompactionManager // shared with all instances of ParCompactionManager
// should not be deallocated. // should not be deallocated.
@ -195,6 +199,7 @@ ParCompactionManager::gc_thread_compaction_manager(int index) {
void ParCompactionManager::reset() { void ParCompactionManager::reset() {
for(uint i=0; i<ParallelGCThreads+1; i++) { for(uint i=0; i<ParallelGCThreads+1; i++) {
manager_array(i)->revisit_klass_stack()->clear(); manager_array(i)->revisit_klass_stack()->clear();
manager_array(i)->revisit_mdo_stack()->clear();
} }
} }
@ -296,6 +301,7 @@ void ParCompactionManager::drain_region_stacks() {
#ifdef ASSERT #ifdef ASSERT
bool ParCompactionManager::stacks_have_been_allocated() { bool ParCompactionManager::stacks_have_been_allocated() {
return (revisit_klass_stack()->data_addr() != NULL); return (revisit_klass_stack()->data_addr() != NULL &&
revisit_mdo_stack()->data_addr() != NULL);
} }
#endif #endif
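The hunks above give each ParCompactionManager a second memoization stack, _revisit_mdo_stack, next to the existing klass stack. A simplified stand-alone sketch of that pattern (per-worker stacks filled during strong marking and drained once marking finishes); the stub types are placeholders, not HotSpot's:

#include <cstdio>
#include <vector>

struct DataLayoutStub { int id; };        // stand-in for DataLayout*

// Per-worker compaction manager with a revisit stack, as in the diff above.
struct WorkerStub {
  std::vector<DataLayoutStub*> revisit_mdo_stack;
  void remember_mdo(DataLayoutStub* p) { revisit_mdo_stack.push_back(p); }
};

// After strong marking, every worker's memoized MDOs are revisited once
// so their weak references can be cleared (follow_mdo_weak_refs()).
static void follow_mdo_weak_refs(std::vector<WorkerStub>& workers) {
  for (size_t i = 0; i < workers.size(); ++i) {
    std::printf("Revisit MDO stack[%zu] length = %zu\n",
                i, workers[i].revisit_mdo_stack.size());
    for (DataLayoutStub* p : workers[i].revisit_mdo_stack) {
      (void) p;  // here the real code calls p->follow_weak_refs(is_alive)
    }
    workers[i].revisit_mdo_stack.clear();  // the real code clears in reset()
  }
}

int main() {
  std::vector<WorkerStub> workers(2);
  DataLayoutStub a{1}, b{2};
  workers[0].remember_mdo(&a);
  workers[1].remember_mdo(&b);
  follow_mdo_weak_refs(workers);
  return 0;
}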
View File
@ -93,6 +93,7 @@ class ParCompactionManager : public CHeapObj {
#if 1 // does this happen enough to need a per thread stack? #if 1 // does this happen enough to need a per thread stack?
GrowableArray<Klass*>* _revisit_klass_stack; GrowableArray<Klass*>* _revisit_klass_stack;
GrowableArray<DataLayout*>* _revisit_mdo_stack;
#endif #endif
static ParMarkBitMap* _mark_bitmap; static ParMarkBitMap* _mark_bitmap;
@ -154,6 +155,7 @@ class ParCompactionManager : public CHeapObj {
#if 1 #if 1
// Probably stays as a growable array // Probably stays as a growable array
GrowableArray<Klass*>* revisit_klass_stack() { return _revisit_klass_stack; } GrowableArray<Klass*>* revisit_klass_stack() { return _revisit_klass_stack; }
GrowableArray<DataLayout*>* revisit_mdo_stack() { return _revisit_mdo_stack; }
#endif #endif
// Save oop for later processing. Must not fail. // Save oop for later processing. Must not fail.
View File
@ -482,6 +482,9 @@ void PSMarkSweep::allocate_stacks() {
int size = SystemDictionary::number_of_classes() * 2; int size = SystemDictionary::number_of_classes() * 2;
_revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true); _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
// (#klass/k)^2, for k ~ 10 appears a better setting, but this will have to do for
// now until we investigate a more optimal setting.
_revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
} }
@ -495,6 +498,7 @@ void PSMarkSweep::deallocate_stacks() {
delete _marking_stack; delete _marking_stack;
delete _revisit_klass_stack; delete _revisit_klass_stack;
delete _revisit_mdo_stack;
} }
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) { void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
@ -540,6 +544,10 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
follow_weak_klass_links(); follow_weak_klass_links();
assert(_marking_stack->is_empty(), "just drained"); assert(_marking_stack->is_empty(), "just drained");
// Visit memoized mdo's and clear unmarked weak refs
follow_mdo_weak_refs();
assert(_marking_stack->is_empty(), "just drained");
// Visit symbol and interned string tables and delete unmarked oops // Visit symbol and interned string tables and delete unmarked oops
SymbolTable::unlink(is_alive_closure()); SymbolTable::unlink(is_alive_closure());
StringTable::unlink(is_alive_closure()); StringTable::unlink(is_alive_closure());
View File
@ -2378,7 +2378,10 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
// Update subklass/sibling/implementor links of live klasses // Update subklass/sibling/implementor links of live klasses
// revisit_klass_stack is used in follow_weak_klass_links(). // revisit_klass_stack is used in follow_weak_klass_links().
follow_weak_klass_links(cm); follow_weak_klass_links();
// Revisit memoized MDO's and clear any unmarked weak refs
follow_mdo_weak_refs();
// Visit symbol and interned string tables and delete unmarked oops // Visit symbol and interned string tables and delete unmarked oops
SymbolTable::unlink(is_alive_closure()); SymbolTable::unlink(is_alive_closure());
@ -2721,17 +2724,25 @@ void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
} }
void void
PSParallelCompact::follow_weak_klass_links(ParCompactionManager* serial_cm) { PSParallelCompact::follow_weak_klass_links() {
// All klasses on the revisit stack are marked at this point. // All klasses on the revisit stack are marked at this point.
// Update and follow all subklass, sibling and implementor links. // Update and follow all subklass, sibling and implementor links.
for (uint i = 0; i < ParallelGCThreads+1; i++) { if (PrintRevisitStats) {
gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
}
for (uint i = 0; i < ParallelGCThreads + 1; i++) {
ParCompactionManager* cm = ParCompactionManager::manager_array(i); ParCompactionManager* cm = ParCompactionManager::manager_array(i);
KeepAliveClosure keep_alive_closure(cm); KeepAliveClosure keep_alive_closure(cm);
for (int i = 0; i < cm->revisit_klass_stack()->length(); i++) { int length = cm->revisit_klass_stack()->length();
cm->revisit_klass_stack()->at(i)->follow_weak_klass_links( if (PrintRevisitStats) {
gclog_or_tty->print_cr("Revisit klass stack[%d] length = %d", i, length);
}
for (int j = 0; j < length; j++) {
cm->revisit_klass_stack()->at(j)->follow_weak_klass_links(
is_alive_closure(), is_alive_closure(),
&keep_alive_closure); &keep_alive_closure);
} }
// revisit_klass_stack is cleared in reset()
follow_stack(cm); follow_stack(cm);
} }
} }
@ -2741,6 +2752,35 @@ PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
cm->revisit_klass_stack()->push(k); cm->revisit_klass_stack()->push(k);
} }
#if ( defined(COMPILER1) || defined(COMPILER2) )
void PSParallelCompact::revisit_mdo(ParCompactionManager* cm, DataLayout* p) {
cm->revisit_mdo_stack()->push(p);
}
void PSParallelCompact::follow_mdo_weak_refs() {
// All strongly reachable oops have been marked at this point;
// we can visit and clear any weak references from MDO's which
// we memoized during the strong marking phase.
if (PrintRevisitStats) {
gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
}
for (uint i = 0; i < ParallelGCThreads + 1; i++) {
ParCompactionManager* cm = ParCompactionManager::manager_array(i);
GrowableArray<DataLayout*>* rms = cm->revisit_mdo_stack();
int length = rms->length();
if (PrintRevisitStats) {
gclog_or_tty->print_cr("Revisit MDO stack[%d] length = %d", i, length);
}
for (int j = 0; j < length; j++) {
rms->at(j)->follow_weak_refs(is_alive_closure());
}
// revisit_mdo_stack is cleared in reset()
follow_stack(cm);
}
}
#endif // ( COMPILER1 || COMPILER2 )
#ifdef VALIDATE_MARK_SWEEP #ifdef VALIDATE_MARK_SWEEP
void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) { void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
View File
@ -901,7 +901,8 @@ class PSParallelCompact : AllStatic {
static void marking_phase(ParCompactionManager* cm, static void marking_phase(ParCompactionManager* cm,
bool maximum_heap_compaction); bool maximum_heap_compaction);
static void follow_stack(ParCompactionManager* cm); static void follow_stack(ParCompactionManager* cm);
static void follow_weak_klass_links(ParCompactionManager* cm); static void follow_weak_klass_links();
static void follow_mdo_weak_refs();
template <class T> static inline void adjust_pointer(T* p, bool is_root); template <class T> static inline void adjust_pointer(T* p, bool is_root);
static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); } static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
@ -1221,6 +1222,9 @@ class PSParallelCompact : AllStatic {
// Update subklass/sibling/implementor links at end of marking. // Update subklass/sibling/implementor links at end of marking.
static void revisit_weak_klass_link(ParCompactionManager* cm, Klass* k); static void revisit_weak_klass_link(ParCompactionManager* cm, Klass* k);
// Clear unmarked oops in MDOs at the end of marking.
static void revisit_mdo(ParCompactionManager* cm, DataLayout* p);
#ifndef PRODUCT #ifndef PRODUCT
// Debugging support. // Debugging support.
static const char* space_names[last_space_id]; static const char* space_names[last_space_id];
View File
@ -1,189 +0,0 @@
/*
* Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
# include "incls/_precompiled.incl"
# include "incls/_coTracker.cpp.incl"
COTracker* COTracker::_head = NULL;
double COTracker::_cpu_number = -1.0;
void
COTracker::resetPeriod(double now_sec, double vnow_sec) {
guarantee( _enabled, "invariant" );
_period_start_time_sec = now_sec;
_period_start_vtime_sec = vnow_sec;
}
void
COTracker::setConcOverhead(double time_stamp_sec,
double conc_overhead) {
guarantee( _enabled, "invariant" );
_conc_overhead = conc_overhead;
_time_stamp_sec = time_stamp_sec;
if (conc_overhead > 0.001)
_conc_overhead_seq.add(conc_overhead);
}
void
COTracker::reset(double starting_conc_overhead) {
guarantee( _enabled, "invariant" );
double now_sec = os::elapsedTime();
setConcOverhead(now_sec, starting_conc_overhead);
}
void
COTracker::start() {
guarantee( _enabled, "invariant" );
resetPeriod(os::elapsedTime(), os::elapsedVTime());
}
void
COTracker::update(bool force_end) {
assert( _enabled, "invariant" );
double end_time_sec = os::elapsedTime();
double elapsed_time_sec = end_time_sec - _period_start_time_sec;
if (force_end || elapsed_time_sec > _update_period_sec) {
// reached the end of the period
double end_vtime_sec = os::elapsedVTime();
double elapsed_vtime_sec = end_vtime_sec - _period_start_vtime_sec;
double conc_overhead = elapsed_vtime_sec / elapsed_time_sec;
setConcOverhead(end_time_sec, conc_overhead);
resetPeriod(end_time_sec, end_vtime_sec);
}
}
void
COTracker::updateForSTW(double start_sec, double end_sec) {
if (!_enabled)
return;
// During a STW pause, no concurrent GC thread has done any
// work. So, we can safely adjust the start of the current period by
// adding the duration of the STW pause to it, so that the STW pause
// doesn't affect the reading of the concurrent overhead (it's
// basically like excluding the time of the STW pause from the
// concurrent overhead calculation).
double stw_duration_sec = end_sec - start_sec;
guarantee( stw_duration_sec > 0.0, "invariant" );
if (outOfDate(start_sec))
_conc_overhead = 0.0;
else
_time_stamp_sec = end_sec;
_period_start_time_sec += stw_duration_sec;
_conc_overhead_seq = NumberSeq();
guarantee( os::elapsedTime() > _period_start_time_sec, "invariant" );
}
double
COTracker::predConcOverhead() {
if (_enabled) {
// tty->print(" %1.2lf", _conc_overhead_seq.maximum());
return _conc_overhead_seq.maximum();
} else {
// tty->print(" DD");
return 0.0;
}
}
void
COTracker::resetPred() {
_conc_overhead_seq = NumberSeq();
}
COTracker::COTracker(int group)
: _enabled(false),
_group(group),
_period_start_time_sec(-1.0),
_period_start_vtime_sec(-1.0),
_conc_overhead(-1.0),
_time_stamp_sec(-1.0),
_next(NULL) {
// GCOverheadReportingPeriodMS indicates how frequently the
// concurrent overhead will be recorded by the GC Overhead
// Reporter. We want to take readings less often than that. If we
// took readings more often than that, some of them might be lost.
_update_period_sec = ((double) GCOverheadReportingPeriodMS) / 1000.0 * 1.25;
_next = _head;
_head = this;
if (_cpu_number < 0.0)
_cpu_number = (double) os::processor_count();
}
// statics
void
COTracker::updateAllForSTW(double start_sec, double end_sec) {
for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
curr->updateForSTW(start_sec, end_sec);
}
}
double
COTracker::totalConcOverhead(double now_sec) {
double total_conc_overhead = 0.0;
for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
double conc_overhead = curr->concOverhead(now_sec);
total_conc_overhead += conc_overhead;
}
return total_conc_overhead;
}
double
COTracker::totalConcOverhead(double now_sec,
size_t group_num,
double* co_per_group) {
double total_conc_overhead = 0.0;
for (size_t i = 0; i < group_num; ++i)
co_per_group[i] = 0.0;
for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
size_t group = curr->_group;
assert( 0 <= group && group < group_num, "invariant" );
double conc_overhead = curr->concOverhead(now_sec);
co_per_group[group] += conc_overhead;
total_conc_overhead += conc_overhead;
}
return total_conc_overhead;
}
double
COTracker::totalPredConcOverhead() {
double total_pred_conc_overhead = 0.0;
for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
total_pred_conc_overhead += curr->predConcOverhead();
curr->resetPred();
}
return total_pred_conc_overhead / _cpu_number;
}
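The deleted COTracker boils down to one measurement per update period: concurrent overhead = elapsed vtime / elapsed wall-clock time for the tracked thread, later divided by the CPU count for a machine-wide figure. A compact sketch of that calculation with made-up numbers:

#include <cstdio>

// One COTracker-style reading: the fraction of a single CPU a concurrent
// GC thread consumed during the period [period_start, period_end].
static double conc_overhead(double period_start_sec, double period_end_sec,
                            double vtime_start_sec,  double vtime_end_sec) {
  double elapsed_time_sec  = period_end_sec - period_start_sec;
  double elapsed_vtime_sec = vtime_end_sec  - vtime_start_sec;
  return elapsed_vtime_sec / elapsed_time_sec;
}

int main() {
  // A thread that burned 30 ms of CPU over a 100 ms period: 0.30 of one CPU.
  double per_cpu = conc_overhead(0.0, 0.100, 0.0, 0.030);
  double cpus    = 8.0;                       // assumed processor count
  std::printf("per-CPU overhead = %.2f\n", per_cpu);
  std::printf("machine overhead = %.3f\n", per_cpu / cpus);  // concOverhead()
  return 0;
}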
View File
@ -1,181 +0,0 @@
/*
* Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
// COTracker keeps track of the concurrent overhead of a GC thread.
// A thread that needs to be tracked must, itself, start up its
// tracker with the start() method and then call the update() method
// at regular intervals. What the tracker does is to calculate the
// concurrent overhead of a process at a given update period. The
// tracker starts and when it detects that it has exceeded the given
// period, it calculates the duration of the period in wall-clock time
// and the duration of the period in vtime (i.e. how much time the
// concurrent processes really took up during this period). The ratio
// of the latter over the former is the concurrent overhead of that
// process for that period over a single CPU. This overhead is stored
// on the tracker, "timestamped" with the wall-clock time of the end
// of the period. When the concurrent overhead of this process needs
// to be queried, this last "reading" provides a good approximation
// (we assume that the concurrent overhead of a particular thread
// stays largely constant over time). The timestamp is necessary to
// detect when the process has stopped working and the recorded
// reading hasn't been updated for some time.
// Each concurrent GC thread is considered to be part of a "group"
// (i.e. any available concurrent marking threads are part of the
// "concurrent marking thread group"). A COTracker is associated with
// a single group at construction-time. It's up to each collector to
// decide how groups will be mapped to such an id (ids should start
// from 0 and be consecutive; there's a hardcoded max group num
// defined on the GCOverheadTracker class). The notion of a group has
// been introduced to be able to identify how much overhead was
// imposed by each group, instead of getting a single value that
// covers all concurrent overhead.
class COTracker {
private:
// It indicates whether this tracker is enabled or not. When the
// tracker is disabled, then it returns 0.0 as the latest concurrent
// overhead and several methods (reset, start, and update) are not
// supposed to be called on it. This enabling / disabling facility
// is really provided to make it a bit more explicit in the code when a
// particular tracker of a process that doesn't run all the time
// (e.g. concurrent marking) is supposed to be used and when it's not.
bool _enabled;
// The ID of the group associated with this tracker.
int _group;
// The update period of the tracker. A new value for the concurrent
// overhead of the associated process will be made at intervals no
// smaller than this.
double _update_period_sec;
// The start times (both wall-clock time and vtime) of the current
// interval.
double _period_start_time_sec;
double _period_start_vtime_sec;
// Number seq of the concurrent overhead readings within a period
NumberSeq _conc_overhead_seq;
// The latest reading of the concurrent overhead (over a single CPU)
// imposed by the associated concurrent thread, made available at
// the indicated wall-clock time.
double _conc_overhead;
double _time_stamp_sec;
// The number of CPUs that the host machine has (for convenience
// really, as we'd have to keep translating it into a double)
static double _cpu_number;
// Fields that keep a list of all trackers created. This is useful,
// since it allows us to sum up the concurrent overhead without
// having to write code for a specific collector to broadcast a
// request to all its concurrent processes.
COTracker* _next;
static COTracker* _head;
// It indicates that a new period is starting by updating the
// _period_start_time_sec and _period_start_vtime_sec fields.
void resetPeriod(double now_sec, double vnow_sec);
// It updates the latest concurrent overhead reading, taken at a
// given wall-clock time.
void setConcOverhead(double time_stamp_sec, double conc_overhead);
// It determines whether the time stamp of the latest concurrent
// overhead reading is out of date or not.
bool outOfDate(double now_sec) {
// The latest reading is considered out of date if it was taken more
// than 1.2x the update period ago.
return (now_sec - _time_stamp_sec) > 1.2 * _update_period_sec;
}
public:
// The constructor which associates the tracker with a group ID.
COTracker(int group);
// Methods to enable / disable the tracker and query whether it is enabled.
void enable() { _enabled = true; }
void disable() { _enabled = false; }
bool enabled() { return _enabled; }
// It resets the tracker and sets concurrent overhead reading to be
// the given parameter and the associated time stamp to be now.
void reset(double starting_conc_overhead = 0.0);
// The tracker starts tracking. It should only be called from the
// concurrent thread that is tracked by this tracker.
void start();
// It updates the tracker and, if the current period is longer than
// the update period, the concurrent overhead reading will be
// updated. force_end being true indicates that it's the last call
// to update() by this process before the tracker is disabled (the
// tracker can be re-enabled later if necessary). It should only be
// called from the concurrent thread that is tracked by this tracker
// and while the thread has joined the STS.
void update(bool force_end = false);
// It adjusts the contents of the tracker to take into account a STW
// pause.
void updateForSTW(double start_sec, double end_sec);
// It returns the last concurrent overhead reading over a single
// CPU. If the reading is out of date, or the tracker is disabled,
// it returns 0.0.
double concCPUOverhead(double now_sec) {
if (!_enabled || outOfDate(now_sec))
return 0.0;
else
return _conc_overhead;
}
// It returns the last concurrent overhead reading over all CPUs
// that the host machine has. If the reading is out of date, or the
// tracker is disabled, it returns 0.0.
double concOverhead(double now_sec) {
return concCPUOverhead(now_sec) / _cpu_number;
}
double predConcOverhead();
void resetPred();
// statics
// It notifies all trackers about a STW pause.
static void updateAllForSTW(double start_sec, double end_sec);
// It returns the sum of the concurrent overhead readings of all
// available (and enabled) trackers for the given time stamp. The
// overhead is over all the CPUs of the host machine.
static double totalConcOverhead(double now_sec);
// Like the previous method, but it also sums up the overheads per
// group number. The length of the co_per_group array must be at
// least as large as group_num.
static double totalConcOverhead(double now_sec,
size_t group_num,
double* co_per_group);
static double totalPredConcOverhead();
};
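One detail worth noting from the removed header is the freshness rule in outOfDate(): a reading is ignored once it is older than 1.2x the update period. A tiny sketch of that rule; the period and reading values are invented:

#include <cstdio>

// Freshness rule from COTracker::outOfDate(): a reading older than
// 1.2x the update period no longer counts toward the overhead.
static double overhead_or_zero(double now_sec, double time_stamp_sec,
                               double update_period_sec, double reading) {
  bool out_of_date = (now_sec - time_stamp_sec) > 1.2 * update_period_sec;
  return out_of_date ? 0.0 : reading;
}

int main() {
  double period = 0.125;   // made-up update period, in seconds
  std::printf("fresh reading: %.2f\n", overhead_or_zero(10.00, 9.95, period, 0.30));
  std::printf("stale reading: %.2f\n", overhead_or_zero(10.00, 9.00, period, 0.30));
  return 0;
}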
View File
@ -1,179 +0,0 @@
/*
* Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
# include "incls/_precompiled.incl"
# include "incls/_gcOverheadReporter.cpp.incl"
class COReportingThread : public ConcurrentGCThread {
private:
GCOverheadReporter* _reporter;
public:
COReportingThread(GCOverheadReporter* reporter) : _reporter(reporter) {
guarantee( _reporter != NULL, "precondition" );
create_and_start();
}
virtual void run() {
initialize_in_thread();
wait_for_universe_init();
int period_ms = GCOverheadReportingPeriodMS;
while ( true ) {
os::sleep(Thread::current(), period_ms, false);
_sts.join();
double now_sec = os::elapsedTime();
_reporter->collect_and_record_conc_overhead(now_sec);
_sts.leave();
}
terminate();
}
};
GCOverheadReporter* GCOverheadReporter::_reporter = NULL;
GCOverheadReporter::GCOverheadReporter(size_t group_num,
const char* group_names[],
size_t length)
: _group_num(group_num), _prev_end_sec(0.0) {
guarantee( 0 <= group_num && group_num <= MaxGCOverheadGroupNum,
"precondition" );
_base = NEW_C_HEAP_ARRAY(GCOverheadReporterEntry, length);
_top = _base + length;
_curr = _base;
for (size_t i = 0; i < group_num; ++i) {
guarantee( group_names[i] != NULL, "precondition" );
_group_names[i] = group_names[i];
}
}
void
GCOverheadReporter::add(double start_sec, double end_sec,
double* conc_overhead,
double stw_overhead) {
assert( _curr <= _top, "invariant" );
if (_curr == _top) {
guarantee( false, "trace full" );
return;
}
_curr->_start_sec = start_sec;
_curr->_end_sec = end_sec;
for (size_t i = 0; i < _group_num; ++i) {
_curr->_conc_overhead[i] =
(conc_overhead != NULL) ? conc_overhead[i] : 0.0;
}
_curr->_stw_overhead = stw_overhead;
++_curr;
}
void
GCOverheadReporter::collect_and_record_conc_overhead(double end_sec) {
double start_sec = _prev_end_sec;
guarantee( end_sec > start_sec, "invariant" );
double conc_overhead[MaxGCOverheadGroupNum];
COTracker::totalConcOverhead(end_sec, _group_num, conc_overhead);
add_conc_overhead(start_sec, end_sec, conc_overhead);
_prev_end_sec = end_sec;
}
void
GCOverheadReporter::record_stw_start(double start_sec) {
guarantee( start_sec > _prev_end_sec, "invariant" );
collect_and_record_conc_overhead(start_sec);
}
void
GCOverheadReporter::record_stw_end(double end_sec) {
double start_sec = _prev_end_sec;
COTracker::updateAllForSTW(start_sec, end_sec);
add_stw_overhead(start_sec, end_sec, 1.0);
_prev_end_sec = end_sec;
}
void
GCOverheadReporter::print() const {
tty->print_cr("");
tty->print_cr("GC Overhead (%d entries)", _curr - _base);
tty->print_cr("");
GCOverheadReporterEntry* curr = _base;
while (curr < _curr) {
double total = curr->_stw_overhead;
for (size_t i = 0; i < _group_num; ++i)
total += curr->_conc_overhead[i];
tty->print("OVERHEAD %12.8lf %12.8lf ",
curr->_start_sec, curr->_end_sec);
for (size_t i = 0; i < _group_num; ++i)
tty->print("%s %12.8lf ", _group_names[i], curr->_conc_overhead[i]);
tty->print_cr("STW %12.8lf TOT %12.8lf", curr->_stw_overhead, total);
++curr;
}
tty->print_cr("");
}
// statics
void
GCOverheadReporter::initGCOverheadReporter(size_t group_num,
const char* group_names[]) {
guarantee( _reporter == NULL, "should only be called once" );
guarantee( 0 <= group_num && group_num <= MaxGCOverheadGroupNum,
"precondition" );
guarantee( group_names != NULL, "pre-condition" );
if (GCOverheadReporting) {
_reporter = new GCOverheadReporter(group_num, group_names);
new COReportingThread(_reporter);
}
}
void
GCOverheadReporter::recordSTWStart(double start_sec) {
if (_reporter != NULL)
_reporter->record_stw_start(start_sec);
}
void
GCOverheadReporter::recordSTWEnd(double end_sec) {
if (_reporter != NULL)
_reporter->record_stw_end(end_sec);
}
void
GCOverheadReporter::printGCOverhead() {
if (_reporter != NULL)
_reporter->print();
}
View File
@ -1,141 +0,0 @@
/*
* Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
// Keeps track of the GC overhead (both concurrent and STW). It stores
// it in a large array and then prints it to tty at the end of the
// execution.
// See coTracker.hpp for the explanation on what groups are.
// Let's set a maximum number of concurrent overhead groups, to
// statically allocate any arrays we need and not to have to
// malloc/free them. This is just a bit more convenient.
enum {
MaxGCOverheadGroupNum = 4
};
typedef struct {
double _start_sec;
double _end_sec;
double _conc_overhead[MaxGCOverheadGroupNum];
double _stw_overhead;
} GCOverheadReporterEntry;
class GCOverheadReporter {
friend class COReportingThread;
private:
enum PrivateConstants {
DefaultReporterLength = 128 * 1024
};
// Reference to the single instance of this class.
static GCOverheadReporter* _reporter;
// These three references point to the array that contains the GC
// overhead entries (_base is the base of the array, _top is the
// address passed the last entry of the array, _curr is the next
// entry to be used).
GCOverheadReporterEntry* _base;
GCOverheadReporterEntry* _top;
GCOverheadReporterEntry* _curr;
// The number of concurrent overhead groups.
size_t _group_num;
// The wall-clock time of the end of the last recorded period of GC
// overhead.
double _prev_end_sec;
// Names for the concurrent overhead groups.
const char* _group_names[MaxGCOverheadGroupNum];
// Add a new entry to the large array. conc_overhead being NULL is
// equivalent to an array full of 0.0s. conc_overhead should have a
// length of at least _group_num.
void add(double start_sec, double end_sec,
double* conc_overhead,
double stw_overhead);
// Add an entry that represents concurrent GC overhead.
// conc_overhead must be at least of length _group_num.
// conc_overhead being NULL is equivalent to an array full of 0.0s.
void add_conc_overhead(double start_sec, double end_sec,
double* conc_overhead) {
add(start_sec, end_sec, conc_overhead, 0.0);
}
// Add an entry that represents STW GC overhead.
void add_stw_overhead(double start_sec, double end_sec,
double stw_overhead) {
add(start_sec, end_sec, NULL, stw_overhead);
}
// It records the start of a STW pause (i.e. it records the
// concurrent overhead up to that point)
void record_stw_start(double start_sec);
// It records the end of a STW pause (i.e. it records the overhead
// associated with the pause and adjusts all the trackers to reflect
// the pause)
void record_stw_end(double end_sec);
// It queries all the trackers of their concurrent overhead and
// records it.
void collect_and_record_conc_overhead(double end_sec);
// It prints the contents of the GC overhead array
void print() const;
// Constructor. The same preconditions for group_num and group_names
// from initGCOverheadReporter apply here too.
GCOverheadReporter(size_t group_num,
const char* group_names[],
size_t length = DefaultReporterLength);
public:
// statics
// It initialises the GCOverheadReporter and launches the concurrent
// overhead reporting thread. Both actions happen only if the
// GCOverheadReporting parameter is set. The length of the
// group_names array should be >= group_num and group_num should be
// <= MaxGCOverheadGroupNum. Entries group_names[0..group_num-1]
// should not be NULL.
static void initGCOverheadReporter(size_t group_num,
const char* group_names[]);
// The following three are provided for convenience and they are
// wrappers around record_stw_start(start_sec), record_stw_end(end_sec),
// and print(). Each of these checks whether GC overhead reporting
// is on (i.e. _reporter != NULL) and, if it is, calls the
// corresponding method. This saves repeating the same pattern again
// and again at the places where they need to be called.
static void recordSTWStart(double start_sec);
static void recordSTWEnd(double end_sec);
static void printGCOverhead();
};
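The removed reporter simply appends fixed-size entries and, when printing, sums the STW overhead and the per-group concurrent overheads of each interval. A minimal sketch of that bookkeeping; the group count and names are illustrative, not taken from the source:

#include <cstdio>

static const int kGroups = 2;   // e.g. "CM" and "ZF"; illustrative only

struct OverheadEntry {
  double start_sec, end_sec;
  double conc_overhead[kGroups];
  double stw_overhead;
};

// Mirrors the OVERHEAD line format printed by GCOverheadReporter::print().
static void print_entry(const OverheadEntry& e, const char* names[kGroups]) {
  double total = e.stw_overhead;
  for (int i = 0; i < kGroups; ++i) total += e.conc_overhead[i];
  std::printf("OVERHEAD %12.8f %12.8f ", e.start_sec, e.end_sec);
  for (int i = 0; i < kGroups; ++i)
    std::printf("%s %12.8f ", names[i], e.conc_overhead[i]);
  std::printf("STW %12.8f TOT %12.8f\n", e.stw_overhead, total);
}

int main() {
  const char* names[kGroups] = { "CM", "ZF" };
  OverheadEntry e = { 1.0, 1.1, { 0.02, 0.01 }, 0.0 };
  print_entry(e, names);
  return 0;
}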
View File
@ -27,6 +27,7 @@
GrowableArray<oop>* MarkSweep::_marking_stack = NULL; GrowableArray<oop>* MarkSweep::_marking_stack = NULL;
GrowableArray<Klass*>* MarkSweep::_revisit_klass_stack = NULL; GrowableArray<Klass*>* MarkSweep::_revisit_klass_stack = NULL;
GrowableArray<DataLayout*>* MarkSweep::_revisit_mdo_stack = NULL;
GrowableArray<oop>* MarkSweep::_preserved_oop_stack = NULL; GrowableArray<oop>* MarkSweep::_preserved_oop_stack = NULL;
GrowableArray<markOop>* MarkSweep::_preserved_mark_stack= NULL; GrowableArray<markOop>* MarkSweep::_preserved_mark_stack= NULL;
@ -62,12 +63,37 @@ void MarkSweep::revisit_weak_klass_link(Klass* k) {
void MarkSweep::follow_weak_klass_links() { void MarkSweep::follow_weak_klass_links() {
// All klasses on the revisit stack are marked at this point. // All klasses on the revisit stack are marked at this point.
// Update and follow all subklass, sibling and implementor links. // Update and follow all subklass, sibling and implementor links.
if (PrintRevisitStats) {
gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
gclog_or_tty->print_cr("Revisit klass stack length = %d", _revisit_klass_stack->length());
}
for (int i = 0; i < _revisit_klass_stack->length(); i++) { for (int i = 0; i < _revisit_klass_stack->length(); i++) {
_revisit_klass_stack->at(i)->follow_weak_klass_links(&is_alive,&keep_alive); _revisit_klass_stack->at(i)->follow_weak_klass_links(&is_alive,&keep_alive);
} }
follow_stack(); follow_stack();
} }
#if ( defined(COMPILER1) || defined(COMPILER2) )
void MarkSweep::revisit_mdo(DataLayout* p) {
_revisit_mdo_stack->push(p);
}
void MarkSweep::follow_mdo_weak_refs() {
// All strongly reachable oops have been marked at this point;
// we can visit and clear any weak references from MDO's which
// we memoized during the strong marking phase.
assert(_marking_stack->is_empty(), "Marking stack should be empty");
if (PrintRevisitStats) {
gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
gclog_or_tty->print_cr("Revisit MDO stack length = %d", _revisit_mdo_stack->length());
}
for (int i = 0; i < _revisit_mdo_stack->length(); i++) {
_revisit_mdo_stack->at(i)->follow_weak_refs(&is_alive);
}
follow_stack();
}
#endif // ( COMPILER1 || COMPILER2 )
MarkSweep::FollowRootClosure MarkSweep::follow_root_closure; MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); } void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
View File
@ -23,6 +23,7 @@
*/ */
class ReferenceProcessor; class ReferenceProcessor;
class DataLayout;
// MarkSweep takes care of global mark-compact garbage collection for a // MarkSweep takes care of global mark-compact garbage collection for a
// GenCollectedHeap using a four-phase pointer forwarding algorithm. All // GenCollectedHeap using a four-phase pointer forwarding algorithm. All
@ -65,6 +66,8 @@ class MarkSweep : AllStatic {
virtual void do_oop(oop* p); virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p); virtual void do_oop(narrowOop* p);
virtual const bool do_nmethods() const { return true; } virtual const bool do_nmethods() const { return true; }
virtual const bool should_remember_mdo() const { return true; }
virtual void remember_mdo(DataLayout* p) { MarkSweep::revisit_mdo(p); }
}; };
class FollowStackClosure: public VoidClosure { class FollowStackClosure: public VoidClosure {
@ -103,6 +106,7 @@ class MarkSweep : AllStatic {
friend class KeepAliveClosure; friend class KeepAliveClosure;
friend class VM_MarkSweep; friend class VM_MarkSweep;
friend void marksweep_init(); friend void marksweep_init();
friend class DataLayout;
// //
// Vars // Vars
@ -112,6 +116,8 @@ class MarkSweep : AllStatic {
static GrowableArray<oop>* _marking_stack; static GrowableArray<oop>* _marking_stack;
// Stack for live klasses to revisit at end of marking phase // Stack for live klasses to revisit at end of marking phase
static GrowableArray<Klass*>* _revisit_klass_stack; static GrowableArray<Klass*>* _revisit_klass_stack;
// Set (stack) of MDO's to revisit at end of marking phase
static GrowableArray<DataLayout*>* _revisit_mdo_stack;
// Space for storing/restoring mark word // Space for storing/restoring mark word
static GrowableArray<markOop>* _preserved_mark_stack; static GrowableArray<markOop>* _preserved_mark_stack;
@ -157,6 +163,10 @@ class MarkSweep : AllStatic {
// Class unloading. Update subklass/sibling/implementor links at end of marking phase. // Class unloading. Update subklass/sibling/implementor links at end of marking phase.
static void follow_weak_klass_links(); static void follow_weak_klass_links();
// Class unloading. Clear weak refs in MDO's (ProfileData)
// at the end of the marking phase.
static void follow_mdo_weak_refs();
// Debugging // Debugging
static void trace(const char* msg) PRODUCT_RETURN; static void trace(const char* msg) PRODUCT_RETURN;
@ -213,7 +223,10 @@ class MarkSweep : AllStatic {
#endif #endif
// Call backs for class unloading // Call backs for class unloading
static void revisit_weak_klass_link(Klass* k); // Update subklass/sibling/implementor links at end of marking. // Update subklass/sibling/implementor links at end of marking.
static void revisit_weak_klass_link(Klass* k);
// For weak refs clearing in MDO's
static void revisit_mdo(DataLayout* p);
}; };
class PreservedMark VALUE_OBJ_CLASS_SPEC { class PreservedMark VALUE_OBJ_CLASS_SPEC {
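The marking closure above gains two hooks, should_remember_mdo() and remember_mdo(), so the oop iteration code can hand interesting MDO slots back to MarkSweep without knowing about the revisit stack. A toy sketch of that double-dispatch pattern with stand-in types:

#include <cstdio>
#include <vector>

struct DataLayoutStub { int id; };

// Base closure: by default nothing is remembered.
struct OopClosureStub {
  virtual ~OopClosureStub() {}
  virtual bool should_remember_mdo() const { return false; }
  virtual void remember_mdo(DataLayoutStub*) {}
};

// Marking closure: memoizes MDO slots for a later weak-ref pass,
// like the marking closure in the diff above.
struct MarkingClosureStub : OopClosureStub {
  std::vector<DataLayoutStub*> revisit_mdo_stack;
  bool should_remember_mdo() const override { return true; }
  void remember_mdo(DataLayoutStub* p) override { revisit_mdo_stack.push_back(p); }
};

// The iterator side only pays the memoization cost when asked to.
static void visit_mdo_slot(OopClosureStub& cl, DataLayoutStub* p) {
  if (cl.should_remember_mdo()) cl.remember_mdo(p);
}

int main() {
  MarkingClosureStub cl;
  DataLayoutStub d{42};
  visit_mdo_slot(cl, &d);
  std::printf("remembered %zu mdo slot(s)\n", cl.revisit_mdo_stack.size());
  return 0;
}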
View File
@ -239,6 +239,9 @@ class CollectedHeap : public CHeapObj {
return p == NULL || is_in_closed_subset(p); return p == NULL || is_in_closed_subset(p);
} }
// XXX is_permanent() and is_in_permanent() should be better named
// to distinguish one from the other.
// Returns "TRUE" if "p" is allocated as "permanent" data. // Returns "TRUE" if "p" is allocated as "permanent" data.
// If the heap does not use "permanent" data, returns the same // If the heap does not use "permanent" data, returns the same
// value is_in_reserved() would return. // value is_in_reserved() would return.
@ -247,13 +250,17 @@ class CollectedHeap : public CHeapObj {
// space). If you need the more conservative answer use is_permanent(). // space). If you need the more conservative answer use is_permanent().
virtual bool is_in_permanent(const void *p) const = 0; virtual bool is_in_permanent(const void *p) const = 0;
bool is_in_permanent_or_null(const void *p) const {
return p == NULL || is_in_permanent(p);
}
// Returns "TRUE" if "p" is in the committed area of "permanent" data. // Returns "TRUE" if "p" is in the committed area of "permanent" data.
// If the heap does not use "permanent" data, returns the same // If the heap does not use "permanent" data, returns the same
// value is_in() would return. // value is_in() would return.
virtual bool is_permanent(const void *p) const = 0; virtual bool is_permanent(const void *p) const = 0;
bool is_in_permanent_or_null(const void *p) const { bool is_permanent_or_null(const void *p) const {
return p == NULL || is_in_permanent(p); return p == NULL || is_permanent(p);
} }
// Returns "TRUE" if "p" is a method oop in the // Returns "TRUE" if "p" is a method oop in the
View File
@ -409,8 +409,6 @@ codeBlob.cpp c1_Runtime1.hpp
compileBroker.cpp c1_Compiler.hpp compileBroker.cpp c1_Compiler.hpp
frame.hpp c1_Defs.hpp
frame_<arch>.cpp c1_Runtime1.hpp frame_<arch>.cpp c1_Runtime1.hpp
globals.cpp c1_globals.hpp globals.cpp c1_globals.hpp
@ -433,8 +431,6 @@ os_<os_family>.cpp c1_Runtime1.hpp
os_<os_arch>.cpp c1_Runtime1.hpp os_<os_arch>.cpp c1_Runtime1.hpp
registerMap.hpp c1_Defs.hpp
safepoint.cpp c1_globals.hpp safepoint.cpp c1_globals.hpp
sharedRuntime.cpp c1_Runtime1.hpp sharedRuntime.cpp c1_Runtime1.hpp
View File
@ -872,6 +872,7 @@ classFileParser.cpp signature.hpp
classFileParser.cpp symbolOop.hpp
classFileParser.cpp symbolTable.hpp
classFileParser.cpp systemDictionary.hpp
classFileParser.cpp threadService.hpp
classFileParser.cpp timer.hpp
classFileParser.cpp universe.inline.hpp
classFileParser.cpp verificationType.hpp
@ -924,6 +925,7 @@ classLoader.cpp os_<os_family>.inline.hpp
classLoader.cpp symbolOop.hpp
classLoader.cpp systemDictionary.hpp
classLoader.cpp threadCritical.hpp
classLoader.cpp threadService.hpp
classLoader.cpp timer.hpp
classLoader.cpp universe.inline.hpp
classLoader.cpp vmSymbols.hpp
@ -2682,6 +2684,7 @@ markOop.inline.hpp klassOop.hpp
markOop.inline.hpp markOop.hpp
markSweep.cpp compileBroker.hpp
markSweep.cpp methodDataOop.hpp
markSweep.hpp collectedHeap.hpp
@ -4019,6 +4022,7 @@ systemDictionary.cpp placeholders.hpp
systemDictionary.cpp resolutionErrors.hpp
systemDictionary.cpp signature.hpp
systemDictionary.cpp systemDictionary.hpp
systemDictionary.cpp threadService.hpp
systemDictionary.cpp typeArrayKlass.hpp
systemDictionary.cpp vmSymbols.hpp

View File

@ -122,11 +122,15 @@ class AbstractInterpreter: AllStatic {
static int size_top_interpreter_activation(methodOop method);
// Deoptimization support
static address continuation_for(methodOop method,
address bcp,
int callee_parameters,
bool is_top_frame,
bool& use_next_mdp);
// Compute the entry address for continuation after
static address deopt_continue_after_entry(methodOop method,
address bcp,
int callee_parameters,
bool is_top_frame);
// Compute the entry address for reexecution
static address deopt_reexecute_entry(methodOop method, address bcp);
// Deoptimization should reexecute this bytecode
static bool bytecode_should_reexecute(Bytecodes::Code code);
// share implementation of size_activation and layout_activation:
static int size_activation(methodOop method,

View File

@ -284,76 +284,19 @@ static BasicType constant_pool_type(methodOop method, int index) {
//------------------------------------------------------------------------------------------------------------------------
// Deoptimization support
// If deoptimization happens, this method returns the point where to continue in
// interpreter. For calls (invokexxxx, newxxxx) the continuation is at next
// bci and the top of stack is in eax/edx/FPU tos.
// For putfield/getfield, put/getstatic, the continuation is at the same
// bci and the TOS is on stack.
// Note: deopt_entry(type, 0) means reexecute bytecode
// deopt_entry(type, length) means continue at next bytecode
address AbstractInterpreter::continuation_for(methodOop method, address bcp, int callee_parameters, bool is_top_frame, bool& use_next_mdp) {
// If deoptimization happens, this function returns the point of next bytecode to continue execution
address AbstractInterpreter::deopt_continue_after_entry(methodOop method, address bcp, int callee_parameters, bool is_top_frame) {
assert(method->contains(bcp), "just checkin'");
Bytecodes::Code code = Bytecodes::java_code_at(bcp);
assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute");
int bci = method->bci_from(bcp);
int length = -1; // initial value for debugging
// compute continuation length
length = Bytecodes::length_at(bcp);
// compute result type
BasicType type = T_ILLEGAL;
// when continuing after a compiler safepoint, re-execute the bytecode
// (an invoke is continued after the safepoint)
use_next_mdp = true;
switch (code) {
case Bytecodes::_lookupswitch:
case Bytecodes::_tableswitch:
case Bytecodes::_fast_binaryswitch:
case Bytecodes::_fast_linearswitch:
// recompute condtional expression folded into _if<cond>
case Bytecodes::_lcmp :
case Bytecodes::_fcmpl :
case Bytecodes::_fcmpg :
case Bytecodes::_dcmpl :
case Bytecodes::_dcmpg :
case Bytecodes::_ifnull :
case Bytecodes::_ifnonnull :
case Bytecodes::_goto :
case Bytecodes::_goto_w :
case Bytecodes::_ifeq :
case Bytecodes::_ifne :
case Bytecodes::_iflt :
case Bytecodes::_ifge :
case Bytecodes::_ifgt :
case Bytecodes::_ifle :
case Bytecodes::_if_icmpeq :
case Bytecodes::_if_icmpne :
case Bytecodes::_if_icmplt :
case Bytecodes::_if_icmpge :
case Bytecodes::_if_icmpgt :
case Bytecodes::_if_icmple :
case Bytecodes::_if_acmpeq :
case Bytecodes::_if_acmpne :
// special cases
case Bytecodes::_getfield :
case Bytecodes::_putfield :
case Bytecodes::_getstatic :
case Bytecodes::_putstatic :
case Bytecodes::_aastore :
// reexecute the operation and TOS value is on stack
assert(is_top_frame, "must be top frame");
use_next_mdp = false;
return Interpreter::deopt_entry(vtos, 0);
break;
#ifdef COMPILER1
case Bytecodes::_athrow :
assert(is_top_frame, "must be top frame");
use_next_mdp = false;
return Interpreter::rethrow_exception_entry();
break;
#endif /* COMPILER1 */
case Bytecodes::_invokevirtual :
case Bytecodes::_invokespecial :
case Bytecodes::_invokestatic :
@ -392,6 +335,70 @@ address AbstractInterpreter::continuation_for(methodOop method, address bcp, int
: Interpreter::return_entry(as_TosState(type), length);
}
// If deoptimization happens, this function returns the point where the interpreter reexecutes
// the bytecode.
// Note: Bytecodes::_athrow is a special case in that it does not return
// Interpreter::deopt_entry(vtos, 0) like others
address AbstractInterpreter::deopt_reexecute_entry(methodOop method, address bcp) {
assert(method->contains(bcp), "just checkin'");
Bytecodes::Code code = Bytecodes::java_code_at(bcp);
#ifdef COMPILER1
if(code == Bytecodes::_athrow ) {
return Interpreter::rethrow_exception_entry();
}
#endif /* COMPILER1 */
return Interpreter::deopt_entry(vtos, 0);
}
// If deoptimization happens, the interpreter should reexecute these bytecodes.
// This function mainly helps the compilers to set up the reexecute bit.
bool AbstractInterpreter::bytecode_should_reexecute(Bytecodes::Code code) {
switch (code) {
case Bytecodes::_lookupswitch:
case Bytecodes::_tableswitch:
case Bytecodes::_fast_binaryswitch:
case Bytecodes::_fast_linearswitch:
// recompute condtional expression folded into _if<cond>
case Bytecodes::_lcmp :
case Bytecodes::_fcmpl :
case Bytecodes::_fcmpg :
case Bytecodes::_dcmpl :
case Bytecodes::_dcmpg :
case Bytecodes::_ifnull :
case Bytecodes::_ifnonnull :
case Bytecodes::_goto :
case Bytecodes::_goto_w :
case Bytecodes::_ifeq :
case Bytecodes::_ifne :
case Bytecodes::_iflt :
case Bytecodes::_ifge :
case Bytecodes::_ifgt :
case Bytecodes::_ifle :
case Bytecodes::_if_icmpeq :
case Bytecodes::_if_icmpne :
case Bytecodes::_if_icmplt :
case Bytecodes::_if_icmpge :
case Bytecodes::_if_icmpgt :
case Bytecodes::_if_icmple :
case Bytecodes::_if_acmpeq :
case Bytecodes::_if_acmpne :
// special cases
case Bytecodes::_getfield :
case Bytecodes::_putfield :
case Bytecodes::_getstatic :
case Bytecodes::_putstatic :
case Bytecodes::_aastore :
#ifdef COMPILER1
//special case of reexecution
case Bytecodes::_athrow :
#endif
return true;
default:
return false;
}
}
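A hedged sketch of how a caller in the deoptimization code is expected to combine the three new entry points that replace the old continuation_for()/use_next_mdp contract; the wrapper function below is hypothetical and not part of this commit.

// Illustrative only: choose the interpreter entry for a deoptimized frame.
static address deopt_entry_sketch(methodOop method, address bcp,
                                  int callee_parameters, bool is_top_frame) {
  Bytecodes::Code code = Bytecodes::java_code_at(bcp);
  if (AbstractInterpreter::bytecode_should_reexecute(code)) {
    // reexecute the bytecode; the TOS value is on the expression stack
    return AbstractInterpreter::deopt_reexecute_entry(method, bcp);
  }
  // continue after the bytecode, e.g. at the return entry following an invoke
  return AbstractInterpreter::deopt_continue_after_entry(method, bcp,
                                                         callee_parameters,
                                                         is_top_frame);
}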
void AbstractInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// Quick & dirty stack overflow checking: bang the stack & handle trap.
// Note that we do the banging after the frame is setup, since the exception

View File

@ -849,8 +849,25 @@ static void trace_osr_request(methodHandle method, nmethod* osr, int bci) {
}
#endif // !PRODUCT
nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) {
nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp);
assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests");
if (branch_bcp != NULL && nm != NULL) {
// This was a successful request for an OSR nmethod. Because
// frequency_counter_overflow_inner ends with a safepoint check,
// nm could have been unloaded so look it up again. It's unsafe
// to examine nm directly since it might have been freed and used
// for something else.
frame fr = thread->last_frame();
methodOop method = fr.interpreter_frame_method();
int bci = method->bci_from(fr.interpreter_frame_bcp());
nm = method->lookup_osr_nmethod_for(bci);
}
return nm;
}
IRT_ENTRY(nmethod*,
InterpreterRuntime::frequency_counter_overflow_inner(JavaThread* thread, address branch_bcp))
// use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
// flag, in case this method triggers classloading which will call into Java.
UnlockFlagSaver fs(thread);
@ -923,7 +940,6 @@ IRT_ENTRY(nmethod*,
}
BiasedLocking::revoke(objects_to_revoke);
}
return osr_nm;
}
}

View File

@ -49,6 +49,9 @@ class InterpreterRuntime: AllStatic {
static ConstantPoolCacheEntry* cache_entry(JavaThread *thread) { return cache_entry_at(thread, Bytes::get_native_u2(bcp(thread) + 1)); }
static void note_trap(JavaThread *thread, int reason, TRAPS);
// Inner work method for Interpreter's frequency counter overflow
static nmethod* frequency_counter_overflow_inner(JavaThread* thread, address branch_bcp);
public:
// Constants
static void ldc (JavaThread* thread, bool wide);

Some files were not shown because too many files have changed in this diff.