Lana Steuck 2014-03-25 14:52:03 -07:00
commit f334b48a84
102 changed files with 5872 additions and 1753 deletions

@@ -51,9 +51,9 @@ public class G1CollectedHeap extends SharedHeap {
static private CIntegerField summaryBytesUsedField;
// G1MonitoringSupport* _g1mm;
static private AddressField g1mmField;
// MasterOldRegionSet _old_set;
// HeapRegionSet _old_set;
static private long oldSetFieldOffset;
// MasterHumongousRegionSet _humongous_set;
// HeapRegionSet _humongous_set;
static private long humongousSetFieldOffset;
static {

@@ -40,12 +40,8 @@ import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for HeapRegionSetBase. Represents a group of regions.
public class HeapRegionSetBase extends VMObject {
// uint _length;
static private CIntegerField lengthField;
// uint _region_num;
static private CIntegerField regionNumField;
// size_t _total_used_bytes;
static private CIntegerField totalUsedBytesField;
static private long countField;
static {
VM.registerVMInitializedObserver(new Observer() {
@@ -58,21 +54,13 @@ public class HeapRegionSetBase extends VMObject {
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("HeapRegionSetBase");
lengthField = type.getCIntegerField("_length");
regionNumField = type.getCIntegerField("_region_num");
totalUsedBytesField = type.getCIntegerField("_total_used_bytes");
countField = type.getField("_count").getOffset();
}
public long length() {
return lengthField.getValue(addr);
}
public long regionNum() {
return regionNumField.getValue(addr);
}
public long totalUsedBytes() {
return totalUsedBytesField.getValue(addr);
public HeapRegionSetCount count() {
Address countFieldAddr = addr.addOffsetTo(countField);
return (HeapRegionSetCount) VMObjectFactory.newObject(HeapRegionSetCount.class, countFieldAddr);
}
public HeapRegionSetBase(Address addr) {

@@ -0,0 +1,73 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.gc_implementation.g1;
import java.util.Iterator;
import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.runtime.VMObjectFactory;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for HeapRegionSetCount. Represents a group of regions.
public class HeapRegionSetCount extends VMObject {
static private CIntegerField lengthField;
static private CIntegerField capacityField;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("HeapRegionSetCount");
lengthField = type.getCIntegerField("_length");
capacityField = type.getCIntegerField("_capacity");
}
public long length() {
return lengthField.getValue(addr);
}
public long capacity() {
return capacityField.getValue(addr);
}
public HeapRegionSetCount(Address addr) {
super(addr);
}
}
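For reference, a sketch of the VM-side structure this mirror reads. The field names and the increment() signature come from the lookups above and from call sites later in this commit; the rest is an approximation, not the verbatim HotSpot declaration. Because _count is embedded by value in HeapRegionSetBase, the mirror resolves it with a field offset (addOffsetTo) instead of dereferencing a pointer field.
// Approximate VM-side counterpart (heapRegionSet.hpp) of the SA mirror above.
class HeapRegionSetCount VALUE_OBJ_CLASS_SPEC {
  uint   _length;    // number of regions in the set
  size_t _capacity;  // total capacity, in bytes, of those regions
 public:
  HeapRegionSetCount() : _length(0), _capacity(0) { }
  uint   length()   const { return _length;   }
  size_t capacity() const { return _capacity; }
  void increment(uint length_delta, size_t capacity_delta) {
    _length   += length_delta;
    _capacity += capacity_delta;
  }
};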

@@ -114,7 +114,8 @@ public class HeapSummary extends Tool {
long survivorRegionNum = g1mm.survivorRegionNum();
HeapRegionSetBase oldSet = g1h.oldSet();
HeapRegionSetBase humongousSet = g1h.humongousSet();
long oldRegionNum = oldSet.regionNum() + humongousSet.regionNum();
long oldRegionNum = oldSet.count().length()
+ humongousSet.count().capacity() / HeapRegion.grainBytes();
printG1Space("G1 Heap:", g1h.n_regions(),
g1h.used(), g1h.capacity());
System.out.println("G1 Young Generation:");
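A note on the revised arithmetic: the old-region total now comes from oldSet.count().length() plus the humongous set's byte capacity divided by the region size, since the humongous set's entry count need not equal the number of heap regions its objects occupy. As a worked example, with HeapRegion.grainBytes() == 1 MB, an old set of length 40 and a humongous set reporting 6 MB of capacity give oldRegionNum = 40 + 6 MB / 1 MB = 46.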

@@ -1,5 +1,5 @@
#
# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -87,9 +87,10 @@ ifeq ($(INCLUDE_ALL_GCS), false)
g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \
g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp g1OopClosures.cpp \
g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1StringDedup.cpp g1StringDedupStat.cpp \
g1StringDedupTable.cpp g1StringDedupThread.cpp g1StringDedupQueue.cpp g1_globals.cpp heapRegion.cpp \
g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp g1CodeCacheRemSet.cpp \
adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
parallelScavengeHeap.cpp parMarkBitMap.cpp pcTasks.cpp psAdaptiveSizePolicy.cpp \

@@ -33,7 +33,7 @@ jprt.need.sibling.build=false
# This tells jprt what default release we want to build
jprt.hotspot.default.release=jdk8
jprt.hotspot.default.release=jdk9
jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}}
@@ -47,72 +47,50 @@ jprt.sync.push=false
# sparc etc.
# Define the Solaris platforms we want for the various releases
jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
jprt.my.solaris.sparcv9.jdk7u8=${jprt.my.solaris.sparcv9.jdk7}
jprt.my.solaris.sparcv9.jdk9=solaris_sparcv9_5.10
jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
jprt.my.solaris.x64.jdk8=solaris_x64_5.10
jprt.my.solaris.x64.jdk7=solaris_x64_5.10
jprt.my.solaris.x64.jdk7u8=${jprt.my.solaris.x64.jdk7}
jprt.my.solaris.x64.jdk9=solaris_x64_5.10
jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
jprt.my.linux.i586.jdk8=linux_i586_2.6
jprt.my.linux.i586.jdk7=linux_i586_2.6
jprt.my.linux.i586.jdk7u8=${jprt.my.linux.i586.jdk7}
jprt.my.linux.i586.jdk9=linux_i586_2.6
jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
jprt.my.linux.x64.jdk8=linux_x64_2.6
jprt.my.linux.x64.jdk7=linux_x64_2.6
jprt.my.linux.x64.jdk7u8=${jprt.my.linux.x64.jdk7}
jprt.my.linux.x64.jdk9=linux_x64_2.6
jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
jprt.my.linux.ppc.jdk8=linux_ppc_2.6
jprt.my.linux.ppc.jdk7=linux_ppc_2.6
jprt.my.linux.ppc.jdk7u8=${jprt.my.linux.ppc.jdk7}
jprt.my.linux.ppc.jdk9=linux_ppc_2.6
jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6
jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
jprt.my.linux.ppcv2.jdk7u8=${jprt.my.linux.ppcv2.jdk7}
jprt.my.linux.ppcv2.jdk9=linux_ppcv2_2.6
jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
jprt.my.linux.ppcsflt.jdk8=linux_ppcsflt_2.6
jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6
jprt.my.linux.ppcsflt.jdk7u8=${jprt.my.linux.ppcsflt.jdk7}
jprt.my.linux.ppcsflt.jdk9=linux_ppcsflt_2.6
jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}
jprt.my.linux.armvfpsflt.jdk8=linux_armvfpsflt_2.6
jprt.my.linux.armvfpsflt.jdk9=linux_armvfpsflt_2.6
jprt.my.linux.armvfpsflt=${jprt.my.linux.armvfpsflt.${jprt.tools.default.release}}
jprt.my.linux.armvfphflt.jdk8=linux_armvfphflt_2.6
jprt.my.linux.armvfphflt.jdk9=linux_armvfphflt_2.6
jprt.my.linux.armvfphflt=${jprt.my.linux.armvfphflt.${jprt.tools.default.release}}
# The ARM GP vfp-sflt build is not currently supported
#jprt.my.linux.armvs.jdk8=linux_armvs_2.6
#jprt.my.linux.armvs.jdk9=linux_armvs_2.6
#jprt.my.linux.armvs=${jprt.my.linux.armvs.${jprt.tools.default.release}}
jprt.my.linux.armvh.jdk8=linux_armvh_2.6
jprt.my.linux.armvh.jdk9=linux_armvh_2.6
jprt.my.linux.armvh=${jprt.my.linux.armvh.${jprt.tools.default.release}}
jprt.my.linux.armsflt.jdk8=linux_armsflt_2.6
jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
jprt.my.linux.armsflt.jdk7u8=${jprt.my.linux.armsflt.jdk7}
jprt.my.linux.armsflt.jdk9=linux_armsflt_2.6
jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
jprt.my.macosx.x64.jdk8=macosx_x64_10.7
jprt.my.macosx.x64.jdk7=macosx_x64_10.7
jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
jprt.my.macosx.x64.jdk9=macosx_x64_10.7
jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
jprt.my.windows.i586.jdk8=windows_i586_6.1
jprt.my.windows.i586.jdk7=windows_i586_6.1
jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
jprt.my.windows.i586.jdk9=windows_i586_6.1
jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
jprt.my.windows.x64.jdk8=windows_x64_6.1
jprt.my.windows.x64.jdk7=windows_x64_6.1
jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
jprt.my.windows.x64.jdk9=windows_x64_6.1
jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
# Standard list of jprt build targets for this source tree
@@ -143,9 +121,7 @@ jprt.build.targets.embedded= \
jprt.build.targets.all=${jprt.build.targets.standard}, \
${jprt.build.targets.embedded}, ${jprt.build.targets.open}
jprt.build.targets.jdk8=${jprt.build.targets.all}
jprt.build.targets.jdk7=${jprt.build.targets.all}
jprt.build.targets.jdk7u8=${jprt.build.targets.all}
jprt.build.targets.jdk9=${jprt.build.targets.all}
jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
# Subset lists of test targets for this source tree
@@ -349,9 +325,7 @@ jprt.test.targets.embedded= \
${jprt.my.windows.i586.test.targets}, \
${jprt.my.windows.x64.test.targets}
jprt.test.targets.jdk8=${jprt.test.targets.standard}
jprt.test.targets.jdk7=${jprt.test.targets.standard}
jprt.test.targets.jdk7u8=${jprt.test.targets.jdk7}
jprt.test.targets.jdk9=${jprt.test.targets.standard}
jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
# The default test/Makefile targets that should be run
@@ -399,9 +373,7 @@ jprt.make.rule.test.targets.standard = \
jprt.make.rule.test.targets.embedded = \
${jprt.make.rule.test.targets.standard.client}
jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
jprt.make.rule.test.targets.jdk7u8=${jprt.make.rule.test.targets.jdk7}
jprt.make.rule.test.targets.jdk9=${jprt.make.rule.test.targets.standard}
jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
# 7155453: Work-around to prevent popups on OSX from blocking test completion

@@ -3593,6 +3593,11 @@ void os::Aix::check_signal_handler(int sig) {
tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
// No need to check this sig any longer
sigaddset(&check_signal_done, sig);
// Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
exception_name(sig, buf, O_BUFLEN));
}
} else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));

@@ -3374,6 +3374,11 @@ void os::Bsd::check_signal_handler(int sig) {
tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
// No need to check this sig any longer
sigaddset(&check_signal_done, sig);
// Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
exception_name(sig, buf, O_BUFLEN));
}
} else if(os::Bsd::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Bsd::get_our_sigflags(sig)) {
tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
tty->print("expected:" PTR32_FORMAT, os::Bsd::get_our_sigflags(sig));

@@ -4560,6 +4560,11 @@ void os::Linux::check_signal_handler(int sig) {
tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
// No need to check this sig any longer
sigaddset(&check_signal_done, sig);
// Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
exception_name(sig, buf, O_BUFLEN));
}
} else if(os::Linux::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Linux::get_our_sigflags(sig)) {
tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
tty->print("expected:" PTR32_FORMAT, os::Linux::get_our_sigflags(sig));

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,6 @@ void OSThread::pd_initialize() {
_thread_id = 0;
sigemptyset(&_caller_sigmask);
_saved_interrupt_thread_state = _thread_new;
_vm_created_thread = false;
}

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -82,20 +82,6 @@
void set_ucontext(ucontext_t* ptr) { _ucontext = ptr; }
static void SR_handler(Thread* thread, ucontext_t* uc);
// ***************************************************************
// java.lang.Thread.interrupt state.
// ***************************************************************
private:
JavaThreadState _saved_interrupt_thread_state; // the thread state before a system call -- restored afterward
public:
JavaThreadState saved_interrupt_thread_state() { return _saved_interrupt_thread_state; }
void set_saved_interrupt_thread_state(JavaThreadState state) { _saved_interrupt_thread_state = state; }
static void handle_spinlock_contention(int tries); // Used for thread local eden locking
// ***************************************************************

@@ -311,33 +311,6 @@ struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
return localtime_r(clock, res);
}
// interruptible infrastructure
// setup_interruptible saves the thread state before going into an
// interruptible system call.
// The saved state is used to restore the thread to
// its former state whether or not an interrupt is received.
// Used by classloader os::read
// os::restartable_read calls skip this layer and stay in _thread_in_native
void os::Solaris::setup_interruptible(JavaThread* thread) {
JavaThreadState thread_state = thread->thread_state();
assert(thread_state != _thread_blocked, "Coming from the wrong thread");
assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
OSThread* osthread = thread->osthread();
osthread->set_saved_interrupt_thread_state(thread_state);
thread->frame_anchor()->make_walkable(thread);
ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
}
JavaThread* os::Solaris::setup_interruptible() {
JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
setup_interruptible(thread);
return thread;
}
void os::Solaris::try_enable_extended_io() {
typedef int (*enable_extended_FILE_stdio_t)(int, int);
@@ -353,41 +326,6 @@ void os::Solaris::try_enable_extended_io() {
}
}
#ifdef ASSERT
JavaThread* os::Solaris::setup_interruptible_native() {
JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
JavaThreadState thread_state = thread->thread_state();
assert(thread_state == _thread_in_native, "Assumed thread_in_native");
return thread;
}
void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
JavaThreadState thread_state = thread->thread_state();
assert(thread_state == _thread_in_native, "Assumed thread_in_native");
}
#endif
// cleanup_interruptible reverses the effects of setup_interruptible
// setup_interruptible_already_blocked() does not need any cleanup.
void os::Solaris::cleanup_interruptible(JavaThread* thread) {
OSThread* osthread = thread->osthread();
ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
}
// I/O interruption related counters called in _INTERRUPTIBLE
void os::Solaris::bump_interrupted_before_count() {
RuntimeService::record_interrupted_before_count();
}
void os::Solaris::bump_interrupted_during_count() {
RuntimeService::record_interrupted_during_count();
}
static int _processors_online = 0;
jint os::Solaris::_os_thread_limit = 0;
@@ -3366,11 +3304,20 @@ bool os::can_execute_large_page_memory() {
// Read calls from inside the vm need to perform state transitions
size_t os::read(int fd, void *buf, unsigned int nBytes) {
INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
size_t res;
JavaThread* thread = (JavaThread*)Thread::current();
assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
ThreadBlockInVM tbiv(thread);
RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
return res;
}
size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
size_t res;
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
return res;
}
void os::naked_short_sleep(jlong ms) {
@@ -4471,6 +4418,11 @@ void os::Solaris::check_signal_handler(int sig) {
tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
// No need to check this sig any longer
sigaddset(&check_signal_done, sig);
// Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
exception_name(sig, buf, O_BUFLEN));
}
} else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
@@ -5305,6 +5257,8 @@ int os::fsync(int fd) {
}
int os::available(int fd, jlong *bytes) {
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
jlong cur, end;
int mode;
struct stat64 buf64;
@@ -5312,14 +5266,9 @@ int os::available(int fd, jlong *bytes) {
if (::fstat64(fd, &buf64) >= 0) {
mode = buf64.st_mode;
if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
/*
* XXX: is the following call interruptible? If so, this might
* need to go through the INTERRUPT_IO() wrapper as for other
* blocking, interruptible calls in this file.
*/
int n,ioctl_return;
INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted);
RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
if (ioctl_return>= 0) {
*bytes = n;
return 1;
@@ -6250,7 +6199,11 @@ bool os::is_headless_jre() {
}
size_t os::write(int fd, const void *buf, unsigned int nBytes) {
INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
size_t res;
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
return res;
}
int os::close(int fd) {
@@ -6262,11 +6215,15 @@ int os::socket_close(int fd) {
}
int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
}
int os::send(int fd, char* buf, size_t nBytes, uint flags) {
INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
}
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
@@ -6287,11 +6244,14 @@ int os::timeout(int fd, long timeout) {
pfd.fd = fd;
pfd.events = POLLIN;
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
gettimeofday(&t, &aNull);
prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
for(;;) {
INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
res = ::poll(&pfd, 1, timeout);
if(res == OS_ERR && errno == EINTR) {
if(timeout != -1) {
gettimeofday(&t, &aNull);
@@ -6307,17 +6267,30 @@ int os::timeout(int fd, long timeout) {
int os::connect(int fd, struct sockaddr *him, socklen_t len) {
int _result;
INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\
os::Solaris::clear_interrupted);
_result = ::connect(fd, him, len);
// Depending on when thread interruption is reset, _result could be
// one of two values when errno == EINTR
if (((_result == OS_INTRPT) || (_result == OS_ERR))
&& (errno == EINTR)) {
// On Solaris, when a connect() call is interrupted, the connection
// can be established asynchronously (see 6343810). Subsequent calls
// to connect() must check the errno value which has the semantic
// described below (copied from the connect() man page). Handling
// of asynchronously established connections is required for both
// blocking and non-blocking sockets.
// EINTR The connection attempt was interrupted
// before any data arrived by the delivery of
// a signal. The connection, however, will be
// established asynchronously.
//
// EINPROGRESS The socket is non-blocking, and the connec-
// tion cannot be completed immediately.
//
// EALREADY The socket is non-blocking, and a previous
// connection attempt has not yet been com-
// pleted.
//
// EISCONN The socket is already connected.
if (_result == OS_ERR && errno == EINTR) {
/* restarting a connect() changes its errno semantics */
INTERRUPTIBLE(::connect(fd, him, len), _result,\
os::Solaris::clear_interrupted);
RESTARTABLE(::connect(fd, him, len), _result);
/* undo these changes */
if (_result == OS_ERR) {
if (errno == EALREADY) {
@@ -6335,20 +6308,23 @@ int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
if (fd < 0) {
return OS_ERR;
}
INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\
os::Solaris::clear_interrupted);
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
RESTARTABLE_RETURN_INT((int)::accept(fd, him, len));
}
int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
sockaddr* from, socklen_t* fromlen) {
INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\
os::Solaris::clear_interrupted);
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
}
int os::sendto(int fd, char* buf, size_t len, uint flags,
struct sockaddr* to, socklen_t tolen) {
INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\
os::Solaris::clear_interrupted);
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
}
int os::socket_available(int fd, jint *pbytes) {
@@ -6363,8 +6339,9 @@ int os::socket_available(int fd, jint *pbytes) {
}
int os::bind(int fd, struct sockaddr* him, socklen_t len) {
INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
os::Solaris::clear_interrupted);
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
return ::bind(fd, him, len);
}
// Get the default path to the core file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -311,24 +311,6 @@ class Solaris {
outdata, validity) : -1;
}
enum {
clear_interrupted = true
};
static void setup_interruptible(JavaThread* thread);
static void setup_interruptible_already_blocked(JavaThread* thread);
static JavaThread* setup_interruptible();
static void cleanup_interruptible(JavaThread* thread);
// perf counter incrementers used by _INTERRUPTIBLE
static void bump_interrupted_before_count();
static void bump_interrupted_during_count();
#ifdef ASSERT
static JavaThread* setup_interruptible_native();
static void cleanup_interruptible_native(JavaThread* thread);
#endif
static sigset_t* unblocked_signals();
static sigset_t* vm_signals();
static sigset_t* allowdebug_blocked_signals();

@@ -111,104 +111,7 @@ inline int os::closedir(DIR *dirp) {
//////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// macros for interruptible io and system calls and system call restarting
#define _INTERRUPTIBLE(_setup, _cmd, _result, _thread, _clear, _before, _after, _int_enable) \
do { \
_setup; \
_before; \
OSThread* _osthread = _thread->osthread(); \
if (_int_enable && _thread->has_last_Java_frame()) { \
/* this is java interruptible io stuff */ \
if (os::is_interrupted(_thread, _clear)) { \
os::Solaris::bump_interrupted_before_count(); \
_result = OS_INTRPT; \
} else { \
/* _cmd always expands to an assignment to _result */ \
if ((_cmd) < 0 && errno == EINTR \
&& os::is_interrupted(_thread, _clear)) { \
os::Solaris::bump_interrupted_during_count(); \
_result = OS_INTRPT; \
} \
} \
} else { \
/* this is normal blocking io stuff */ \
_cmd; \
} \
_after; \
} while(false)
// Interruptible io support + restarting of interrupted system calls
#ifndef ASSERT
#define INTERRUPTIBLE(_cmd, _result, _clear) do { \
_INTERRUPTIBLE( JavaThread* _thread = (JavaThread*)ThreadLocalStorage::thread(),_result = _cmd, _result, _thread, _clear, , , UseVMInterruptibleIO); \
} while((_result == OS_ERR) && (errno == EINTR))
#else
// This adds an assertion that it is only called from thread_in_native
// The call overhead is skipped for performance in product mode
#define INTERRUPTIBLE(_cmd, _result, _clear) do { \
_INTERRUPTIBLE(JavaThread* _thread = os::Solaris::setup_interruptible_native(), _result = _cmd, _result, _thread, _clear, , os::Solaris::cleanup_interruptible_native(_thread), UseVMInterruptibleIO ); \
} while((_result == OS_ERR) && (errno == EINTR))
#endif
// Used for calls from _thread_in_vm, not from _thread_in_native
#define INTERRUPTIBLE_VM(_cmd, _result, _clear) do { \
_INTERRUPTIBLE(JavaThread* _thread = os::Solaris::setup_interruptible(), _result = _cmd, _result, _thread, _clear, , os::Solaris::cleanup_interruptible(_thread), UseVMInterruptibleIO ); \
} while((_result == OS_ERR) && (errno == EINTR))
/* Use NORESTART when the system call cannot return EINTR, when something other
than a system call is being invoked, or when the caller must do EINTR
handling. */
#ifndef ASSERT
#define INTERRUPTIBLE_NORESTART(_cmd, _result, _clear) \
_INTERRUPTIBLE( JavaThread* _thread = (JavaThread*)ThreadLocalStorage::thread(),_result = _cmd, _result, _thread, _clear, , , UseVMInterruptibleIO)
#else
// This adds an assertion that it is only called from thread_in_native
// The call overhead is skipped for performance in product mode
#define INTERRUPTIBLE_NORESTART(_cmd, _result, _clear) \
_INTERRUPTIBLE(JavaThread* _thread = os::Solaris::setup_interruptible_native(), _result = _cmd, _result, _thread, _clear, , os::Solaris::cleanup_interruptible_native(_thread), UseVMInterruptibleIO )
#endif
// Don't attend to UseVMInterruptibleIO. Always allow interruption.
// Also assumes that it is called from the _thread_blocked state.
// Used by os_sleep().
#define INTERRUPTIBLE_NORESTART_VM_ALWAYS(_cmd, _result, _thread, _clear) \
_INTERRUPTIBLE(os::Solaris::setup_interruptible_already_blocked(_thread), _result = _cmd, _result, _thread, _clear, , , true )
#define INTERRUPTIBLE_RETURN_INT(_cmd, _clear) do { \
int _result; \
do { \
INTERRUPTIBLE(_cmd, _result, _clear); \
} while((_result == OS_ERR) && (errno == EINTR)); \
return _result; \
} while(false)
#define INTERRUPTIBLE_RETURN_INT_VM(_cmd, _clear) do { \
int _result; \
do { \
INTERRUPTIBLE_VM(_cmd, _result, _clear); \
} while((_result == OS_ERR) && (errno == EINTR)); \
return _result; \
} while(false)
#define INTERRUPTIBLE_RETURN_INT_NORESTART(_cmd, _clear) do { \
int _result; \
INTERRUPTIBLE_NORESTART(_cmd, _result, _clear); \
return _result; \
} while(false)
/* Use the RESTARTABLE macros when interruptible io is not needed */
// macros for restartable system calls
#define RESTARTABLE(_cmd, _result) do { \
do { \
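The hunk is cut off inside the one macro family this change keeps. For context, the restartable-call macros used throughout the new code above (RESTARTABLE, RESTARTABLE_RETURN_INT) expand to a plain retry-on-EINTR loop; the sketch below reflects the usual HotSpot definitions, so treat the exact shape as approximate:
#define RESTARTABLE(_cmd, _result) do { \
    do { \
      _result = _cmd; \
    } while((_result == OS_ERR) && (errno == EINTR)); \
} while(false)

#define RESTARTABLE_RETURN_INT(_cmd) do { \
    int _result; \
    RESTARTABLE(_cmd, _result); \
    return _result; \
} while(false)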

@@ -42,7 +42,6 @@ define_pd_global(intx, VMThreadStackSize, 512);
#endif // AMD64
define_pd_global(intx, CompilerThreadStackSize, 0);
define_pd_global(uintx, SurvivorRatio, 8);
define_pd_global(uintx, JVMInvokeMethodSlack, 8192);

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,10 +61,6 @@ class java_lang_String : AllStatic {
static Handle basic_create(int length, TRAPS);
static void set_value( oop string, typeArrayOop buffer) {
assert(initialized, "Must be initialized");
string->obj_field_put(value_offset, (oop)buffer);
}
static void set_offset(oop string, int offset) {
assert(initialized, "Must be initialized");
if (offset_offset > 0) {
@@ -122,12 +118,26 @@ class java_lang_String : AllStatic {
return hash_offset;
}
static void set_value(oop string, typeArrayOop buffer) {
assert(initialized && (value_offset > 0), "Must be initialized");
string->obj_field_put(value_offset, (oop)buffer);
}
static void set_hash(oop string, unsigned int hash) {
assert(initialized && (hash_offset > 0), "Must be initialized");
string->int_field_put(hash_offset, hash);
}
// Accessors
static typeArrayOop value(oop java_string) {
assert(initialized && (value_offset > 0), "Must be initialized");
assert(is_instance(java_string), "must be java_string");
return (typeArrayOop) java_string->obj_field(value_offset);
}
static unsigned int hash(oop java_string) {
assert(initialized && (hash_offset > 0), "Must be initialized");
assert(is_instance(java_string), "must be java_string");
return java_string->int_field(hash_offset);
}
static int offset(oop java_string) {
assert(initialized, "Must be initialized");
assert(is_instance(java_string), "must be java_string");

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,9 @@
#include "oops/oop.inline2.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1StringDedup.hpp"
#endif
// --------------------------------------------------------------------------
@@ -728,6 +731,15 @@ oop StringTable::intern(Handle string_or_null, jchar* name,
string = java_lang_String::create_from_unicode(name, len, CHECK_NULL);
}
#if INCLUDE_ALL_GCS
if (G1StringDedup::is_enabled()) {
// Deduplicate the string before it is interned. Note that we should never
// deduplicate a string after it has been interned. Doing so will counteract
// compiler optimizations done on e.g. interned string literals.
G1StringDedup::deduplicate(string());
}
#endif
// Grab the StringTable_lock before getting the_table() because it could
// change at safepoint.
MutexLocker ml(StringTable_lock, THREAD);

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -497,6 +497,7 @@
template(int_StringBuffer_signature, "(I)Ljava/lang/StringBuffer;") \
template(char_StringBuffer_signature, "(C)Ljava/lang/StringBuffer;") \
template(int_String_signature, "(I)Ljava/lang/String;") \
template(codesource_permissioncollection_signature, "(Ljava/security/CodeSource;Ljava/security/PermissionCollection;)V") \
/* signature symbols needed by intrinsics */ \
VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, template, VM_ALIAS_IGNORE) \
\

@@ -2496,7 +2496,8 @@ void CMSCollector::save_heap_summary() {
}
void CMSCollector::report_heap_summary(GCWhen::Type when) {
_gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_metaspace_summary);
_gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
_gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
}
void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {

@@ -1809,8 +1809,8 @@ class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
uint _regions_claimed;
size_t _freed_bytes;
FreeRegionList* _local_cleanup_list;
OldRegionSet* _old_proxy_set;
HumongousRegionSet* _humongous_proxy_set;
HeapRegionSetCount _old_regions_removed;
HeapRegionSetCount _humongous_regions_removed;
HRRSCleanupTask* _hrrs_cleanup_task;
double _claimed_region_time;
double _max_region_time;
@@ -1819,19 +1819,19 @@ public:
G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
int worker_num,
FreeRegionList* local_cleanup_list,
OldRegionSet* old_proxy_set,
HumongousRegionSet* humongous_proxy_set,
HRRSCleanupTask* hrrs_cleanup_task) :
_g1(g1), _worker_num(worker_num),
_max_live_bytes(0), _regions_claimed(0),
_freed_bytes(0),
_claimed_region_time(0.0), _max_region_time(0.0),
_local_cleanup_list(local_cleanup_list),
_old_proxy_set(old_proxy_set),
_humongous_proxy_set(humongous_proxy_set),
_old_regions_removed(),
_humongous_regions_removed(),
_hrrs_cleanup_task(hrrs_cleanup_task) { }
size_t freed_bytes() { return _freed_bytes; }
const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
bool doHeapRegion(HeapRegion *hr) {
if (hr->continuesHumongous()) {
@@ -1844,13 +1844,22 @@ public:
_regions_claimed++;
hr->note_end_of_marking();
_max_live_bytes += hr->max_live_bytes();
_g1->free_region_if_empty(hr,
&_freed_bytes,
_local_cleanup_list,
_old_proxy_set,
_humongous_proxy_set,
_hrrs_cleanup_task,
true /* par */);
if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
_freed_bytes += hr->used();
hr->set_containing_set(NULL);
if (hr->isHumongous()) {
assert(hr->startsHumongous(), "we should only see starts humongous");
_humongous_regions_removed.increment(1u, hr->capacity());
_g1->free_humongous_region(hr, _local_cleanup_list, true);
} else {
_old_regions_removed.increment(1u, hr->capacity());
_g1->free_region(hr, _local_cleanup_list, true);
}
} else {
hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
}
double region_time = (os::elapsedTime() - start);
_claimed_region_time += region_time;
if (region_time > _max_region_time) {
@@ -1883,12 +1892,8 @@ public:
void work(uint worker_id) {
double start = os::elapsedTime();
FreeRegionList local_cleanup_list("Local Cleanup List");
OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set");
HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
HRRSCleanupTask hrrs_cleanup_task;
G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
&old_proxy_set,
&humongous_proxy_set,
&hrrs_cleanup_task);
if (G1CollectedHeap::use_parallel_gc_threads()) {
_g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
@@ -1900,13 +1905,10 @@ public:
assert(g1_note_end.complete(), "Shouldn't have yielded!");
// Now update the lists
_g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
NULL /* free_list */,
&old_proxy_set,
&humongous_proxy_set,
true /* par */);
_g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
{
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
_g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
_max_live_bytes += g1_note_end.max_live_bytes();
_freed_bytes += g1_note_end.freed_bytes();
@@ -1920,14 +1922,14 @@ public:
G1HRPrinter* hr_printer = _g1h->hr_printer();
if (hr_printer->is_active()) {
HeapRegionLinkedListIterator iter(&local_cleanup_list);
FreeRegionListIterator iter(&local_cleanup_list);
while (iter.more_available()) {
HeapRegion* hr = iter.get_next();
hr_printer->cleanup(hr);
}
}
_cleanup_list->add_as_tail(&local_cleanup_list);
_cleanup_list->add_ordered(&local_cleanup_list);
assert(local_cleanup_list.is_empty(), "post-condition");
HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
@@ -1971,7 +1973,6 @@ void ConcurrentMark::cleanup() {
return;
}
HRSPhaseSetter x(HRSPhaseCleanup);
g1h->verify_region_sets_optional();
if (VerifyDuringGC) {
@@ -2144,7 +2145,7 @@ void ConcurrentMark::completeCleanup() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
_cleanup_list.verify_optional();
_cleanup_list.verify_list();
FreeRegionList tmp_free_list("Tmp Free List");
if (G1ConcRegionFreeingVerbose) {
@@ -2157,9 +2158,9 @@ void ConcurrentMark::completeCleanup() {
// so it's not necessary to take any locks
while (!_cleanup_list.is_empty()) {
HeapRegion* hr = _cleanup_list.remove_head();
assert(hr != NULL, "the list was not empty");
assert(hr != NULL, "Got NULL from a non-empty list");
hr->par_clear();
tmp_free_list.add_as_tail(hr);
tmp_free_list.add_ordered(hr);
// Instead of adding one region at a time to the secondary_free_list,
// we accumulate them in the local list and move them a few at a
@@ -2179,7 +2180,7 @@ void ConcurrentMark::completeCleanup() {
{
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
g1h->secondary_free_list_add_as_tail(&tmp_free_list);
g1h->secondary_free_list_add(&tmp_free_list);
SecondaryFreeList_lock->notify_all();
}
@@ -2528,6 +2529,11 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
assert(!rp->discovery_enabled(), "Post condition");
}
if (has_overflown()) {
// We can not trust g1_is_alive if the marking stack overflowed
return;
}
g1h->unlink_string_and_symbol_table(&g1_is_alive,
/* process_strings */ false, // currently strings are always roots
/* process_symbols */ true);

@@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#include "gc_implementation/g1/heapRegionSets.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "utilities/taskqueue.hpp"
class G1CollectedHeap;

@@ -24,6 +24,14 @@
#include "precompiled.hpp"
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "memory/padded.inline.hpp"
// Allocate a new array, generic version.
address G1BiasedMappedArrayBase::create_new_base_array(size_t length, size_t elem_size) {
assert(length > 0, "just checking");
assert(elem_size > 0, "just checking");
return PaddedPrimitiveArray<u_char, mtGC>::create_unfreeable(length * elem_size);
}
#ifndef PRODUCT
void G1BiasedMappedArrayBase::verify_index(idx_t index) const {

@@ -25,8 +25,8 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
#include "memory/allocation.inline.hpp"
// Implements the common base functionality for arrays that contain provisions
// for accessing its elements using a biased index.
@@ -48,11 +48,7 @@ protected:
_bias(0), _shift_by(0) { }
// Allocate a new array, generic version.
static address create_new_base_array(size_t length, size_t elem_size) {
assert(length > 0, "just checking");
assert(elem_size > 0, "just checking");
return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
}
static address create_new_base_array(size_t length, size_t elem_size);
// Initialize the members of this class. The biased start address of this array
// is the bias (in elements) multiplied by the element size.
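The biasing idea is the point of the class: the stored base pointer is pre-offset so elements can be indexed directly with shifted addresses from the mapped range. A self-contained illustration with hypothetical names (the real class adds bounds and alignment checking):
// Hypothetical sketch of address-biased indexing; core arithmetic only.
#include <cstddef>
#include <cstdint>

template <class T>
class BiasedArray {
  T*     _biased_base;  // base minus (range bottom >> _shift_by)
  size_t _shift_by;     // log2 of the address span covered per element
 public:
  void initialize(T* base, const void* bottom, size_t shift_by) {
    _shift_by = shift_by;
    _biased_base = base - (reinterpret_cast<uintptr_t>(bottom) >> shift_by);
  }
  // Index directly by an address inside the range: no per-access
  // subtraction of the range's start address is needed.
  T& at(const void* addr) {
    return _biased_base[reinterpret_cast<uintptr_t>(addr) >> _shift_by];
  }
};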

@@ -0,0 +1,317 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
#include "memory/iterator.hpp"
G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL) {
_top = bottom();
}
void G1CodeRootChunk::reset() {
_next = _prev = NULL;
_top = bottom();
}
void G1CodeRootChunk::nmethods_do(CodeBlobClosure* cl) {
nmethod** cur = bottom();
while (cur != _top) {
cl->do_code_blob(*cur);
cur++;
}
}
FreeList<G1CodeRootChunk> G1CodeRootSet::_free_list;
size_t G1CodeRootSet::_num_chunks_handed_out = 0;
G1CodeRootChunk* G1CodeRootSet::new_chunk() {
G1CodeRootChunk* result = _free_list.get_chunk_at_head();
if (result == NULL) {
result = new G1CodeRootChunk();
}
G1CodeRootSet::_num_chunks_handed_out++;
result->reset();
return result;
}
void G1CodeRootSet::free_chunk(G1CodeRootChunk* chunk) {
_free_list.return_chunk_at_head(chunk);
G1CodeRootSet::_num_chunks_handed_out--;
}
void G1CodeRootSet::free_all_chunks(FreeList<G1CodeRootChunk>* list) {
G1CodeRootSet::_num_chunks_handed_out -= list->count();
_free_list.prepend(list);
}
void G1CodeRootSet::purge_chunks(size_t keep_ratio) {
size_t keep = G1CodeRootSet::_num_chunks_handed_out * keep_ratio / 100;
if (keep >= (size_t)_free_list.count()) {
return;
}
FreeList<G1CodeRootChunk> temp;
temp.initialize();
temp.set_size(G1CodeRootChunk::word_size());
_free_list.getFirstNChunksFromList((size_t)_free_list.count() - keep, &temp);
G1CodeRootChunk* cur = temp.get_chunk_at_head();
while (cur != NULL) {
delete cur;
cur = temp.get_chunk_at_head();
}
}
size_t G1CodeRootSet::static_mem_size() {
return sizeof(_free_list) + sizeof(_num_chunks_handed_out);
}
size_t G1CodeRootSet::fl_mem_size() {
return _free_list.count() * _free_list.size();
}
void G1CodeRootSet::initialize() {
_free_list.initialize();
_free_list.set_size(G1CodeRootChunk::word_size());
}
G1CodeRootSet::G1CodeRootSet() : _list(), _length(0) {
_list.initialize();
_list.set_size(G1CodeRootChunk::word_size());
}
G1CodeRootSet::~G1CodeRootSet() {
clear();
}
void G1CodeRootSet::add(nmethod* method) {
if (!contains(method)) {
// Try to add the nmethod. If there is not enough space, get a new chunk.
if (_list.head() == NULL || _list.head()->is_full()) {
G1CodeRootChunk* cur = new_chunk();
_list.return_chunk_at_head(cur);
}
bool result = _list.head()->add(method);
guarantee(result, err_msg("Not able to add nmethod "PTR_FORMAT" to newly allocated chunk.", method));
_length++;
}
}
void G1CodeRootSet::remove(nmethod* method) {
G1CodeRootChunk* found = find(method);
if (found != NULL) {
bool result = found->remove(method);
guarantee(result, err_msg("could not find nmethod "PTR_FORMAT" during removal although we previously found it", method));
// eventually free completely emptied chunk
if (found->is_empty()) {
_list.remove_chunk(found);
free(found);
}
_length--;
}
assert(!contains(method), err_msg(PTR_FORMAT" still contains nmethod "PTR_FORMAT, this, method));
}
nmethod* G1CodeRootSet::pop() {
do {
G1CodeRootChunk* cur = _list.head();
if (cur == NULL) {
assert(_length == 0, "when there are no chunks, there should be no elements");
return NULL;
}
nmethod* result = cur->pop();
if (result != NULL) {
_length--;
return result;
} else {
free(_list.get_chunk_at_head());
}
} while (true);
}
G1CodeRootChunk* G1CodeRootSet::find(nmethod* method) {
G1CodeRootChunk* cur = _list.head();
while (cur != NULL) {
if (cur->contains(method)) {
return cur;
}
cur = (G1CodeRootChunk*)cur->next();
}
return NULL;
}
void G1CodeRootSet::free(G1CodeRootChunk* chunk) {
free_chunk(chunk);
}
bool G1CodeRootSet::contains(nmethod* method) {
return find(method) != NULL;
}
void G1CodeRootSet::clear() {
free_all_chunks(&_list);
_length = 0;
}
void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
G1CodeRootChunk* cur = _list.head();
while (cur != NULL) {
cur->nmethods_do(blk);
cur = (G1CodeRootChunk*)cur->next();
}
}
size_t G1CodeRootSet::mem_size() {
return sizeof(this) + _list.count() * _list.size();
}
#ifndef PRODUCT
void G1CodeRootSet::test() {
initialize();
assert(_free_list.count() == 0, "Free List must be empty");
assert(_num_chunks_handed_out == 0, "No elements must have been handed out yet");
// The number of chunks that we allocate for purge testing.
size_t const num_chunks = 10;
{
G1CodeRootSet set1;
assert(set1.is_empty(), "Code root set must be initially empty but is not.");
set1.add((nmethod*)1);
assert(_num_chunks_handed_out == 1,
err_msg("Must have allocated and handed out one chunk, but handed out "
SIZE_FORMAT" chunks", _num_chunks_handed_out));
assert(set1.length() == 1, err_msg("Added exactly one element, but set contains "
SIZE_FORMAT" elements", set1.length()));
// G1CodeRootChunk::word_size() is larger than G1CodeRootChunk::num_entries which
// we cannot access.
for (uint i = 0; i < G1CodeRootChunk::word_size() + 1; i++) {
set1.add((nmethod*)1);
}
assert(_num_chunks_handed_out == 1,
err_msg("Duplicate detection must have prevented allocation of further "
"chunks but contains "SIZE_FORMAT, _num_chunks_handed_out));
assert(set1.length() == 1,
err_msg("Duplicate detection should not have increased the set size but "
"is "SIZE_FORMAT, set1.length()));
size_t num_total_after_add = G1CodeRootChunk::word_size() + 1;
for (size_t i = 0; i < num_total_after_add - 1; i++) {
set1.add((nmethod*)(2 + i));
}
assert(_num_chunks_handed_out > 1,
"After adding more code roots, more than one chunks should have been handed out");
assert(set1.length() == num_total_after_add,
err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they "
"need to be in the set, but there are only "SIZE_FORMAT,
num_total_after_add, set1.length()));
size_t num_popped = 0;
while (set1.pop() != NULL) {
num_popped++;
}
assert(num_popped == num_total_after_add,
err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" "
"were added", num_popped, num_total_after_add));
assert(_num_chunks_handed_out == 0,
err_msg("After popping all elements, all chunks must have been returned "
"but are still "SIZE_FORMAT, _num_chunks_handed_out));
purge_chunks(0);
assert(_free_list.count() == 0,
err_msg("After purging everything, the free list must be empty but still "
"contains "SIZE_FORMAT" chunks", _free_list.count()));
// Add some more handed out chunks.
size_t i = 0;
while (_num_chunks_handed_out < num_chunks) {
set1.add((nmethod*)i);
i++;
}
{
// Generate chunks on the free list.
G1CodeRootSet set2;
size_t i = 0;
while (_num_chunks_handed_out < num_chunks * 2) {
set2.add((nmethod*)i);
i++;
}
// Exit of the scope of the set2 object will call the destructor that generates
// num_chunks elements on the free list.
}
assert(_num_chunks_handed_out == num_chunks,
err_msg("Deletion of the second set must have resulted in giving back "
"those, but there is still "SIZE_FORMAT" handed out, expecting "
SIZE_FORMAT, _num_chunks_handed_out, num_chunks));
assert((size_t)_free_list.count() == num_chunks,
err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list "
"but there are only "SIZE_FORMAT, num_chunks, _free_list.count()));
size_t const test_percentage = 50;
purge_chunks(test_percentage);
assert(_num_chunks_handed_out == num_chunks,
err_msg("Purging must not hand out chunks but there are "SIZE_FORMAT,
_num_chunks_handed_out));
assert((size_t)_free_list.count() == (ssize_t)(num_chunks * test_percentage / 100),
err_msg("Must have purged "SIZE_FORMAT" percent of "SIZE_FORMAT" chunks"
"but there are "SSIZE_FORMAT, test_percentage, num_chunks,
_free_list.count()));
// Purge the remainder of the chunks on the free list.
purge_chunks(0);
assert(_free_list.count() == 0, "Free List must be empty");
assert(_num_chunks_handed_out == num_chunks,
err_msg("Expected to be "SIZE_FORMAT" chunks handed out from the first set "
"but there are "SIZE_FORMAT, num_chunks, _num_chunks_handed_out));
// Exit of the scope of the set1 object will call the destructor that generates
// num_chunks additional elements on the free list.
}
assert(_num_chunks_handed_out == 0,
err_msg("Deletion of the only set must have resulted in no chunks handed "
"out, but there is still "SIZE_FORMAT" handed out", _num_chunks_handed_out));
assert((size_t)_free_list.count() == num_chunks,
err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list "
"but there are only "SSIZE_FORMAT, num_chunks, _free_list.count()));
// Restore initial state.
purge_chunks(0);
assert(_free_list.count() == 0, "Free List must be empty");
assert(_num_chunks_handed_out == 0, "No elements must have been handed out yet");
}
void TestCodeCacheRemSet_test() {
G1CodeRootSet::test();
}
#endif

@@ -0,0 +1,188 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
#include "memory/allocation.hpp"
#include "memory/freeList.hpp"
#include "runtime/globals.hpp"
class CodeBlobClosure;
class G1CodeRootChunk : public CHeapObj<mtGC> {
private:
static const int NUM_ENTRIES = 32;
public:
G1CodeRootChunk* _next;
G1CodeRootChunk* _prev;
nmethod** _top;
nmethod* _data[NUM_ENTRIES];
nmethod** bottom() const {
return (nmethod**) &(_data[0]);
}
nmethod** end() const {
return (nmethod**) &(_data[NUM_ENTRIES]);
}
public:
G1CodeRootChunk();
~G1CodeRootChunk() {}
static size_t word_size() { return (size_t)(align_size_up_(sizeof(G1CodeRootChunk), HeapWordSize) / HeapWordSize); }
// FreeList "interface" methods
G1CodeRootChunk* next() const { return _next; }
G1CodeRootChunk* prev() const { return _prev; }
void set_next(G1CodeRootChunk* v) { _next = v; assert(v != this, "Boom");}
void set_prev(G1CodeRootChunk* v) { _prev = v; assert(v != this, "Boom");}
void clear_next() { set_next(NULL); }
void clear_prev() { set_prev(NULL); }
size_t size() const { return word_size(); }
void link_next(G1CodeRootChunk* ptr) { set_next(ptr); }
void link_prev(G1CodeRootChunk* ptr) { set_prev(ptr); }
void link_after(G1CodeRootChunk* ptr) {
link_next(ptr);
if (ptr != NULL) ptr->link_prev((G1CodeRootChunk*)this);
}
bool is_free() { return true; }
// New G1CodeRootChunk routines
void reset();
bool is_empty() const {
return _top == bottom();
}
bool is_full() const {
return _top == (nmethod**)end();
}
bool contains(nmethod* method) {
nmethod** cur = bottom();
while (cur != _top) {
if (*cur == method) return true;
cur++;
}
return false;
}
bool add(nmethod* method) {
if (is_full()) return false;
*_top = method;
_top++;
return true;
}
bool remove(nmethod* method) {
nmethod** cur = bottom();
while (cur != _top) {
if (*cur == method) {
memmove(cur, cur + 1, (_top - (cur + 1)) * sizeof(nmethod**));
_top--;
return true;
}
cur++;
}
return false;
}
void nmethods_do(CodeBlobClosure* blk);
nmethod* pop() {
if (is_empty()) {
return NULL;
}
_top--;
return *_top;
}
};
// Implements storage for a set of code roots.
// All methods that modify the set are not thread-safe except if otherwise noted.
class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
private:
// Global free chunk list management
static FreeList<G1CodeRootChunk> _free_list;
// Total number of chunks handed out
static size_t _num_chunks_handed_out;
static G1CodeRootChunk* new_chunk();
static void free_chunk(G1CodeRootChunk* chunk);
// Free all elements of the given list.
static void free_all_chunks(FreeList<G1CodeRootChunk>* list);
// Return the chunk that contains the given nmethod, NULL otherwise.
// Scans the list of chunks backwards, as this method is used to add new
// entries, which are typically added in bulk for a single nmethod.
G1CodeRootChunk* find(nmethod* method);
void free(G1CodeRootChunk* chunk);
size_t _length;
FreeList<G1CodeRootChunk> _list;
public:
G1CodeRootSet();
~G1CodeRootSet();
static void initialize();
static void purge_chunks(size_t keep_ratio);
static size_t static_mem_size();
static size_t fl_mem_size();
// Search for the code blob from the recently allocated ones to find duplicates more quickly, as this
// method is likely to be repeatedly called with the same nmethod.
void add(nmethod* method);
void remove(nmethod* method);
nmethod* pop();
bool contains(nmethod* method);
void clear();
void nmethods_do(CodeBlobClosure* blk) const;
bool is_empty() { return length() == 0; }
// Length in elements
size_t length() const { return _length; }
// Memory size in bytes taken by this set.
size_t mem_size();
static void test() PRODUCT_RETURN;
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
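A brief usage sketch derived only from the interface above (hypothetical VM-internal call sequence, not code from this commit):
// Hypothetical call sequence; G1CodeRootSet::initialize() must have run once.
void remember_code_root(G1CodeRootSet* roots, nmethod* nm) {
  roots->add(nm);        // duplicates are detected and ignored
  if (roots->contains(nm)) {
    roots->remove(nm);   // a chunk emptied by removal returns to the free list
  }
  // Trim the shared free list to at most 50% of the handed-out chunk count.
  G1CodeRootSet::purge_chunks(50);
}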

@@ -39,6 +39,7 @@
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
@@ -169,14 +170,6 @@ public:
int calls() { return _calls; }
};
class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
public:
bool do_card_ptr(jbyte* card_ptr, int worker_i) {
*card_ptr = CardTableModRefBS::dirty_card_val();
return true;
}
};
YoungList::YoungList(G1CollectedHeap* g1h) :
_g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
_survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
@ -524,7 +517,7 @@ G1CollectedHeap* G1CollectedHeap::_g1h;
// Private methods.
HeapRegion*
G1CollectedHeap::new_region_try_secondary_free_list() {
G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
while (!_secondary_free_list.is_empty() || free_regions_coming()) {
if (!_secondary_free_list.is_empty()) {
@ -540,7 +533,7 @@ G1CollectedHeap::new_region_try_secondary_free_list() {
assert(!_free_list.is_empty(), "if the secondary_free_list was not "
"empty we should have moved at least one entry to the free_list");
HeapRegion* res = _free_list.remove_head();
HeapRegion* res = _free_list.remove_region(is_old);
if (G1ConcRegionFreeingVerbose) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
"allocated "HR_FORMAT" from secondary_free_list",
@ -562,7 +555,7 @@ G1CollectedHeap::new_region_try_secondary_free_list() {
return NULL;
}
HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
"the only time we use this to allocate a humongous region is "
"when we are allocating a single humongous region");
@ -574,19 +567,21 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
"forced to look at the secondary_free_list");
}
res = new_region_try_secondary_free_list();
res = new_region_try_secondary_free_list(is_old);
if (res != NULL) {
return res;
}
}
}
res = _free_list.remove_head_or_null();
res = _free_list.remove_region(is_old);
if (res == NULL) {
if (G1ConcRegionFreeingVerbose) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
"res == NULL, trying the secondary_free_list");
}
res = new_region_try_secondary_free_list();
res = new_region_try_secondary_free_list(is_old);
}
if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
// Currently, only attempts to allocate GC alloc regions set
@ -603,12 +598,9 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
if (expand(word_size * HeapWordSize)) {
// Given that expand() succeeded in expanding the heap, and we
// always expand the heap by an amount aligned to the heap
// region size, the free list should in theory not be empty. So
// it would probably be OK to use remove_head(). But the extra
// check for NULL is unlikely to be a performance issue here (we
// just expanded the heap!) so let's just be conservative and
// use remove_head_or_null().
res = _free_list.remove_head_or_null();
// region size, the free list should in theory not be empty.
// In either case remove_region() will check for NULL.
res = _free_list.remove_region(is_old);
} else {
_expand_heap_after_alloc_failure = false;
}
@ -626,7 +618,7 @@ uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
// Only one region to allocate, no need to go through the slower
// path. The caller will attempt the expansion if this fails, so
// let's not try to expand here too.
HeapRegion* hr = new_region(word_size, false /* do_expand */);
HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
if (hr != NULL) {
first = hr->hrs_index();
} else {
@ -1298,7 +1290,6 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
HRSPhaseSetter x(HRSPhaseFullGC);
verify_region_sets_optional();
const bool do_clear_all_soft_refs = clear_all_soft_refs ||
@ -1928,10 +1919,10 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_g1mm(NULL),
_refine_cte_cl(NULL),
_full_collection(false),
_free_list("Master Free List"),
_secondary_free_list("Secondary Free List"),
_old_set("Old Set"),
_humongous_set("Master Humongous Set"),
_free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
_secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
_old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
_humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
_free_regions_coming(false),
_young_list(new YoungList(this)),
_gc_time_stamp(0),
@ -1963,7 +1954,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
int n_queues = MAX2((int)ParallelGCThreads, 1);
_task_queues = new RefToScanQueueSet(n_queues);
int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
assert(n_rem_sets > 0, "Invariant.");
_worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
@ -2079,7 +2070,7 @@ jint G1CollectedHeap::initialize() {
guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
"too many cards per region");
HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
_bot_shared = new G1BlockOffsetSharedArray(_reserved,
heap_word_size(init_byte_size));
@ -2182,6 +2173,8 @@ jint G1CollectedHeap::initialize() {
// values in the heap have been properly initialized.
_g1mm = new G1MonitoringSupport(this);
G1StringDedup::initialize();
return JNI_OK;
}
@ -2369,8 +2362,12 @@ public:
};
size_t G1CollectedHeap::recalculate_used() const {
double recalculate_used_start = os::elapsedTime();
SumUsedClosure blk;
heap_region_iterate(&blk);
g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
return blk.result();
}
@ -3462,6 +3459,11 @@ void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
if (!silent) gclog_or_tty->print("RemSet ");
rem_set()->verify();
if (G1StringDedup::is_enabled()) {
if (!silent) gclog_or_tty->print("StrDedup ");
G1StringDedup::verify();
}
if (failures) {
gclog_or_tty->print_cr("Heap:");
// It helps to have the per-region information in the output to
@ -3479,8 +3481,13 @@ void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
}
guarantee(!failures, "there should not have been any failures");
} else {
if (!silent)
gclog_or_tty->print("(SKIPPING roots, heapRegionSets, heapRegions, remset) ");
if (!silent) {
gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet");
if (G1StringDedup::is_enabled()) {
gclog_or_tty->print(", StrDedup");
}
gclog_or_tty->print(") ");
}
}
}
@ -3573,6 +3580,9 @@ void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
st->cr();
_cm->print_worker_threads_on(st);
_cg1r->print_worker_threads_on(st);
if (G1StringDedup::is_enabled()) {
G1StringDedup::print_worker_threads_on(st);
}
}
void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
@ -3581,6 +3591,9 @@ void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
}
tc->do_thread(_cmThread);
_cg1r->threads_do(tc);
if (G1StringDedup::is_enabled()) {
G1StringDedup::threads_do(tc);
}
}
void G1CollectedHeap::print_tracing_info() const {
@ -3887,7 +3900,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
print_heap_before_gc();
trace_heap_before_gc(_gc_tracer_stw);
HRSPhaseSetter x(HRSPhaseEvacuation);
verify_region_sets_optional();
verify_dirty_young_regions();
@ -4386,6 +4398,8 @@ void G1CollectedHeap::finalize_for_evac_failure() {
void G1CollectedHeap::remove_self_forwarding_pointers() {
assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
double remove_self_forwards_start = os::elapsedTime();
G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
if (G1CollectedHeap::use_parallel_gc_threads()) {
@ -4413,6 +4427,8 @@ void G1CollectedHeap::remove_self_forwarding_pointers() {
}
_objs_with_preserved_marks.clear(true);
_preserved_marks_of_objs.clear(true);
g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
}
void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
@ -4634,9 +4650,7 @@ bool G1ParScanThreadState::verify_task(StarTask ref) const {
#endif // ASSERT
void G1ParScanThreadState::trim_queue() {
assert(_evac_cl != NULL, "not set");
assert(_evac_failure_cl != NULL, "not set");
assert(_partial_scan_cl != NULL, "not set");
StarTask ref;
do {
@ -4727,6 +4741,12 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
oop forward_ptr = old->forward_to_atomic(obj);
if (forward_ptr == NULL) {
Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
// alloc_purpose is just a hint to allocate() above; recheck the type of region
// we actually allocated from and update alloc_purpose accordingly.
HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
if (g1p->track_object_age(alloc_purpose)) {
// We could simply do obj->incr_age(). However, this causes a
// performance issue. obj->incr_age() will first check whether
@ -4754,6 +4774,13 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
obj->set_mark(m);
}
if (G1StringDedup::is_enabled()) {
G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
to_region->is_young(),
queue_num(),
obj);
}
size_t* surv_young_words = surviving_young_words();
surv_young_words[young_index] += word_sz;
@ -4832,55 +4859,6 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
assert(has_partial_array_mask(p), "invariant");
oop from_obj = clear_partial_array_mask(p);
assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
assert(from_obj->is_objArray(), "must be obj array");
objArrayOop from_obj_array = objArrayOop(from_obj);
// The from-space object contains the real length.
int length = from_obj_array->length();
assert(from_obj->is_forwarded(), "must be forwarded");
oop to_obj = from_obj->forwardee();
assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
objArrayOop to_obj_array = objArrayOop(to_obj);
// We keep track of the next start index in the length field of the
// to-space object.
int next_index = to_obj_array->length();
assert(0 <= next_index && next_index < length,
err_msg("invariant, next index: %d, length: %d", next_index, length));
int start = next_index;
int end = length;
int remainder = end - start;
// We'll try not to push a range that's smaller than ParGCArrayScanChunk.
if (remainder > 2 * ParGCArrayScanChunk) {
end = start + ParGCArrayScanChunk;
to_obj_array->set_length(end);
// Push the remainder before we process the range in case another
// worker has run out of things to do and can steal it.
oop* from_obj_p = set_partial_array_mask(from_obj);
_par_scan_state->push_on_queue(from_obj_p);
} else {
assert(length == end, "sanity");
// We'll process the final range for this object. Restore the length
// so that the heap remains parsable in case of evacuation failure.
to_obj_array->set_length(end);
}
_scanner.set_region(_g1->heap_region_containing_raw(to_obj));
// Process indexes [start,end). It will also process the header
// along with the first chunk (i.e., the chunk with start == 0).
// Note that at this point the length field of to_obj_array is not
// correct given that we are using it to keep track of the next
// start index. oop_iterate_range() (thankfully!) ignores the length
// field and only relies on the start / end parameters. It does
// however return the size of the object which will be incorrect. So
// we have to ignore it even if we wanted to use it.
to_obj_array->oop_iterate_range(&_scanner, start, end);
}
class G1ParEvacuateFollowersClosure : public VoidClosure {
protected:
G1CollectedHeap* _g1h;
@ -5022,13 +5000,9 @@ public:
ReferenceProcessor* rp = _g1h->ref_processor_stw();
G1ParScanThreadState pss(_g1h, worker_id, rp);
G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, rp);
G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, rp);
pss.set_evac_closure(&scan_evac_cl);
pss.set_evac_failure_closure(&evac_failure_cl);
pss.set_partial_scan_closure(&partial_scan_cl);
G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp);
G1ParScanMetadataClosure only_scan_metadata_cl(_g1h, &pss, rp);
@ -5270,6 +5244,33 @@ void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive
g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
}
if (G1StringDedup::is_enabled()) {
G1StringDedup::unlink(is_alive);
}
}
class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
public:
bool do_card_ptr(jbyte* card_ptr, int worker_i) {
*card_ptr = CardTableModRefBS::dirty_card_val();
return true;
}
};
void G1CollectedHeap::redirty_logged_cards() {
guarantee(G1DeferredRSUpdate, "Must only be called when using deferred RS updates.");
double redirty_logged_cards_start = os::elapsedTime();
RedirtyLoggedCardTableEntryFastClosure redirty;
dirty_card_queue_set().set_closure(&redirty);
dirty_card_queue_set().apply_closure_to_all_completed_buffers();
DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
dcq.merge_bufferlists(&dirty_card_queue_set());
assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
}
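// The timing pattern used above recurs throughout this change (see also
// record_evac_fail_recalc_used_time() and record_strong_code_root_purge_time()):
// sample os::elapsedTime() in seconds around the work and record the delta in
// milliseconds. A standalone sketch, with <chrono> standing in for
// os::elapsedTime() and a printf standing in for the G1GCPhaseTimes sink.
#include <chrono>
#include <cstdio>

static double elapsed_seconds() {
  using namespace std::chrono;
  return duration<double>(steady_clock::now().time_since_epoch()).count();
}

template <typename Work>
static void timed_phase(const char* name, Work work) {
  double start = elapsed_seconds();
  work();
  double ms = (elapsed_seconds() - start) * 1000.0;
  std::printf("[%s: %.1lf ms]\n", name, ms);  // mirrors the G1GCPhaseTimes output format
}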
// Weak Reference Processing support
@ -5453,14 +5454,9 @@ public:
G1STWIsAliveClosure is_alive(_g1h);
G1ParScanThreadState pss(_g1h, worker_id, NULL);
G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);
pss.set_evac_closure(&scan_evac_cl);
pss.set_evac_failure_closure(&evac_failure_cl);
pss.set_partial_scan_closure(&partial_scan_cl);
G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL);
@ -5565,13 +5561,9 @@ public:
HandleMark hm;
G1ParScanThreadState pss(_g1h, worker_id, NULL);
G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);
pss.set_evac_closure(&scan_evac_cl);
pss.set_evac_failure_closure(&evac_failure_cl);
pss.set_partial_scan_closure(&partial_scan_cl);
assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
@ -5695,13 +5687,9 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
// We do not embed a reference processor in the copying/scanning
// closures while we're actually processing the discovered
// reference objects.
G1ParScanHeapEvacClosure scan_evac_cl(this, &pss, NULL);
G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
G1ParScanPartialArrayClosure partial_scan_cl(this, &pss, NULL);
pss.set_evac_closure(&scan_evac_cl);
pss.set_evac_failure_closure(&evac_failure_cl);
pss.set_partial_scan_closure(&partial_scan_cl);
assert(pss.refs()->is_empty(), "pre-condition");
@ -5883,6 +5871,9 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
G1STWIsAliveClosure is_alive(this);
G1KeepAliveClosure keep_alive(this);
JNIHandles::weak_oops_do(&is_alive, &keep_alive);
if (G1StringDedup::is_enabled()) {
G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
}
}
release_gc_alloc_regions(n_workers, evacuation_info);
@ -5900,6 +5891,8 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
// strong code roots for a particular heap region.
migrate_strong_code_roots();
purge_code_root_memory();
if (g1_policy()->during_initial_mark_pause()) {
// Reset the claim values set during marking the strong code roots
reset_heap_region_claim_values();
@ -5926,41 +5919,15 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
enqueue_discovered_references(n_workers);
if (G1DeferredRSUpdate) {
RedirtyLoggedCardTableEntryFastClosure redirty;
dirty_card_queue_set().set_closure(&redirty);
dirty_card_queue_set().apply_closure_to_all_completed_buffers();
DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
dcq.merge_bufferlists(&dirty_card_queue_set());
assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
redirty_logged_cards();
}
COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}
void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
size_t* pre_used,
FreeRegionList* free_list,
OldRegionSet* old_proxy_set,
HumongousRegionSet* humongous_proxy_set,
HRRSCleanupTask* hrrs_cleanup_task,
bool par) {
if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
if (hr->isHumongous()) {
assert(hr->startsHumongous(), "we should only see starts humongous");
free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
} else {
_old_set.remove_with_proxy(hr, old_proxy_set);
free_region(hr, pre_used, free_list, par);
}
} else {
hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
}
}
void G1CollectedHeap::free_region(HeapRegion* hr,
size_t* pre_used,
FreeRegionList* free_list,
bool par) {
bool par,
bool locked) {
assert(!hr->isHumongous(), "this is only for non-humongous regions");
assert(!hr->is_empty(), "the region should not be empty");
assert(free_list != NULL, "pre-condition");
@ -5971,72 +5938,58 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
if (!hr->is_young()) {
_cg1r->hot_card_cache()->reset_card_counts(hr);
}
*pre_used += hr->used();
hr->hr_clear(par, true /* clear_space */);
free_list->add_as_head(hr);
hr->hr_clear(par, true /* clear_space */, locked /* locked */);
free_list->add_ordered(hr);
}
void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
size_t* pre_used,
FreeRegionList* free_list,
HumongousRegionSet* humongous_proxy_set,
bool par) {
assert(hr->startsHumongous(), "this is only for starts humongous regions");
assert(free_list != NULL, "pre-condition");
assert(humongous_proxy_set != NULL, "pre-condition");
size_t hr_used = hr->used();
size_t hr_capacity = hr->capacity();
size_t hr_pre_used = 0;
_humongous_set.remove_with_proxy(hr, humongous_proxy_set);
// We need to read this before we make the region non-humongous,
// otherwise the information will be gone.
uint last_index = hr->last_hc_index();
hr->set_notHumongous();
free_region(hr, &hr_pre_used, free_list, par);
free_region(hr, free_list, par);
uint i = hr->hrs_index() + 1;
while (i < last_index) {
HeapRegion* curr_hr = region_at(i);
assert(curr_hr->continuesHumongous(), "invariant");
curr_hr->set_notHumongous();
free_region(curr_hr, &hr_pre_used, free_list, par);
free_region(curr_hr, free_list, par);
i += 1;
}
assert(hr_pre_used == hr_used,
err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" "
"should be the same", hr_pre_used, hr_used));
*pre_used += hr_pre_used;
}
void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
FreeRegionList* free_list,
OldRegionSet* old_proxy_set,
HumongousRegionSet* humongous_proxy_set,
bool par) {
if (pre_used > 0) {
Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
assert(_summary_bytes_used >= pre_used,
err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
"should be >= pre_used: "SIZE_FORMAT,
_summary_bytes_used, pre_used));
_summary_bytes_used -= pre_used;
void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
const HeapRegionSetCount& humongous_regions_removed) {
if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
_old_set.bulk_remove(old_regions_removed);
_humongous_set.bulk_remove(humongous_regions_removed);
}
if (free_list != NULL && !free_list->is_empty()) {
}
void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
assert(list != NULL, "list can't be null");
if (!list->is_empty()) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
_free_list.add_as_head(free_list);
}
if (old_proxy_set != NULL && !old_proxy_set->is_empty()) {
MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
_old_set.update_from_proxy(old_proxy_set);
}
if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
_humongous_set.update_from_proxy(humongous_proxy_set);
_free_list.add_ordered(list);
}
}
void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
assert(_summary_bytes_used >= bytes,
err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
_summary_bytes_used, bytes));
_summary_bytes_used -= bytes;
}
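// A standalone sketch (std::mutex in place of HotSpot's MutexLockerEx) of the
// batching idea behind prepend_to_freelist() and decrement_summary_bytes():
// callers such as free_collection_set() below free regions into a thread-local
// list, then touch the shared free list and the used-bytes counter exactly
// once. Types here are illustrative, not HotSpot's.
#include <cstddef>
#include <list>
#include <mutex>

struct Region { size_t used; };

static std::mutex         free_list_lock;
static std::list<Region*> master_free_list;
static size_t             summary_bytes_used = 0;

static void free_regions_batched(std::list<Region*>& to_free) {
  std::list<Region*> local;                 // per-thread list, no locking needed
  size_t pre_used = 0;
  while (!to_free.empty()) {
    Region* r = to_free.front();
    to_free.pop_front();
    pre_used += r->used;
    local.push_back(r);                     // stands in for free_region(..., &local, ...)
  }
  std::lock_guard<std::mutex> x(free_list_lock);  // one lock for the whole batch
  master_free_list.splice(master_free_list.begin(), local);
  summary_bytes_used -= pre_used;
}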
class G1ParCleanupCTTask : public AbstractGangTask {
G1SATBCardTableModRefBS* _ct_bs;
G1CollectedHeap* _g1h;
@ -6194,7 +6147,7 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& e
}
}
rs_lengths += cur->rem_set()->occupied();
rs_lengths += cur->rem_set()->occupied_locked();
HeapRegion* next = cur->next_in_collection_set();
assert(cur->in_collection_set(), "bad CS");
@ -6227,7 +6180,8 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& e
// And the region is empty.
assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
free_region(cur, &pre_used, &local_free_list, false /* par */);
pre_used += cur->used();
free_region(cur, &local_free_list, false /* par */, true /* locked */);
} else {
cur->uninstall_surv_rate_group();
if (cur->is_young()) {
@ -6255,10 +6209,8 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& e
young_time_ms += elapsed_ms;
}
update_sets_after_freeing_regions(pre_used, &local_free_list,
NULL /* old_proxy_set */,
NULL /* humongous_proxy_set */,
false /* par */);
prepend_to_freelist(&local_free_list);
decrement_summary_bytes(pre_used);
policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
}
@ -6370,10 +6322,10 @@ bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample)
class TearDownRegionSetsClosure : public HeapRegionClosure {
private:
OldRegionSet *_old_set;
HeapRegionSet *_old_set;
public:
TearDownRegionSetsClosure(OldRegionSet* old_set) : _old_set(old_set) { }
TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
bool doHeapRegion(HeapRegion* r) {
if (r->is_empty()) {
@ -6402,9 +6354,10 @@ void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
TearDownRegionSetsClosure cl(&_old_set);
heap_region_iterate(&cl);
// Need to do this after the heap iteration to be able to
// recognize the young regions and ignore them during the iteration.
_young_list->empty_list();
// Note that emptying the _young_list is postponed and instead done as
// the first step when rebuilding the region sets again. The reason for
// this is that during a full GC string deduplication needs to know if
// a collected region was young or old when the full GC was initiated.
}
_free_list.remove_all();
}
@ -6412,13 +6365,13 @@ void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
class RebuildRegionSetsClosure : public HeapRegionClosure {
private:
bool _free_list_only;
OldRegionSet* _old_set;
HeapRegionSet* _old_set;
FreeRegionList* _free_list;
size_t _total_used;
public:
RebuildRegionSetsClosure(bool free_list_only,
OldRegionSet* old_set, FreeRegionList* free_list) :
HeapRegionSet* old_set, FreeRegionList* free_list) :
_free_list_only(free_list_only),
_old_set(old_set), _free_list(free_list), _total_used(0) {
assert(_free_list->is_empty(), "pre-condition");
@ -6458,6 +6411,10 @@ public:
void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
assert_at_safepoint(true /* should_be_vm_thread */);
if (!free_list_only) {
_young_list->empty_list();
}
RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
heap_region_iterate(&cl);
@ -6493,6 +6450,7 @@ HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
bool young_list_full = g1_policy()->is_young_list_full();
if (force || !young_list_full) {
HeapRegion* new_alloc_region = new_region(word_size,
false /* is_old */,
false /* do_expand */);
if (new_alloc_region != NULL) {
set_region_short_lived_locked(new_alloc_region);
@ -6551,14 +6509,16 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
assert(FreeList_lock->owned_by_self(), "pre-condition");
if (count < g1_policy()->max_regions(ap)) {
bool survivor = (ap == GCAllocForSurvived);
HeapRegion* new_alloc_region = new_region(word_size,
!survivor,
true /* do_expand */);
if (new_alloc_region != NULL) {
// We really only need to do this for old regions given that we
// should never scan survivors. But it doesn't hurt to do it
// for survivors too.
new_alloc_region->set_saved_mark();
if (ap == GCAllocForSurvived) {
if (survivor) {
new_alloc_region->set_survivor();
_hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
} else {
@ -6615,23 +6575,22 @@ void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
class VerifyRegionListsClosure : public HeapRegionClosure {
private:
FreeRegionList* _free_list;
OldRegionSet* _old_set;
HumongousRegionSet* _humongous_set;
uint _region_count;
HeapRegionSet* _old_set;
HeapRegionSet* _humongous_set;
FreeRegionList* _free_list;
public:
VerifyRegionListsClosure(OldRegionSet* old_set,
HumongousRegionSet* humongous_set,
FreeRegionList* free_list) :
_old_set(old_set), _humongous_set(humongous_set),
_free_list(free_list), _region_count(0) { }
HeapRegionSetCount _old_count;
HeapRegionSetCount _humongous_count;
HeapRegionSetCount _free_count;
uint region_count() { return _region_count; }
VerifyRegionListsClosure(HeapRegionSet* old_set,
HeapRegionSet* humongous_set,
FreeRegionList* free_list) :
_old_set(old_set), _humongous_set(humongous_set), _free_list(free_list),
_old_count(), _humongous_count(), _free_count() { }
bool doHeapRegion(HeapRegion* hr) {
_region_count += 1;
if (hr->continuesHumongous()) {
return false;
}
@ -6639,14 +6598,31 @@ public:
if (hr->is_young()) {
// TODO
} else if (hr->startsHumongous()) {
_humongous_set->verify_next_region(hr);
assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u starts humongous but is not in the humongous set.", hr->region_num()));
_humongous_count.increment(1u, hr->capacity());
} else if (hr->is_empty()) {
_free_list->verify_next_region(hr);
assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->region_num()));
_free_count.increment(1u, hr->capacity());
} else {
_old_set->verify_next_region(hr);
assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->region_num()));
_old_count.increment(1u, hr->capacity());
}
return false;
}
void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, FreeRegionList* free_list) {
guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
old_set->total_capacity_bytes(), _old_count.capacity()));
guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
guarantee(free_list->length() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->length(), _free_count.length()));
guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
free_list->total_capacity_bytes(), _free_count.capacity()));
}
};
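// A standalone sketch of the verification scheme above: recount regions by
// classification during a full heap walk, then check the recount against the
// incrementally maintained per-set counters. Types here are illustrative
// stand-ins for HeapRegion and HeapRegionSetCount.
#include <cassert>
#include <cstddef>
#include <vector>

struct Count {
  unsigned length;
  size_t   capacity;
  void increment(unsigned n, size_t cap) { length += n; capacity += cap; }
};

struct Item { bool humongous; bool empty; size_t capacity; };

static void verify_counts(const std::vector<Item>& heap,
                          const Count& expected_old,
                          const Count& expected_humongous,
                          const Count& expected_free) {
  Count old_c = {0, 0}, hum_c = {0, 0}, free_c = {0, 0};
  for (size_t i = 0; i < heap.size(); i++) {
    if (heap[i].humongous)  hum_c.increment(1, heap[i].capacity);
    else if (heap[i].empty) free_c.increment(1, heap[i].capacity);
    else                    old_c.increment(1, heap[i].capacity);
  }
  assert(old_c.length  == expected_old.length  && old_c.capacity  == expected_old.capacity);
  assert(hum_c.length  == expected_humongous.length && hum_c.capacity == expected_humongous.capacity);
  assert(free_c.length == expected_free.length && free_c.capacity == expected_free.capacity);
}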
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
@ -6662,16 +6638,14 @@ void G1CollectedHeap::verify_region_sets() {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
// First, check the explicit lists.
_free_list.verify();
_free_list.verify_list();
{
// Given that a concurrent operation might be adding regions to
// the secondary free list, we have to take the lock before
// verifying it.
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
_secondary_free_list.verify();
_secondary_free_list.verify_list();
}
_old_set.verify();
_humongous_set.verify();
// If a concurrent region freeing operation is in progress it will
// be difficult to correctly attribute any free regions we come
@ -6694,16 +6668,10 @@ void G1CollectedHeap::verify_region_sets() {
// Finally, make sure that the region accounting in the lists is
// consistent with what we see in the heap.
_old_set.verify_start();
_humongous_set.verify_start();
_free_list.verify_start();
VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
heap_region_iterate(&cl);
_old_set.verify_end();
_humongous_set.verify_end();
_free_list.verify_end();
cl.verify_counts(&_old_set, &_humongous_set, &_free_list);
}
// Optimized nmethod scanning
@ -6804,6 +6772,13 @@ void G1CollectedHeap::migrate_strong_code_roots() {
g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
}
void G1CollectedHeap::purge_code_root_memory() {
double purge_start = os::elapsedTime();
G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent);
double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
}
// Mark all the code roots that point into regions *not* in the
// collection set.
//
@ -6874,7 +6849,7 @@ public:
// Code roots should never be attached to a continuation of a humongous region
assert(hrrs->strong_code_roots_list_length() == 0,
err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT
" starting at "HR_FORMAT", but has "INT32_FORMAT,
" starting at "HR_FORMAT", but has "SIZE_FORMAT,
HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()),
hrrs->strong_code_roots_list_length()));
return false;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/barrierSet.hpp"
@ -243,18 +243,18 @@ private:
MemRegion _g1_committed;
// The master free list. It will satisfy all new region allocations.
MasterFreeRegionList _free_list;
FreeRegionList _free_list;
// The secondary free list which contains regions that have been
// freed up during the cleanup process. This will be appended to the
// master free list when appropriate.
SecondaryFreeRegionList _secondary_free_list;
FreeRegionList _secondary_free_list;
// It keeps track of the old regions.
MasterOldRegionSet _old_set;
HeapRegionSet _old_set;
// It keeps track of the humongous regions.
MasterHumongousRegionSet _humongous_set;
HeapRegionSet _humongous_set;
// The number of regions we could create by expansion.
uint _expansion_regions;
@ -497,13 +497,14 @@ protected:
// check whether there's anything available on the
// secondary_free_list and/or wait for more regions to appear on
// that list, if _free_regions_coming is set.
HeapRegion* new_region_try_secondary_free_list();
HeapRegion* new_region_try_secondary_free_list(bool is_old);
// Try to allocate a single non-humongous HeapRegion sufficient for
// an allocation of the given word_size. If do_expand is true,
// attempt to expand the heap if necessary to satisfy the allocation
// request.
HeapRegion* new_region(size_t word_size, bool do_expand);
// request. If the region is to be used as an old region or for a
// humongous object, set is_old to true; otherwise, set it to false.
HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
// Attempt to satisfy a humongous allocation request of the given
// size by finding a contiguous set of free regions of num_regions
@ -757,6 +758,29 @@ public:
G1HRPrinter* hr_printer() { return &_hr_printer; }
// Frees a non-humongous region by initializing its contents and
// adding it to the free list that's passed as a parameter (this is
// usually a local list which will be appended to the master free
// list later). If par is true, the region's RSet will not be freed
// up. The assumption is that this will be done later.
// The locked parameter indicates if the caller has already taken
// care of proper synchronization. This may allow some optimizations.
void free_region(HeapRegion* hr,
FreeRegionList* free_list,
bool par,
bool locked = false);
// Frees a humongous region by collapsing it into individual regions
// and calling free_region() for each of them. The freed regions
// will be added to the free list that's passed as a parameter (this
// is usually a local list which will be appended to the master free
// list later). If par is true, the region's RSet will not be freed
// up. The assumption is that this will be done later.
void free_humongous_region(HeapRegion* hr,
FreeRegionList* free_list,
bool par);
protected:
// Shrink the garbage-first heap by at most the given size (in bytes!).
@ -835,30 +859,6 @@ protected:
G1KlassScanClosure* scan_klasses,
int worker_i);
// Frees a non-humongous region by initializing its contents and
// adding it to the free list that's passed as a parameter (this is
// usually a local list which will be appended to the master free
// list later). The used bytes of freed regions are accumulated in
// pre_used. If par is true, the region's RSet will not be freed
// up. The assumption is that this will be done later.
void free_region(HeapRegion* hr,
size_t* pre_used,
FreeRegionList* free_list,
bool par);
// Frees a humongous region by collapsing it into individual regions
// and calling free_region() for each of them. The freed regions
// will be added to the free list that's passed as a parameter (this
// is usually a local list which will be appended to the master free
// list later). The used bytes of freed regions are accumulated in
// pre_used. If par is true, the region's RSet will not be freed
// up. The assumption is that this will be done later.
void free_humongous_region(HeapRegion* hr,
size_t* pre_used,
FreeRegionList* free_list,
HumongousRegionSet* humongous_proxy_set,
bool par);
// Notifies all the necessary spaces that the committed space has
// been updated (either expanded or shrunk). It should be called
// after _g1_storage is updated.
@ -1228,21 +1228,17 @@ public:
bool is_on_master_free_list(HeapRegion* hr) {
return hr->containing_set() == &_free_list;
}
bool is_in_humongous_set(HeapRegion* hr) {
return hr->containing_set() == &_humongous_set;
}
#endif // ASSERT
// Wrapper for the region list operations that can be called from
// methods outside this class.
void secondary_free_list_add_as_tail(FreeRegionList* list) {
_secondary_free_list.add_as_tail(list);
void secondary_free_list_add(FreeRegionList* list) {
_secondary_free_list.add_ordered(list);
}
void append_secondary_free_list() {
_free_list.add_as_head(&_secondary_free_list);
_free_list.add_ordered(&_secondary_free_list);
}
void append_secondary_free_list_if_not_empty_with_lock() {
@ -1284,27 +1280,9 @@ public:
// True iff an evacuation has failed in the most-recent collection.
bool evacuation_failed() { return _evacuation_failed; }
// It will free a region if it has allocated objects in it that are
// all dead. It calls either free_region() or
// free_humongous_region() depending on the type of the region that
// is passed to it.
void free_region_if_empty(HeapRegion* hr,
size_t* pre_used,
FreeRegionList* free_list,
OldRegionSet* old_proxy_set,
HumongousRegionSet* humongous_proxy_set,
HRRSCleanupTask* hrrs_cleanup_task,
bool par);
// It appends the free list to the master free list and updates the
// master humongous list according to the contents of the proxy
// list. It also adjusts the total used bytes according to pre_used
// (if par is true, it will do so by taking the ParGCRareEvent_lock).
void update_sets_after_freeing_regions(size_t pre_used,
FreeRegionList* free_list,
OldRegionSet* old_proxy_set,
HumongousRegionSet* humongous_proxy_set,
bool par);
void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed);
void prepend_to_freelist(FreeRegionList* list);
void decrement_summary_bytes(size_t bytes);
// Returns "TRUE" iff "p" points into the committed areas of the heap.
virtual bool is_in(const void* p) const;
@ -1659,6 +1637,9 @@ public:
// that were not successfully evacuated are not migrated.
void migrate_strong_code_roots();
// Free up superfluous code root memory.
void purge_code_root_memory();
// During an initial mark pause, mark all the code roots that
// point into regions *not* in the collection set.
void mark_strong_code_roots(uint worker_id);
@ -1671,6 +1652,8 @@ public:
// in symbol table, possibly in parallel.
void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
// Redirty logged cards in the refinement queue.
void redirty_logged_cards();
// Verification
// The following is just to alert the verification code
@ -1797,8 +1780,6 @@ protected:
size_t _undo_waste;
OopsInHeapRegionClosure* _evac_failure_cl;
G1ParScanHeapEvacClosure* _evac_cl;
G1ParScanPartialArrayClosure* _partial_scan_cl;
int _hash_seed;
uint _queue_num;
@ -1926,14 +1907,6 @@ public:
return _evac_failure_cl;
}
void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
_evac_cl = evac_cl;
}
void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
_partial_scan_cl = partial_scan_cl;
}
int* hash_seed() { return &_hash_seed; }
uint queue_num() { return _queue_num; }
@ -1981,19 +1954,121 @@ public:
false /* retain */);
}
}
private:
#define G1_PARTIAL_ARRAY_MASK 0x2
inline bool has_partial_array_mask(oop* ref) const {
return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
}
// We never encode partial array oops as narrowOop*, so return false immediately.
// This allows the compiler to create optimized code when popping references from
// the work queue.
inline bool has_partial_array_mask(narrowOop* ref) const {
assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
return false;
}
// Only implement set_partial_array_mask() for regular oops, not for narrowOops.
// We always encode partial arrays as regular oop, to allow the
// specialization for has_partial_array_mask() for narrowOops above.
// This means that unintentional use of this method with narrowOops is caught
// by the compiler.
inline oop* set_partial_array_mask(oop obj) const {
assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
}
inline oop clear_partial_array_mask(oop* ref) const {
return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}
void do_oop_partial_array(oop* p) {
assert(has_partial_array_mask(p), "invariant");
oop from_obj = clear_partial_array_mask(p);
assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
assert(from_obj->is_objArray(), "must be obj array");
objArrayOop from_obj_array = objArrayOop(from_obj);
// The from-space object contains the real length.
int length = from_obj_array->length();
assert(from_obj->is_forwarded(), "must be forwarded");
oop to_obj = from_obj->forwardee();
assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
objArrayOop to_obj_array = objArrayOop(to_obj);
// We keep track of the next start index in the length field of the
// to-space object.
int next_index = to_obj_array->length();
assert(0 <= next_index && next_index < length,
err_msg("invariant, next index: %d, length: %d", next_index, length));
int start = next_index;
int end = length;
int remainder = end - start;
// We'll try not to push a range that's smaller than ParGCArrayScanChunk.
if (remainder > 2 * ParGCArrayScanChunk) {
end = start + ParGCArrayScanChunk;
to_obj_array->set_length(end);
// Push the remainder before we process the range in case another
// worker has run out of things to do and can steal it.
oop* from_obj_p = set_partial_array_mask(from_obj);
push_on_queue(from_obj_p);
} else {
assert(length == end, "sanity");
// We'll process the final range for this object. Restore the length
// so that the heap remains parsable in case of evacuation failure.
to_obj_array->set_length(end);
}
_scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
// Process indexes [start,end). It will also process the header
// along with the first chunk (i.e., the chunk with start == 0).
// Note that at this point the length field of to_obj_array is not
// correct given that we are using it to keep track of the next
// start index. oop_iterate_range() (thankfully!) ignores the length
// field and only relies on the start / end parameters. It does
// however return the size of the object which will be incorrect. So
// we have to ignore it even if we wanted to use it.
to_obj_array->oop_iterate_range(&_scanner, start, end);
}
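// A standalone sketch (independent of the surrounding class) of the chunking
// technique above: scan at most CHUNK elements per step and re-push the
// remainder so an idle worker can steal it. CHUNK stands in for
// ParGCArrayScanChunk, and the queue here is a plain vector rather than a
// work-stealing task queue.
#include <vector>

struct ArrayTask { const int* data; int next; int length; };

static const int CHUNK = 128;               // illustrative chunk size
static std::vector<ArrayTask> task_queue;   // stand-in for the stealable queue
static long checksum = 0;

static void process_range(const int* d, int start, int end) {
  for (int i = start; i < end; i++) checksum += d[i];  // stands in for oop_iterate_range()
}

static void scan_in_chunks(ArrayTask t) {
  int start = t.next;
  int end   = t.length;
  if (end - start > 2 * CHUNK) {            // avoid leaving a tiny remainder
    end = start + CHUNK;
    t.next = end;                           // the real code keeps this cursor in
    task_queue.push_back(t);                //   the to-space length field
  }
  process_range(t.data, start, end);
}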
// This method is applied to the fields of the objects that have just been copied.
template <class T> void do_oop_evac(T* p, HeapRegion* from) {
assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
"Reference should not be NULL here as such are never pushed to the task queue.");
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
// Although we never intentionally push references outside of the collection
// set, due to (benign) races in the claim mechanism during RSet scanning more
// than one thread might claim the same card, so the same card may be
// processed multiple times. Hence, redo this check.
if (_g1h->in_cset_fast_test(obj)) {
oop forwardee;
if (obj->is_forwarded()) {
forwardee = obj->forwardee();
} else {
forwardee = copy_to_survivor_space(obj);
}
assert(forwardee != NULL, "forwardee should not be NULL");
oopDesc::encode_store_heap_oop(p, forwardee);
}
assert(obj != NULL, "Must be");
update_rs(from, p, queue_num());
}
public:
oop copy_to_survivor_space(oop const obj);
template <class T> void deal_with_reference(T* ref_to_scan) {
if (has_partial_array_mask(ref_to_scan)) {
_partial_scan_cl->do_oop_nv(ref_to_scan);
} else {
if (!has_partial_array_mask(ref_to_scan)) {
// Note: we can use "raw" versions of "region_containing" because
// "ref_to_scan" is definitely in the heap, and is not in a
// humongous region.
HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
_evac_cl->set_region(r);
_evac_cl->do_oop_nv(ref_to_scan);
do_oop_evac(ref_to_scan, r);
} else {
do_oop_partial_array((oop*)ref_to_scan);
}
}
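// A standalone sketch of the tagging scheme above: partial-array entries are
// ordinary pointers with bit 0x2 set, and the narrow-pointer overload returns
// a compile-time false so the tagged path can be optimized away when popping
// narrow references. void*/uint32_t stand in for oop/narrowOop.
#include <cassert>
#include <stdint.h>

static const uintptr_t PARTIAL_MASK = 0x2;

inline bool has_partial_mask(void** ref) {
  return ((uintptr_t)ref & PARTIAL_MASK) == PARTIAL_MASK;
}
inline bool has_partial_mask(uint32_t* /* narrow ref */) {
  return false;                             // never tagged; branch folds away
}
inline void** set_partial_mask(void* obj) {
  assert(((uintptr_t)obj & PARTIAL_MASK) == 0);  // low bits must be free
  return (void**)((uintptr_t)obj | PARTIAL_MASK);
}
inline void* clear_partial_mask(void** ref) {
  return (void*)((uintptr_t)ref & ~PARTIAL_MASK);
}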

@ -30,6 +30,7 @@
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "utilities/taskqueue.hpp"

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
// Helper class for avoiding interleaved logging
class LineBuffer: public StackObj {
@ -168,7 +169,9 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_last_termination_attempts(_max_gc_threads, SIZE_FORMAT),
_last_gc_worker_end_times_ms(_max_gc_threads, "%.1lf", false),
_last_gc_worker_times_ms(_max_gc_threads, "%.1lf"),
_last_gc_worker_other_times_ms(_max_gc_threads, "%.1lf")
_last_gc_worker_other_times_ms(_max_gc_threads, "%.1lf"),
_cur_string_dedup_queue_fixup_worker_times_ms(_max_gc_threads, "%.1lf"),
_cur_string_dedup_table_fixup_worker_times_ms(_max_gc_threads, "%.1lf")
{
assert(max_gc_threads > 0, "Must have some GC threads");
}
@ -229,6 +232,16 @@ void G1GCPhaseTimes::note_gc_end() {
_last_gc_worker_other_times_ms.verify();
}
void G1GCPhaseTimes::note_string_dedup_fixup_start() {
_cur_string_dedup_queue_fixup_worker_times_ms.reset();
_cur_string_dedup_table_fixup_worker_times_ms.reset();
}
void G1GCPhaseTimes::note_string_dedup_fixup_end() {
_cur_string_dedup_queue_fixup_worker_times_ms.verify();
_cur_string_dedup_table_fixup_worker_times_ms.verify();
}
void G1GCPhaseTimes::print_stats(int level, const char* str, double value) {
LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);
}
@ -250,6 +263,14 @@ double G1GCPhaseTimes::accounted_time_ms() {
// Strong code root migration time
misc_time_ms += _cur_strong_code_root_migration_time_ms;
// Strong code root purge time
misc_time_ms += _cur_strong_code_root_purge_time_ms;
if (G1StringDedup::is_enabled()) {
// String dedup fixup time
misc_time_ms += _cur_string_dedup_fixup_time_ms;
}
// Subtract the time taken to clean the card table from the
// current value of "other time"
misc_time_ms += _cur_clear_ct_time_ms;
@ -299,20 +320,43 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
}
print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
print_stats(1, "Code Root Migration", _cur_strong_code_root_migration_time_ms);
print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);
if (G1StringDedup::is_enabled()) {
print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);
_cur_string_dedup_queue_fixup_worker_times_ms.print(2, "Queue Fixup (ms)");
_cur_string_dedup_table_fixup_worker_times_ms.print(2, "Table Fixup (ms)");
}
print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
print_stats(1, "Other", misc_time_ms);
if (_cur_verify_before_time_ms > 0.0) {
print_stats(2, "Verify Before", _cur_verify_before_time_ms);
}
if (G1CollectedHeap::heap()->evacuation_failed()) {
double evac_fail_handling = _cur_evac_fail_recalc_used + _cur_evac_fail_remove_self_forwards +
_cur_evac_fail_restore_remsets;
print_stats(2, "Evacuation Failure", evac_fail_handling);
if (G1Log::finest()) {
print_stats(3, "Recalculate Used", _cur_evac_fail_recalc_used);
print_stats(3, "Remove Self Forwards", _cur_evac_fail_remove_self_forwards);
print_stats(3, "Restore RemSet", _cur_evac_fail_restore_remsets);
}
}
print_stats(2, "Choose CSet",
(_recorded_young_cset_choice_time_ms +
_recorded_non_young_cset_choice_time_ms));
print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
if (G1DeferredRSUpdate) {
print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
}
print_stats(2, "Free CSet",
(_recorded_young_free_cset_time_ms +
_recorded_non_young_free_cset_time_ms));
if (G1Log::finest()) {
print_stats(3, "Young Free CSet", _recorded_young_free_cset_time_ms);
print_stats(3, "Non-Young Free CSet", _recorded_non_young_free_cset_time_ms);
}
if (_cur_verify_after_time_ms > 0.0) {
print_stats(2, "Verify After", _cur_verify_after_time_ms);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -131,6 +131,15 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _cur_collection_par_time_ms;
double _cur_collection_code_root_fixup_time_ms;
double _cur_strong_code_root_migration_time_ms;
double _cur_strong_code_root_purge_time_ms;
double _cur_evac_fail_recalc_used;
double _cur_evac_fail_restore_remsets;
double _cur_evac_fail_remove_self_forwards;
double _cur_string_dedup_fixup_time_ms;
WorkerDataArray<double> _cur_string_dedup_queue_fixup_worker_times_ms;
WorkerDataArray<double> _cur_string_dedup_table_fixup_worker_times_ms;
double _cur_clear_ct_time_ms;
double _cur_ref_proc_time_ms;
@ -142,6 +151,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _recorded_young_cset_choice_time_ms;
double _recorded_non_young_cset_choice_time_ms;
double _recorded_redirty_logged_cards_time_ms;
double _recorded_young_free_cset_time_ms;
double _recorded_non_young_free_cset_time_ms;
@ -223,6 +234,37 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_cur_strong_code_root_migration_time_ms = ms;
}
void record_strong_code_root_purge_time(double ms) {
_cur_strong_code_root_purge_time_ms = ms;
}
void record_evac_fail_recalc_used_time(double ms) {
_cur_evac_fail_recalc_used = ms;
}
void record_evac_fail_restore_remsets(double ms) {
_cur_evac_fail_restore_remsets = ms;
}
void record_evac_fail_remove_self_forwards(double ms) {
_cur_evac_fail_remove_self_forwards = ms;
}
void note_string_dedup_fixup_start();
void note_string_dedup_fixup_end();
void record_string_dedup_fixup_time(double ms) {
_cur_string_dedup_fixup_time_ms = ms;
}
void record_string_dedup_queue_fixup_worker_time(uint worker_id, double ms) {
_cur_string_dedup_queue_fixup_worker_times_ms.set(worker_id, ms);
}
void record_string_dedup_table_fixup_worker_time(uint worker_id, double ms) {
_cur_string_dedup_table_fixup_worker_times_ms.set(worker_id, ms);
}
void record_ref_proc_time(double ms) {
_cur_ref_proc_time_ms = ms;
}
@ -251,6 +293,10 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_recorded_non_young_cset_choice_time_ms = time_ms;
}
void record_redirty_logged_cards_time_ms(double time_ms) {
_recorded_redirty_logged_cards_time_ms = time_ms;
}
void record_cur_collection_start_sec(double time_ms) {
_cur_collection_start_sec = time_ms;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,7 @@
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
@ -194,17 +195,19 @@ class G1PrepareCompactClosure: public HeapRegionClosure {
G1CollectedHeap* _g1h;
ModRefBarrierSet* _mrbs;
CompactPoint _cp;
HumongousRegionSet _humongous_proxy_set;
HeapRegionSetCount _humongous_regions_removed;
void free_humongous_region(HeapRegion* hr) {
HeapWord* end = hr->end();
size_t dummy_pre_used;
FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
assert(hr->startsHumongous(),
"Only the start of a humongous region should be freed.");
_g1h->free_humongous_region(hr, &dummy_pre_used, &dummy_free_list,
&_humongous_proxy_set, false /* par */);
hr->set_containing_set(NULL);
_humongous_regions_removed.increment(1u, hr->capacity());
_g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
hr->prepare_for_compaction(&_cp);
// Also clear the part of the card table that will be unused after
// compaction.
@ -217,16 +220,13 @@ public:
: _g1h(G1CollectedHeap::heap()),
_mrbs(_g1h->g1_barrier_set()),
_cp(NULL, cs, cs->initialize_threshold()),
_humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }
_humongous_regions_removed() { }
void update_sets() {
// We'll recalculate total used bytes and recreate the free list
// at the end of the GC, so no point in updating those values here.
_g1h->update_sets_after_freeing_regions(0, /* pre_used */
NULL, /* free_list */
NULL, /* old_proxy_set */
&_humongous_proxy_set,
false /* par */);
HeapRegionSetCount empty_set;
_g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
}
bool doHeapRegion(HeapRegion* hr) {
@ -317,6 +317,10 @@ void G1MarkSweep::mark_sweep_phase3() {
// have been cleared if they pointed to non-surviving objects.)
sh->process_weak_roots(&GenMarkSweep::adjust_pointer_closure);
if (G1StringDedup::is_enabled()) {
G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
}
GenMarkSweep::adjust_marks();
G1AdjustPointersClosure blk;

@ -80,53 +80,6 @@ public:
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
#define G1_PARTIAL_ARRAY_MASK 0x2
inline bool has_partial_array_mask(oop* ref) {
return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
}
// We never encode partial array oops as narrowOop*, so return false immediately.
// This allows the compiler to create optimized code when popping references from
// the work queue.
inline bool has_partial_array_mask(narrowOop* ref) {
assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
return false;
}
// Only implement set_partial_array_mask() for regular oops, not for narrowOops.
// We always encode partial arrays as regular oop, to allow the
// specialization for has_partial_array_mask() for narrowOops above.
// This means that unintentional use of this method with narrowOops is caught
// by the compiler.
inline oop* set_partial_array_mask(oop obj) {
assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
}
template <class T> inline oop clear_partial_array_mask(T* ref) {
return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}
class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
G1ParScanClosure _scanner;
public:
G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
{
assert(_ref_processor == NULL, "sanity");
}
G1ParScanClosure* scanner() {
return &_scanner;
}
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
// Add back base class for metadata
class G1ParCopyHelper : public G1ParClosureSuper {
protected:
@ -173,15 +126,8 @@ typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;
typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
// The following closure type is defined in g1_specialized_oop_closures.hpp:
//
// typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;
// We use a separate closure to handle references during evacuation
// failure processing.
// We could have used another instance of G1ParScanHeapEvacClosure
// (since that closure no longer assumes that the references it
// handles point into the collection set).
typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;

@ -462,8 +462,9 @@ void G1RemSet::cleanup_after_oops_into_collection_set_do() {
int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();
if (_g1->evacuation_failed()) {
// Restore remembered sets for the regions pointing into the collection set.
double restore_remembered_set_start = os::elapsedTime();
// Restore remembered sets for the regions pointing into the collection set.
if (G1DeferredRSUpdate) {
// If deferred RS updates are enabled then we just need to transfer
// the completed buffers from (a) the DirtyCardQueueSet used to hold
@ -482,6 +483,8 @@ void G1RemSet::cleanup_after_oops_into_collection_set_do() {
}
assert(n_completed_buffers == into_cset_n_buffers, "missed some buffers");
}
_g1->g1_policy()->phase_times()->record_evac_fail_restore_remsets((os::elapsedTime() - restore_remembered_set_start) * 1000.0);
}
// Free any completed buffers in the DirtyCardQueueSet used to hold cards

@ -0,0 +1,208 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/g1StringDedupQueue.hpp"
#include "gc_implementation/g1/g1StringDedupStat.hpp"
#include "gc_implementation/g1/g1StringDedupTable.hpp"
#include "gc_implementation/g1/g1StringDedupThread.hpp"
bool G1StringDedup::_enabled = false;
void G1StringDedup::initialize() {
assert(UseG1GC, "String deduplication only available with G1");
if (UseStringDeduplication) {
_enabled = true;
G1StringDedupQueue::create();
G1StringDedupTable::create();
G1StringDedupThread::create();
}
}
bool G1StringDedup::is_candidate_from_mark(oop obj) {
if (java_lang_String::is_instance(obj)) {
bool from_young = G1CollectedHeap::heap()->heap_region_containing_raw(obj)->is_young();
if (from_young && obj->age() < StringDeduplicationAgeThreshold) {
// Candidate found. String is being evacuated from young to old but has not
// reached the deduplication age threshold, i.e. has not previously been a
// candidate during its life in the young generation.
return true;
}
}
// Not a candidate
return false;
}
void G1StringDedup::enqueue_from_mark(oop java_string) {
assert(is_enabled(), "String deduplication not enabled");
if (is_candidate_from_mark(java_string)) {
G1StringDedupQueue::push(0 /* worker_id */, java_string);
}
}
bool G1StringDedup::is_candidate_from_evacuation(bool from_young, bool to_young, oop obj) {
if (from_young && java_lang_String::is_instance(obj)) {
if (to_young && obj->age() == StringDeduplicationAgeThreshold) {
// Candidate found. String is being evacuated from young to young and just
// reached the deduplication age threshold.
return true;
}
if (!to_young && obj->age() < StringDeduplicationAgeThreshold) {
// Candidate found. String is being evacuated from young to old but has not
// reached the deduplication age threshold, i.e. has not previously been a
// candidate during its life in the young generation.
return true;
}
}
// Not a candidate
return false;
}
void G1StringDedup::enqueue_from_evacuation(bool from_young, bool to_young, uint worker_id, oop java_string) {
assert(is_enabled(), "String deduplication not enabled");
if (is_candidate_from_evacuation(from_young, to_young, java_string)) {
G1StringDedupQueue::push(worker_id, java_string);
}
}
void G1StringDedup::deduplicate(oop java_string) {
assert(is_enabled(), "String deduplication not enabled");
G1StringDedupStat dummy; // Statistics from this path are never used
G1StringDedupTable::deduplicate(java_string, dummy);
}
void G1StringDedup::oops_do(OopClosure* keep_alive) {
assert(is_enabled(), "String deduplication not enabled");
unlink_or_oops_do(NULL, keep_alive);
}
void G1StringDedup::unlink(BoolObjectClosure* is_alive) {
assert(is_enabled(), "String deduplication not enabled");
// Don't allow a potential resize or rehash during unlink, as the unlink
// operation itself might remove enough entries to invalidate such a decision.
unlink_or_oops_do(is_alive, NULL, false /* allow_resize_and_rehash */);
}
//
// Task for parallel unlink_or_oops_do() operation on the deduplication queue
// and table.
//
class G1StringDedupUnlinkOrOopsDoTask : public AbstractGangTask {
private:
G1StringDedupUnlinkOrOopsDoClosure _cl;
public:
G1StringDedupUnlinkOrOopsDoTask(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool allow_resize_and_rehash) :
AbstractGangTask("G1StringDedupUnlinkOrOopsDoTask"),
_cl(is_alive, keep_alive, allow_resize_and_rehash) {
}
virtual void work(uint worker_id) {
double queue_fixup_start = os::elapsedTime();
G1StringDedupQueue::unlink_or_oops_do(&_cl);
double table_fixup_start = os::elapsedTime();
G1StringDedupTable::unlink_or_oops_do(&_cl, worker_id);
double queue_fixup_time_ms = (table_fixup_start - queue_fixup_start) * 1000.0;
double table_fixup_time_ms = (os::elapsedTime() - table_fixup_start) * 1000.0;
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
g1p->phase_times()->record_string_dedup_queue_fixup_worker_time(worker_id, queue_fixup_time_ms);
g1p->phase_times()->record_string_dedup_table_fixup_worker_time(worker_id, table_fixup_time_ms);
}
};
void G1StringDedup::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, bool allow_resize_and_rehash) {
assert(is_enabled(), "String deduplication not enabled");
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
g1p->phase_times()->note_string_dedup_fixup_start();
double fixup_start = os::elapsedTime();
G1StringDedupUnlinkOrOopsDoTask task(is_alive, keep_alive, allow_resize_and_rehash);
if (G1CollectedHeap::use_parallel_gc_threads()) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
g1h->set_par_threads();
g1h->workers()->run_task(&task);
g1h->set_par_threads(0);
} else {
task.work(0);
}
double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
g1p->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
g1p->phase_times()->note_string_dedup_fixup_end();
}
void G1StringDedup::threads_do(ThreadClosure* tc) {
assert(is_enabled(), "String deduplication not enabled");
tc->do_thread(G1StringDedupThread::thread());
}
void G1StringDedup::print_worker_threads_on(outputStream* st) {
assert(is_enabled(), "String deduplication not enabled");
G1StringDedupThread::thread()->print_on(st);
st->cr();
}
void G1StringDedup::verify() {
assert(is_enabled(), "String deduplication not enabled");
G1StringDedupQueue::verify();
G1StringDedupTable::verify();
}
G1StringDedupUnlinkOrOopsDoClosure::G1StringDedupUnlinkOrOopsDoClosure(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool allow_resize_and_rehash) :
_is_alive(is_alive),
_keep_alive(keep_alive),
_resized_table(NULL),
_rehashed_table(NULL),
_next_queue(0),
_next_bucket(0) {
if (allow_resize_and_rehash) {
// If both resize and rehash are needed, only do resize. Rehash of
// the table will eventually happen if the situation persists.
_resized_table = G1StringDedupTable::prepare_resize();
if (!is_resizing()) {
_rehashed_table = G1StringDedupTable::prepare_rehash();
}
}
}
G1StringDedupUnlinkOrOopsDoClosure::~G1StringDedupUnlinkOrOopsDoClosure() {
assert(!is_resizing() || !is_rehashing(), "Can not both resize and rehash");
if (is_resizing()) {
G1StringDedupTable::finish_resize(_resized_table);
} else if (is_rehashing()) {
G1StringDedupTable::finish_rehash(_rehashed_table);
}
}

@ -0,0 +1,202 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUP_HPP
//
// String Deduplication
//
// String deduplication aims to reduce the heap live-set by deduplicating identical
// instances of String so that they share the same backing character array.
//
// The deduplication process is divided into two main parts, 1) finding the objects to
// deduplicate, and 2) deduplicating those objects. The first part is done as part of
// a normal GC cycle when objects are marked or evacuated. At this time each object is
// checked to see if it is a candidate for deduplication. If so, the object is placed
// on the deduplication queue for later processing. The second part, processing the
// objects on the deduplication queue, is a concurrent phase which starts right after
// the stop-the-world marking/evacuation phase. This phase is executed by the
// deduplication thread, which pulls deduplication candidates off the deduplication
// queue and tries to deduplicate them.
//
// A deduplication hashtable is used to keep track of all unique character arrays
// used by String objects. When deduplicating, a lookup is made in this table to see
// if there is already an identical character array somewhere on the heap. If so, the
// String object is adjusted to point to that character array, releasing the reference
// to the original array and allowing it to eventually be garbage collected. If the
// lookup fails, the character array is instead inserted into the hashtable so that it
// can be shared at some point in the future.
//
// Candidate selection
//
// An object is considered a deduplication candidate if all of the following
// statements are true:
//
// - The object is an instance of java.lang.String
//
// - The object is being evacuated from a young heap region
//
// - The object is being evacuated to a young/survivor heap region and the
// object's age is equal to the deduplication age threshold
//
// or
//
// The object is being evacuated to an old heap region and the object's age is
// less than the deduplication age threshold
//
// Once a string object has been promoted to an old region, or its age is higher
// than the deduplication age threshold, it will never become a candidate again.
// This approach avoids making the same object a candidate more than once.
//
// Interned strings are a bit special. They are explicitly deduplicated just before
// being inserted into the StringTable (to avoid counteracting C2 optimizations done
// on string literals), then they also become deduplication candidates if they reach
// the deduplication age threshold or are evacuated to an old heap region. The second
// attempt to deduplicate such strings will be in vain, but we have no fast way of
// filtering them out. This has not proven to be a problem, as the number of interned
// strings is usually dwarfed by the number of normal (non-interned) strings.
//
// For additional information on string deduplication, please see JEP 192,
// http://openjdk.java.net/jeps/192
//
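// A minimal standalone sketch of the candidate selection policy above
// (hypothetical free function, not part of this change; age_threshold
// stands in for StringDeduplicationAgeThreshold):
static bool is_dedup_candidate(bool is_string, bool from_young, bool to_young,
                               unsigned int age, unsigned int age_threshold) {
  if (!is_string || !from_young) {
    return false;                     // only Strings leaving a young region qualify
  }
  if (to_young) {
    return age == age_threshold;      // survivor copy that just reached the threshold
  }
  return age < age_threshold;         // promoted to old before reaching the threshold
}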
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
class OopClosure;
class BoolObjectClosure;
class ThreadClosure;
class outputStream;
class G1StringDedupTable;
//
// Main interface for interacting with string deduplication.
//
class G1StringDedup : public AllStatic {
private:
// Single state for checking if both G1 and string deduplication are enabled.
static bool _enabled;
// Candidate selection policies; each returns true if the given object
// is a candidate for string deduplication.
static bool is_candidate_from_mark(oop obj);
static bool is_candidate_from_evacuation(bool from_young, bool to_young, oop obj);
public:
// Returns true if both G1 and string deduplication are enabled.
static bool is_enabled() {
return _enabled;
}
static void initialize();
// Immediately deduplicates the given String object, bypassing
// the deduplication queue.
static void deduplicate(oop java_string);
// Enqueues a deduplication candidate for later processing by the deduplication
// thread. Before enqueuing, these functions apply the appropriate candidate
// selection policy to filter out non-candidates.
static void enqueue_from_mark(oop java_string);
static void enqueue_from_evacuation(bool from_young, bool to_young,
unsigned int queue, oop java_string);
static void oops_do(OopClosure* keep_alive);
static void unlink(BoolObjectClosure* is_alive);
static void unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive,
bool allow_resize_and_rehash = true);
static void threads_do(ThreadClosure* tc);
static void print_worker_threads_on(outputStream* st);
static void verify();
};
//
// This closure encapsulates the state and the closures needed when scanning
// the deduplication queue and table during the unlink_or_oops_do() operation.
// A single instance of this closure is created and then shared by all worker
// threads participating in the scan. The _next_queue and _next_bucket fields
// provide a simple mechanism for GC workers to claim exclusive access to a
// queue or a table partition.
//
class G1StringDedupUnlinkOrOopsDoClosure : public StackObj {
private:
BoolObjectClosure* _is_alive;
OopClosure* _keep_alive;
G1StringDedupTable* _resized_table;
G1StringDedupTable* _rehashed_table;
size_t _next_queue;
size_t _next_bucket;
public:
G1StringDedupUnlinkOrOopsDoClosure(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool allow_resize_and_rehash);
~G1StringDedupUnlinkOrOopsDoClosure();
bool is_resizing() {
return _resized_table != NULL;
}
G1StringDedupTable* resized_table() {
return _resized_table;
}
bool is_rehashing() {
return _rehashed_table != NULL;
}
// Atomically claims the next available queue for exclusive access by
// the current thread. Returns the queue number of the claimed queue.
size_t claim_queue() {
return (size_t)Atomic::add_ptr(1, &_next_queue) - 1;
}
// Atomically claims the next available table partition for exclusive
// access by the current thread. Returns the table bucket number where
// the claimed partition starts.
size_t claim_table_partition(size_t partition_size) {
return (size_t)Atomic::add_ptr(partition_size, &_next_bucket) - partition_size;
}
// Applies and returns the result from the is_alive closure, or
// returns true if no such closure was provided.
bool is_alive(oop o) {
if (_is_alive != NULL) {
return _is_alive->do_object_b(o);
}
return true;
}
// Applies the keep_alive closure, or does nothing if no such
// closure was provided.
void keep_alive(oop* p) {
if (_keep_alive != NULL) {
_keep_alive->do_oop(p);
}
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUP_HPP
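// The claim_queue()/claim_table_partition() protocol above is a plain
// fetch-and-add. A standalone sketch using std::atomic in place of HotSpot's
// Atomic::add_ptr (illustrative only; note that fetch_add returns the
// pre-increment value, so the "- 1"/"- partition_size" correction needed
// with Atomic::add_ptr, which returns the post-increment value, drops out):
#include <atomic>
#include <cstddef>

static std::atomic<size_t> next_bucket(0);

// Each caller receives a disjoint [begin, begin + partition_size) range;
// a begin at or past the table size means all partitions are claimed.
size_t claim_partition(size_t partition_size) {
  return next_bucket.fetch_add(partition_size);
}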

@ -0,0 +1,162 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc_implementation/g1/g1StringDedupQueue.hpp"
#include "memory/gcLocker.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/stack.inline.hpp"
G1StringDedupQueue* G1StringDedupQueue::_queue = NULL;
const size_t G1StringDedupQueue::_max_size = 1000000; // Max number of elements per queue
const size_t G1StringDedupQueue::_max_cache_size = 0; // Max cache size per queue
G1StringDedupQueue::G1StringDedupQueue() :
_cursor(0),
_empty(true),
_dropped(0) {
_nqueues = MAX2(ParallelGCThreads, (size_t)1);
_queues = NEW_C_HEAP_ARRAY(G1StringDedupWorkerQueue, _nqueues, mtGC);
for (size_t i = 0; i < _nqueues; i++) {
new (_queues + i) G1StringDedupWorkerQueue(G1StringDedupWorkerQueue::default_segment_size(), _max_cache_size, _max_size);
}
}
G1StringDedupQueue::~G1StringDedupQueue() {
ShouldNotReachHere();
}
void G1StringDedupQueue::create() {
assert(_queue == NULL, "One string deduplication queue allowed");
_queue = new G1StringDedupQueue();
}
void G1StringDedupQueue::wait() {
MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
while (_queue->_empty) {
ml.wait(Mutex::_no_safepoint_check_flag);
}
}
void G1StringDedupQueue::push(uint worker_id, oop java_string) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
assert(worker_id < _queue->_nqueues, "Invalid queue");
// Push and notify waiter
G1StringDedupWorkerQueue& worker_queue = _queue->_queues[worker_id];
if (!worker_queue.is_full()) {
worker_queue.push(java_string);
if (_queue->_empty) {
MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
if (_queue->_empty) {
// Mark non-empty and notify waiter
_queue->_empty = false;
ml.notify();
}
}
} else {
// Queue is full, drop the string and update the statistics
Atomic::inc_ptr(&_queue->_dropped);
}
}
oop G1StringDedupQueue::pop() {
assert(!SafepointSynchronize::is_at_safepoint(), "Must not be at safepoint");
No_Safepoint_Verifier nsv;
// Try all queues before giving up
for (size_t tries = 0; tries < _queue->_nqueues; tries++) {
// The cursor indicates where we left off last time
G1StringDedupWorkerQueue* queue = &_queue->_queues[_queue->_cursor];
while (!queue->is_empty()) {
oop obj = queue->pop();
// The oop we pop can be NULL if it was marked
// dead. Just ignore those and pop the next oop.
if (obj != NULL) {
return obj;
}
}
// Try next queue
_queue->_cursor = (_queue->_cursor + 1) % _queue->_nqueues;
}
// Mark empty
_queue->_empty = true;
return NULL;
}
void G1StringDedupQueue::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl) {
// A worker thread first claims a queue, which ensures exclusive
// access to that queue, then continues to process it.
for (;;) {
// Grab next queue to scan
size_t queue = cl->claim_queue();
if (queue >= _queue->_nqueues) {
// End of queues
break;
}
// Scan the queue
unlink_or_oops_do(cl, queue);
}
}
void G1StringDedupQueue::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, size_t queue) {
assert(queue < _queue->_nqueues, "Invalid queue");
StackIterator<oop, mtGC> iter(_queue->_queues[queue]);
while (!iter.is_empty()) {
oop* p = iter.next_addr();
if (*p != NULL) {
if (cl->is_alive(*p)) {
cl->keep_alive(p);
} else {
// Clear dead reference
*p = NULL;
}
}
}
}
void G1StringDedupQueue::print_statistics(outputStream* st) {
st->print_cr(
" [Queue]\n"
" [Dropped: "UINTX_FORMAT"]", _queue->_dropped);
}
void G1StringDedupQueue::verify() {
for (size_t i = 0; i < _queue->_nqueues; i++) {
StackIterator<oop, mtGC> iter(_queue->_queues[i]);
while (!iter.is_empty()) {
oop obj = iter.next();
if (obj != NULL) {
guarantee(Universe::heap()->is_in_reserved(obj), "Object must be on the heap");
guarantee(!obj->is_forwarded(), "Object must not be forwarded");
guarantee(java_lang_String::is_instance(obj), "Object must be a String");
}
}
}
}

@ -0,0 +1,97 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUPQUEUE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUPQUEUE_HPP
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "utilities/stack.hpp"
class G1StringDedupUnlinkOrOopsDoClosure;
//
// The deduplication queue acts as the communication channel between the stop-the-world
// mark/evacuation phase and the concurrent deduplication phase. Deduplication candidates
// found during mark/evacuation are placed on this queue for later processing in the
// deduplication thread. A queue entry is an oop pointing to a String object (as opposed
// to entries in the deduplication hashtable, which point to character arrays).
//
// While users of the queue treat it as a single queue, it is implemented as a set of
// queues, one queue per GC worker thread, to allow lock-free and cache-friendly enqueue
// operations by the GC workers.
//
// The oops in the queue are treated as weak pointers, meaning the objects they point to
// can become unreachable and pruned (cleared) before being popped by the deduplication
// thread.
//
// Pushing to the queue is thread safe (this relies on each thread using a unique worker
// id), but only allowed during a safepoint. Popping from the queue is NOT thread safe
// and can only be done by the deduplication thread outside a safepoint.
//
// The StringDedupQueue_lock is only used for blocking and waking up the deduplication
// thread in case the queue is empty or becomes non-empty, respectively. This lock does
// not otherwise protect the queue content.
//
class G1StringDedupQueue : public CHeapObj<mtGC> {
private:
typedef Stack<oop, mtGC> G1StringDedupWorkerQueue;
static G1StringDedupQueue* _queue;
static const size_t _max_size;
static const size_t _max_cache_size;
G1StringDedupWorkerQueue* _queues;
size_t _nqueues;
size_t _cursor;
volatile bool _empty;
// Statistics counter, only used for logging.
uintx _dropped;
G1StringDedupQueue();
~G1StringDedupQueue();
static void unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, size_t queue);
public:
static void create();
// Blocks and waits for the queue to become non-empty.
static void wait();
// Pushes a deduplication candidate onto a specific GC worker queue.
static void push(uint worker_id, oop java_string);
// Pops a deduplication candidate from any queue, returns NULL if
// all queues are empty.
static oop pop();
static void unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl);
static void print_statistics(outputStream* st);
static void verify();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUPQUEUE_HPP
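// A standalone sketch of the "one queue per GC worker" layout described above,
// with std::vector standing in for HotSpot's Stack<oop, mtGC> (names here are
// illustrative, not part of this change):
#include <cstddef>
#include <vector>

struct WorkerQueues {
  std::vector<std::vector<void*> > queues;  // one stack per worker
  size_t cursor;                            // where pop() left off last time

  explicit WorkerQueues(size_t nworkers) : queues(nworkers), cursor(0) {}

  // Per-worker push: no lock needed, since each worker id maps to its own queue.
  void push(size_t worker_id, void* obj) { queues[worker_id].push_back(obj); }

  // Single-consumer pop: stay on the current queue while it has elements,
  // otherwise rotate the cursor; return NULL once all queues are empty.
  void* pop() {
    for (size_t tries = 0; tries < queues.size(); tries++) {
      std::vector<void*>& q = queues[cursor];
      if (!q.empty()) {
        void* obj = q.back();
        q.pop_back();
        return obj;
      }
      cursor = (cursor + 1) % queues.size();
    }
    return NULL;
  }
};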

@ -0,0 +1,162 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1StringDedupStat.hpp"
G1StringDedupStat::G1StringDedupStat() :
_inspected(0),
_skipped(0),
_hashed(0),
_known(0),
_new(0),
_new_bytes(0),
_deduped(0),
_deduped_bytes(0),
_deduped_young(0),
_deduped_young_bytes(0),
_deduped_old(0),
_deduped_old_bytes(0),
_idle(0),
_exec(0),
_block(0),
_start(0.0),
_idle_elapsed(0.0),
_exec_elapsed(0.0),
_block_elapsed(0.0) {
}
void G1StringDedupStat::add(const G1StringDedupStat& stat) {
_inspected += stat._inspected;
_skipped += stat._skipped;
_hashed += stat._hashed;
_known += stat._known;
_new += stat._new;
_new_bytes += stat._new_bytes;
_deduped += stat._deduped;
_deduped_bytes += stat._deduped_bytes;
_deduped_young += stat._deduped_young;
_deduped_young_bytes += stat._deduped_young_bytes;
_deduped_old += stat._deduped_old;
_deduped_old_bytes += stat._deduped_old_bytes;
_idle += stat._idle;
_exec += stat._exec;
_block += stat._block;
_idle_elapsed += stat._idle_elapsed;
_exec_elapsed += stat._exec_elapsed;
_block_elapsed += stat._block_elapsed;
}
void G1StringDedupStat::print_summary(outputStream* st, const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
double total_deduped_bytes_percent = 0.0;
if (total_stat._new_bytes > 0) {
// Avoid division by zero
total_deduped_bytes_percent = (double)total_stat._deduped_bytes / (double)total_stat._new_bytes * 100.0;
}
st->date_stamp(PrintGCDateStamps);
st->stamp(PrintGCTimeStamps);
st->print_cr(
"[GC concurrent-string-deduplication, "
G1_STRDEDUP_BYTES_FORMAT_NS"->"G1_STRDEDUP_BYTES_FORMAT_NS"("G1_STRDEDUP_BYTES_FORMAT_NS"), avg "
G1_STRDEDUP_PERCENT_FORMAT_NS", "G1_STRDEDUP_TIME_FORMAT"]",
G1_STRDEDUP_BYTES_PARAM(last_stat._new_bytes),
G1_STRDEDUP_BYTES_PARAM(last_stat._new_bytes - last_stat._deduped_bytes),
G1_STRDEDUP_BYTES_PARAM(last_stat._deduped_bytes),
total_deduped_bytes_percent,
last_stat._exec_elapsed);
}
void G1StringDedupStat::print_statistics(outputStream* st, const G1StringDedupStat& stat, bool total) {
double young_percent = 0.0;
double old_percent = 0.0;
double skipped_percent = 0.0;
double hashed_percent = 0.0;
double known_percent = 0.0;
double new_percent = 0.0;
double deduped_percent = 0.0;
double deduped_bytes_percent = 0.0;
double deduped_young_percent = 0.0;
double deduped_young_bytes_percent = 0.0;
double deduped_old_percent = 0.0;
double deduped_old_bytes_percent = 0.0;
if (stat._inspected > 0) {
// Avoid division by zero
skipped_percent = (double)stat._skipped / (double)stat._inspected * 100.0;
hashed_percent = (double)stat._hashed / (double)stat._inspected * 100.0;
known_percent = (double)stat._known / (double)stat._inspected * 100.0;
new_percent = (double)stat._new / (double)stat._inspected * 100.0;
}
if (stat._new > 0) {
// Avoid division by zero
deduped_percent = (double)stat._deduped / (double)stat._new * 100.0;
}
if (stat._deduped > 0) {
// Avoid division by zero
deduped_young_percent = (double)stat._deduped_young / (double)stat._deduped * 100.0;
deduped_old_percent = (double)stat._deduped_old / (double)stat._deduped * 100.0;
}
if (stat._new_bytes > 0) {
// Avoid division by zero
deduped_bytes_percent = (double)stat._deduped_bytes / (double)stat._new_bytes * 100.0;
}
if (stat._deduped_bytes > 0) {
// Avoid division by zero
deduped_young_bytes_percent = (double)stat._deduped_young_bytes / (double)stat._deduped_bytes * 100.0;
deduped_old_bytes_percent = (double)stat._deduped_old_bytes / (double)stat._deduped_bytes * 100.0;
}
if (total) {
st->print_cr(
" [Total Exec: "UINTX_FORMAT"/"G1_STRDEDUP_TIME_FORMAT", Idle: "UINTX_FORMAT"/"G1_STRDEDUP_TIME_FORMAT", Blocked: "UINTX_FORMAT"/"G1_STRDEDUP_TIME_FORMAT"]",
stat._exec, stat._exec_elapsed, stat._idle, stat._idle_elapsed, stat._block, stat._block_elapsed);
} else {
st->print_cr(
" [Last Exec: "G1_STRDEDUP_TIME_FORMAT", Idle: "G1_STRDEDUP_TIME_FORMAT", Blocked: "UINTX_FORMAT"/"G1_STRDEDUP_TIME_FORMAT"]",
stat._exec_elapsed, stat._idle_elapsed, stat._block, stat._block_elapsed);
}
st->print_cr(
" [Inspected: "G1_STRDEDUP_OBJECTS_FORMAT"]\n"
" [Skipped: "G1_STRDEDUP_OBJECTS_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT")]\n"
" [Hashed: "G1_STRDEDUP_OBJECTS_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT")]\n"
" [Known: "G1_STRDEDUP_OBJECTS_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT")]\n"
" [New: "G1_STRDEDUP_OBJECTS_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT") "G1_STRDEDUP_BYTES_FORMAT"]\n"
" [Deduplicated: "G1_STRDEDUP_OBJECTS_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT") "G1_STRDEDUP_BYTES_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT")]\n"
" [Young: "G1_STRDEDUP_OBJECTS_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT") "G1_STRDEDUP_BYTES_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT")]\n"
" [Old: "G1_STRDEDUP_OBJECTS_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT") "G1_STRDEDUP_BYTES_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT")]",
stat._inspected,
stat._skipped, skipped_percent,
stat._hashed, hashed_percent,
stat._known, known_percent,
stat._new, new_percent, G1_STRDEDUP_BYTES_PARAM(stat._new_bytes),
stat._deduped, deduped_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_bytes), deduped_bytes_percent,
stat._deduped_young, deduped_young_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_young_bytes), deduped_young_bytes_percent,
stat._deduped_old, deduped_old_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_old_bytes), deduped_old_bytes_percent);
}

@ -0,0 +1,142 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUPSTAT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUPSTAT_HPP
#include "memory/allocation.hpp"
#include "runtime/os.hpp"
// Macros for GC log output formatting
#define G1_STRDEDUP_OBJECTS_FORMAT UINTX_FORMAT_W(12)
#define G1_STRDEDUP_TIME_FORMAT "%1.7lf secs"
#define G1_STRDEDUP_PERCENT_FORMAT "%5.1lf%%"
#define G1_STRDEDUP_PERCENT_FORMAT_NS "%.1lf%%"
#define G1_STRDEDUP_BYTES_FORMAT "%8.1lf%s"
#define G1_STRDEDUP_BYTES_FORMAT_NS "%.1lf%s"
#define G1_STRDEDUP_BYTES_PARAM(bytes) byte_size_in_proper_unit((double)(bytes)), proper_unit_for_byte_size((bytes))
//
// Statistics gathered by the deduplication thread.
//
class G1StringDedupStat : public StackObj {
private:
// Counters
uintx _inspected;
uintx _skipped;
uintx _hashed;
uintx _known;
uintx _new;
uintx _new_bytes;
uintx _deduped;
uintx _deduped_bytes;
uintx _deduped_young;
uintx _deduped_young_bytes;
uintx _deduped_old;
uintx _deduped_old_bytes;
uintx _idle;
uintx _exec;
uintx _block;
// Time spent by the deduplication thread in different phases
double _start;
double _idle_elapsed;
double _exec_elapsed;
double _block_elapsed;
public:
G1StringDedupStat();
void inc_inspected() {
_inspected++;
}
void inc_skipped() {
_skipped++;
}
void inc_hashed() {
_hashed++;
}
void inc_known() {
_known++;
}
void inc_new(uintx bytes) {
_new++;
_new_bytes += bytes;
}
void inc_deduped_young(uintx bytes) {
_deduped++;
_deduped_bytes += bytes;
_deduped_young++;
_deduped_young_bytes += bytes;
}
void inc_deduped_old(uintx bytes) {
_deduped++;
_deduped_bytes += bytes;
_deduped_old++;
_deduped_old_bytes += bytes;
}
void mark_idle() {
_start = os::elapsedTime();
_idle++;
}
void mark_exec() {
double now = os::elapsedTime();
_idle_elapsed = now - _start;
_start = now;
_exec++;
}
void mark_block() {
double now = os::elapsedTime();
_exec_elapsed += now - _start;
_start = now;
_block++;
}
void mark_unblock() {
double now = os::elapsedTime();
_block_elapsed += now - _start;
_start = now;
}
void mark_done() {
double now = os::elapsedTime();
_exec_elapsed += now - _start;
}
void add(const G1StringDedupStat& stat);
static void print_summary(outputStream* st, const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
static void print_statistics(outputStream* st, const G1StringDedupStat& stat, bool total);
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUPSTAT_HPP
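// The mark_*() calls above carve the deduplication thread's wall time into
// idle/exec/blocked phases. A standalone analog of the accounting (sketch
// only; now_seconds() stands in for os::elapsedTime(), and the real driver
// loop lives in G1StringDedupThread, which is not shown in this diff):
#include <chrono>
#include <cstdio>

static double now_seconds() {
  using namespace std::chrono;
  return duration<double>(steady_clock::now().time_since_epoch()).count();
}

struct PhaseStat {
  double start, idle_elapsed, exec_elapsed;
  PhaseStat() : start(0.0), idle_elapsed(0.0), exec_elapsed(0.0) {}
  void mark_idle() { start = now_seconds(); }                  // idle begins
  void mark_exec() {                                           // idle ends, exec begins
    double now = now_seconds();
    idle_elapsed = now - start;  // per-cycle value, matching the class above
    start = now;
  }
  void mark_done() { exec_elapsed += now_seconds() - start; }  // exec ends
};

int main() {
  PhaseStat stat;
  stat.mark_idle();   // ... would block waiting for candidates here ...
  stat.mark_exec();   // ... would drain the queue and deduplicate here ...
  stat.mark_done();
  printf("idle=%.6fs exec=%.6fs\n", stat.idle_elapsed, stat.exec_elapsed);
  return 0;
}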

@ -0,0 +1,569 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/javaClasses.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1StringDedupTable.hpp"
#include "memory/gcLocker.hpp"
#include "memory/padded.inline.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/mutexLocker.hpp"
//
// Freelist in the deduplication table entry cache. Links table
// entries together using their _next fields.
//
class G1StringDedupEntryFreeList : public CHeapObj<mtGC> {
private:
G1StringDedupEntry* _list;
size_t _length;
public:
G1StringDedupEntryFreeList() :
_list(NULL),
_length(0) {
}
void add(G1StringDedupEntry* entry) {
entry->set_next(_list);
_list = entry;
_length++;
}
G1StringDedupEntry* remove() {
G1StringDedupEntry* entry = _list;
if (entry != NULL) {
_list = entry->next();
_length--;
}
return entry;
}
size_t length() {
return _length;
}
};
//
// Cache of deduplication table entries. This cache provides fast allocation and
// reuse of table entries to lower the pressure on the underlying allocator.
// But more importantly, it provides fast/deferred freeing of table entries. This
// is important because freeing of table entries is done during stop-the-world
// phases and it is not uncommon for a large number of entries to be freed at once.
// Table entries that are freed during these phases are placed onto a freelist in
// the cache. The deduplication thread, which executes in a concurrent phase, will
// later reuse or free the underlying memory for these entries.
//
// The cache allows for single-threaded allocations and multi-threaded frees.
// Allocations are synchronized by StringDedupTable_lock as part of a table
// modification.
//
class G1StringDedupEntryCache : public CHeapObj<mtGC> {
private:
// One freelist per GC worker to allow lock-less freeing of
// entries while doing a parallel scan of the table. Using
// PaddedEnd to avoid false sharing.
PaddedEnd<G1StringDedupEntryFreeList>* _lists;
size_t _nlists;
public:
G1StringDedupEntryCache();
~G1StringDedupEntryCache();
// Get a table entry from the cache freelist, or allocate a new
// entry if the cache is empty.
G1StringDedupEntry* alloc();
// Insert a table entry into the cache freelist.
void free(G1StringDedupEntry* entry, uint worker_id);
// Returns current number of entries in the cache.
size_t size();
// If the cache has grown above the given max size, trim it down
// and deallocate the memory occupied by the trimmed-off entries.
void trim(size_t max_size);
};
G1StringDedupEntryCache::G1StringDedupEntryCache() {
_nlists = MAX2(ParallelGCThreads, (size_t)1);
_lists = PaddedArray<G1StringDedupEntryFreeList, mtGC>::create_unfreeable((uint)_nlists);
}
G1StringDedupEntryCache::~G1StringDedupEntryCache() {
ShouldNotReachHere();
}
G1StringDedupEntry* G1StringDedupEntryCache::alloc() {
for (size_t i = 0; i < _nlists; i++) {
G1StringDedupEntry* entry = _lists[i].remove();
if (entry != NULL) {
return entry;
}
}
return new G1StringDedupEntry();
}
void G1StringDedupEntryCache::free(G1StringDedupEntry* entry, uint worker_id) {
assert(entry->obj() != NULL, "Double free");
assert(worker_id < _nlists, "Invalid worker id");
entry->set_obj(NULL);
entry->set_hash(0);
_lists[worker_id].add(entry);
}
size_t G1StringDedupEntryCache::size() {
size_t size = 0;
for (size_t i = 0; i < _nlists; i++) {
size += _lists[i].length();
}
return size;
}
void G1StringDedupEntryCache::trim(size_t max_size) {
size_t cache_size = 0;
for (size_t i = 0; i < _nlists; i++) {
G1StringDedupEntryFreeList* list = &_lists[i];
cache_size += list->length();
while (cache_size > max_size) {
G1StringDedupEntry* entry = list->remove();
assert(entry != NULL, "Should not be null");
cache_size--;
delete entry;
}
}
}
G1StringDedupTable* G1StringDedupTable::_table = NULL;
G1StringDedupEntryCache* G1StringDedupTable::_entry_cache = NULL;
const size_t G1StringDedupTable::_min_size = (1 << 10); // 1024
const size_t G1StringDedupTable::_max_size = (1 << 24); // 16777216
const double G1StringDedupTable::_grow_load_factor = 2.0; // Grow table at 200% load
const double G1StringDedupTable::_shrink_load_factor = _grow_load_factor / 3.0; // Shrink table at 67% load
const double G1StringDedupTable::_max_cache_factor = 0.1; // Cache a maximum of 10% of the table size
const uintx G1StringDedupTable::_rehash_multiple = 60; // Hash bucket has 60 times more collisions than expected
const uintx G1StringDedupTable::_rehash_threshold = (uintx)(_rehash_multiple * _grow_load_factor);
uintx G1StringDedupTable::_entries_added = 0;
uintx G1StringDedupTable::_entries_removed = 0;
uintx G1StringDedupTable::_resize_count = 0;
uintx G1StringDedupTable::_rehash_count = 0;
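// Worked example of the policy constants above: for a table of _size buckets
// the grow threshold is _size * 2.0 entries (average chain length 2) and the
// shrink threshold is _size * (2.0 / 3.0) entries. A freshly created table of
// _min_size = 1024 buckets therefore grows at 2048 entries and shrinks below
// 682 entries, and the rehash threshold is a constant 60 * 2.0 = 120 entries
// in a single hash chain.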
G1StringDedupTable::G1StringDedupTable(size_t size, jint hash_seed) :
_size(size),
_entries(0),
_grow_threshold((uintx)(size * _grow_load_factor)),
_shrink_threshold((uintx)(size * _shrink_load_factor)),
_rehash_needed(false),
_hash_seed(hash_seed) {
assert(is_power_of_2(size), "Table size must be a power of 2");
_buckets = NEW_C_HEAP_ARRAY(G1StringDedupEntry*, _size, mtGC);
memset(_buckets, 0, _size * sizeof(G1StringDedupEntry*));
}
G1StringDedupTable::~G1StringDedupTable() {
FREE_C_HEAP_ARRAY(G1StringDedupEntry*, _buckets, mtGC);
}
void G1StringDedupTable::create() {
assert(_table == NULL, "One string deduplication table allowed");
_entry_cache = new G1StringDedupEntryCache();
_table = new G1StringDedupTable(_min_size);
}
void G1StringDedupTable::add(typeArrayOop value, unsigned int hash, G1StringDedupEntry** list) {
G1StringDedupEntry* entry = _entry_cache->alloc();
entry->set_obj(value);
entry->set_hash(hash);
entry->set_next(*list);
*list = entry;
_entries++;
}
void G1StringDedupTable::remove(G1StringDedupEntry** pentry, uint worker_id) {
G1StringDedupEntry* entry = *pentry;
*pentry = entry->next();
_entry_cache->free(entry, worker_id);
}
void G1StringDedupTable::transfer(G1StringDedupEntry** pentry, G1StringDedupTable* dest) {
G1StringDedupEntry* entry = *pentry;
*pentry = entry->next();
unsigned int hash = entry->hash();
size_t index = dest->hash_to_index(hash);
G1StringDedupEntry** list = dest->bucket(index);
entry->set_next(*list);
*list = entry;
}
bool G1StringDedupTable::equals(typeArrayOop value1, typeArrayOop value2) {
return (value1 == value2 ||
(value1->length() == value2->length() &&
(!memcmp(value1->base(T_CHAR),
value2->base(T_CHAR),
value1->length() * sizeof(jchar)))));
}
typeArrayOop G1StringDedupTable::lookup(typeArrayOop value, unsigned int hash,
G1StringDedupEntry** list, uintx &count) {
for (G1StringDedupEntry* entry = *list; entry != NULL; entry = entry->next()) {
if (entry->hash() == hash) {
typeArrayOop existing_value = entry->obj();
if (equals(value, existing_value)) {
// Match found
return existing_value;
}
}
count++;
}
// Not found
return NULL;
}
typeArrayOop G1StringDedupTable::lookup_or_add_inner(typeArrayOop value, unsigned int hash) {
size_t index = hash_to_index(hash);
G1StringDedupEntry** list = bucket(index);
uintx count = 0;
// Lookup in list
typeArrayOop existing_value = lookup(value, hash, list, count);
// Check if rehash is needed
if (count > _rehash_threshold) {
_rehash_needed = true;
}
if (existing_value == NULL) {
// Not found, add new entry
add(value, hash, list);
// Update statistics
_entries_added++;
}
return existing_value;
}
unsigned int G1StringDedupTable::hash_code(typeArrayOop value) {
unsigned int hash;
int length = value->length();
const jchar* data = (jchar*)value->base(T_CHAR);
if (use_java_hash()) {
hash = java_lang_String::hash_code(data, length);
} else {
hash = AltHashing::murmur3_32(_table->_hash_seed, data, length);
}
return hash;
}
void G1StringDedupTable::deduplicate(oop java_string, G1StringDedupStat& stat) {
assert(java_lang_String::is_instance(java_string), "Must be a string");
No_Safepoint_Verifier nsv;
stat.inc_inspected();
typeArrayOop value = java_lang_String::value(java_string);
if (value == NULL) {
// String has no value
stat.inc_skipped();
return;
}
unsigned int hash = 0;
if (use_java_hash()) {
// Get hash code from cache
hash = java_lang_String::hash(java_string);
}
if (hash == 0) {
// Compute hash
hash = hash_code(value);
stat.inc_hashed();
}
if (use_java_hash() && hash != 0) {
// Store hash code in cache
java_lang_String::set_hash(java_string, hash);
}
typeArrayOop existing_value = lookup_or_add(value, hash);
if (existing_value == value) {
// Same value, already known
stat.inc_known();
return;
}
// Get size of value array
uintx size_in_bytes = value->size() * HeapWordSize;
stat.inc_new(size_in_bytes);
if (existing_value != NULL) {
// Enqueue the reference to make sure it is kept alive. Concurrent mark might
// otherwise declare it dead if there are no other strong references to this object.
G1SATBCardTableModRefBS::enqueue(existing_value);
// Existing value found, deduplicate string
java_lang_String::set_value(java_string, existing_value);
if (G1CollectedHeap::heap()->is_in_young(value)) {
stat.inc_deduped_young(size_in_bytes);
} else {
stat.inc_deduped_old(size_in_bytes);
}
}
}
G1StringDedupTable* G1StringDedupTable::prepare_resize() {
size_t size = _table->_size;
// Check if the hashtable needs to be resized
if (_table->_entries > _table->_grow_threshold) {
// Grow table, double the size
size *= 2;
if (size > _max_size) {
// Too big, don't resize
return NULL;
}
} else if (_table->_entries < _table->_shrink_threshold) {
// Shrink table, half the size
size /= 2;
if (size < _min_size) {
// Too small, don't resize
return NULL;
}
} else if (StringDeduplicationResizeALot) {
// Force grow
size *= 2;
if (size > _max_size) {
// Too big, force shrink instead
size /= 4;
}
} else {
// Resize not needed
return NULL;
}
// Update statistics
_resize_count++;
// Allocate the new table. The new table will be populated by workers
// calling unlink_or_oops_do() and finally installed by finish_resize().
return new G1StringDedupTable(size, _table->_hash_seed);
}
void G1StringDedupTable::finish_resize(G1StringDedupTable* resized_table) {
assert(resized_table != NULL, "Invalid table");
resized_table->_entries = _table->_entries;
// Free old table
delete _table;
// Install new table
_table = resized_table;
}
void G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, uint worker_id) {
// The table is divided into partitions to allow lock-less parallel processing by
// multiple worker threads. A worker thread first claims a partition, which ensures
// exclusive access to that part of the table, then continues to process it. To allow
// shrinking of the table in parallel we also need to make sure that the same worker
// thread processes all partitions where entries will hash to the same destination
// partition. Since the table size is always a power of two and we always shrink by
// dividing the table in half, we know that for a given partition there is only one
// other partition whose entries will hash to the same destination partition. That
// other partition is always the sibling partition in the second half of the table.
// For example, if the table is divided into 8 partitions, the sibling of partition 0
// is partition 4, the sibling of partition 1 is partition 5, etc.
size_t table_half = _table->_size / 2;
// Let each partition be one page worth of buckets
size_t partition_size = MIN2(table_half, os::vm_page_size() / sizeof(G1StringDedupEntry*));
assert(table_half % partition_size == 0, "Invalid partition size");
// Number of entries removed during the scan
uintx removed = 0;
for (;;) {
// Grab next partition to scan
size_t partition_begin = cl->claim_table_partition(partition_size);
size_t partition_end = partition_begin + partition_size;
if (partition_begin >= table_half) {
// End of table
break;
}
// Scan the partition followed by the sibling partition in the second half of the table
removed += unlink_or_oops_do(cl, partition_begin, partition_end, worker_id);
removed += unlink_or_oops_do(cl, table_half + partition_begin, table_half + partition_end, worker_id);
}
// Delayed update to avoid contention on the table lock
if (removed > 0) {
MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag);
_table->_entries -= removed;
_entries_removed += removed;
}
}
uintx G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl,
size_t partition_begin,
size_t partition_end,
uint worker_id) {
uintx removed = 0;
for (size_t bucket = partition_begin; bucket < partition_end; bucket++) {
G1StringDedupEntry** entry = _table->bucket(bucket);
while (*entry != NULL) {
oop* p = (oop*)(*entry)->obj_addr();
if (cl->is_alive(*p)) {
cl->keep_alive(p);
if (cl->is_resizing()) {
// We are resizing the table, transfer entry to the new table
_table->transfer(entry, cl->resized_table());
} else {
if (cl->is_rehashing()) {
// We are rehashing the table, rehash the entry but keep it
// in the table. We can't transfer entries into the new table
// at this point since we don't have exclusive access to all
// destination partitions. finish_rehash() will do a single
// threaded transfer of all entries.
typeArrayOop value = (typeArrayOop)*p;
unsigned int hash = hash_code(value);
(*entry)->set_hash(hash);
}
// Move to next entry
entry = (*entry)->next_addr();
}
} else {
// Not alive, remove entry from table
_table->remove(entry, worker_id);
removed++;
}
}
}
return removed;
}
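// A quick standalone check of the sibling-partition invariant relied on
// above: with power-of-two sizes and index = hash & (size - 1), entries in
// old buckets i and i + size/2 are exactly the ones that fold onto bucket i
// when the table shrinks by half (sketch, not part of this change):
#include <cassert>
#include <cstddef>

int main() {
  const size_t size = 8;                       // old table size (power of two)
  for (unsigned int hash = 0; hash < 64; hash++) {
    size_t old_index = hash & (size - 1);
    size_t new_index = hash & (size / 2 - 1);  // index after shrinking by half
    assert(old_index == new_index || old_index == new_index + size / 2);
  }
  return 0;
}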
G1StringDedupTable* G1StringDedupTable::prepare_rehash() {
if (!_table->_rehash_needed && !StringDeduplicationRehashALot) {
// Rehash not needed
return NULL;
}
// Update statistics
_rehash_count++;
// Compute new hash seed
_table->_hash_seed = AltHashing::compute_seed();
// Allocate the new table, same size and hash seed
return new G1StringDedupTable(_table->_size, _table->_hash_seed);
}
void G1StringDedupTable::finish_rehash(G1StringDedupTable* rehashed_table) {
assert(rehashed_table != NULL, "Invalid table");
// Move all newly rehashed entries into the correct buckets in the new table
for (size_t bucket = 0; bucket < _table->_size; bucket++) {
G1StringDedupEntry** entry = _table->bucket(bucket);
while (*entry != NULL) {
_table->transfer(entry, rehashed_table);
}
}
rehashed_table->_entries = _table->_entries;
// Free old table
delete _table;
// Install new table
_table = rehashed_table;
}
void G1StringDedupTable::verify() {
for (size_t bucket = 0; bucket < _table->_size; bucket++) {
// Verify entries
G1StringDedupEntry** entry = _table->bucket(bucket);
while (*entry != NULL) {
typeArrayOop value = (*entry)->obj();
guarantee(value != NULL, "Object must not be NULL");
guarantee(Universe::heap()->is_in_reserved(value), "Object must be on the heap");
guarantee(!value->is_forwarded(), "Object must not be forwarded");
guarantee(value->is_typeArray(), "Object must be a typeArrayOop");
unsigned int hash = hash_code(value);
guarantee((*entry)->hash() == hash, "Table entry has incorrect hash");
guarantee(_table->hash_to_index(hash) == bucket, "Table entry has incorrect index");
entry = (*entry)->next_addr();
}
// Verify that we do not have entries with identical oops or identical arrays.
// We only need to compare entries in the same bucket. If the same oop or an
// identical array has been inserted more than once into different/incorrect
// buckets the verification step above will catch that.
G1StringDedupEntry** entry1 = _table->bucket(bucket);
while (*entry1 != NULL) {
typeArrayOop value1 = (*entry1)->obj();
G1StringDedupEntry** entry2 = (*entry1)->next_addr();
while (*entry2 != NULL) {
typeArrayOop value2 = (*entry2)->obj();
guarantee(!equals(value1, value2), "Table entries must not have identical arrays");
entry2 = (*entry2)->next_addr();
}
entry1 = (*entry1)->next_addr();
}
}
}
void G1StringDedupTable::trim_entry_cache() {
MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag);
size_t max_cache_size = (size_t)(_table->_size * _max_cache_factor);
_entry_cache->trim(max_cache_size);
}
void G1StringDedupTable::print_statistics(outputStream* st) {
st->print_cr(
" [Table]\n"
" [Memory Usage: "G1_STRDEDUP_BYTES_FORMAT_NS"]\n"
" [Size: "SIZE_FORMAT", Min: "SIZE_FORMAT", Max: "SIZE_FORMAT"]\n"
" [Entries: "UINTX_FORMAT", Load: "G1_STRDEDUP_PERCENT_FORMAT_NS", Cached: " UINTX_FORMAT ", Added: "UINTX_FORMAT", Removed: "UINTX_FORMAT"]\n"
" [Resize Count: "UINTX_FORMAT", Shrink Threshold: "UINTX_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT_NS"), Grow Threshold: "UINTX_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT_NS")]\n"
" [Rehash Count: "UINTX_FORMAT", Rehash Threshold: "UINTX_FORMAT", Hash Seed: 0x%x]\n"
" [Age Threshold: "UINTX_FORMAT"]",
G1_STRDEDUP_BYTES_PARAM(_table->_size * sizeof(G1StringDedupEntry*) + (_table->_entries + _entry_cache->size()) * sizeof(G1StringDedupEntry)),
_table->_size, _min_size, _max_size,
_table->_entries, (double)_table->_entries / (double)_table->_size * 100.0, _entry_cache->size(), _entries_added, _entries_removed,
_resize_count, _table->_shrink_threshold, _shrink_load_factor * 100.0, _table->_grow_threshold, _grow_load_factor * 100.0,
_rehash_count, _rehash_threshold, _table->_hash_seed,
StringDeduplicationAgeThreshold);
}

@ -0,0 +1,230 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUPTABLE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUPTABLE_HPP
#include "gc_implementation/g1/g1StringDedupStat.hpp"
#include "runtime/mutexLocker.hpp"
class G1StringDedupEntryCache;
//
// Table entry in the deduplication hashtable. Points weakly to the
// character array. Can be chained in a linked list in case of hash
// collisions or when placed in a freelist in the entry cache.
//
class G1StringDedupEntry : public CHeapObj<mtGC> {
private:
G1StringDedupEntry* _next;
unsigned int _hash;
typeArrayOop _obj;
public:
G1StringDedupEntry() :
_next(NULL),
_hash(0),
_obj(NULL) {
}
G1StringDedupEntry* next() {
return _next;
}
G1StringDedupEntry** next_addr() {
return &_next;
}
void set_next(G1StringDedupEntry* next) {
_next = next;
}
unsigned int hash() {
return _hash;
}
void set_hash(unsigned int hash) {
_hash = hash;
}
typeArrayOop obj() {
return _obj;
}
typeArrayOop* obj_addr() {
return &_obj;
}
void set_obj(typeArrayOop obj) {
_obj = obj;
}
};
//
// The deduplication hashtable keeps track of all unique character arrays used
// by String objects. Each table entry weakly points to a character array, allowing
// otherwise unreachable character arrays to be declared dead and pruned from the
// table.
//
// The table is dynamically resized to accommodate the current number of table entries.
// The table has hash buckets with chains for hash collisions. If the average chain
// length goes above or below given thresholds the table grows or shrinks accordingly.
//
// The table is also dynamically rehashed (using a new hash seed) if it becomes severely
// unbalanced, i.e., a hash chain is significantly longer than average.
//
// All access to the table is protected by the StringDedupTable_lock, except under
// safepoints, in which case GC workers are allowed to access table partitions they
// have claimed without first acquiring the lock. Note, however, that this applies
// only to the claimed table partitions (i.e. ranges of elements in _buckets), not
// other parts of the table such as the _entries field, statistics counters, etc.
//
class G1StringDedupTable : public CHeapObj<mtGC> {
private:
// The currently active hashtable instance. Only modified when
// the table is resized or rehashed.
static G1StringDedupTable* _table;
// Cache for reuse and fast alloc/free of table entries.
static G1StringDedupEntryCache* _entry_cache;
G1StringDedupEntry** _buckets;
size_t _size;
uintx _entries;
uintx _shrink_threshold;
uintx _grow_threshold;
bool _rehash_needed;
// The hash seed also dictates which hash function to use. A
// zero hash seed means we will use the Java compatible hash
// function (which doesn't use a seed), and a non-zero hash
// seed means we use the murmur3 hash function.
jint _hash_seed;
// Constants governing table resize/rehash/cache.
static const size_t _min_size;
static const size_t _max_size;
static const double _grow_load_factor;
static const double _shrink_load_factor;
static const uintx _rehash_multiple;
static const uintx _rehash_threshold;
static const double _max_cache_factor;
// Table statistics, only used for logging.
static uintx _entries_added;
static uintx _entries_removed;
static uintx _resize_count;
static uintx _rehash_count;
G1StringDedupTable(size_t size, jint hash_seed = 0);
~G1StringDedupTable();
// Returns the hash bucket at the given index.
G1StringDedupEntry** bucket(size_t index) {
return _buckets + index;
}
// Returns the hash bucket index for the given hash code.
size_t hash_to_index(unsigned int hash) {
return (size_t)hash & (_size - 1);
}
// Adds a new table entry to the given hash bucket.
void add(typeArrayOop value, unsigned int hash, G1StringDedupEntry** list);
// Removes the given table entry from the table.
void remove(G1StringDedupEntry** pentry, uint worker_id);
// Transfers a table entry from the current table to the destination table.
void transfer(G1StringDedupEntry** pentry, G1StringDedupTable* dest);
// Returns an existing character array in the given hash bucket, or NULL
// if no matching character array exists.
typeArrayOop lookup(typeArrayOop value, unsigned int hash,
G1StringDedupEntry** list, uintx &count);
// Returns an existing character array in the table, or inserts a new
// table entry if no matching character array exists.
typeArrayOop lookup_or_add_inner(typeArrayOop value, unsigned int hash);
// Thread-safe lookup or add of a table entry.
static typeArrayOop lookup_or_add(typeArrayOop value, unsigned int hash) {
// Protect the table from concurrent access. Also note that this lock
// acts as a fence for _table, which could have been replaced by a new
// instance if the table was resized or rehashed.
MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag);
return _table->lookup_or_add_inner(value, hash);
}
// Returns true if the hashtable is currently using a Java compatible
// hash function.
static bool use_java_hash() {
return _table->_hash_seed == 0;
}
static bool equals(typeArrayOop value1, typeArrayOop value2);
// Computes the hash code for the given character array, using the
// currently active hash function and hash seed.
static unsigned int hash_code(typeArrayOop value);
static uintx unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl,
size_t partition_begin,
size_t partition_end,
uint worker_id);
public:
static void create();
// Deduplicates the given String object, or adds its backing
// character array to the deduplication hashtable.
static void deduplicate(oop java_string, G1StringDedupStat& stat);
// If a table resize is needed, returns a newly allocated empty
// hashtable of the proper size.
static G1StringDedupTable* prepare_resize();
// Installs a newly resized table as the currently active table
// and deletes the previously active table.
static void finish_resize(G1StringDedupTable* resized_table);
// If a table rehash is needed, returns a newly allocated empty
// hashtable and updates the hash seed.
static G1StringDedupTable* prepare_rehash();
// Transfers rehashed entries from the currently active table into
// the new table. Installs the new table as the currently active table
// and deletes the previously active table.
static void finish_rehash(G1StringDedupTable* rehashed_table);
// If the table entry cache has grown too large, trim it down according to policy.
static void trim_entry_cache();
static void unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, uint worker_id);
static void print_statistics(outputStream* st);
static void verify();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUPTABLE_HPP
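
A sketch, not part of this patch: the zero-seed case above is assumed to compute
the h = 31 * h + c recurrence that java.lang.String.hashCode() specifies, over the
UTF-16 code units of the character array. The helper name and the use of uint16_t
(standing in for jchar) are illustrative only.

#include <stdint.h>

// Java-compatible hash over UTF-16 code units: h = 31 * h + c.
static unsigned int java_compatible_hash(const uint16_t* data, int length) {
  unsigned int h = 0;
  for (int i = 0; i < length; i++) {
    h = 31 * h + data[i];
  }
  return h;
}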

@ -0,0 +1,124 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/g1StringDedupTable.hpp"
#include "gc_implementation/g1/g1StringDedupThread.hpp"
#include "gc_implementation/g1/g1StringDedupQueue.hpp"
G1StringDedupThread* G1StringDedupThread::_thread = NULL;
G1StringDedupThread::G1StringDedupThread() :
ConcurrentGCThread() {
set_name("String Deduplication Thread");
create_and_start();
}
G1StringDedupThread::~G1StringDedupThread() {
ShouldNotReachHere();
}
void G1StringDedupThread::create() {
assert(G1StringDedup::is_enabled(), "String deduplication not enabled");
assert(_thread == NULL, "One string deduplication thread allowed");
_thread = new G1StringDedupThread();
}
G1StringDedupThread* G1StringDedupThread::thread() {
assert(G1StringDedup::is_enabled(), "String deduplication not enabled");
assert(_thread != NULL, "String deduplication thread not created");
return _thread;
}
void G1StringDedupThread::print_on(outputStream* st) const {
st->print("\"%s\" ", name());
Thread::print_on(st);
st->cr();
}
void G1StringDedupThread::run() {
G1StringDedupStat total_stat;
initialize_in_thread();
wait_for_universe_init();
// Main loop
for (;;) {
G1StringDedupStat stat;
stat.mark_idle();
// Wait for the queue to become non-empty
G1StringDedupQueue::wait();
// Include this thread in safepoints
stsJoin();
stat.mark_exec();
// Process the queue
for (;;) {
oop java_string = G1StringDedupQueue::pop();
if (java_string == NULL) {
break;
}
G1StringDedupTable::deduplicate(java_string, stat);
// Safepoint this thread if needed
if (stsShouldYield()) {
stat.mark_block();
stsYield(NULL);
stat.mark_unblock();
}
}
G1StringDedupTable::trim_entry_cache();
stat.mark_done();
// Print statistics
total_stat.add(stat);
print(gclog_or_tty, stat, total_stat);
// Exclude this thread from safepoints
stsLeave();
}
ShouldNotReachHere();
}
void G1StringDedupThread::print(outputStream* st, const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
if (G1Log::fine() || PrintStringDeduplicationStatistics) {
G1StringDedupStat::print_summary(st, last_stat, total_stat);
if (PrintStringDeduplicationStatistics) {
G1StringDedupStat::print_statistics(st, last_stat, false);
G1StringDedupStat::print_statistics(st, total_stat, true);
G1StringDedupTable::print_statistics(st);
G1StringDedupQueue::print_statistics(st);
}
}
}

@ -0,0 +1,56 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUPTHREAD_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUPTHREAD_HPP
#include "gc_implementation/g1/g1StringDedupStat.hpp"
#include "gc_implementation/shared/concurrentGCThread.hpp"
//
// The deduplication thread is where the actual deduplication occurs. It waits for
// deduplication candidates to appear on the deduplication queue, removes them from
// the queue and tries to deduplicate them. It uses the deduplication hashtable to
// find identical, already existing, character arrays on the heap. The thread runs
// concurrently with the Java application but participates in safepoints to allow
// the GC to adjust and unlink oops from the deduplication queue and table.
//
class G1StringDedupThread: public ConcurrentGCThread {
private:
static G1StringDedupThread* _thread;
G1StringDedupThread();
~G1StringDedupThread();
void print(outputStream* st, const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
public:
static void create();
static G1StringDedupThread* thread();
virtual void run();
virtual void print_on(outputStream* st) const;
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUPTHREAD_HPP
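
The thread above only exists when G1StringDedup::is_enabled() holds. A hypothetical
invocation that enables deduplication and the statistics printed by
G1StringDedupThread::print(), assuming UseStringDeduplication is the flag behind
is_enabled() (MyApp is a placeholder):

java -XX:+UseG1GC -XX:+UseStringDeduplication \
     -XX:+PrintStringDeduplicationStatistics MyApp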

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -285,6 +285,10 @@
product(uintx, G1MixedGCCountTarget, 8, \
"The target number of mixed GCs after a marking cycle.") \
\
experimental(uintx, G1CodeRootsChunkCacheKeepPercent, 10, \
"The amount of code root chunks that should be kept at most " \
"as percentage of already allocated.") \
\
experimental(uintx, G1OldCSetRegionThresholdPercent, 10, \
"An upper bound for the number of old CSet regions expressed " \
"as a percentage of the heap size.") \

@ -43,8 +43,6 @@ class G1ParCopyClosure;
class G1ParScanClosure;
class G1ParPushHeapRSClosure;
typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;
class FilterIntoCSClosure;
class FilterOutOfRegionClosure;
class G1CMOopClosure;
@ -61,7 +59,6 @@ class G1UpdateRSOrPushRefOopClosure;
#endif
#define FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) \
f(G1ParScanHeapEvacClosure,_nv) \
f(G1ParScanClosure,_nv) \
f(G1ParPushHeapRSClosure,_nv) \
f(FilterIntoCSClosure,_nv) \

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -205,7 +205,7 @@ void HeapRegion::reset_after_compaction() {
init_top_at_mark_start();
}
void HeapRegion::hr_clear(bool par, bool clear_space) {
void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
assert(_humongous_type == NotHumongous,
"we should have already filtered out humongous regions");
assert(_humongous_start_region == NULL,
@ -223,7 +223,11 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
if (!par) {
// If this is parallel, this will be done later.
HeapRegionRemSet* hrrs = rem_set();
hrrs->clear();
if (locked) {
hrrs->clear_locked();
} else {
hrrs->clear();
}
_claimed = InitialClaimValue;
}
zero_marked_bytes();
@ -352,7 +356,7 @@ HeapRegion::HeapRegion(uint hrs_index,
_claimed(InitialClaimValue), _evacuation_failed(false),
_prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
_young_type(NotYoung), _next_young_region(NULL),
_next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
_next_dirty_cards_region(NULL), _next(NULL), _prev(NULL), _pending_removal(false),
#ifdef ASSERT
_containing_set(NULL),
#endif // ASSERT
@ -710,14 +714,14 @@ void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const
}
HeapRegionRemSet* hrrs = rem_set();
int strong_code_roots_length = hrrs->strong_code_roots_list_length();
size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();
// if this region is empty then there should be no entries
// on its strong code root list
if (is_empty()) {
if (strong_code_roots_length > 0) {
gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
"but has "INT32_FORMAT" code root entries",
"but has "SIZE_FORMAT" code root entries",
bottom(), end(), strong_code_roots_length);
*failures = true;
}
@ -727,7 +731,7 @@ void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const
if (continuesHumongous()) {
if (strong_code_roots_length > 0) {
gclog_or_tty->print_cr("region "HR_FORMAT" is a continuation of a humongous "
"region but has "INT32_FORMAT" code root entries",
"region but has "SIZE_FORMAT" code root entries",
HR_FORMAT_PARAMS(this), strong_code_roots_length);
*failures = true;
}

@ -271,6 +271,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
// Fields used by the HeapRegionSetBase class and subclasses.
HeapRegion* _next;
HeapRegion* _prev;
#ifdef ASSERT
HeapRegionSetBase* _containing_set;
#endif // ASSERT
@ -531,11 +532,13 @@ class HeapRegion: public G1OffsetTableContigSpace {
// Methods used by the HeapRegionSetBase class and subclasses.
// Getter and setter for the next field used to link regions into
// Getter and setter for the next and prev fields used to link regions into
// linked lists.
HeapRegion* next() { return _next; }
HeapRegion* prev() { return _prev; }
void set_next(HeapRegion* next) { _next = next; }
void set_prev(HeapRegion* prev) { _prev = prev; }
// Every region added to a set is tagged with a reference to that
// set. This is used for doing consistency checking to make sure that
@ -596,7 +599,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
void save_marks();
// Reset HR stuff to default values.
void hr_clear(bool par, bool clear_space);
void hr_clear(bool par, bool clear_space, bool locked = false);
void par_clear();
// Get the start of the unmarked area in this region.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/bitMap.inline.hpp"
@ -259,10 +260,9 @@ size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
_g1h(G1CollectedHeap::heap()),
_m(Mutex::leaf, "An OtherRegionsTable lock", true),
_hr(hr),
_hr(hr), _m(m),
_coarse_map(G1CollectedHeap::heap()->max_regions(),
false /* in-resource-area */),
_fine_grain_regions(NULL),
@ -358,46 +358,66 @@ void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
"just checking");
}
int** OtherRegionsTable::_from_card_cache = NULL;
size_t OtherRegionsTable::_from_card_cache_max_regions = 0;
size_t OtherRegionsTable::_from_card_cache_mem_size = 0;
int** FromCardCache::_cache = NULL;
uint FromCardCache::_max_regions = 0;
size_t FromCardCache::_static_mem_size = 0;
void OtherRegionsTable::init_from_card_cache(size_t max_regions) {
_from_card_cache_max_regions = max_regions;
void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
guarantee(_cache == NULL, "Should not call this multiple times");
int n_par_rs = HeapRegionRemSet::num_par_rem_sets();
_from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs, mtGC);
for (int i = 0; i < n_par_rs; i++) {
_from_card_cache[i] = NEW_C_HEAP_ARRAY(int, max_regions, mtGC);
for (size_t j = 0; j < max_regions; j++) {
_from_card_cache[i][j] = -1; // An invalid value.
_max_regions = max_num_regions;
_cache = Padded2DArray<int, mtGC>::create_unfreeable(n_par_rs,
_max_regions,
&_static_mem_size);
for (uint i = 0; i < n_par_rs; i++) {
for (uint j = 0; j < _max_regions; j++) {
set(i, j, InvalidCard);
}
}
_from_card_cache_mem_size = n_par_rs * max_regions * sizeof(int);
}
void OtherRegionsTable::shrink_from_card_cache(size_t new_n_regs) {
for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
assert(new_n_regs <= _from_card_cache_max_regions, "Must be within max.");
for (size_t j = new_n_regs; j < _from_card_cache_max_regions; j++) {
_from_card_cache[i][j] = -1; // An invalid value.
void FromCardCache::shrink(uint new_num_regions) {
for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
assert(new_num_regions <= _max_regions, "Must be within max.");
for (uint j = new_num_regions; j < _max_regions; j++) {
set(i, j, InvalidCard);
}
}
}
#ifndef PRODUCT
void OtherRegionsTable::print_from_card_cache() {
for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
for (size_t j = 0; j < _from_card_cache_max_regions; j++) {
gclog_or_tty->print_cr("_from_card_cache[%d][%d] = %d.",
i, j, _from_card_cache[i][j]);
void FromCardCache::print(outputStream* out) {
for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
for (uint j = 0; j < _max_regions; j++) {
out->print_cr("_from_card_cache["UINT32_FORMAT"]["UINT32_FORMAT"] = "INT32_FORMAT".",
i, j, at(i, j));
}
}
}
#endif
void FromCardCache::clear(uint region_idx) {
uint num_par_remsets = HeapRegionRemSet::num_par_rem_sets();
for (uint i = 0; i < num_par_remsets; i++) {
set(i, region_idx, InvalidCard);
}
}
void OtherRegionsTable::init_from_card_cache(uint max_regions) {
FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
}
void OtherRegionsTable::shrink_from_card_cache(uint new_num_regions) {
FromCardCache::shrink(new_num_regions);
}
void OtherRegionsTable::print_from_card_cache() {
FromCardCache::print();
}
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
size_t cur_hrs_ind = (size_t) hr()->hrs_index();
uint cur_hrs_ind = hr()->hrs_index();
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
@ -410,19 +430,17 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = "INT32_FORMAT")",
hr()->bottom(), from_card,
_from_card_cache[tid][cur_hrs_ind]);
FromCardCache::at((uint)tid, cur_hrs_ind));
}
if (from_card == _from_card_cache[tid][cur_hrs_ind]) {
if (FromCardCache::contains_or_replace((uint)tid, cur_hrs_ind, from_card)) {
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr(" from-card cache hit.");
}
assert(contains_reference(from), "We just added it!");
return;
} else {
_from_card_cache[tid][cur_hrs_ind] = from_card;
}
// Note that this may be a continued H region.
@ -442,7 +460,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
PerRegionTable* prt = find_region_table(ind, from_hr);
if (prt == NULL) {
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
// Confirm that it's really not there...
prt = find_region_table(ind, from_hr);
if (prt == NULL) {
@ -544,7 +562,7 @@ OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
jint OtherRegionsTable::_n_coarsenings = 0;
PerRegionTable* OtherRegionsTable::delete_region_table() {
assert(_m.owned_by_self(), "Precondition");
assert(_m->owned_by_self(), "Precondition");
assert(_n_fine_entries == _max_fine_entries, "Precondition");
PerRegionTable* max = NULL;
jint max_occ = 0;
@ -676,8 +694,6 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
size_t OtherRegionsTable::occupied() const {
// Cast away const in this case.
MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
size_t sum = occ_fine();
sum += occ_sparse();
sum += occ_coarse();
@ -707,8 +723,6 @@ size_t OtherRegionsTable::occ_sparse() const {
}
size_t OtherRegionsTable::mem_size() const {
// Cast away const in this case.
MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
size_t sum = 0;
// all PRTs are of the same size so it is sufficient to query only one of them.
if (_first_all_fine_prts != NULL) {
@ -724,7 +738,7 @@ size_t OtherRegionsTable::mem_size() const {
}
size_t OtherRegionsTable::static_mem_size() {
return _from_card_cache_mem_size;
return FromCardCache::static_mem_size();
}
size_t OtherRegionsTable::fl_mem_size() {
@ -732,14 +746,10 @@ size_t OtherRegionsTable::fl_mem_size() {
}
void OtherRegionsTable::clear_fcc() {
size_t hrs_idx = hr()->hrs_index();
for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
_from_card_cache[i][hrs_idx] = -1;
}
FromCardCache::clear(hr()->hrs_index());
}
void OtherRegionsTable::clear() {
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
// if there are no entries, skip this step
if (_first_all_fine_prts != NULL) {
guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
@ -759,7 +769,7 @@ void OtherRegionsTable::clear() {
}
void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
size_t hrs_ind = (size_t) from_hr->hrs_index();
size_t ind = hrs_ind & _mod_max_fine_entries_mask;
if (del_single_region_table(ind, from_hr)) {
@ -768,15 +778,15 @@ void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
_coarse_map.par_at_put(hrs_ind, 0);
}
// Check to see if any of the fcc entries come from here.
size_t hr_ind = (size_t) hr()->hrs_index();
for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
int fcc_ent = _from_card_cache[tid][hr_ind];
if (fcc_ent != -1) {
uint hr_ind = hr()->hrs_index();
for (uint tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
int fcc_ent = FromCardCache::at(tid, hr_ind);
if (fcc_ent != FromCardCache::InvalidCard) {
HeapWord* card_addr = (HeapWord*)
(uintptr_t(fcc_ent) << CardTableModRefBS::card_shift);
if (hr()->is_in_reserved(card_addr)) {
// Clear the from card cache.
_from_card_cache[tid][hr_ind] = -1;
FromCardCache::set(tid, hr_ind, FromCardCache::InvalidCard);
}
}
}
@ -805,7 +815,7 @@ bool OtherRegionsTable::del_single_region_table(size_t ind,
bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
// Cast away const in this case.
MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
return contains_reference_locked(from);
}
@ -832,8 +842,6 @@ bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const
"Must be in range.");
return _sparse_table.contains_card(hr_ind, card_index);
}
}
void
@ -844,13 +852,15 @@ OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
// Determines how many threads can add records to an rset in parallel.
// This can be done by either mutator threads together with the
// concurrent refinement threads or GC threads.
int HeapRegionRemSet::num_par_rem_sets() {
return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
uint HeapRegionRemSet::num_par_rem_sets() {
return (uint)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
}
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
HeapRegion* hr)
: _bosa(bosa), _strong_code_roots_list(NULL), _other_regions(hr) {
: _bosa(bosa),
_m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #"UINT32_FORMAT, hr->hrs_index()), true),
_code_roots(), _other_regions(hr, &_m) {
reset_for_par_iteration();
}
@ -883,7 +893,7 @@ bool HeapRegionRemSet::iter_is_complete() {
}
#ifndef PRODUCT
void HeapRegionRemSet::print() const {
void HeapRegionRemSet::print() {
HeapRegionRemSetIterator iter(this);
size_t card_index;
while (iter.has_next(card_index)) {
@ -909,14 +919,14 @@ void HeapRegionRemSet::cleanup() {
}
void HeapRegionRemSet::clear() {
if (_strong_code_roots_list != NULL) {
delete _strong_code_roots_list;
}
_strong_code_roots_list = new (ResourceObj::C_HEAP, mtGC)
GrowableArray<nmethod*>(10, 0, NULL, true);
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
clear_locked();
}
void HeapRegionRemSet::clear_locked() {
_code_roots.clear();
_other_regions.clear();
assert(occupied() == 0, "Should be clear.");
assert(occupied_locked() == 0, "Should be clear.");
reset_for_par_iteration();
}
@ -932,27 +942,18 @@ void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
_other_regions.scrub(ctbs, region_bm, card_bm);
}
// Code roots support
void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
assert(nm != NULL, "sanity");
// Search for the code blob from the RHS to avoid
// duplicate entries as much as possible
if (_strong_code_roots_list->find_from_end(nm) < 0) {
// Code blob isn't already in the list
_strong_code_roots_list->push(nm);
}
_code_roots.add(nm);
}
void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
assert(nm != NULL, "sanity");
int idx = _strong_code_roots_list->find(nm);
if (idx >= 0) {
_strong_code_roots_list->remove_at(idx);
}
_code_roots.remove(nm);
// Check that there were no duplicates
guarantee(_strong_code_roots_list->find(nm) < 0, "duplicate entry found");
guarantee(!_code_roots.contains(nm), "duplicate entry found");
}
class NMethodMigrationOopClosure : public OopClosure {
@ -1014,8 +1015,8 @@ void HeapRegionRemSet::migrate_strong_code_roots() {
GrowableArray<nmethod*> to_be_retained(10);
G1CollectedHeap* g1h = G1CollectedHeap::heap();
while (_strong_code_roots_list->is_nonempty()) {
nmethod *nm = _strong_code_roots_list->pop();
while (!_code_roots.is_empty()) {
nmethod *nm = _code_roots.pop();
if (nm != NULL) {
NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
nm->oops_do(&oop_cl);
@ -1038,20 +1039,16 @@ void HeapRegionRemSet::migrate_strong_code_roots() {
}
void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
for (int i = 0; i < _strong_code_roots_list->length(); i += 1) {
nmethod* nm = _strong_code_roots_list->at(i);
blk->do_code_blob(nm);
}
_code_roots.nmethods_do(blk);
}
size_t HeapRegionRemSet::strong_code_roots_mem_size() {
return sizeof(GrowableArray<nmethod*>) +
_strong_code_roots_list->max_length() * sizeof(nmethod*);
return _code_roots.mem_size();
}
//-------------------- Iteration --------------------
HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) :
HeapRegionRemSetIterator:: HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
_hrrs(hrrs),
_g1h(G1CollectedHeap::heap()),
_coarse_map(&hrrs->_other_regions._coarse_map),

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
#include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
#include "gc_implementation/g1/sparsePRT.hpp"
// Remembered set for a heap region. Represent a set of "cards" that
@ -44,6 +45,54 @@ class nmethod;
class HRRSCleanupTask : public SparsePRTCleanupTask {
};
// The FromCardCache remembers, on a per-region and per-thread basis, the most
// recently processed card on the heap.
class FromCardCache : public AllStatic {
private:
// Array of card indices. Indexed by thread X and heap region to minimize
// thread contention.
static int** _cache;
static uint _max_regions;
static size_t _static_mem_size;
public:
enum {
InvalidCard = -1 // Card value of an invalid card, i.e. a card index not otherwise used.
};
static void clear(uint region_idx);
// Returns true if the given card is in the cache at the given location, or
// replaces the card at that location and returns false.
static bool contains_or_replace(uint worker_id, uint region_idx, int card) {
int card_in_cache = at(worker_id, region_idx);
if (card_in_cache == card) {
return true;
} else {
set(worker_id, region_idx, card);
return false;
}
}
static int at(uint worker_id, uint region_idx) {
return _cache[worker_id][region_idx];
}
static void set(uint worker_id, uint region_idx, int val) {
_cache[worker_id][region_idx] = val;
}
static void initialize(uint n_par_rs, uint max_num_regions);
static void shrink(uint new_num_regions);
static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN;
static size_t static_mem_size() {
return _static_mem_size;
}
};
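
A minimal usage sketch, not part of this patch, mirroring how
OtherRegionsTable::add_reference() above drives the FromCardCache fast path; the
function name is illustrative only.

void record_card_sketch(uint worker_id, uint region_idx, int from_card) {
  if (FromCardCache::contains_or_replace(worker_id, region_idx, from_card)) {
    // Hit: this worker's last card for this region is the same card, so the
    // remembered set already holds it and the slow path can be skipped.
    return;
  }
  // Miss: the cache now remembers from_card; fall through to the slow path
  // that inserts the card into the fine/sparse/coarse tables.
}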
// The "_coarse_map" is a bitmap with one bit for each region, where set
// bits indicate that the corresponding region may contain some pointer
// into the owning region.
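
A self-contained sketch, not part of this patch, of the coarse-grained idea: one
bit per region, where a set bit means the whole region may point into the owning
region and individual cards are no longer tracked. The 2048-region capacity and
the names are illustrative only.

#include <bitset>

static std::bitset<2048> coarse_map;   // one bit per heap region

void coarsen(unsigned region_idx)          { coarse_map.set(region_idx); }
bool may_contain_refs(unsigned region_idx) { return coarse_map.test(region_idx); }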
@ -72,7 +121,7 @@ class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
friend class HeapRegionRemSetIterator;
G1CollectedHeap* _g1h;
Mutex _m;
Mutex* _m;
HeapRegion* _hr;
// These are protected by "_m".
@ -118,18 +167,13 @@ class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
// false.
bool del_single_region_table(size_t ind, HeapRegion* hr);
// Indexed by thread X heap region, to minimize thread contention.
static int** _from_card_cache;
static size_t _from_card_cache_max_regions;
static size_t _from_card_cache_mem_size;
// link/add the given fine grain remembered set into the "all" list
void link_to_all(PerRegionTable * prt);
// unlink/remove the given fine grain remembered set into the "all" list
void unlink_from_all(PerRegionTable * prt);
public:
OtherRegionsTable(HeapRegion* hr);
OtherRegionsTable(HeapRegion* hr, Mutex* m);
HeapRegion* hr() const { return _hr; }
@ -141,7 +185,6 @@ public:
// objects.
void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
// Not const because it takes a lock.
size_t occupied() const;
size_t occ_fine() const;
size_t occ_coarse() const;
@ -170,11 +213,11 @@ public:
// Declare the heap size (in # of regions) to the OtherRegionsTable.
// (Uses it to initialize from_card_cache).
static void init_from_card_cache(size_t max_regions);
static void init_from_card_cache(uint max_regions);
// Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
// Make sure any entries for higher regions are invalid.
static void shrink_from_card_cache(size_t new_n_regs);
static void shrink_from_card_cache(uint new_num_regions);
static void print_from_card_cache();
};
@ -192,9 +235,11 @@ private:
G1BlockOffsetSharedArray* _bosa;
G1BlockOffsetSharedArray* bosa() const { return _bosa; }
// A list of code blobs (nmethods) whose code contains pointers into
// A set of code blobs (nmethods) whose code contains pointers into
// the region that owns this RSet.
GrowableArray<nmethod*>* _strong_code_roots_list;
G1CodeRootSet _code_roots;
Mutex _m;
OtherRegionsTable _other_regions;
@ -218,17 +263,20 @@ private:
static void print_event(outputStream* str, Event evnt);
public:
HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
HeapRegion* hr);
HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, HeapRegion* hr);
static int num_par_rem_sets();
static uint num_par_rem_sets();
static void setup_remset_size();
HeapRegion* hr() const {
return _other_regions.hr();
}
size_t occupied() const {
size_t occupied() {
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
return occupied_locked();
}
size_t occupied_locked() {
return _other_regions.occupied();
}
size_t occ_fine() const {
@ -260,6 +308,7 @@ public:
// The region is being reclaimed; clear its remset, and any mention of
// entries for this region in other remsets.
void clear();
void clear_locked();
// Attempt to claim the region. Returns true iff this call caused an
// atomic transition from Unclaimed to Claimed.
@ -289,6 +338,7 @@ public:
// The actual # of bytes this hr_remset takes up.
// Note also includes the strong code root set.
size_t mem_size() {
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
return _other_regions.mem_size()
// This correction is necessary because the above includes the second
// part.
@ -299,13 +349,13 @@ public:
// Returns the memory occupancy of all static data structures associated
// with remembered sets.
static size_t static_mem_size() {
return OtherRegionsTable::static_mem_size();
return OtherRegionsTable::static_mem_size() + G1CodeRootSet::static_mem_size();
}
// Returns the memory occupancy of all free_list data structures associated
// with remembered sets.
static size_t fl_mem_size() {
return OtherRegionsTable::fl_mem_size();
return OtherRegionsTable::fl_mem_size() + G1CodeRootSet::fl_mem_size();
}
bool contains_reference(OopOrNarrowOopStar from) const {
@ -328,21 +378,21 @@ public:
void strong_code_roots_do(CodeBlobClosure* blk) const;
// Returns the number of elements in the strong code roots list
int strong_code_roots_list_length() {
return _strong_code_roots_list->length();
size_t strong_code_roots_list_length() {
return _code_roots.length();
}
// Returns true if the strong code roots contains the given
// nmethod.
bool strong_code_roots_list_contains(nmethod* nm) {
return _strong_code_roots_list->contains(nm);
return _code_roots.contains(nm);
}
// Returns the amount of memory, in bytes, currently
// consumed by the strong code roots.
size_t strong_code_roots_mem_size();
void print() const;
void print() PRODUCT_RETURN;
// Called during a stop-world phase to perform any deferred cleanups.
static void cleanup();
@ -350,12 +400,13 @@ public:
// Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
// (Uses it to initialize from_card_cache).
static void init_heap(uint max_regions) {
OtherRegionsTable::init_from_card_cache((size_t) max_regions);
G1CodeRootSet::initialize();
OtherRegionsTable::init_from_card_cache(max_regions);
}
// Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
static void shrink_heap(uint new_n_regs) {
OtherRegionsTable::shrink_from_card_cache((size_t) new_n_regs);
OtherRegionsTable::shrink_from_card_cache(new_n_regs);
}
#ifndef PRODUCT
@ -384,7 +435,7 @@ public:
class HeapRegionRemSetIterator : public StackObj {
// The region RSet over which we're iterating.
const HeapRegionRemSet* _hrrs;
HeapRegionRemSet* _hrrs;
// Local caching of HRRS fields.
const BitMap* _coarse_map;
@ -441,7 +492,7 @@ class HeapRegionRemSetIterator : public StackObj {
public:
// We require an iterator to be initialized before use, so the
// constructor does little.
HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs);
HeapRegionRemSetIterator(HeapRegionRemSet* hrrs);
// If there remains one or more cards to be yielded, returns true and
// sets "card_index" to one of those cards (which is then considered

@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "memory/allocation.hpp"

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,171 +23,60 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
uint HeapRegionSetBase::_unrealistically_long_length = 0;
HRSPhase HeapRegionSetBase::_phase = HRSPhaseNone;
//////////////////// HeapRegionSetBase ////////////////////
void HeapRegionSetBase::set_unrealistically_long_length(uint len) {
guarantee(_unrealistically_long_length == 0, "should only be set once");
_unrealistically_long_length = len;
}
uint FreeRegionList::_unrealistically_long_length = 0;
void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) {
msg->append("[%s] %s ln: %u rn: %u cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
name(), message, length(), region_num(),
total_capacity_bytes(), total_used_bytes());
msg->append("[%s] %s ln: %u cy: "SIZE_FORMAT,
name(), message, length(), total_capacity_bytes());
fill_in_ext_msg_extra(msg);
}
bool HeapRegionSetBase::verify_region(HeapRegion* hr,
HeapRegionSetBase* expected_containing_set) {
const char* error_message = NULL;
if (!regions_humongous()) {
if (hr->isHumongous()) {
error_message = "the region should not be humongous";
}
} else {
if (!hr->isHumongous() || !hr->startsHumongous()) {
error_message = "the region should be 'starts humongous'";
}
}
if (!regions_empty()) {
if (hr->is_empty()) {
error_message = "the region should not be empty";
}
} else {
if (!hr->is_empty()) {
error_message = "the region should be empty";
}
}
#ifdef ASSERT
// The _containing_set field is only available when ASSERT is defined.
if (hr->containing_set() != expected_containing_set) {
error_message = "inconsistent containing set found";
}
#endif // ASSERT
const char* extra_error_message = verify_region_extra(hr);
if (extra_error_message != NULL) {
error_message = extra_error_message;
}
if (error_message != NULL) {
outputStream* out = tty;
out->cr();
out->print_cr("## [%s] %s", name(), error_message);
out->print_cr("## Offending Region: "PTR_FORMAT, hr);
out->print_cr(" "HR_FORMAT, HR_FORMAT_PARAMS(hr));
#ifdef ASSERT
out->print_cr(" containing set: "PTR_FORMAT, hr->containing_set());
#endif // ASSERT
out->print_cr("## Offending Region Set: "PTR_FORMAT, this);
print_on(out);
return false;
} else {
return true;
}
#ifndef PRODUCT
void HeapRegionSetBase::verify_region(HeapRegion* hr) {
assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrs_index()));
assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrs_index())); // currently we don't use these sets for young regions
assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrs_index(), name()));
assert(hr->is_empty() == regions_empty(), err_msg("Wrong empty state for region %u and set %s", hr->hrs_index(), name()));
assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrs_index()));
}
#endif
void HeapRegionSetBase::verify() {
// It's important that we also observe the MT safety protocol even
// for the verification calls. If we do verification without the
// appropriate locks and the set changes underneath our feet
// verification might fail and send us on a wild goose chase.
hrs_assert_mt_safety_ok(this);
check_mt_safety();
guarantee(( is_empty() && length() == 0 && region_num() == 0 &&
total_used_bytes() == 0 && total_capacity_bytes() == 0) ||
(!is_empty() && length() >= 0 && region_num() >= 0 &&
total_used_bytes() >= 0 && total_capacity_bytes() >= 0),
hrs_ext_msg(this, "invariant"));
guarantee((!regions_humongous() && region_num() == length()) ||
( regions_humongous() && region_num() >= length()),
hrs_ext_msg(this, "invariant"));
guarantee(!regions_empty() || total_used_bytes() == 0,
hrs_ext_msg(this, "invariant"));
guarantee(total_used_bytes() <= total_capacity_bytes(),
guarantee(( is_empty() && length() == 0 && total_capacity_bytes() == 0) ||
(!is_empty() && length() >= 0 && total_capacity_bytes() >= 0),
hrs_ext_msg(this, "invariant"));
}
void HeapRegionSetBase::verify_start() {
// See comment in verify() about MT safety and verification.
hrs_assert_mt_safety_ok(this);
check_mt_safety();
assert(!_verify_in_progress,
hrs_ext_msg(this, "verification should not be in progress"));
// Do the basic verification first before we do the checks over the regions.
HeapRegionSetBase::verify();
_calc_length = 0;
_calc_region_num = 0;
_calc_total_capacity_bytes = 0;
_calc_total_used_bytes = 0;
_verify_in_progress = true;
}
void HeapRegionSetBase::verify_next_region(HeapRegion* hr) {
// See comment in verify() about MT safety and verification.
hrs_assert_mt_safety_ok(this);
assert(_verify_in_progress,
hrs_ext_msg(this, "verification should be in progress"));
guarantee(verify_region(hr, this), hrs_ext_msg(this, "region verification"));
_calc_length += 1;
_calc_region_num += hr->region_num();
_calc_total_capacity_bytes += hr->capacity();
_calc_total_used_bytes += hr->used();
}
void HeapRegionSetBase::verify_end() {
// See comment in verify() about MT safety and verification.
hrs_assert_mt_safety_ok(this);
check_mt_safety();
assert(_verify_in_progress,
hrs_ext_msg(this, "verification should be in progress"));
guarantee(length() == _calc_length,
hrs_err_msg("[%s] length: %u should be == calc length: %u",
name(), length(), _calc_length));
guarantee(region_num() == _calc_region_num,
hrs_err_msg("[%s] region num: %u should be == calc region num: %u",
name(), region_num(), _calc_region_num));
guarantee(total_capacity_bytes() == _calc_total_capacity_bytes,
hrs_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == "
"calc capacity bytes: "SIZE_FORMAT,
name(),
total_capacity_bytes(), _calc_total_capacity_bytes));
guarantee(total_used_bytes() == _calc_total_used_bytes,
hrs_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == "
"calc used bytes: "SIZE_FORMAT,
name(), total_used_bytes(), _calc_total_used_bytes));
_verify_in_progress = false;
}
void HeapRegionSetBase::clear_phase() {
assert(_phase != HRSPhaseNone, "pre-condition");
_phase = HRSPhaseNone;
}
void HeapRegionSetBase::set_phase(HRSPhase phase) {
assert(_phase == HRSPhaseNone, "pre-condition");
assert(phase != HRSPhaseNone, "pre-condition");
_phase = phase;
}
void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) {
out->cr();
out->print_cr("Set: %s ("PTR_FORMAT")", name(), this);
@ -196,76 +85,38 @@ void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) {
out->print_cr(" empty : %s", BOOL_TO_STR(regions_empty()));
out->print_cr(" Attributes");
out->print_cr(" length : %14u", length());
out->print_cr(" region num : %14u", region_num());
out->print_cr(" total capacity : "SIZE_FORMAT_W(14)" bytes",
total_capacity_bytes());
out->print_cr(" total used : "SIZE_FORMAT_W(14)" bytes",
total_used_bytes());
}
void HeapRegionSetBase::clear() {
_length = 0;
_region_num = 0;
_total_used_bytes = 0;
}
HeapRegionSetBase::HeapRegionSetBase(const char* name)
HeapRegionSetBase::HeapRegionSetBase(const char* name, bool humongous, bool empty, HRSMtSafeChecker* mt_safety_checker)
: _name(name), _verify_in_progress(false),
_calc_length(0), _calc_region_num(0),
_calc_total_capacity_bytes(0), _calc_total_used_bytes(0) { }
_is_humongous(humongous), _is_empty(empty), _mt_safety_checker(mt_safety_checker),
_count()
{ }
//////////////////// HeapRegionSet ////////////////////
void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
hrs_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(proxy_set);
hrs_assert_sets_match(this, proxy_set);
verify_optional();
proxy_set->verify_optional();
if (proxy_set->is_empty()) return;
assert(proxy_set->length() <= _length,
hrs_err_msg("[%s] proxy set length: %u should be <= length: %u",
name(), proxy_set->length(), _length));
_length -= proxy_set->length();
assert(proxy_set->region_num() <= _region_num,
hrs_err_msg("[%s] proxy set region num: %u should be <= region num: %u",
name(), proxy_set->region_num(), _region_num));
_region_num -= proxy_set->region_num();
assert(proxy_set->total_used_bytes() <= _total_used_bytes,
hrs_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" "
"should be <= used bytes: "SIZE_FORMAT,
name(), proxy_set->total_used_bytes(),
_total_used_bytes));
_total_used_bytes -= proxy_set->total_used_bytes();
proxy_set->clear();
verify_optional();
proxy_set->verify_optional();
void FreeRegionList::set_unrealistically_long_length(uint len) {
guarantee(_unrealistically_long_length == 0, "should only be set once");
_unrealistically_long_length = len;
}
//////////////////// HeapRegionLinkedList ////////////////////
void HeapRegionLinkedList::fill_in_ext_msg_extra(hrs_ext_msg* msg) {
void FreeRegionList::fill_in_ext_msg_extra(hrs_ext_msg* msg) {
msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail());
}
void HeapRegionLinkedList::add_as_head(HeapRegionLinkedList* from_list) {
hrs_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(from_list);
void FreeRegionList::add_as_head_or_tail(FreeRegionList* from_list, bool as_head) {
check_mt_safety();
from_list->check_mt_safety();
verify_optional();
from_list->verify_optional();
if (from_list->is_empty()) return;
if (from_list->is_empty()) {
return;
}
#ifdef ASSERT
HeapRegionLinkedListIterator iter(from_list);
FreeRegionListIterator iter(from_list);
while (iter.more_available()) {
HeapRegion* hr = iter.get_next();
// In set_containing_set() we check that we either set the value
@ -276,73 +127,49 @@ void HeapRegionLinkedList::add_as_head(HeapRegionLinkedList* from_list) {
}
#endif // ASSERT
if (_head != NULL) {
assert(length() > 0 && _tail != NULL, hrs_ext_msg(this, "invariant"));
from_list->_tail->set_next(_head);
} else {
if (_head == NULL) {
assert(length() == 0 && _tail == NULL, hrs_ext_msg(this, "invariant"));
_tail = from_list->_tail;
}
_head = from_list->_head;
_length += from_list->length();
_region_num += from_list->region_num();
_total_used_bytes += from_list->total_used_bytes();
from_list->clear();
verify_optional();
from_list->verify_optional();
}
void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) {
hrs_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(from_list);
verify_optional();
from_list->verify_optional();
if (from_list->is_empty()) return;
#ifdef ASSERT
HeapRegionLinkedListIterator iter(from_list);
while (iter.more_available()) {
HeapRegion* hr = iter.get_next();
// In set_containing_set() we check that we either set the value
// from NULL to non-NULL or vice versa to catch bugs. So, we have
// to NULL it first before setting it to the value.
hr->set_containing_set(NULL);
hr->set_containing_set(this);
}
#endif // ASSERT
if (_tail != NULL) {
assert(length() > 0 && _head != NULL, hrs_ext_msg(this, "invariant"));
_tail->set_next(from_list->_head);
} else {
assert(length() == 0 && _head == NULL, hrs_ext_msg(this, "invariant"));
_head = from_list->_head;
_tail = from_list->_tail;
} else {
assert(length() > 0 && _tail != NULL, hrs_ext_msg(this, "invariant"));
if (as_head) {
from_list->_tail->set_next(_head);
_head->set_prev(from_list->_tail);
_head = from_list->_head;
} else {
_tail->set_next(from_list->_head);
from_list->_head->set_prev(_tail);
_tail = from_list->_tail;
}
}
_tail = from_list->_tail;
_length += from_list->length();
_region_num += from_list->region_num();
_total_used_bytes += from_list->total_used_bytes();
_count.increment(from_list->length(), from_list->total_capacity_bytes());
from_list->clear();
verify_optional();
from_list->verify_optional();
}
void HeapRegionLinkedList::remove_all() {
hrs_assert_mt_safety_ok(this);
void FreeRegionList::add_as_head(FreeRegionList* from_list) {
add_as_head_or_tail(from_list, true /* as_head */);
}
void FreeRegionList::add_as_tail(FreeRegionList* from_list) {
add_as_head_or_tail(from_list, false /* as_head */);
}
void FreeRegionList::remove_all() {
check_mt_safety();
verify_optional();
HeapRegion* curr = _head;
while (curr != NULL) {
hrs_assert_region_ok(this, curr, this);
verify_region(curr);
HeapRegion* next = curr->next();
curr->set_next(NULL);
curr->set_prev(NULL);
curr->set_containing_set(NULL);
curr = next;
}
@ -351,8 +178,76 @@ void HeapRegionLinkedList::remove_all() {
verify_optional();
}
void HeapRegionLinkedList::remove_all_pending(uint target_count) {
hrs_assert_mt_safety_ok(this);
void FreeRegionList::add_ordered(FreeRegionList* from_list) {
check_mt_safety();
from_list->check_mt_safety();
verify_optional();
from_list->verify_optional();
if (from_list->is_empty()) {
return;
}
if (is_empty()) {
add_as_head(from_list);
return;
}
#ifdef ASSERT
FreeRegionListIterator iter(from_list);
while (iter.more_available()) {
HeapRegion* hr = iter.get_next();
// In set_containing_set() we check that we either set the value
// from NULL to non-NULL or vice versa to catch bugs. So, we have
// to NULL it first before setting it to the value.
hr->set_containing_set(NULL);
hr->set_containing_set(this);
}
#endif // ASSERT
HeapRegion* curr_to = _head;
HeapRegion* curr_from = from_list->_head;
while (curr_from != NULL) {
while (curr_to != NULL && curr_to->hrs_index() < curr_from->hrs_index()) {
curr_to = curr_to->next();
}
if (curr_to == NULL) {
// The rest of the from list should be added as tail
_tail->set_next(curr_from);
curr_from->set_prev(_tail);
curr_from = NULL;
} else {
HeapRegion* next_from = curr_from->next();
curr_from->set_next(curr_to);
curr_from->set_prev(curr_to->prev());
if (curr_to->prev() == NULL) {
_head = curr_from;
} else {
curr_to->prev()->set_next(curr_from);
}
curr_to->set_prev(curr_from);
curr_from = next_from;
}
}
if (_tail->hrs_index() < from_list->_tail->hrs_index()) {
_tail = from_list->_tail;
}
_count.increment(from_list->length(), from_list->total_capacity_bytes());
from_list->clear();
verify_optional();
from_list->verify_optional();
}
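
A self-contained sketch, not part of this patch, of the same ordered-merge idea on
a plain doubly-linked list, to make the curr_to / curr_from pointer surgery in
add_ordered() above easier to follow. All names here are illustrative.

#include <cstddef>

struct Node {
  unsigned idx;   // stand-in for HeapRegion::hrs_index()
  Node* next;
  Node* prev;
};

// Splices the sorted chain 'from' into the sorted chain 'head'; both are
// ascending by idx. Returns the new head of the merged, still-sorted list.
Node* merge_sorted(Node* head, Node* from) {
  Node dummy = { 0, head, NULL };          // sentinel in front of the head
  if (head != NULL) {
    head->prev = &dummy;
  }
  Node* tail = &dummy;                     // last node known to sort before 'from'
  while (from != NULL) {
    Node* next_from = from->next;
    // Advance past destination nodes that sort before the incoming node.
    while (tail->next != NULL && tail->next->idx < from->idx) {
      tail = tail->next;
    }
    // Link 'from' in between 'tail' and 'tail->next'.
    from->next = tail->next;
    from->prev = tail;
    if (tail->next != NULL) {
      tail->next->prev = from;
    }
    tail->next = from;
    tail = from;
    from = next_from;
  }
  Node* new_head = dummy.next;
  if (new_head != NULL) {
    new_head->prev = NULL;                 // detach the sentinel
  }
  return new_head;
}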
void FreeRegionList::remove_all_pending(uint target_count) {
check_mt_safety();
assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));
@ -360,11 +255,11 @@ void HeapRegionLinkedList::remove_all_pending(uint target_count) {
DEBUG_ONLY(uint old_length = length();)
HeapRegion* curr = _head;
HeapRegion* prev = NULL;
uint count = 0;
while (curr != NULL) {
hrs_assert_region_ok(this, curr, this);
verify_region(curr);
HeapRegion* next = curr->next();
HeapRegion* prev = curr->prev();
if (curr->pending_removal()) {
assert(count < target_count,
@ -384,10 +279,15 @@ void HeapRegionLinkedList::remove_all_pending(uint target_count) {
_tail = prev;
} else {
assert(_tail != curr, hrs_ext_msg(this, "invariant"));
next->set_prev(prev);
}
if (_last == curr) {
_last = NULL;
}
curr->set_next(NULL);
remove_internal(curr);
curr->set_prev(NULL);
remove(curr);
curr->set_pending_removal(false);
count += 1;
@ -397,8 +297,6 @@ void HeapRegionLinkedList::remove_all_pending(uint target_count) {
// carry on iterating to make sure there are not more regions
// tagged with pending removal.
DEBUG_ONLY(if (count == target_count) break;)
} else {
prev = curr;
}
curr = next;
}
@ -414,46 +312,27 @@ void HeapRegionLinkedList::remove_all_pending(uint target_count) {
verify_optional();
}
void HeapRegionLinkedList::verify() {
void FreeRegionList::verify() {
// See comment in HeapRegionSetBase::verify() about MT safety and
// verification.
hrs_assert_mt_safety_ok(this);
check_mt_safety();
// This will also do the basic verification too.
verify_start();
HeapRegion* curr = _head;
HeapRegion* prev1 = NULL;
HeapRegion* prev0 = NULL;
uint count = 0;
while (curr != NULL) {
verify_next_region(curr);
count += 1;
guarantee(count < _unrealistically_long_length,
hrs_err_msg("[%s] the calculated length: %u "
"seems very long, is there maybe a cycle? "
"curr: "PTR_FORMAT" prev0: "PTR_FORMAT" "
"prev1: "PTR_FORMAT" length: %u",
name(), count, curr, prev0, prev1, length()));
prev1 = prev0;
prev0 = curr;
curr = curr->next();
}
guarantee(_tail == prev0, hrs_ext_msg(this, "post-condition"));
verify_list();
verify_end();
}
void HeapRegionLinkedList::clear() {
HeapRegionSetBase::clear();
void FreeRegionList::clear() {
_count = HeapRegionSetCount();
_head = NULL;
_tail = NULL;
_last = NULL;
}
void HeapRegionLinkedList::print_on(outputStream* out, bool print_contents) {
void FreeRegionList::print_on(outputStream* out, bool print_contents) {
HeapRegionSetBase::print_on(out, print_contents);
out->print_cr(" Linking");
out->print_cr(" head : "PTR_FORMAT, _head);
@ -461,10 +340,124 @@ void HeapRegionLinkedList::print_on(outputStream* out, bool print_contents) {
if (print_contents) {
out->print_cr(" Contents");
HeapRegionLinkedListIterator iter(this);
FreeRegionListIterator iter(this);
while (iter.more_available()) {
HeapRegion* hr = iter.get_next();
hr->print_on(out);
}
}
}
void FreeRegionList::verify_list() {
HeapRegion* curr = head();
HeapRegion* prev1 = NULL;
HeapRegion* prev0 = NULL;
uint count = 0;
size_t capacity = 0;
uint last_index = 0;
guarantee(_head == NULL || _head->prev() == NULL, "_head should not have a prev");
while (curr != NULL) {
verify_region(curr);
count++;
guarantee(count < _unrealistically_long_length,
hrs_err_msg("[%s] the calculated length: %u seems very long, is there maybe a cycle? curr: "PTR_FORMAT" prev0: "PTR_FORMAT" " "prev1: "PTR_FORMAT" length: %u", name(), count, curr, prev0, prev1, length()));
if (curr->next() != NULL) {
guarantee(curr->next()->prev() == curr, "Next or prev pointers messed up");
}
guarantee(curr->hrs_index() == 0 || curr->hrs_index() > last_index, "List should be sorted");
last_index = curr->hrs_index();
capacity += curr->capacity();
prev1 = prev0;
prev0 = curr;
curr = curr->next();
}
guarantee(tail() == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), tail()->hrs_index(), prev0->hrs_index()));
guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next");
guarantee(length() == count, err_msg("%s count mismatch. Expected %u, actual %u.", name(), length(), count));
guarantee(total_capacity_bytes() == capacity, err_msg("%s capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
name(), total_capacity_bytes(), capacity));
}
// Note on the check_mt_safety() methods below:
//
// Verification of the "master" heap region sets / lists that are
// maintained by G1CollectedHeap is always done during a STW pause and
// by the VM thread at the start / end of the pause. The standard
// verification methods all assert check_mt_safety(). This is
// important as it ensures that verification is done without
// concurrent updates taking place at the same time. It follows, that,
// for the "master" heap region sets / lists, the check_mt_safety()
// method should include the VM thread / STW case.
void MasterFreeRegionListMtSafeChecker::check() {
// Master Free List MT safety protocol:
// (a) If we're at a safepoint, operations on the master free list
// should be invoked by either the VM thread (which will serialize
// them) or by the GC workers while holding the
// FreeList_lock.
// (b) If we're not at a safepoint, operations on the master free
// list should be invoked while holding the Heap_lock.
if (SafepointSynchronize::is_at_safepoint()) {
guarantee(Thread::current()->is_VM_thread() ||
FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint");
} else {
guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint");
}
}
void SecondaryFreeRegionListMtSafeChecker::check() {
// Secondary Free List MT safety protocol:
// Operations on the secondary free list should always be invoked
// while holding the SecondaryFreeList_lock.
guarantee(SecondaryFreeList_lock->owned_by_self(), "secondary free list MT safety protocol");
}
void OldRegionSetMtSafeChecker::check() {
// Master Old Set MT safety protocol:
// (a) If we're at a safepoint, operations on the master old set
// should be invoked:
// - by the VM thread (which will serialize them), or
// - by the GC workers while holding the FreeList_lock, if we're
// at a safepoint for an evacuation pause (this lock is taken
// anyway when a GC alloc region is retired so that a new one
// is allocated from the free list), or
// - by the GC workers while holding the OldSets_lock, if we're at a
// safepoint for a cleanup pause.
// (b) If we're not at a safepoint, operations on the master old set
// should be invoked while holding the Heap_lock.
if (SafepointSynchronize::is_at_safepoint()) {
guarantee(Thread::current()->is_VM_thread()
|| FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(),
"master old set MT safety protocol at a safepoint");
} else {
guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint");
}
}
void HumongousRegionSetMtSafeChecker::check() {
// Humongous Set MT safety protocol:
// (a) If we're at a safepoint, operations on the master humongous
// set should be invoked by either the VM thread (which will
// serialize them) or by the GC workers while holding the
// OldSets_lock.
// (b) If we're not at a safepoint, operations on the master
// humongous set should be invoked while holding the Heap_lock.
if (SafepointSynchronize::is_at_safepoint()) {
guarantee(Thread::current()->is_VM_thread() ||
OldSets_lock->owned_by_self(),
"master humongous set MT safety protocol at a safepoint");
} else {
guarantee(Heap_lock->owned_by_self(),
"master humongous set MT safety protocol outside a safepoint");
}
}
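
A hedged sketch, not part of this patch, of a call site honoring protocol (b) of
MasterFreeRegionListMtSafeChecker outside a safepoint; the function is
illustrative, while MutexLockerEx and Heap_lock are the primitives used elsewhere
in this changeset.

void master_free_list_op_sketch(FreeRegionList* master_free_list) {
  MutexLockerEx x(Heap_lock);
  // With Heap_lock held, check_mt_safety() and verify() on the master free
  // list pass, and regions may be added to or removed from it here.
}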

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,135 +38,108 @@ typedef FormatBuffer<HRS_ERR_MSG_BUFSZ> hrs_err_msg;
#define HEAP_REGION_SET_FORCE_VERIFY defined(ASSERT)
#endif // HEAP_REGION_SET_FORCE_VERIFY
//////////////////// HeapRegionSetBase ////////////////////
class hrs_ext_msg;
class HRSMtSafeChecker : public CHeapObj<mtGC> {
public:
virtual void check() = 0;
};
class MasterFreeRegionListMtSafeChecker : public HRSMtSafeChecker { public: void check(); };
class SecondaryFreeRegionListMtSafeChecker : public HRSMtSafeChecker { public: void check(); };
class HumongousRegionSetMtSafeChecker : public HRSMtSafeChecker { public: void check(); };
class OldRegionSetMtSafeChecker : public HRSMtSafeChecker { public: void check(); };
class HeapRegionSetCount VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
uint _length;
size_t _capacity;
public:
HeapRegionSetCount() : _length(0), _capacity(0) { }
const uint length() const { return _length; }
const size_t capacity() const { return _capacity; }
void increment(uint length_to_add, size_t capacity_to_add) {
_length += length_to_add;
_capacity += capacity_to_add;
}
void decrement(const uint length_to_remove, const size_t capacity_to_remove) {
_length -= length_to_remove;
_capacity -= capacity_to_remove;
}
};
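// A hedged usage sketch of the counter above; the 1 MB capacity is a
// made-up number:
//   HeapRegionSetCount count;
//   count.increment(1u, 1024u * 1024u);  // first region added
//   count.increment(1u, 1024u * 1024u);  // second region added
//   count.decrement(1u, 1024u * 1024u);  // one region removed again
//   // now count.length() == 1 and count.capacity() == 1024 * 1024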
// Base class for all the classes that represent heap region sets. It
// contains the basic attributes that each set needs to maintain
// (e.g., length, region num, used bytes sum) plus any shared
// functionality (e.g., verification).
class hrs_ext_msg;
typedef enum {
HRSPhaseNone,
HRSPhaseEvacuation,
HRSPhaseCleanup,
HRSPhaseFullGC
} HRSPhase;
class HRSPhaseSetter;
class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
friend class hrs_ext_msg;
friend class HRSPhaseSetter;
friend class VMStructs;
private:
bool _is_humongous;
bool _is_empty;
HRSMtSafeChecker* _mt_safety_checker;
protected:
static uint _unrealistically_long_length;
// The number of regions added to the set. If the set contains
// only humongous regions, this reflects only 'starts humongous'
// regions and does not include 'continues humongous' ones.
uint _length;
// The total number of regions represented by the set. If the set
// does not contain humongous regions, this should be the same as
// _length. If the set contains only humongous regions, this will
// include the 'continues humongous' regions.
uint _region_num;
// We don't keep track of the total capacity explicitly, we instead
// recalculate it based on _region_num and the heap region size.
// The sum of used bytes in all the regions in the set.
size_t _total_used_bytes;
HeapRegionSetCount _count;
const char* _name;
bool _verify_in_progress;
uint _calc_length;
uint _calc_region_num;
size_t _calc_total_capacity_bytes;
size_t _calc_total_used_bytes;
// This is here so that it can be used in the subclasses to assert
// something different depending on which phase the GC is in. This
// can be particularly helpful in the check_mt_safety() methods.
static HRSPhase _phase;
// Only used by HRSPhaseSetter.
static void clear_phase();
static void set_phase(HRSPhase phase);
bool _verify_in_progress;
// verify_region() is used to ensure that the contents of a region
// added to / removed from a set are consistent. Different sets
// make different assumptions about the regions added to them. So
// each set can override verify_region_extra(), which is called
// from verify_region(), and perform any extra verification it
// needs there.
virtual const char* verify_region_extra(HeapRegion* hr) { return NULL; }
bool verify_region(HeapRegion* hr,
HeapRegionSetBase* expected_containing_set);
// added to / removed from a set are consistent.
void verify_region(HeapRegion* hr) PRODUCT_RETURN;
// Indicates whether all regions in the set should be humongous or
// not. Only used during verification.
virtual bool regions_humongous() = 0;
bool regions_humongous() { return _is_humongous; }
// Indicates whether all regions in the set should be empty or
// not. Only used during verification.
virtual bool regions_empty() = 0;
bool regions_empty() { return _is_empty; }
// Subclasses can optionally override this to do MT safety protocol
// checks. It is called in an assert from all methods that perform
// updates on the set (and subclasses should call it too).
virtual bool check_mt_safety() { return true; }
void check_mt_safety() {
if (_mt_safety_checker != NULL) {
_mt_safety_checker->check();
}
}
virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg) { }
HeapRegionSetBase(const char* name, bool humongous, bool empty, HRSMtSafeChecker* mt_safety_checker);
public:
const char* name() { return _name; }
uint length() { return _count.length(); }
bool is_empty() { return _count.length() == 0; }
size_t total_capacity_bytes() {
return _count.capacity();
}
// It updates the fields of the set to reflect hr being added to
// the set and tags the region appropriately.
inline void add(HeapRegion* hr);
// It updates the fields of the set to reflect hr being removed
// from the set and tags the region appropriately.
inline void remove(HeapRegion* hr);
// fill_in_ext_msg() writes the values of the set's attributes
// in the custom err_msg (hrs_ext_msg). fill_in_ext_msg_extra()
// allows subclasses to append further information.
virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg) { }
void fill_in_ext_msg(hrs_ext_msg* msg, const char* message);
// It updates the fields of the set to reflect hr being added to
// the set.
inline void update_for_addition(HeapRegion* hr);
// It updates the fields of the set to reflect hr being added to
// the set and tags the region appropriately.
inline void add_internal(HeapRegion* hr);
// It updates the fields of the set to reflect hr being removed
// from the set.
inline void update_for_removal(HeapRegion* hr);
// It updates the fields of the set to reflect hr being removed
// from the set and tags the region appropriately.
inline void remove_internal(HeapRegion* hr);
// It clears all the fields of the set. Note: it will not iterate
// over the set and remove regions from it. It assumes that the
// caller has already done so. It will literally just clear the fields.
virtual void clear();
HeapRegionSetBase(const char* name);
public:
static void set_unrealistically_long_length(uint len);
const char* name() { return _name; }
uint length() { return _length; }
bool is_empty() { return _length == 0; }
uint region_num() { return _region_num; }
size_t total_capacity_bytes() {
return (size_t) region_num() << HeapRegion::LogOfHRGrainBytes;
}
size_t total_used_bytes() { return _total_used_bytes; }
virtual void verify();
void verify_start();
void verify_next_region(HeapRegion* hr);
@ -187,7 +160,6 @@ public:
// assert/guarantee-specific message it also prints out the values of
// the fields of the associated set. This can be very helpful in
// diagnosing failures.
class hrs_ext_msg : public hrs_err_msg {
public:
hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("") {
@ -195,32 +167,6 @@ public:
}
};
class HRSPhaseSetter {
public:
HRSPhaseSetter(HRSPhase phase) {
HeapRegionSetBase::set_phase(phase);
}
~HRSPhaseSetter() {
HeapRegionSetBase::clear_phase();
}
};
// These two macros are provided for convenience, to keep the uses of
// these two asserts a bit more concise.
#define hrs_assert_mt_safety_ok(_set_) \
do { \
assert((_set_)->check_mt_safety(), hrs_ext_msg((_set_), "MT safety")); \
} while (0)
#define hrs_assert_region_ok(_set_, _hr_, _expected_) \
do { \
assert((_set_)->verify_region((_hr_), (_expected_)), \
hrs_ext_msg((_set_), "region verification")); \
} while (0)
//////////////////// HeapRegionSet ////////////////////
#define hrs_assert_sets_match(_set1_, _set2_) \
do { \
assert(((_set1_)->regions_humongous() == \
@ -236,63 +182,41 @@ public:
// the same interface (namely, the HeapRegionSetBase API).
class HeapRegionSet : public HeapRegionSetBase {
protected:
virtual const char* verify_region_extra(HeapRegion* hr) {
if (hr->next() != NULL) {
return "next() should always be NULL as we do not link the regions";
}
return HeapRegionSetBase::verify_region_extra(hr);
}
HeapRegionSet(const char* name) : HeapRegionSetBase(name) {
clear();
}
public:
// It adds hr to the set. The region should not be a member of
// another set.
inline void add(HeapRegion* hr);
HeapRegionSet(const char* name, bool humongous, HRSMtSafeChecker* mt_safety_checker):
HeapRegionSetBase(name, humongous, false /* empty */, mt_safety_checker) { }
// It removes hr from the set. The region should be a member of
// this set.
inline void remove(HeapRegion* hr);
// It removes a region from the set. Instead of updating the fields
// of the set to reflect this removal, it accumulates the updates
// in proxy_set. The idea is that proxy_set is thread-local to
// avoid multiple threads updating the fields of the set
// concurrently and having to synchronize. The method
// update_from_proxy() will update the fields of the set from the
// proxy_set.
inline void remove_with_proxy(HeapRegion* hr, HeapRegionSet* proxy_set);
// After multiple calls to remove_with_proxy() the updates to the
// fields of the set are accumulated in proxy_set. This call
// updates the fields of the set from proxy_set.
void update_from_proxy(HeapRegionSet* proxy_set);
void bulk_remove(const HeapRegionSetCount& removed) {
_count.decrement(removed.length(), removed.capacity());
}
};
//////////////////// HeapRegionLinkedList ////////////////////
// A set that links all the regions added to it in a singly-linked
// A set that links all the regions added to it in a doubly-linked
// list. We should try to avoid doing operations that iterate over
// such lists in performance critical paths. Typically we should
// add / remove one region at a time or concatenate two lists. All
// those operations are done in constant time.
// add / remove one region at a time or concatenate two lists. A list
// can be treated in two ways: ordered and un-ordered. All un-ordered
// operations are done in constant time. To keep a list ordered, only use
// add_ordered() to add elements to the list. If a list is not ordered
// from the start, there is no way to sort it later (see the sketch
// after this class).
class HeapRegionLinkedListIterator;
class FreeRegionListIterator;
class HeapRegionLinkedList : public HeapRegionSetBase {
friend class HeapRegionLinkedListIterator;
class FreeRegionList : public HeapRegionSetBase {
friend class FreeRegionListIterator;
private:
HeapRegion* _head;
HeapRegion* _tail;
// These are provided for use by the friend classes.
HeapRegion* head() { return _head; }
HeapRegion* tail() { return _tail; }
// _last is used to keep track of where we added an element the last
// time in ordered lists. It helps to improve performance when adding
// several ordered items in a row.
HeapRegion* _last;
static uint _unrealistically_long_length;
void add_as_head_or_tail(FreeRegionList* from_list, bool as_head);
protected:
virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg);
@ -300,11 +224,24 @@ protected:
// See the comment for HeapRegionSetBase::clear()
virtual void clear();
HeapRegionLinkedList(const char* name) : HeapRegionSetBase(name) {
public:
FreeRegionList(const char* name, HRSMtSafeChecker* mt_safety_checker = NULL):
HeapRegionSetBase(name, false /* humongous */, true /* empty */, mt_safety_checker) {
clear();
}
public:
void verify_list();
HeapRegion* head() { return _head; }
HeapRegion* tail() { return _tail; }
static void set_unrealistically_long_length(uint len);
// Add hr to the list. The region should not be a member of another set.
// Assumes that the list is ordered and will preserve that order. The order
// is determined by hrs_index.
inline void add_ordered(HeapRegion* hr);
// It adds hr to the list as the new head. The region should not be
// a member of another set.
inline void add_as_head(HeapRegion* hr);
@ -320,15 +257,29 @@ public:
// Convenience method.
inline HeapRegion* remove_head_or_null();
// Removes and returns the last element (_tail) of the list. It assumes
// that the list isn't empty so that it can return a non-NULL value.
inline HeapRegion* remove_tail();
// Convenience method
inline HeapRegion* remove_tail_or_null();
// Removes from head or tail based on the given argument.
inline HeapRegion* remove_region(bool from_head);
// Merge two ordered lists. The result is also ordered. The order is
// determined by hrs_index.
void add_ordered(FreeRegionList* from_list);
// It moves the regions from from_list to this list and empties
// from_list. The new regions will appear in the same order as they
// were in from_list and be linked in the beginning of this list.
void add_as_head(HeapRegionLinkedList* from_list);
void add_as_head(FreeRegionList* from_list);
// It moves the regions from from_list to this list and empties
// from_list. The new regions will appear in the same order as they
// were in from_list and be linked in the end of this list.
void add_as_tail(HeapRegionLinkedList* from_list);
void add_as_tail(FreeRegionList* from_list);
// It empties the list by removing all regions from it.
void remove_all();
@ -346,15 +297,13 @@ public:
virtual void print_on(outputStream* out, bool print_contents = false);
};
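// Self-contained sketch (illustrative, not HotSpot code) of ordered
// insertion into a doubly-linked list keyed by an index, including the
// cached "last inserted" hint that mirrors _last above. Node and
// OrderedList are invented names.
#include <cstddef>
struct Node {
  unsigned index;
  Node* prev;
  Node* next;
  explicit Node(unsigned i) : index(i), prev(NULL), next(NULL) {}
};
struct OrderedList {
  Node* head;
  Node* tail;
  Node* last;  // speeds up runs of ascending insertions
  OrderedList() : head(NULL), tail(NULL), last(NULL) {}
  void add_ordered(Node* n) {
    // Start from the hint when it cannot overshoot the insert position.
    Node* curr = (last != NULL && last->index < n->index) ? last : head;
    while (curr != NULL && curr->index < n->index) {
      curr = curr->next;
    }
    n->next = curr;
    if (head == NULL) {               // list was empty
      head = tail = n;
    } else if (curr == NULL) {        // append at the tail
      n->prev = tail;
      tail->next = n;
      tail = n;
    } else if (curr->prev == NULL) {  // insert at the head
      curr->prev = n;
      head = n;
    } else {                          // insert in the middle
      n->prev = curr->prev;
      curr->prev->next = n;
      curr->prev = n;
    }
    last = n;
  }
};
// Inserting 30, 10, 20 in any order yields 10 <-> 20 <-> 30.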
//////////////////// HeapRegionLinkedListIterator ////////////////////
// Iterator class that provides a convenient way to iterate over the
// regions of a HeapRegionLinkedList instance.
class HeapRegionLinkedListIterator : public StackObj {
class FreeRegionListIterator : public StackObj {
private:
HeapRegionLinkedList* _list;
HeapRegion* _curr;
FreeRegionList* _list;
HeapRegion* _curr;
public:
bool more_available() {
@ -369,13 +318,12 @@ public:
// do the "cycle" check.
HeapRegion* hr = _curr;
assert(_list->verify_region(hr, _list), "region verification");
_list->verify_region(hr);
_curr = hr->next();
return hr;
}
HeapRegionLinkedListIterator(HeapRegionLinkedList* list)
: _curr(NULL), _list(list) {
FreeRegionListIterator(FreeRegionList* list) : _curr(NULL), _list(list) {
_curr = list->head();
}
};
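// A matching StackObj-style iterator over the toy OrderedList from the
// sketch above, mirroring more_available() / get_next():
struct OrderedListIterator {
  Node* _curr;
  explicit OrderedListIterator(OrderedList* list) : _curr(list->head) {}
  bool more_available() { return _curr != NULL; }
  Node* get_next() {
    Node* n = _curr;  // assumes more_available() was checked first
    _curr = n->next;
    return n;
  }
};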

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,116 +27,110 @@
#include "gc_implementation/g1/heapRegionSet.hpp"
//////////////////// HeapRegionSetBase ////////////////////
inline void HeapRegionSetBase::update_for_addition(HeapRegion* hr) {
// Assumes the caller has already verified the region.
_length += 1;
_region_num += hr->region_num();
_total_used_bytes += hr->used();
}
inline void HeapRegionSetBase::add_internal(HeapRegion* hr) {
hrs_assert_region_ok(this, hr, NULL);
inline void HeapRegionSetBase::add(HeapRegion* hr) {
check_mt_safety();
assert(hr->containing_set() == NULL, hrs_ext_msg(this, "should not already have a containing set %u"));
assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked"));
update_for_addition(hr);
_count.increment(1u, hr->capacity());
hr->set_containing_set(this);
verify_region(hr);
}
inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) {
// Assumes the caller has already verified the region.
assert(_length > 0, hrs_ext_msg(this, "pre-condition"));
_length -= 1;
uint region_num_diff = hr->region_num();
assert(region_num_diff <= _region_num,
hrs_err_msg("[%s] region's region num: %u "
"should be <= region num: %u",
name(), region_num_diff, _region_num));
_region_num -= region_num_diff;
size_t used_bytes = hr->used();
assert(used_bytes <= _total_used_bytes,
hrs_err_msg("[%s] region's used bytes: "SIZE_FORMAT" "
"should be <= used bytes: "SIZE_FORMAT,
name(), used_bytes, _total_used_bytes));
_total_used_bytes -= used_bytes;
}
inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) {
hrs_assert_region_ok(this, hr, this);
inline void HeapRegionSetBase::remove(HeapRegion* hr) {
check_mt_safety();
verify_region(hr);
assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked"));
hr->set_containing_set(NULL);
update_for_removal(hr);
assert(_count.length() > 0, hrs_ext_msg(this, "pre-condition"));
_count.decrement(1u, hr->capacity());
}
//////////////////// HeapRegionSet ////////////////////
inline void HeapRegionSet::add(HeapRegion* hr) {
hrs_assert_mt_safety_ok(this);
// add_internal() will verify the region.
add_internal(hr);
}
inline void HeapRegionSet::remove(HeapRegion* hr) {
hrs_assert_mt_safety_ok(this);
// remove_internal() will verify the region.
remove_internal(hr);
}
inline void HeapRegionSet::remove_with_proxy(HeapRegion* hr,
HeapRegionSet* proxy_set) {
// No need to do the MT safety check here given that this method
// does not update the contents of the set but instead accumulates
// the changes in proxy_set which is assumed to be thread-local.
hrs_assert_sets_match(this, proxy_set);
hrs_assert_region_ok(this, hr, this);
hr->set_containing_set(NULL);
proxy_set->update_for_addition(hr);
}
//////////////////// HeapRegionLinkedList ////////////////////
inline void HeapRegionLinkedList::add_as_head(HeapRegion* hr) {
hrs_assert_mt_safety_ok(this);
inline void FreeRegionList::add_ordered(HeapRegion* hr) {
check_mt_safety();
assert((length() == 0 && _head == NULL && _tail == NULL) ||
(length() > 0 && _head != NULL && _tail != NULL),
hrs_ext_msg(this, "invariant"));
// add_internal() will verify the region.
add_internal(hr);
// add() will verify the region and check mt safety.
add(hr);
// Now link the region
if (_head != NULL) {
HeapRegion* curr;
if (_last != NULL && _last->hrs_index() < hr->hrs_index()) {
curr = _last;
} else {
curr = _head;
}
// Find the first entry with a region index larger than the entry to insert.
while (curr != NULL && curr->hrs_index() < hr->hrs_index()) {
curr = curr->next();
}
hr->set_next(curr);
if (curr == NULL) {
// Adding at the end
hr->set_prev(_tail);
_tail->set_next(hr);
_tail = hr;
} else if (curr->prev() == NULL) {
// Adding at the beginning
hr->set_prev(NULL);
_head = hr;
curr->set_prev(hr);
} else {
hr->set_prev(curr->prev());
hr->prev()->set_next(hr);
curr->set_prev(hr);
}
} else {
// The list was empty
_tail = hr;
_head = hr;
}
_last = hr;
}
inline void FreeRegionList::add_as_head(HeapRegion* hr) {
assert((length() == 0 && _head == NULL && _tail == NULL) ||
(length() > 0 && _head != NULL && _tail != NULL),
hrs_ext_msg(this, "invariant"));
// add() will verify the region and check mt safety.
add(hr);
// Now link the region.
if (_head != NULL) {
hr->set_next(_head);
_head->set_prev(hr);
} else {
_tail = hr;
}
_head = hr;
}
inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) {
hrs_assert_mt_safety_ok(this);
inline void FreeRegionList::add_as_tail(HeapRegion* hr) {
check_mt_safety();
assert((length() == 0 && _head == NULL && _tail == NULL) ||
(length() > 0 && _head != NULL && _tail != NULL),
hrs_ext_msg(this, "invariant"));
// add_internal() will verify the region.
add_internal(hr);
// add() will verify the region and check mt safety.
add(hr);
// Now link the region.
if (_tail != NULL) {
_tail->set_next(hr);
hr->set_prev(_tail);
} else {
_head = hr;
}
_tail = hr;
}
inline HeapRegion* HeapRegionLinkedList::remove_head() {
hrs_assert_mt_safety_ok(this);
inline HeapRegion* FreeRegionList::remove_head() {
assert(!is_empty(), hrs_ext_msg(this, "the list should not be empty"));
assert(length() > 0 && _head != NULL && _tail != NULL,
hrs_ext_msg(this, "invariant"));
@ -146,17 +140,22 @@ inline HeapRegion* HeapRegionLinkedList::remove_head() {
_head = hr->next();
if (_head == NULL) {
_tail = NULL;
} else {
_head->set_prev(NULL);
}
hr->set_next(NULL);
// remove_internal() will verify the region.
remove_internal(hr);
if (_last == hr) {
_last = NULL;
}
// remove() will verify the region and check mt safety.
remove(hr);
return hr;
}
inline HeapRegion* HeapRegionLinkedList::remove_head_or_null() {
hrs_assert_mt_safety_ok(this);
inline HeapRegion* FreeRegionList::remove_head_or_null() {
check_mt_safety();
if (!is_empty()) {
return remove_head();
} else {
@ -164,4 +163,47 @@ inline HeapRegion* HeapRegionLinkedList::remove_head_or_null() {
}
}
inline HeapRegion* FreeRegionList::remove_tail() {
assert(!is_empty(), hrs_ext_msg(this, "The list should not be empty"));
assert(length() > 0 && _head != NULL && _tail != NULL,
hrs_ext_msg(this, "invariant"));
// We need to unlink it first
HeapRegion* hr = _tail;
_tail = hr->prev();
if (_tail == NULL) {
_head = NULL;
} else {
_tail->set_next(NULL);
}
hr->set_prev(NULL);
if (_last == hr) {
_last = NULL;
}
// remove() will verify the region and check mt safety.
remove(hr);
return hr;
}
inline HeapRegion* FreeRegionList::remove_tail_or_null() {
check_mt_safety();
if (!is_empty()) {
return remove_tail();
} else {
return NULL;
}
}
inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
if (from_head) {
return remove_head_or_null();
} else {
return remove_tail_or_null();
}
}
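// For completeness, a sketch of head removal on the toy OrderedList from
// the earlier sketch, mirroring the unlink-first, account-afterwards order
// of remove_head() / remove_tail() above (illustrative only):
Node* toy_remove_head(OrderedList* list) {
  Node* n = list->head;  // caller guarantees a non-empty list
  list->head = n->next;
  if (list->head == NULL) {
    list->tail = NULL;
  } else {
    list->head->prev = NULL;
  }
  n->next = NULL;
  if (list->last == n) {
    list->last = NULL;   // drop the now-stale insertion hint
  }
  return n;              // the real code then calls remove(hr)
}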
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP

@ -1,175 +0,0 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp"
// Note on the check_mt_safety() methods below:
//
// Verification of the "master" heap region sets / lists that are
// maintained by G1CollectedHeap is always done during a STW pause and
// by the VM thread at the start / end of the pause. The standard
// verification methods all assert check_mt_safety(). This is
// important as it ensures that verification is done without
// concurrent updates taking place at the same time. It follows that,
// for the "master" heap region sets / lists, the check_mt_safety()
// method should include the VM thread / STW case.
//////////////////// FreeRegionList ////////////////////
const char* FreeRegionList::verify_region_extra(HeapRegion* hr) {
if (hr->is_young()) {
return "the region should not be young";
}
// The superclass will check that the region is empty and
// not humongous.
return HeapRegionLinkedList::verify_region_extra(hr);
}
//////////////////// MasterFreeRegionList ////////////////////
const char* MasterFreeRegionList::verify_region_extra(HeapRegion* hr) {
// We should reset the RSet for parallel iteration before we add it
// to the master free list so that it is ready when the region is
// re-allocated.
if (!hr->rem_set()->verify_ready_for_par_iteration()) {
return "the region's RSet should be ready for parallel iteration";
}
return FreeRegionList::verify_region_extra(hr);
}
bool MasterFreeRegionList::check_mt_safety() {
// Master Free List MT safety protocol:
// (a) If we're at a safepoint, operations on the master free list
// should be invoked by either the VM thread (which will serialize
// them) or by the GC workers while holding the
// FreeList_lock.
// (b) If we're not at a safepoint, operations on the master free
// list should be invoked while holding the Heap_lock.
if (SafepointSynchronize::is_at_safepoint()) {
guarantee(Thread::current()->is_VM_thread() ||
FreeList_lock->owned_by_self(),
hrs_ext_msg(this, "master free list MT safety protocol "
"at a safepoint"));
} else {
guarantee(Heap_lock->owned_by_self(),
hrs_ext_msg(this, "master free list MT safety protocol "
"outside a safepoint"));
}
return FreeRegionList::check_mt_safety();
}
//////////////////// SecondaryFreeRegionList ////////////////////
bool SecondaryFreeRegionList::check_mt_safety() {
// Secondary Free List MT safety protocol:
// Operations on the secondary free list should always be invoked
// while holding the SecondaryFreeList_lock.
guarantee(SecondaryFreeList_lock->owned_by_self(),
hrs_ext_msg(this, "secondary free list MT safety protocol"));
return FreeRegionList::check_mt_safety();
}
//////////////////// OldRegionSet ////////////////////
const char* OldRegionSet::verify_region_extra(HeapRegion* hr) {
if (hr->is_young()) {
return "the region should not be young";
}
// The superclass will check that the region is not empty and not
// humongous.
return HeapRegionSet::verify_region_extra(hr);
}
//////////////////// MasterOldRegionSet ////////////////////
bool MasterOldRegionSet::check_mt_safety() {
// Master Old Set MT safety protocol:
// (a) If we're at a safepoint, operations on the master old set
// should be invoked:
// - by the VM thread (which will serialize them), or
// - by the GC workers while holding the FreeList_lock, if we're
// at a safepoint for an evacuation pause (this lock is taken
// anyway when a GC alloc region is retired so that a new one
// is allocated from the free list), or
// - by the GC workers while holding the OldSets_lock, if we're at a
// safepoint for a cleanup pause.
// (b) If we're not at a safepoint, operations on the master old set
// should be invoked while holding the Heap_lock.
if (SafepointSynchronize::is_at_safepoint()) {
guarantee(Thread::current()->is_VM_thread() ||
_phase == HRSPhaseEvacuation && FreeList_lock->owned_by_self() ||
_phase == HRSPhaseCleanup && OldSets_lock->owned_by_self(),
hrs_ext_msg(this, "master old set MT safety protocol "
"at a safepoint"));
} else {
guarantee(Heap_lock->owned_by_self(),
hrs_ext_msg(this, "master old set MT safety protocol "
"outside a safepoint"));
}
return OldRegionSet::check_mt_safety();
}
//////////////////// HumongousRegionSet ////////////////////
const char* HumongousRegionSet::verify_region_extra(HeapRegion* hr) {
if (hr->is_young()) {
return "the region should not be young";
}
// The superclass will check that the region is not empty and
// humongous.
return HeapRegionSet::verify_region_extra(hr);
}
//////////////////// MasterHumongousRegionSet ////////////////////
bool MasterHumongousRegionSet::check_mt_safety() {
// Master Humongous Set MT safety protocol:
// (a) If we're at a safepoint, operations on the master humongous
// set should be invoked by either the VM thread (which will
// serialize them) or by the GC workers while holding the
// OldSets_lock.
// (b) If we're not at a safepoint, operations on the master
// humongous set should be invoked while holding the Heap_lock.
if (SafepointSynchronize::is_at_safepoint()) {
guarantee(Thread::current()->is_VM_thread() ||
OldSets_lock->owned_by_self(),
hrs_ext_msg(this, "master humongous set MT safety protocol "
"at a safepoint"));
} else {
guarantee(Heap_lock->owned_by_self(),
hrs_ext_msg(this, "master humongous set MT safety protocol "
"outside a safepoint"));
}
return HumongousRegionSet::check_mt_safety();
}

@ -1,111 +0,0 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSETS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSETS_HPP
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
//////////////////// FreeRegionList ////////////////////
class FreeRegionList : public HeapRegionLinkedList {
protected:
virtual const char* verify_region_extra(HeapRegion* hr);
virtual bool regions_humongous() { return false; }
virtual bool regions_empty() { return true; }
public:
FreeRegionList(const char* name) : HeapRegionLinkedList(name) { }
};
//////////////////// MasterFreeRegionList ////////////////////
class MasterFreeRegionList : public FreeRegionList {
protected:
virtual const char* verify_region_extra(HeapRegion* hr);
virtual bool check_mt_safety();
public:
MasterFreeRegionList(const char* name) : FreeRegionList(name) { }
};
//////////////////// SecondaryFreeRegionList ////////////////////
class SecondaryFreeRegionList : public FreeRegionList {
protected:
virtual bool check_mt_safety();
public:
SecondaryFreeRegionList(const char* name) : FreeRegionList(name) { }
};
//////////////////// OldRegionSet ////////////////////
class OldRegionSet : public HeapRegionSet {
protected:
virtual const char* verify_region_extra(HeapRegion* hr);
virtual bool regions_humongous() { return false; }
virtual bool regions_empty() { return false; }
public:
OldRegionSet(const char* name) : HeapRegionSet(name) { }
};
//////////////////// MasterOldRegionSet ////////////////////
class MasterOldRegionSet : public OldRegionSet {
private:
protected:
virtual bool check_mt_safety();
public:
MasterOldRegionSet(const char* name) : OldRegionSet(name) { }
};
//////////////////// HumongousRegionSet ////////////////////
class HumongousRegionSet : public HeapRegionSet {
protected:
virtual const char* verify_region_extra(HeapRegion* hr);
virtual bool regions_humongous() { return true; }
virtual bool regions_empty() { return false; }
public:
HumongousRegionSet(const char* name) : HeapRegionSet(name) { }
};
//////////////////// MasterHumongousRegionSet ////////////////////
class MasterHumongousRegionSet : public HumongousRegionSet {
protected:
virtual bool check_mt_safety();
public:
MasterHumongousRegionSet(const char* name) : HumongousRegionSet(name) { }
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSETS_HPP

@ -57,9 +57,10 @@
nonstatic_field(G1MonitoringSupport, _old_committed, size_t) \
nonstatic_field(G1MonitoringSupport, _old_used, size_t) \
\
nonstatic_field(HeapRegionSetBase, _length, uint) \
nonstatic_field(HeapRegionSetBase, _region_num, uint) \
nonstatic_field(HeapRegionSetBase, _total_used_bytes, size_t) \
nonstatic_field(HeapRegionSetBase, _count, HeapRegionSetCount) \
\
nonstatic_field(HeapRegionSetCount, _length, uint) \
nonstatic_field(HeapRegionSetCount, _capacity, size_t) \
#define VM_TYPES_G1(declare_type, declare_toplevel_type) \
@ -71,6 +72,7 @@
declare_type(HeapRegion, ContiguousSpace) \
declare_toplevel_type(HeapRegionSeq) \
declare_toplevel_type(HeapRegionSetBase) \
declare_toplevel_type(HeapRegionSetCount) \
declare_toplevel_type(G1MonitoringSupport) \
\
declare_toplevel_type(G1CollectedHeap*) \

@ -665,8 +665,10 @@ void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
const PSHeapSummary& heap_summary = create_ps_heap_summary();
gc_tracer->report_gc_heap_summary(when, heap_summary);
const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary);
gc_tracer->report_metaspace_summary(when, metaspace_summary);
}
ParallelScavengeHeap* ParallelScavengeHeap::heap() {

@ -26,6 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP
#include "memory/allocation.hpp"
#include "memory/metaspaceChunkFreeListSummary.hpp"
class VirtualSpaceSummary : public StackObj {
HeapWord* _start;
@ -125,18 +126,49 @@ class PSHeapSummary : public GCHeapSummary {
};
class MetaspaceSummary : public StackObj {
size_t _capacity_until_GC;
MetaspaceSizes _meta_space;
MetaspaceSizes _data_space;
MetaspaceSizes _class_space;
MetaspaceChunkFreeListSummary _metaspace_chunk_free_list_summary;
MetaspaceChunkFreeListSummary _class_chunk_free_list_summary;
public:
MetaspaceSummary() : _meta_space(), _data_space(), _class_space() {}
MetaspaceSummary(const MetaspaceSizes& meta_space, const MetaspaceSizes& data_space, const MetaspaceSizes& class_space) :
_meta_space(meta_space), _data_space(data_space), _class_space(class_space) { }
MetaspaceSummary() :
_capacity_until_GC(0),
_meta_space(),
_data_space(),
_class_space(),
_metaspace_chunk_free_list_summary(),
_class_chunk_free_list_summary()
{}
MetaspaceSummary(size_t capacity_until_GC,
const MetaspaceSizes& meta_space,
const MetaspaceSizes& data_space,
const MetaspaceSizes& class_space,
const MetaspaceChunkFreeListSummary& metaspace_chunk_free_list_summary,
const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary) :
_capacity_until_GC(capacity_until_GC),
_meta_space(meta_space),
_data_space(data_space),
_class_space(class_space),
_metaspace_chunk_free_list_summary(metaspace_chunk_free_list_summary),
_class_chunk_free_list_summary(class_chunk_free_list_summary)
{}
size_t capacity_until_GC() const { return _capacity_until_GC; }
const MetaspaceSizes& meta_space() const { return _meta_space; }
const MetaspaceSizes& data_space() const { return _data_space; }
const MetaspaceSizes& class_space() const { return _class_space; }
const MetaspaceChunkFreeListSummary& metaspace_chunk_free_list_summary() const {
return _metaspace_chunk_free_list_summary;
}
const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary() const {
return _class_chunk_free_list_summary;
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP

@ -139,11 +139,21 @@ void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
}
#endif // INCLUDE_SERVICES
void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const MetaspaceSummary& meta_space_summary) const {
void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
assert_set_gc_id();
send_gc_heap_summary_event(when, heap_summary);
send_meta_space_summary_event(when, meta_space_summary);
}
void GCTracer::report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& summary) const {
assert_set_gc_id();
send_meta_space_summary_event(when, summary);
send_metaspace_chunk_free_list_summary(when, Metaspace::NonClassType, summary.metaspace_chunk_free_list_summary());
if (UseCompressedClassPointers) {
send_metaspace_chunk_free_list_summary(when, Metaspace::ClassType, summary.class_chunk_free_list_summary());
}
}
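// As the trace_heap() hunks elsewhere in this change show, call sites now
// report the two summaries back to back:
//   gc_tracer->report_gc_heap_summary(when, heap_summary);
//   gc_tracer->report_metaspace_summary(when, metaspace_summary);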
void YoungGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {

@ -30,6 +30,7 @@
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/referenceType.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1YCTypes.hpp"
@ -41,6 +42,7 @@ typedef uint GCId;
class EvacuationInfo;
class GCHeapSummary;
class MetaspaceChunkFreeListSummary;
class MetaspaceSummary;
class PSHeapSummary;
class ReferenceProcessorStats;
@ -124,7 +126,8 @@ class GCTracer : public ResourceObj {
public:
void report_gc_start(GCCause::Cause cause, const Ticks& timestamp);
void report_gc_end(const Ticks& timestamp, TimePartitions* time_partitions);
void report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const MetaspaceSummary& meta_space_summary) const;
void report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary) const;
void report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& metaspace_summary) const;
void report_gc_reference_stats(const ReferenceProcessorStats& rp) const;
void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN;
bool has_reported_gc_start() const;
@ -138,6 +141,7 @@ class GCTracer : public ResourceObj {
void send_garbage_collection_event() const;
void send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const;
void send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const;
void send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspace::MetadataType mdtype, const MetaspaceChunkFreeListSummary& summary) const;
void send_reference_stats_event(ReferenceType type, size_t count) const;
void send_phase_events(TimePartitions* time_partitions) const;
};

@ -64,6 +64,30 @@ void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) cons
}
}
void GCTracer::send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspace::MetadataType mdtype,
const MetaspaceChunkFreeListSummary& summary) const {
EventMetaspaceChunkFreeListSummary e;
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_when(when);
e.set_metadataType(mdtype);
e.set_specializedChunks(summary.num_specialized_chunks());
e.set_specializedChunksTotalSize(summary.specialized_chunks_size_in_bytes());
e.set_smallChunks(summary.num_small_chunks());
e.set_smallChunksTotalSize(summary.small_chunks_size_in_bytes());
e.set_mediumChunks(summary.num_medium_chunks());
e.set_mediumChunksTotalSize(summary.medium_chunks_size_in_bytes());
e.set_humongousChunks(summary.num_humongous_chunks());
e.set_humongousChunksTotalSize(summary.humongous_chunks_size_in_bytes());
e.commit();
}
}
void ParallelOldTracer::send_parallel_old_event() const {
EventGCParallelOld e(UNTIMED);
if (e.should_commit()) {
@ -246,6 +270,7 @@ void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceS
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_when((u1) when);
e.set_gcThreshold(meta_space_summary.capacity_until_GC());
e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
e.set_dataSpace(to_trace_struct(meta_space_summary.data_space()));
e.set_classSpace(to_trace_struct(meta_space_summary.class_space()));

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,10 +30,18 @@
#include "utilities/stack.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#endif // INCLUDE_ALL_GCS
inline void MarkSweep::mark_object(oop obj) {
#if INCLUDE_ALL_GCS
if (G1StringDedup::is_enabled()) {
// We must enqueue the object before it is marked
// as we otherwise can't read the object's age.
G1StringDedup::enqueue_from_mark(obj);
}
#endif
// some marks may contain information we need to preserve so we store them away
// and overwrite the mark. We'll restore it at the end of markSweep.
markOop mark = obj->mark();

@ -97,7 +97,13 @@ MetaspaceSummary CollectedHeap::create_metaspace_summary() {
MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
MetaspaceAux::reserved_bytes(Metaspace::ClassType));
return MetaspaceSummary(meta_space, data_space, class_space);
const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType);
const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType);
return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
ms_chunk_free_list_summary, class_chunk_free_list_summary);
}
void CollectedHeap::print_heap_before_gc() {
@ -128,8 +134,10 @@ void CollectedHeap::unregister_nmethod(nmethod* nm) {
void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
const GCHeapSummary& heap_summary = create_heap_summary();
gc_tracer->report_gc_heap_summary(when, heap_summary);
const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary);
gc_tracer->report_metaspace_summary(when, metaspace_summary);
}
void CollectedHeap::trace_heap_before_gc(GCTracer* gc_tracer) {

@ -446,6 +446,7 @@ Arena::Arena(size_t init_size) {
_first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
_size_in_bytes = 0;
set_size_in_bytes(init_size);
NOT_PRODUCT(Atomic::inc(&_instance_count);)
}
@ -454,6 +455,7 @@ Arena::Arena() {
_first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
_size_in_bytes = 0;
set_size_in_bytes(Chunk::init_size);
NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,6 +34,7 @@
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
#endif // INCLUDE_ALL_GCS
// Free list. A FreeList is used to access a linked list of chunks
@ -332,4 +333,5 @@ template class FreeList<Metablock>;
template class FreeList<Metachunk>;
#if INCLUDE_ALL_GCS
template class FreeList<FreeChunk>;
template class FreeList<G1CodeRootChunk>;
#endif // INCLUDE_ALL_GCS

@ -32,7 +32,9 @@
#include "memory/gcLocker.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.inline.hpp"
@ -57,6 +59,7 @@ size_t const allocation_from_dictionary_limit = 4 * K;
MetaWord* last_allocated = 0;
size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;
// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
@ -182,6 +185,48 @@ class ChunkManager : public CHeapObj<mtInternal> {
// Remove from a list by size. Selects list based on size of chunk.
Metachunk* free_chunks_get(size_t chunk_word_size);
#define index_bounds_check(index) \
assert(index == SpecializedIndex || \
index == SmallIndex || \
index == MediumIndex || \
index == HumongousIndex, err_msg("Bad index: %d", (int) index))
size_t num_free_chunks(ChunkIndex index) const {
index_bounds_check(index);
if (index == HumongousIndex) {
return _humongous_dictionary.total_free_blocks();
}
ssize_t count = _free_chunks[index].count();
return count == -1 ? 0 : (size_t) count;
}
size_t size_free_chunks_in_bytes(ChunkIndex index) const {
index_bounds_check(index);
size_t word_size = 0;
if (index == HumongousIndex) {
word_size = _humongous_dictionary.total_size();
} else {
const size_t size_per_chunk_in_words = _free_chunks[index].size();
word_size = size_per_chunk_in_words * num_free_chunks(index);
}
return word_size * BytesPerWord;
}
MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
num_free_chunks(SmallIndex),
num_free_chunks(MediumIndex),
num_free_chunks(HumongousIndex),
size_free_chunks_in_bytes(SpecializedIndex),
size_free_chunks_in_bytes(SmallIndex),
size_free_chunks_in_bytes(MediumIndex),
size_free_chunks_in_bytes(HumongousIndex));
}
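// A worked example with made-up numbers: on a 64-bit VM (BytesPerWord == 8),
// if the SmallIndex free list held 3 chunks of 64 words each, then
// num_free_chunks(SmallIndex) == 3 and size_free_chunks_in_bytes(SmallIndex)
// == 64 * 3 * 8 == 1536 bytes.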
// Debug support
void verify();
void slow_verify() {
@ -1436,19 +1481,21 @@ void MetaspaceGC::compute_new_size() {
expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
// Don't expand unless it's significant
if (expand_bytes >= MinMetaspaceExpansion) {
MetaspaceGC::inc_capacity_until_GC(expand_bytes);
}
if (PrintGCDetails && Verbose) {
size_t new_capacity_until_GC = capacity_until_GC;
gclog_or_tty->print_cr(" expanding:"
" minimum_desired_capacity: %6.1fKB"
" expand_bytes: %6.1fKB"
" MinMetaspaceExpansion: %6.1fKB"
" new metaspace HWM: %6.1fKB",
minimum_desired_capacity / (double) K,
expand_bytes / (double) K,
MinMetaspaceExpansion / (double) K,
new_capacity_until_GC / (double) K);
size_t new_capacity_until_GC = MetaspaceGC::inc_capacity_until_GC(expand_bytes);
Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
new_capacity_until_GC,
MetaspaceGCThresholdUpdater::ComputeNewSize);
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr(" expanding:"
" minimum_desired_capacity: %6.1fKB"
" expand_bytes: %6.1fKB"
" MinMetaspaceExpansion: %6.1fKB"
" new metaspace HWM: %6.1fKB",
minimum_desired_capacity / (double) K,
expand_bytes / (double) K,
MinMetaspaceExpansion / (double) K,
new_capacity_until_GC / (double) K);
}
}
return;
}
@ -1528,7 +1575,10 @@ void MetaspaceGC::compute_new_size() {
// Don't shrink unless it's significant
if (shrink_bytes >= MinMetaspaceExpansion &&
((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
new_capacity_until_GC,
MetaspaceGCThresholdUpdater::ComputeNewSize);
}
}
@ -2629,6 +2679,19 @@ size_t MetaspaceAux::free_chunks_total_bytes() {
return free_chunks_total_words() * BytesPerWord;
}
bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
return Metaspace::get_chunk_manager(mdtype) != NULL;
}
MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
if (!has_chunk_free_list(mdtype)) {
return MetaspaceChunkFreeListSummary();
}
const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
return cm->chunk_free_list_summary();
}
void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
gclog_or_tty->print(", [Metaspace:");
if (PrintGCDetails && Verbose) {
@ -3132,6 +3195,7 @@ void Metaspace::global_initialize() {
}
MetaspaceGC::initialize();
_tracer = new MetaspaceTracer();
}
Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
@ -3220,8 +3284,12 @@ MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype)
assert(delta_bytes > 0, "Must be");
size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
// capacity_until_GC might be updated concurrently, must calculate previous value.
size_t before_inc = after_inc - delta_bytes;
tracer()->report_gc_threshold(before_inc, after_inc,
MetaspaceGCThresholdUpdater::ExpandAndAllocate);
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
" to " SIZE_FORMAT, before_inc, after_inc);
@ -3345,6 +3413,8 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
if (result == NULL) {
tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
// Allocation failed.
if (is_init_completed()) {
// Only start a GC if the bootstrapping has completed.
@ -3356,7 +3426,7 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
}
if (result == NULL) {
report_metadata_oome(loader_data, word_size, mdtype, CHECK_NULL);
report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
}
// Zero initialize.
@ -3370,7 +3440,9 @@ size_t Metaspace::class_chunk_size(size_t word_size) {
return class_vsm()->calc_chunk_size(word_size);
}
void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
// If result is still null, we are out of memory.
if (Verbose && TraceMetadataChunkAllocation) {
gclog_or_tty->print_cr("Metaspace allocation failed for size "
@ -3413,6 +3485,16 @@ void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_s
}
}
const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
switch (mdtype) {
case Metaspace::ClassType: return "Class";
case Metaspace::NonClassType: return "Metadata";
default:
assert(false, err_msg("Got bad mdtype: %d", (int) mdtype));
return NULL;
}
}
void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
assert(DumpSharedSpaces, "sanity");

@ -26,6 +26,7 @@
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/metaspaceChunkFreeListSummary.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/exceptions.hpp"
@ -60,6 +61,7 @@ class ChunkManager;
class ClassLoaderData;
class Metablock;
class Metachunk;
class MetaspaceTracer;
class MetaWord;
class Mutex;
class outputStream;
@ -149,6 +151,8 @@ class Metaspace : public CHeapObj<mtClass> {
static ChunkManager* _chunk_manager_metadata;
static ChunkManager* _chunk_manager_class;
static const MetaspaceTracer* _tracer;
public:
static VirtualSpaceList* space_list() { return _space_list; }
static VirtualSpaceList* class_space_list() { return _class_space_list; }
@ -164,6 +168,8 @@ class Metaspace : public CHeapObj<mtClass> {
return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata();
}
static const MetaspaceTracer* tracer() { return _tracer; }
private:
// This is used by DumpSharedSpaces only, where only _vsm is used. So we will
// maintain a single list for now.
@ -234,7 +240,9 @@ class Metaspace : public CHeapObj<mtClass> {
static void purge();
static void report_metadata_oome(ClassLoaderData* loader_data, size_t word_size,
MetadataType mdtype, TRAPS);
MetaspaceObj::Type type, MetadataType mdtype, TRAPS);
static const char* metadata_type_name(Metaspace::MetadataType mdtype);
void print_on(outputStream* st) const;
// Debugging support
@ -348,6 +356,9 @@ class MetaspaceAux : AllStatic {
return min_chunk_size_words() * BytesPerWord;
}
static bool has_chunk_free_list(Metaspace::MetadataType mdtype);
static MetaspaceChunkFreeListSummary chunk_free_list_summary(Metaspace::MetadataType mdtype);
// Print change in used metadata.
static void print_metaspace_change(size_t prev_metadata_used);
static void print_on(outputStream * out);

@ -0,0 +1,103 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_METASPACE_CHUNK_FREE_LIST_SUMMARY_HPP
#define SHARE_VM_MEMORY_METASPACE_CHUNK_FREE_LIST_SUMMARY_HPP
#include "memory/allocation.hpp"
class MetaspaceChunkFreeListSummary VALUE_OBJ_CLASS_SPEC {
size_t _num_specialized_chunks;
size_t _num_small_chunks;
size_t _num_medium_chunks;
size_t _num_humongous_chunks;
size_t _specialized_chunks_size_in_bytes;
size_t _small_chunks_size_in_bytes;
size_t _medium_chunks_size_in_bytes;
size_t _humongous_chunks_size_in_bytes;
public:
MetaspaceChunkFreeListSummary() :
_num_specialized_chunks(0),
_num_small_chunks(0),
_num_medium_chunks(0),
_num_humongous_chunks(0),
_specialized_chunks_size_in_bytes(0),
_small_chunks_size_in_bytes(0),
_medium_chunks_size_in_bytes(0),
_humongous_chunks_size_in_bytes(0)
{}
MetaspaceChunkFreeListSummary(size_t num_specialized_chunks,
size_t num_small_chunks,
size_t num_medium_chunks,
size_t num_humongous_chunks,
size_t specialized_chunks_size_in_bytes,
size_t small_chunks_size_in_bytes,
size_t medium_chunks_size_in_bytes,
size_t humongous_chunks_size_in_bytes) :
_num_specialized_chunks(num_specialized_chunks),
_num_small_chunks(num_small_chunks),
_num_medium_chunks(num_medium_chunks),
_num_humongous_chunks(num_humongous_chunks),
_specialized_chunks_size_in_bytes(specialized_chunks_size_in_bytes),
_small_chunks_size_in_bytes(small_chunks_size_in_bytes),
_medium_chunks_size_in_bytes(medium_chunks_size_in_bytes),
_humongous_chunks_size_in_bytes(humongous_chunks_size_in_bytes)
{}
size_t num_specialized_chunks() const {
return _num_specialized_chunks;
}
size_t num_small_chunks() const {
return _num_small_chunks;
}
size_t num_medium_chunks() const {
return _num_medium_chunks;
}
size_t num_humongous_chunks() const {
return _num_humongous_chunks;
}
size_t specialized_chunks_size_in_bytes() const {
return _specialized_chunks_size_in_bytes;
}
size_t small_chunks_size_in_bytes() const {
return _small_chunks_size_in_bytes;
}
size_t medium_chunks_size_in_bytes() const {
return _medium_chunks_size_in_bytes;
}
size_t humongous_chunks_size_in_bytes() const {
return _humongous_chunks_size_in_bytes;
}
};
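// A hedged usage sketch of the value object above, with made-up numbers:
//   MetaspaceChunkFreeListSummary s(4, 2, 1, 0,             // chunk counts
//                                   4096, 8192, 16384, 0);  // bytes
//   size_t total_bytes = s.specialized_chunks_size_in_bytes()
//                      + s.small_chunks_size_in_bytes()
//                      + s.medium_chunks_size_in_bytes()
//                      + s.humongous_chunks_size_in_bytes(); // == 28672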
#endif // SHARE_VM_MEMORY_METASPACE_CHUNK_FREE_LIST_SUMMARY_HPP

@ -0,0 +1,52 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_METASPACE_GC_THRESHOLD_UPDATER_HPP
#define SHARE_VM_MEMORY_METASPACE_GC_THRESHOLD_UPDATER_HPP
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
class MetaspaceGCThresholdUpdater : public AllStatic {
public:
enum Type {
ComputeNewSize,
ExpandAndAllocate,
Last
};
static const char* to_string(MetaspaceGCThresholdUpdater::Type updater) {
switch (updater) {
case ComputeNewSize:
return "compute_new_size";
case ExpandAndAllocate:
return "expand_and_allocate";
default:
assert(false, err_msg("Got bad updater: %d", (int) updater));
return NULL;
};
}
};
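// For example, to_string(MetaspaceGCThresholdUpdater::ComputeNewSize)
// returns "compute_new_size".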
#endif // SHARE_VM_MEMORY_METASPACE_GC_THRESHOLD_UPDATER_HPP

@ -0,0 +1,82 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "memory/metaspaceTracer.hpp"
#include "oops/oop.inline.hpp"
#include "trace/tracing.hpp"
#include "trace/traceBackend.hpp"
void MetaspaceTracer::report_gc_threshold(size_t old_val,
size_t new_val,
MetaspaceGCThresholdUpdater::Type updater) const {
EventMetaspaceGCThreshold event;
if (event.should_commit()) {
event.set_oldValue(old_val);
event.set_newValue(new_val);
event.set_updater((u1)updater);
event.commit();
}
}
void MetaspaceTracer::report_metaspace_allocation_failure(ClassLoaderData *cld,
size_t word_size,
MetaspaceObj::Type objtype,
Metaspace::MetadataType mdtype) const {
send_allocation_failure_event<EventMetaspaceAllocationFailure>(cld, word_size, objtype, mdtype);
}
void MetaspaceTracer::report_metadata_oom(ClassLoaderData *cld,
size_t word_size,
MetaspaceObj::Type objtype,
Metaspace::MetadataType mdtype) const {
send_allocation_failure_event<EventMetaspaceOOM>(cld, word_size, objtype, mdtype);
}
template <typename E>
void MetaspaceTracer::send_allocation_failure_event(ClassLoaderData *cld,
size_t word_size,
MetaspaceObj::Type objtype,
Metaspace::MetadataType mdtype) const {
E event;
if (event.should_commit()) {
if (cld->is_anonymous()) {
event.set_classLoader(NULL);
event.set_anonymousClassLoader(true);
} else {
if (cld->is_the_null_class_loader_data()) {
event.set_classLoader((Klass*) NULL);
} else {
event.set_classLoader(cld->class_loader()->klass());
}
event.set_anonymousClassLoader(false);
}
event.set_size(word_size * BytesPerWord);
event.set_metadataType((u1) mdtype);
event.set_metaspaceObjectType((u1) objtype);
event.commit();
}
}
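A hedged call-site sketch for report_gc_threshold(); the wrapper function and its parameters below are assumptions for illustration, not code from this changeset:

static void report_threshold_grow(const MetaspaceTracer* tracer,
                                  size_t old_threshold,
                                  size_t new_threshold) {
  // Emits EventMetaspaceGCThreshold, tagged with the updater that grew the
  // threshold, so the transition shows up in the trace stream.
  tracer->report_gc_threshold(old_threshold, new_threshold,
                              MetaspaceGCThresholdUpdater::ExpandAndAllocate);
}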

@ -0,0 +1,55 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_METASPACE_TRACER_HPP
#define SHARE_VM_MEMORY_METASPACE_TRACER_HPP
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
class ClassLoaderData;
class MetaspaceTracer : public CHeapObj<mtTracing> {
template <typename E>
void send_allocation_failure_event(ClassLoaderData *cld,
size_t word_size,
MetaspaceObj::Type objtype,
Metaspace::MetadataType mdtype) const;
public:
void report_gc_threshold(size_t old_val,
size_t new_val,
MetaspaceGCThresholdUpdater::Type updater) const;
void report_metaspace_allocation_failure(ClassLoaderData *cld,
size_t word_size,
MetaspaceObj::Type objtype,
Metaspace::MetadataType mdtype) const;
void report_metadata_oom(ClassLoaderData *cld,
size_t word_size,
MetaspaceObj::Type objtype,
Metaspace::MetadataType mdtype) const;
};
#endif // SHARE_VM_MEMORY_METASPACE_TRACER_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -90,4 +90,23 @@ class PaddedArray {
static PaddedEnd<T>* create_unfreeable(uint length);
};
// Helper class to create an array of references to arrays of primitive types
// Both the array of references and the data arrays are aligned to the given
// alignment. The allocated memory is zero-filled.
template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
class Padded2DArray {
public:
// Creates an aligned padded 2D array.
// The memory cannot be deleted since the raw memory chunk is not returned.
static T** create_unfreeable(uint rows, uint columns, size_t* allocation_size = NULL);
};
// Helper class to create an array of T objects. The array as a whole will
// start at a multiple of alignment and its size will be aligned to alignment.
template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
class PaddedPrimitiveArray {
public:
static T* create_unfreeable(size_t length);
};
#endif // SHARE_VM_MEMORY_PADDED_HPP
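A usage sketch for Padded2DArray. The element type, MEMFLAGS value and worker counts below are assumptions for illustration, not part of this changeset. Giving each worker its own cache-line-aligned row keeps threads that write only their own row from false sharing with their neighbours:

static int** make_worker_rows(uint num_workers, uint entries_per_worker) {
  size_t allocated = 0;
  // Each rows[i] starts on its own alignment boundary; the memory is
  // zero-filled and can never be freed (the raw chunk is not returned).
  int** rows = Padded2DArray<int, mtGC>::create_unfreeable(
      num_workers, entries_per_worker, &allocated);
  return rows;
}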

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,3 +47,42 @@ PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) {
return aligned_padded_array;
}
template <class T, MEMFLAGS flags, size_t alignment>
T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint columns, size_t* allocation_size) {
// Calculate and align the size of the first dimension's table.
size_t table_size = align_size_up_(rows * sizeof(T*), alignment);
// The size of the separate rows.
size_t row_size = align_size_up_(columns * sizeof(T), alignment);
// Total size consists of the indirection table plus the rows.
size_t total_size = table_size + rows * row_size + alignment;
// Allocate a chunk of memory large enough to allow alignment of the chunk.
void* chunk = AllocateHeap(total_size, flags);
// Clear the allocated memory.
memset(chunk, 0, total_size);
// Align the chunk of memory.
T** result = (T**)align_pointer_up(chunk, alignment);
void* data_start = (void*)((uintptr_t)result + table_size);
// Fill in the row table.
for (size_t i = 0; i < rows; i++) {
result[i] = (T*)((uintptr_t)data_start + i * row_size);
}
if (allocation_size != NULL) {
*allocation_size = total_size;
}
return result;
}
template <class T, MEMFLAGS flags, size_t alignment>
T* PaddedPrimitiveArray<T, flags, alignment>::create_unfreeable(size_t length) {
// Allocate a chunk of memory large enough to allow for some alignment.
void* chunk = AllocateHeap(length * sizeof(T) + alignment, flags);
memset(chunk, 0, length * sizeof(T) + alignment);
return (T*)align_pointer_up(chunk, alignment);
}
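A worked size example for Padded2DArray::create_unfreeable, assuming 64-bit pointers and a 64-byte alignment: with rows = 3, columns = 100 and sizeof(T) = 1, table_size = align_size_up_(3 * 8, 64) = 64 bytes, row_size = align_size_up_(100 * 1, 64) = 128 bytes, and total_size = 64 + 3 * 128 + 64 = 512 bytes. The chunk thus holds the pointer table, three aligned rows, and one alignment's worth of slack for aligning the start of the chunk itself.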

@ -3882,6 +3882,7 @@ void TestKlass_test();
void TestOldFreeSpaceCalculation_test();
void TestG1BiasedArray_test();
void TestBufferingOopClosure_test();
void TestCodeCacheRemSet_test();
#endif
void execute_internal_vm_tests() {
@ -3910,6 +3911,7 @@ void execute_internal_vm_tests() {
run_unit_test(TestG1BiasedArray_test());
run_unit_test(HeapRegionRemSet::test_prt());
run_unit_test(TestBufferingOopClosure_test());
run_unit_test(TestCodeCacheRemSet_test());
#endif
tty->print_cr("All internal VM tests passed");
}

@ -1160,18 +1160,22 @@ static bool is_authorized(Handle context, instanceKlassHandle klass, TRAPS) {
// and null permissions - which gives no permissions.
oop create_dummy_access_control_context(TRAPS) {
InstanceKlass* pd_klass = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass());
// new ProtectionDomain(null,null);
oop null_protection_domain = pd_klass->allocate_instance(CHECK_NULL);
Handle null_pd(THREAD, null_protection_domain);
Handle obj = pd_klass->allocate_instance_handle(CHECK_NULL);
// Call constructor ProtectionDomain(null, null);
JavaValue result(T_VOID);
JavaCalls::call_special(&result, obj, KlassHandle(THREAD, pd_klass),
vmSymbols::object_initializer_name(),
vmSymbols::codesource_permissioncollection_signature(),
Handle(), Handle(), CHECK_NULL);
// new ProtectionDomain[] {pd};
objArrayOop context = oopFactory::new_objArray(pd_klass, 1, CHECK_NULL);
context->obj_at_put(0, null_pd());
context->obj_at_put(0, obj());
// new AccessControlContext(new ProtectionDomain[] {pd})
objArrayHandle h_context(THREAD, context);
oop result = java_security_AccessControlContext::create(h_context, false, Handle(), CHECK_NULL);
return result;
oop acc = java_security_AccessControlContext::create(h_context, false, Handle(), CHECK_NULL);
return acc;
}
JVM_ENTRY(jobject, JVM_DoPrivileged(JNIEnv *env, jclass cls, jobject action, jobject context, jboolean wrapException))

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -993,7 +993,9 @@ void JvmtiExport::post_class_unload(Klass* klass) {
// Before we call the JVMTI agent, we have to set the state in the
// thread for which we are proxying.
JavaThreadState prev_state = real_thread->thread_state();
assert(prev_state == _thread_blocked, "JavaThread should be at safepoint");
assert(((Thread *)real_thread)->is_ConcurrentGC_thread() ||
(real_thread->is_Java_thread() && prev_state == _thread_blocked),
"should be ConcurrentGCThread or JavaThread at safepoint");
real_thread->set_thread_state(_thread_in_native);
jvmtiExtensionEvent callback = env->ext_callbacks()->ClassUnload;

@ -305,6 +305,7 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
{ "DesiredMethodLimit",
JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
#endif // PRODUCT
{ "UseVMInterruptibleIO", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ NULL, JDK_Version(0), JDK_Version(0) }
};
@ -2246,6 +2247,8 @@ bool Arguments::check_vm_args_consistency() {
"G1ConcRSHotCardLimit");
status = status && verify_interval(G1ConcRSLogCacheSize, 0, 31,
"G1ConcRSLogCacheSize");
status = status && verify_interval(StringDeduplicationAgeThreshold, 1, markOopDesc::max_age,
"StringDeduplicationAgeThreshold");
}
if (UseConcMarkSweepGC) {
status = status && verify_min_value(CMSOldPLABNumRefills, 1, "CMSOldPLABNumRefills");
@ -3226,11 +3229,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
return JNI_EINVAL;
}
FLAG_SET_CMDLINE(uintx, MaxDirectMemorySize, max_direct_memory_size);
} else if (match_option(option, "-XX:+UseVMInterruptibleIO", &tail)) {
// NOTE! In JDK 9, the UseVMInterruptibleIO flag will completely go
// away and will cause VM initialization failures!
warning("-XX:+UseVMInterruptibleIO is obsolete and will be removed in a future release.");
FLAG_SET_CMDLINE(bool, UseVMInterruptibleIO, true);
#if !INCLUDE_MANAGEMENT
} else if (match_option(option, "-XX:+ManagementServer", &tail)) {
jio_fprintf(defaultStream::error_stream(),

@ -3840,17 +3840,28 @@ class CommandLineFlags {
experimental(uintx, SymbolTableSize, defaultSymbolTableSize, \
"Number of buckets in the JVM internal Symbol table") \
\
product(bool, UseStringDeduplication, false, \
"Use string deduplication") \
\
product(bool, PrintStringDeduplicationStatistics, false, \
"Print string deduplication statistics") \
\
product(uintx, StringDeduplicationAgeThreshold, 3, \
"A string must reach this age (or be promoted to an old region) " \
"to be considered for deduplication") \
\
diagnostic(bool, StringDeduplicationResizeALot, false, \
"Force table resize every time the table is scanned") \
\
diagnostic(bool, StringDeduplicationRehashALot, false, \
"Force table rehash every time the table is scanned") \
\
develop(bool, TraceDefaultMethods, false, \
"Trace the default method processing steps") \
\
develop(bool, VerifyGenericSignatures, false, \
"Abort VM on erroneous or inconsistent generic signatures") \
\
product(bool, UseVMInterruptibleIO, false, \
"(Unstable, Solaris-specific) Thread interrupt before or with " \
"EINTR for I/O operations results in OS_INTRPT. The default " \
"value of this flag is true for JDK 6 and earlier") \
\
diagnostic(bool, WhiteBoxAPI, false, \
"Enable internal testing APIs") \
\

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,6 +58,8 @@ Mutex* SignatureHandlerLibrary_lock = NULL;
Mutex* VtableStubs_lock = NULL;
Mutex* SymbolTable_lock = NULL;
Mutex* StringTable_lock = NULL;
Monitor* StringDedupQueue_lock = NULL;
Mutex* StringDedupTable_lock = NULL;
Mutex* CodeCache_lock = NULL;
Mutex* MethodData_lock = NULL;
Mutex* RetData_lock = NULL;
@ -196,6 +198,9 @@ void mutex_init() {
def(MMUTracker_lock , Mutex , leaf , true );
def(HotCardCache_lock , Mutex , special , true );
def(EvacFailureStack_lock , Mutex , nonleaf , true );
def(StringDedupQueue_lock , Monitor, leaf, true );
def(StringDedupTable_lock , Mutex , leaf, true );
}
def(ParGCRareEvent_lock , Mutex , leaf , true );
def(DerivedPointerTableGC_lock , Mutex, leaf, true );

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,6 +66,8 @@ extern Mutex* SignatureHandlerLibrary_lock; // a lock on the SignatureHandl
extern Mutex* VtableStubs_lock; // a lock on the VtableStubs
extern Mutex* SymbolTable_lock; // a lock on the symbol table
extern Mutex* StringTable_lock; // a lock on the interned string table
extern Monitor* StringDedupQueue_lock; // a lock on the string deduplication queue
extern Mutex* StringDedupTable_lock; // a lock on the string deduplication table
extern Mutex* CodeCache_lock; // a lock on the CodeCache, rank is special, use MutexLockerEx
extern Mutex* MethodData_lock; // a lock on installation of method data
extern Mutex* RetData_lock; // a lock on installation of RetData inside method data

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,9 +38,6 @@ PerfCounter* RuntimeService::_sync_time_ticks = NULL;
PerfCounter* RuntimeService::_total_safepoints = NULL;
PerfCounter* RuntimeService::_safepoint_time_ticks = NULL;
PerfCounter* RuntimeService::_application_time_ticks = NULL;
PerfCounter* RuntimeService::_thread_interrupt_signaled_count = NULL;
PerfCounter* RuntimeService::_interrupted_before_count = NULL;
PerfCounter* RuntimeService::_interrupted_during_count = NULL;
void RuntimeService::init() {
// Make sure the VM version is initialized
@ -70,26 +67,6 @@ void RuntimeService::init() {
PerfDataManager::create_constant(SUN_RT, "jvmVersion", PerfData::U_None,
(jlong) Abstract_VM_Version::jvm_version(), CHECK);
// I/O interruption related counters
// thread signaling via os::interrupt()
_thread_interrupt_signaled_count =
PerfDataManager::create_counter(SUN_RT,
"threadInterruptSignaled", PerfData::U_Events, CHECK);
// OS_INTRPT via "check before" in _INTERRUPTIBLE
_interrupted_before_count =
PerfDataManager::create_counter(SUN_RT, "interruptedBeforeIO",
PerfData::U_Events, CHECK);
// OS_INTRPT via "check during" in _INTERRUPTIBLE
_interrupted_during_count =
PerfDataManager::create_counter(SUN_RT, "interruptedDuringIO",
PerfData::U_Events, CHECK);
// The capabilities counter is a binary representation of the VM capabilities in string.
// This string representation simplifies the implementation of the client side
// to parse the value.
@ -181,22 +158,4 @@ jlong RuntimeService::application_time_ms() {
Management::ticks_to_ms(_application_time_ticks->get_value()) : -1;
}
void RuntimeService::record_interrupted_before_count() {
if (UsePerfData) {
_interrupted_before_count->inc();
}
}
void RuntimeService::record_interrupted_during_count() {
if (UsePerfData) {
_interrupted_during_count->inc();
}
}
void RuntimeService::record_thread_interrupt_signaled_count() {
if (UsePerfData) {
_thread_interrupt_signaled_count->inc();
}
}
#endif // INCLUDE_MANAGEMENT

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,9 +34,6 @@ private:
static PerfCounter* _total_safepoints;
static PerfCounter* _safepoint_time_ticks; // Accumulated time at safepoints
static PerfCounter* _application_time_ticks; // Accumulated time not at safepoints
static PerfCounter* _thread_interrupt_signaled_count;// os:interrupt thr_kill
static PerfCounter* _interrupted_before_count; // _INTERRUPTIBLE OS_INTRPT
static PerfCounter* _interrupted_during_count; // _INTERRUPTIBLE OS_INTRPT
static TimeStamp _safepoint_timer;
static TimeStamp _app_timer;
@ -58,10 +55,6 @@ public:
static void record_safepoint_end() NOT_MANAGEMENT_RETURN;
static void record_application_start() NOT_MANAGEMENT_RETURN;
// interruption events
static void record_interrupted_before_count() NOT_MANAGEMENT_RETURN;
static void record_interrupted_during_count() NOT_MANAGEMENT_RETURN;
static void record_thread_interrupt_signaled_count() NOT_MANAGEMENT_RETURN;
};
#endif // SHARE_VM_SERVICES_RUNTIMESERVICE_HPP

@ -193,11 +193,48 @@ Declares a structure type that can be used in other events.
<event id="MetaspaceSummary" path="vm/gc/heap/metaspace_summary" label="Metaspace Summary" is_instant="true">
<value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
<value type="GCWHEN" field="when" label="When" />
<value type="BYTES64" field="gcThreshold" label="GC Threshold" />
<structvalue type="MetaspaceSizes" field="metaspace" label="Total"/>
<structvalue type="MetaspaceSizes" field="dataSpace" label="Data"/>
<structvalue type="MetaspaceSizes" field="classSpace" label="Class"/>
</event>
<event id="MetaspaceGCThreshold" path="vm/gc/metaspace/gc_threshold" label="Metaspace GC Threshold" is_instant="true">
<value type="BYTES64" field="oldValue" label="Old Value" />
<value type="BYTES64" field="newValue" label="New Value" />
<value type="GCTHRESHOLDUPDATER" field="updater" label="Updater" />
</event>
<event id="MetaspaceAllocationFailure" path="vm/gc/metaspace/allocation_failure" label="Metaspace Allocation Failure" is_instant="true" has_stacktrace="true">
<value type="CLASS" field="classLoader" label="Class Loader" />
<value type="BOOLEAN" field="anonymousClassLoader" label="Anonymous Class Loader" />
<value type="BYTES64" field="size" label="Size" />
<value type="METADATATYPE" field="metadataType" label="Metadata Type" />
<value type="METASPACEOBJTYPE" field="metaspaceObjectType" label="Metaspace Object Type" />
</event>
<event id="MetaspaceOOM" path="vm/gc/metaspace/out_of_memory" label="Metaspace Out of Memory" is_instant="true" has_stacktrace="true">
<value type="CLASS" field="classLoader" label="Class Loader" />
<value type="BOOLEAN" field="anonymousClassLoader" label="Anonymous Class Loader" />
<value type="BYTES64" field="size" label="Size" />
<value type="METADATATYPE" field="metadataType" label="Metadata Type" />
<value type="METASPACEOBJTYPE" field="metaspaceObjectType" label="Metaspace Object Type" />
</event>
<event id="MetaspaceChunkFreeListSummary" path="vm/gc/metaspace/chunk_free_list_summary" label="Metaspace Chunk Free List Summary" is_instant="true">
<value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
<value type="GCWHEN" field="when" label="When" />
<value type="METADATATYPE" field="metadataType" label="Metadata Type" />
<value type="ULONG" field="specializedChunks" label="Specialized Chunks" />
<value type="BYTES64" field="specializedChunksTotalSize" label="Specialized Chunks Total Size" />
<value type="ULONG" field="smallChunks" label="Small Chunks" />
<value type="BYTES64" field="smallChunksTotalSize" label="Small Chunks Total Size" />
<value type="ULONG" field="mediumChunks" label="Medium Chunks" />
<value type="BYTES64" field="mediumChunksTotalSize" label="Medium Chunks Total Size" />
<value type="ULONG" field="humongousChunks" label="Humongous Chunks" />
<value type="BYTES64" field="humongousChunksTotalSize" label="Humongous Chunks Total Size" />
</event>
<event id="PSHeapSummary" path="vm/gc/heap/ps_summary" label="Parallel Scavenge Heap Summary" is_instant="true">
<value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
<value type="GCWHEN" field="when" label="When" />

@ -130,11 +130,26 @@ Now we can use the content + data type in declaring event fields.
<value type="UTF8" field="type" label="type" />
</content_type>
<content_type id="GCThresholdUpdater" hr_name="GC Treshold Updater"
type="U1" jvm_type="GCTHRESHOLDUPDATER">
<value type="UTF8" field="updater" label="updater" />
</content_type>
<content_type id="ReferenceType" hr_name="Reference Type"
type="U1" jvm_type="REFERENCETYPE">
<value type="UTF8" field="type" label="type" />
</content_type>
<content_type id="MetadataType" hr_name="Metadata Type"
type="U1" jvm_type="METADATATYPE">
<value type="UTF8" field="type" label="type" />
</content_type>
<content_type id="MetaspaceObjectType" hr_name="Metaspace Object Type"
type="U1" jvm_type="METASPACEOBJTYPE">
<value type="UTF8" field="type" label="type" />
</content_type>
<content_type id="NARROW_OOP_MODE" hr_name="Narrow Oop Mode"
type="U1" jvm_type="NARROWOOPMODE">
<value type="UTF8" field="mode" label="mode" />
@ -324,10 +339,22 @@ Now we can use the content + data type in declaring event fields.
<primary_type symbol="G1YCTYPE" datatype="U1" contenttype="G1YCTYPE"
type="u1" sizeop="sizeof(u1)" />
<!-- GCTHRESHOLDUPDATER -->
<primary_type symbol="GCTHRESHOLDUPDATER" datatype="U1" contenttype="GCTHRESHOLDUPDATER"
type="u1" sizeop="sizeof(u1)" />
<!-- REFERENCETYPE -->
<primary_type symbol="REFERENCETYPE" datatype="U1"
contenttype="REFERENCETYPE" type="u1" sizeop="sizeof(u1)" />
<!-- METADATATYPE -->
<primary_type symbol="METADATATYPE" datatype="U1"
contenttype="METADATATYPE" type="u1" sizeop="sizeof(u1)" />
<!-- METADATAOBJTYPE -->
<primary_type symbol="METASPACEOBJTYPE" datatype="U1"
contenttype="METASPACEOBJTYPE" type="u1" sizeop="sizeof(u1)" />
<!-- NARROWOOPMODE -->
<primary_type symbol="NARROWOOPMODE" datatype="U1"
contenttype="NARROWOOPMODE" type="u1" sizeop="sizeof(u1)" />

@ -129,6 +129,8 @@ needs_compact3 = \
gc/g1/TestHumongousAllocInitialMark.java \
gc/arguments/TestG1HeapRegionSize.java \
gc/metaspace/TestMetaspaceMemoryPool.java \
gc/arguments/TestDynMinHeapFreeRatio.java \
gc/arguments/TestDynMaxHeapFreeRatio.java \
runtime/InternalApi/ThreadCpuTimesDeadlock.java \
serviceability/threads/TestFalseDeadLock.java \
compiler/tiered/NonTieredLevelsTest.java \

@ -0,0 +1,64 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test TestDynMaxHeapFreeRatio
* @bug 8028391
* @summary Verify that MaxHeapFreeRatio flag is manageable
* @library /testlibrary
* @run main TestDynMaxHeapFreeRatio
* @run main/othervm -XX:MinHeapFreeRatio=0 -XX:MaxHeapFreeRatio=100 TestDynMaxHeapFreeRatio
* @run main/othervm -XX:MinHeapFreeRatio=10 -XX:MaxHeapFreeRatio=50 -XX:-UseAdaptiveSizePolicy TestDynMaxHeapFreeRatio
* @run main/othervm -XX:MinHeapFreeRatio=10 -XX:MaxHeapFreeRatio=50 TestDynMaxHeapFreeRatio
* @run main/othervm -XX:MinHeapFreeRatio=51 -XX:MaxHeapFreeRatio=52 TestDynMaxHeapFreeRatio
* @run main/othervm -XX:MinHeapFreeRatio=75 -XX:MaxHeapFreeRatio=100 TestDynMaxHeapFreeRatio
*/
import com.oracle.java.testlibrary.TestDynamicVMOption;
import com.oracle.java.testlibrary.DynamicVMOptionChecker;
public class TestDynMaxHeapFreeRatio extends TestDynamicVMOption {
public static final String MinFreeRatioFlagName = "MinHeapFreeRatio";
public static final String MaxFreeRatioFlagName = "MaxHeapFreeRatio";
public TestDynMaxHeapFreeRatio() {
super(MaxFreeRatioFlagName);
}
public void test() {
int minHeapFreeValue = DynamicVMOptionChecker.getIntValue(MinFreeRatioFlagName);
System.out.println(MinFreeRatioFlagName + " = " + minHeapFreeValue);
testPercentageValues();
checkInvalidValue(Integer.toString(minHeapFreeValue - 1));
checkValidValue(Integer.toString(minHeapFreeValue));
checkValidValue("100");
}
public static void main(String args[]) throws Exception {
new TestDynMaxHeapFreeRatio().test();
}
}

@ -0,0 +1,62 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test TestDynMinHeapFreeRatio
* @bug 8028391
* @summary Verify that MinHeapFreeRatio flag is manageable
* @library /testlibrary
* @run main TestDynMinHeapFreeRatio
* @run main/othervm -XX:MinHeapFreeRatio=0 -XX:MaxHeapFreeRatio=100 TestDynMinHeapFreeRatio
* @run main/othervm -XX:MinHeapFreeRatio=10 -XX:MaxHeapFreeRatio=50 -XX:-UseAdaptiveSizePolicy TestDynMinHeapFreeRatio
* @run main/othervm -XX:MinHeapFreeRatio=10 -XX:MaxHeapFreeRatio=50 TestDynMinHeapFreeRatio
* @run main/othervm -XX:MinHeapFreeRatio=51 -XX:MaxHeapFreeRatio=52 TestDynMinHeapFreeRatio
* @run main/othervm -XX:MinHeapFreeRatio=75 -XX:MaxHeapFreeRatio=100 TestDynMinHeapFreeRatio
*/
import com.oracle.java.testlibrary.TestDynamicVMOption;
import com.oracle.java.testlibrary.DynamicVMOptionChecker;
public class TestDynMinHeapFreeRatio extends TestDynamicVMOption {
public static final String MinFreeRatioFlagName = "MinHeapFreeRatio";
public static final String MaxFreeRatioFlagName = "MaxHeapFreeRatio";
public TestDynMinHeapFreeRatio() {
super(MinFreeRatioFlagName);
}
public void test() {
int maxHeapFreeValue = DynamicVMOptionChecker.getIntValue(MaxFreeRatioFlagName);
System.out.println(MaxFreeRatioFlagName + " = " + maxHeapFreeValue);
testPercentageValues();
checkInvalidValue(Integer.toString(maxHeapFreeValue + 1));
checkValidValue(Integer.toString(maxHeapFreeValue));
checkValidValue("0");
}
public static void main(String args[]) throws Exception {
new TestDynMinHeapFreeRatio().test();
}
}

@ -0,0 +1,152 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestGCLogMessages
* @bug 8035406 8027295 8035398
* @summary Ensure that the PrintGCDetails output for a minor GC with G1
* includes the expected messages.
* @key gc
* @library /testlibrary
*/
import com.oracle.java.testlibrary.ProcessTools;
import com.oracle.java.testlibrary.OutputAnalyzer;
public class TestGCLogMessages {
public static void main(String[] args) throws Exception {
testNormalLogs();
testWithToSpaceExhaustionLogs();
}
private static void testNormalLogs() throws Exception {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
"-Xmx10M",
GCTest.class.getName());
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldNotContain("[Redirty Cards");
output.shouldNotContain("[Code Root Purge");
output.shouldNotContain("[String Dedup Fixup");
output.shouldNotContain("[Young Free CSet");
output.shouldNotContain("[Non-Young Free CSet");
output.shouldHaveExitValue(0);
pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
"-XX:+UseStringDeduplication",
"-Xmx10M",
"-XX:+PrintGCDetails",
GCTest.class.getName());
output = new OutputAnalyzer(pb.start());
output.shouldContain("[Redirty Cards");
output.shouldContain("[Code Root Purge");
output.shouldContain("[String Dedup Fixup");
output.shouldNotContain("[Young Free CSet");
output.shouldNotContain("[Non-Young Free CSet");
output.shouldHaveExitValue(0);
pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
"-XX:+UseStringDeduplication",
"-Xmx10M",
"-XX:+PrintGCDetails",
"-XX:+UnlockExperimentalVMOptions",
"-XX:G1LogLevel=finest",
GCTest.class.getName());
output = new OutputAnalyzer(pb.start());
output.shouldContain("[Redirty Cards");
output.shouldContain("[Code Root Purge");
output.shouldContain("[String Dedup Fixup");
output.shouldContain("[Young Free CSet");
output.shouldContain("[Non-Young Free CSet");
// also check evacuation failure messages once
output.shouldNotContain("[Evacuation Failure");
output.shouldNotContain("[Recalculate Used");
output.shouldNotContain("[Remove Self Forwards");
output.shouldNotContain("[Restore RemSet");
output.shouldHaveExitValue(0);
}
private static void testWithToSpaceExhaustionLogs() throws Exception {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
"-Xmx10M",
"-Xmn5M",
"-XX:+PrintGCDetails",
GCTestWithToSpaceExhaustion.class.getName());
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldContain("[Evacuation Failure");
output.shouldNotContain("[Recalculate Used");
output.shouldNotContain("[Remove Self Forwards");
output.shouldNotContain("[Restore RemSet");
output.shouldHaveExitValue(0);
pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
"-Xmx10M",
"-Xmn5M",
"-XX:+PrintGCDetails",
"-XX:+UnlockExperimentalVMOptions",
"-XX:G1LogLevel=finest",
GCTestWithToSpaceExhaustion.class.getName());
output = new OutputAnalyzer(pb.start());
output.shouldContain("[Evacuation Failure");
output.shouldContain("[Recalculate Used");
output.shouldContain("[Remove Self Forwards");
output.shouldContain("[Restore RemSet");
output.shouldHaveExitValue(0);
}
static class GCTest {
private static byte[] garbage;
public static void main(String [] args) {
System.out.println("Creating garbage");
// create 128MB of garbage. This should result in at least one GC
for (int i = 0; i < 1024; i++) {
garbage = new byte[128 * 1024];
}
System.out.println("Done");
}
}
static class GCTestWithToSpaceExhaustion {
private static byte[] garbage;
private static byte[] largeObject;
public static void main(String [] args) {
largeObject = new byte[5*1024*1024];
System.out.println("Creating garbage");
// create 128MB of garbage. This should result in several GCs,
// some of them with to-space exhaustion.
for (int i = 0; i < 1024; i++) {
garbage = new byte[128 * 1024];
}
System.out.println("Done");
}
}
}

@ -0,0 +1,36 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestStringDeduplicationAgeThreshold
* @summary Test string deduplication age threshold
* @bug 8029075
* @key gc
* @library /testlibrary
*/
public class TestStringDeduplicationAgeThreshold {
public static void main(String[] args) throws Exception {
TestStringDeduplicationTools.testAgeThreshold();
}
}

@ -0,0 +1,36 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestStringDeduplicationFullGC
* @summary Test string deduplication during full GC
* @bug 8029075
* @key gc
* @library /testlibrary
*/
public class TestStringDeduplicationFullGC {
public static void main(String[] args) throws Exception {
TestStringDeduplicationTools.testFullGC();
}
}

@ -0,0 +1,36 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestStringDeduplicationInterned
* @summary Test string deduplication of interned strings
* @bug 8029075
* @key gc
* @library /testlibrary
*/
public class TestStringDeduplicationInterned {
public static void main(String[] args) throws Exception {
TestStringDeduplicationTools.testInterned();
}
}

@ -0,0 +1,36 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestStringDeduplicationMemoryUsage
* @summary Test string deduplication memory usage
* @bug 8029075
* @key gc
* @library /testlibrary
*/
public class TestStringDeduplicationMemoryUsage {
public static void main(String[] args) throws Exception {
TestStringDeduplicationTools.testMemoryUsage();
}
}

@ -0,0 +1,36 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestStringDeduplicationPrintOptions
* @summary Test string deduplication print options
* @bug 8029075
* @key gc
* @library /testlibrary
*/
public class TestStringDeduplicationPrintOptions {
public static void main(String[] args) throws Exception {
TestStringDeduplicationTools.testPrintOptions();
}
}

@ -0,0 +1,36 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestStringDeduplicationTableRehash
* @summary Test string deduplication table rehash
* @bug 8029075
* @key gc
* @library /testlibrary
*/
public class TestStringDeduplicationTableRehash {
public static void main(String[] args) throws Exception {
TestStringDeduplicationTools.testTableRehash();
}
}

@ -0,0 +1,36 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestStringDeduplicationTableResize
* @summary Test string deduplication table resize
* @bug 8029075
* @key gc
* @library /testlibrary
*/
public class TestStringDeduplicationTableResize {
public static void main(String[] args) throws Exception {
TestStringDeduplicationTools.testTableResize();
}
}

@ -0,0 +1,512 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* Common code for string deduplication tests
*/
import java.lang.management.*;
import java.lang.reflect.*;
import java.security.*;
import java.util.*;
import com.oracle.java.testlibrary.*;
import sun.misc.*;
class TestStringDeduplicationTools {
private static final String YoungGC = "YoungGC";
private static final String FullGC = "FullGC";
private static final int Xmn = 50; // MB
private static final int Xms = 100; // MB
private static final int Xmx = 100; // MB
private static final int MB = 1024 * 1024;
private static final int StringLength = 50;
private static Field valueField;
private static Unsafe unsafe;
private static byte[] dummy;
static {
try {
Field field = Unsafe.class.getDeclaredField("theUnsafe");
field.setAccessible(true);
unsafe = (Unsafe)field.get(null);
valueField = String.class.getDeclaredField("value");
valueField.setAccessible(true);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private static Object getValue(String string) {
try {
return valueField.get(string);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private static void doFullGc(int numberOfTimes) {
for (int i = 0; i < numberOfTimes; i++) {
System.out.println("Begin: Full GC " + (i + 1) + "/" + numberOfTimes);
System.gc();
System.out.println("End: Full GC " + (i + 1) + "/" + numberOfTimes);
}
}
private static void doYoungGc(int numberOfTimes) {
// Provoke at least numberOfTimes young GCs
final int objectSize = 128;
final int maxObjectInYoung = (Xmn * MB) / objectSize;
for (int i = 0; i < numberOfTimes; i++) {
System.out.println("Begin: Young GC " + (i + 1) + "/" + numberOfTimes);
for (int j = 0; j < maxObjectInYoung + 1; j++) {
dummy = new byte[objectSize];
}
System.out.println("End: Young GC " + (i + 1) + "/" + numberOfTimes);
}
}
private static void forceDeduplication(int ageThreshold, String gcType) {
// Force deduplication to happen by either causing a FullGC or a YoungGC.
// We do several collections to also provoke a situation where the
// deduplication thread needs to yield while processing the queue. This
// also tests that the references in the deduplication queue are adjusted
// accordingly.
if (gcType.equals(FullGC)) {
doFullGc(3);
} else {
doYoungGc(ageThreshold + 3);
}
}
private static String generateString(int id) {
StringBuilder builder = new StringBuilder(StringLength);
builder.append("DeduplicationTestString:" + id + ":");
while (builder.length() < StringLength) {
builder.append('X');
}
return builder.toString();
}
private static ArrayList<String> createStrings(int total, int unique) {
System.out.println("Creating strings: total=" + total + ", unique=" + unique);
if (total % unique != 0) {
throw new RuntimeException("Total must be divisible by unique");
}
ArrayList<String> list = new ArrayList<String>(total);
for (int j = 0; j < total / unique; j++) {
for (int i = 0; i < unique; i++) {
list.add(generateString(i));
}
}
return list;
}
private static void verifyStrings(ArrayList<String> list, int uniqueExpected) {
for (;;) {
// Check number of deduplicated strings
ArrayList<Object> unique = new ArrayList<Object>(uniqueExpected);
for (String string: list) {
Object value = getValue(string);
boolean uniqueValue = true;
for (Object obj: unique) {
if (obj == value) {
uniqueValue = false;
break;
}
}
if (uniqueValue) {
unique.add(value);
}
}
System.out.println("Verifying strings: total=" + list.size() +
", uniqueFound=" + unique.size() +
", uniqueExpected=" + uniqueExpected);
if (unique.size() == uniqueExpected) {
System.out.println("Deduplication completed");
break;
} else {
System.out.println("Deduplication not completed, waiting...");
// Give the deduplication thread time to complete
try {
Thread.sleep(1000);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
}
private static OutputAnalyzer runTest(String... extraArgs) throws Exception {
String[] defaultArgs = new String[] {
"-Xmn" + Xmn + "m",
"-Xms" + Xms + "m",
"-Xmx" + Xmx + "m",
"-XX:+UseG1GC",
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+VerifyAfterGC" // Always verify after GC
};
ArrayList<String> args = new ArrayList<String>();
args.addAll(Arrays.asList(defaultArgs));
args.addAll(Arrays.asList(extraArgs));
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args.toArray(new String[args.size()]));
OutputAnalyzer output = new OutputAnalyzer(pb.start());
System.err.println(output.getStderr());
System.out.println(output.getStdout());
return output;
}
private static class DeduplicationTest {
public static void main(String[] args) {
System.out.println("Begin: DeduplicationTest");
final int numberOfStrings = Integer.parseUnsignedInt(args[0]);
final int numberOfUniqueStrings = Integer.parseUnsignedInt(args[1]);
final int ageThreshold = Integer.parseUnsignedInt(args[2]);
final String gcType = args[3];
ArrayList<String> list = createStrings(numberOfStrings, numberOfUniqueStrings);
forceDeduplication(ageThreshold, gcType);
verifyStrings(list, numberOfUniqueStrings);
System.out.println("End: DeduplicationTest");
}
public static OutputAnalyzer run(int numberOfStrings, int ageThreshold, String gcType, String... extraArgs) throws Exception {
String[] defaultArgs = new String[] {
"-XX:+UseStringDeduplication",
"-XX:StringDeduplicationAgeThreshold=" + ageThreshold,
DeduplicationTest.class.getName(),
"" + numberOfStrings,
"" + numberOfStrings / 2,
"" + ageThreshold,
gcType
};
ArrayList<String> args = new ArrayList<String>();
args.addAll(Arrays.asList(extraArgs));
args.addAll(Arrays.asList(defaultArgs));
return runTest(args.toArray(new String[args.size()]));
}
}
private static class InternedTest {
public static void main(String[] args) {
// This test verifies that interned strings are always
// deduplicated when being interned, and never after
// being interned.
System.out.println("Begin: InternedTest");
final int ageThreshold = Integer.parseUnsignedInt(args[0]);
final String baseString = "DeduplicationTestString:" + InternedTest.class.getName();
// Create duplicate of baseString
StringBuilder sb1 = new StringBuilder(baseString);
String dupString1 = sb1.toString();
if (getValue(dupString1) == getValue(baseString)) {
throw new RuntimeException("Values should not match");
}
// Force baseString to be inspected for deduplication
// and be inserted into the deduplication hashtable.
forceDeduplication(ageThreshold, FullGC);
// Wait for deduplication to occur
while (getValue(dupString1) != getValue(baseString)) {
System.out.println("Waiting...");
try {
Thread.sleep(100);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
// Create a new duplicate of baseString
StringBuilder sb2 = new StringBuilder(baseString);
String dupString2 = sb2.toString();
if (getValue(dupString2) == getValue(baseString)) {
throw new RuntimeException("Values should not match");
}
// Intern the new duplicate
Object beforeInternedValue = getValue(dupString2);
String internedString = dupString2.intern();
if (internedString != dupString2) {
throw new RuntimeException("String should match");
}
if (getValue(internedString) != getValue(baseString)) {
throw new RuntimeException("Values should match");
}
// Check original value of interned string, to make sure
// deduplication happened on the interned string and not
// on the base string
if (beforeInternedValue == getValue(baseString)) {
throw new RuntimeException("Values should not match");
}
System.out.println("End: InternedTest");
}
public static OutputAnalyzer run() throws Exception {
return runTest("-XX:+PrintGC",
"-XX:+PrintGCDetails",
"-XX:+UseStringDeduplication",
"-XX:+PrintStringDeduplicationStatistics",
"-XX:StringDeduplicationAgeThreshold=" + DefaultAgeThreshold,
InternedTest.class.getName(),
"" + DefaultAgeThreshold);
}
}
private static class MemoryUsageTest {
public static void main(String[] args) {
System.out.println("Begin: MemoryUsageTest");
final boolean useStringDeduplication = Boolean.parseBoolean(args[0]);
final int numberOfStrings = LargeNumberOfStrings;
final int numberOfUniqueStrings = 1;
ArrayList<String> list = createStrings(numberOfStrings, numberOfUniqueStrings);
forceDeduplication(DefaultAgeThreshold, FullGC);
if (useStringDeduplication) {
verifyStrings(list, numberOfUniqueStrings);
}
System.gc();
System.out.println("Heap Memory Usage: " + ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed());
System.out.println("End: MemoryUsageTest");
}
public static OutputAnalyzer run(boolean useStringDeduplication) throws Exception {
String[] extraArgs = new String[0];
if (useStringDeduplication) {
extraArgs = new String[] {
"-XX:+UseStringDeduplication",
"-XX:+PrintStringDeduplicationStatistics",
"-XX:StringDeduplicationAgeThreshold=" + DefaultAgeThreshold
};
}
String[] defaultArgs = new String[] {
"-XX:+PrintGC",
"-XX:+PrintGCDetails",
MemoryUsageTest.class.getName(),
"" + useStringDeduplication
};
ArrayList<String> args = new ArrayList<String>();
args.addAll(Arrays.asList(extraArgs));
args.addAll(Arrays.asList(defaultArgs));
return runTest(args.toArray(new String[args.size()]));
}
}
/*
* Tests
*/
private static final int LargeNumberOfStrings = 10000;
private static final int SmallNumberOfStrings = 10;
private static final int MaxAgeThreshold = 15;
private static final int DefaultAgeThreshold = 3;
private static final int MinAgeThreshold = 1;
private static final int TooLowAgeThreshold = MinAgeThreshold - 1;
private static final int TooHighAgeThreshold = MaxAgeThreshold + 1;
public static void testYoungGC() throws Exception {
// Do young GC to age strings to provoke deduplication
OutputAnalyzer output = DeduplicationTest.run(LargeNumberOfStrings,
DefaultAgeThreshold,
YoungGC,
"-XX:+PrintGC",
"-XX:+PrintStringDeduplicationStatistics");
output.shouldNotContain("Full GC");
output.shouldContain("GC pause (G1 Evacuation Pause) (young)");
output.shouldContain("GC concurrent-string-deduplication");
output.shouldContain("Deduplicated:");
output.shouldHaveExitValue(0);
}
public static void testFullGC() throws Exception {
// Do full GC to age strings to provoke deduplication
OutputAnalyzer output = DeduplicationTest.run(LargeNumberOfStrings,
DefaultAgeThreshold,
FullGC,
"-XX:+PrintGC",
"-XX:+PrintStringDeduplicationStatistics");
output.shouldNotContain("GC pause (G1 Evacuation Pause) (young)");
output.shouldContain("Full GC");
output.shouldContain("GC concurrent-string-deduplication");
output.shouldContain("Deduplicated:");
output.shouldHaveExitValue(0);
}
public static void testTableResize() throws Exception {
// Test with StringDeduplicationResizeALot
OutputAnalyzer output = DeduplicationTest.run(LargeNumberOfStrings,
DefaultAgeThreshold,
YoungGC,
"-XX:+PrintGC",
"-XX:+PrintStringDeduplicationStatistics",
"-XX:+StringDeduplicationResizeALot");
output.shouldContain("GC concurrent-string-deduplication");
output.shouldContain("Deduplicated:");
output.shouldNotContain("Resize Count: 0");
output.shouldHaveExitValue(0);
}
public static void testTableRehash() throws Exception {
// Test with StringDeduplicationRehashALot
OutputAnalyzer output = DeduplicationTest.run(LargeNumberOfStrings,
DefaultAgeThreshold,
YoungGC,
"-XX:+PrintGC",
"-XX:+PrintStringDeduplicationStatistics",
"-XX:+StringDeduplicationRehashALot");
output.shouldContain("GC concurrent-string-deduplication");
output.shouldContain("Deduplicated:");
output.shouldNotContain("Rehash Count: 0");
output.shouldNotContain("Hash Seed: 0x0");
output.shouldHaveExitValue(0);
}
public static void testAgeThreshold() throws Exception {
OutputAnalyzer output;
// Test with max age threshold
output = DeduplicationTest.run(SmallNumberOfStrings,
MaxAgeThreshold,
YoungGC,
"-XX:+PrintGC",
"-XX:+PrintStringDeduplicationStatistics");
output.shouldContain("GC concurrent-string-deduplication");
output.shouldContain("Deduplicated:");
output.shouldHaveExitValue(0);
// Test with min age threshold
output = DeduplicationTest.run(SmallNumberOfStrings,
MinAgeThreshold,
YoungGC,
"-XX:+PrintGC",
"-XX:+PrintStringDeduplicationStatistics");
output.shouldContain("GC concurrent-string-deduplication");
output.shouldContain("Deduplicated:");
output.shouldHaveExitValue(0);
// Test with too low age threshold
output = DeduplicationTest.run(SmallNumberOfStrings,
TooLowAgeThreshold,
YoungGC);
output.shouldContain("StringDeduplicationAgeThreshold of " + TooLowAgeThreshold +
" is invalid; must be between " + MinAgeThreshold + " and " + MaxAgeThreshold);
output.shouldHaveExitValue(1);
// Test with too high age threshold
output = DeduplicationTest.run(SmallNumberOfStrings,
TooHighAgeThreshold,
YoungGC);
output.shouldContain("StringDeduplicationAgeThreshold of " + TooHighAgeThreshold +
" is invalid; must be between " + MinAgeThreshold + " and " + MaxAgeThreshold);
output.shouldHaveExitValue(1);
}
public static void testPrintOptions() throws Exception {
OutputAnalyzer output;
// Test without PrintGC and without PrintStringDeduplicationStatistics
output = DeduplicationTest.run(SmallNumberOfStrings,
DefaultAgeThreshold,
YoungGC);
output.shouldNotContain("GC concurrent-string-deduplication");
output.shouldNotContain("Deduplicated:");
output.shouldHaveExitValue(0);
// Test with PrintGC but without PrintStringDeduplicationStatistics
output = DeduplicationTest.run(SmallNumberOfStrings,
DefaultAgeThreshold,
YoungGC,
"-XX:+PrintGC");
output.shouldContain("GC concurrent-string-deduplication");
output.shouldNotContain("Deduplicated:");
output.shouldHaveExitValue(0);
}
public static void testInterned() throws Exception {
// Test that interned strings are deduplicated before being interned
OutputAnalyzer output = InternedTest.run();
output.shouldHaveExitValue(0);
}
public static void testMemoryUsage() throws Exception {
// Test that memory usage is reduced after deduplication
OutputAnalyzer output;
final String usagePattern = "Heap Memory Usage: (\\d+)";
// Run without deduplication
output = MemoryUsageTest.run(false);
output.shouldHaveExitValue(0);
final long memoryUsageWithoutDedup = Long.parseLong(output.firstMatch(usagePattern, 1));
// Run with deduplication
output = MemoryUsageTest.run(true);
output.shouldHaveExitValue(0);
final long memoryUsageWithDedup = Long.parseLong(output.firstMatch(usagePattern, 1));
// Calculate expected memory usage with deduplication enabled. This calculation does
// not take alignment and padding into account, so it's a conservative estimate.
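        // Illustrative example (hypothetical values, not the test's actual constants):
        // 10000 strings of 256 chars each would save roughly
        // 9999 * (256 * 2 + 16) = 5279472 bytes, assuming a 16-byte char[]
        // header, which varies by VM configuration.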
final long sizeOfChar = 2; // bytes
final long bytesSaved = (LargeNumberOfStrings - 1) * (StringLength * sizeOfChar + unsafe.ARRAY_CHAR_BASE_OFFSET);
final long memoryUsageWithDedupExpected = memoryUsageWithoutDedup - bytesSaved;
System.out.println("Memory usage summary:");
System.out.println(" memoryUsageWithoutDedup: " + memoryUsageWithoutDedup);
System.out.println(" memoryUsageWithDedup: " + memoryUsageWithDedup);
System.out.println(" memoryUsageWithDedupExpected: " + memoryUsageWithDedupExpected);
if (memoryUsageWithDedup > memoryUsageWithDedupExpected) {
            throw new Exception("Unexpected memory usage, memoryUsageWithDedup should be less than or equal to memoryUsageWithDedupExpected");
}
}
}

@ -0,0 +1,36 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestStringDeduplicationYoungGC
* @summary Test string deduplication during young GC
* @bug 8029075
* @key gc
* @library /testlibrary
*/
public class TestStringDeduplicationYoungGC {
public static void main(String[] args) throws Exception {
TestStringDeduplicationTools.testYoungGC();
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,7 +22,6 @@
*/
/*
* @ignore 8026154
* @test
* @bug 8005933
* @summary Test that -Xshare:auto uses CDS when explicitly specified with -server.
@ -50,21 +49,15 @@ public class XShareAuto {
pb = ProcessTools.createJavaProcessBuilder(
"-server", "-Xshare:auto", "-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./sample.jsa", "-version");
"-XX:SharedArchiveFile=./sample.jsa", "-XX:+PrintSharedSpaces", "-version");
output = new OutputAnalyzer(pb.start());
try {
output.shouldContain("sharing");
output.shouldHaveExitValue(0);
} catch (RuntimeException e) {
// If this failed then check that it would also be unable
// to share even if -Xshare:on is specified. If so, then
// return a success status.
pb = ProcessTools.createJavaProcessBuilder(
"-server", "-Xshare:on", "-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./sample.jsa", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Unable to use shared archive");
output.shouldHaveExitValue(1);
// if sharing failed due to ASLR or similar reasons,
// check whether sharing was attempted at all (UseSharedSpaces)
output.shouldContain("UseSharedSpaces:");
}
output.shouldHaveExitValue(0);
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,7 +22,6 @@
*/
/*
* @ignore 8025642
* @test CdsDifferentObjectAlignment
* @summary Testing CDS (class data sharing) using varying object alignment.
* Using different object alignment for each dump/load pair.
@ -84,7 +83,11 @@ public class CdsDifferentObjectAlignment {
createAlignment,
loadAlignment);
output.shouldContain(expectedErrorMsg);
try {
output.shouldContain(expectedErrorMsg);
} catch (RuntimeException e) {
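            // The archive may also fail to map for environmental reasons
            // (e.g. address space layout randomization); accept the generic
            // failure message in that case.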
output.shouldContain("Unable to use shared archive");
}
output.shouldHaveExitValue(1);
}
}

@ -22,7 +22,6 @@
*/
/*
* @ignore 8032224
* @test DefaultUseWithClient
* @summary Test default behavior of sharing with -client
* @library /testlibrary
@ -57,10 +56,17 @@ public class DefaultUseWithClient {
"-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./" + fileName,
"-client",
"-XX:+PrintSharedSpaces",
"-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("sharing");
try {
output.shouldContain("sharing");
} catch (RuntimeException e) {
// if sharing failed due to ASLR or similar reasons,
// check whether sharing was attempted at all (UseSharedSpaces)
output.shouldContain("UseSharedSpaces:");
}
output.shouldHaveExitValue(0);
}
}
