Merge
commit 1e10db0d6a
Changed files:

hotspot/
  build/solaris/makefiles/
  src/
    cpu/sparc/vm/
    share/vm/
      gc_implementation/
        concurrentMarkSweep/
          binaryTreeDictionary.cpp
          compactibleFreeListSpace.cpp
          compactibleFreeListSpace.hpp
          concurrentMarkSweepGeneration.cpp
          freeList.cpp
          freeList.hpp
        includeDB_gc_shared
        parNew/
        parallelScavenge/
        shared/
      memory/
        cardTableModRefBS.cpp
        cardTableRS.cpp
        cardTableRS.hpp
        collectorPolicy.cpp
        collectorPolicy.hpp
        genRemSet.hpp
        heapInspection.cpp
        heapInspection.hpp
        referenceProcessor.cpp
        tenuredGeneration.cpp
      runtime/
@@ -19,7 +19,7 @@
# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
# CA 95054 USA or visit www.sun.com if you need additional information or
# have any questions.
#
#
#

# Must also specify if CPU is little endian
@@ -45,6 +45,10 @@ OPT_CFLAGS/os_solaris_x86_64.o = -xO1
OPT_CFLAGS/generateOptoStub.o = -xO2
OPT_CFLAGS/thread.o = -xO2

# Work around for 6624782
OPT_CFLAGS/instanceKlass.o = -Qoption ube -no_a2lf
OPT_CFLAGS/objArrayKlass.o = -Qoption ube -no_a2lf

else

ifeq ("${Platform_compiler}", "gcc")
@@ -58,6 +62,6 @@ else
# error
_JUNK2_ := $(shell echo >&2 \
 "*** ERROR: this compiler is not yet supported by this code base!")
	@exit 1
	@exit 1
endif
endif
@@ -28,6 +28,12 @@
int VM_Version::_features = VM_Version::unknown_m;
const char* VM_Version::_features_str = "";

bool VM_Version::is_niagara1_plus() {
  // This is a placeholder until the real test is determined.
  return is_niagara1() &&
    (os::processor_count() > maximum_niagara1_processor_count());
}

void VM_Version::initialize() {
  _features = determine_features();
  PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
@@ -160,3 +166,13 @@ void VM_Version::allow_all() {
void VM_Version::revert() {
  _features = saved_features;
}

unsigned int VM_Version::calc_parallel_worker_threads() {
  unsigned int result;
  if (is_niagara1_plus()) {
    result = nof_parallel_worker_threads(5, 16, 8);
  } else {
    result = nof_parallel_worker_threads(5, 8, 8);
  }
  return result;
}
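Note: nof_parallel_worker_threads itself is not shown in this diff. As a hedged sketch only — inferred from its (num, den, switch_pt) argument pattern, not taken from the commit — such a helper plausibly uses every CPU up to switch_pt and only a num/den fraction of the CPUs beyond it:

    // Assumed shape of the helper (illustrative, not the committed code).
    unsigned int VM_Version::nof_parallel_worker_threads(unsigned int num,
                                                         unsigned int den,
                                                         unsigned int switch_pt) {
      unsigned int ncpus = (unsigned int) os::processor_count();
      // E.g. with (5, 16, 8) on a 72-CPU machine:
      // 8 + (72 - 8) * 5 / 16 == 28 parallel worker threads.
      return (ncpus <= switch_pt)
           ? ncpus
           : switch_pt + ((ncpus - switch_pt) * num) / den;
    }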
@@ -64,6 +64,11 @@ protected:

  static bool is_niagara1(int features) { return (features & niagara1_m) == niagara1_m; }

  static int maximum_niagara1_processor_count() { return 32; }
  // Returns true if the platform is in the niagara line and
  // newer than the niagara1.
  static bool is_niagara1_plus();

 public:
  // Initialization
  static void initialize();
@@ -129,4 +134,7 @@ public:

  // Override the Abstract_VM_Version implementation.
  static uint page_size_count() { return is_sun4v() ? 4 : 2; }

  // Calculates the number of parallel threads
  static unsigned int calc_parallel_worker_threads();
};
@@ -1071,85 +1071,56 @@ void BinaryTreeDictionary::reportStatistics() const {
// for each list in the tree. Also print some summary
// information.
class printTreeCensusClosure : public AscendTreeCensusClosure {
  int _print_line;
  size_t _totalFree;
  AllocationStats _totals;
  size_t _count;
  FreeList _total;

 public:
  printTreeCensusClosure() {
    _print_line = 0;
    _totalFree = 0;
    _count = 0;
    _totals.initialize();
  }
  AllocationStats* totals() { return &_totals; }
  size_t count() { return _count; }
  void increment_count_by(size_t v) { _count += v; }
  FreeList* total() { return &_total; }
  size_t totalFree() { return _totalFree; }
  void increment_totalFree_by(size_t v) { _totalFree += v; }
  void do_list(FreeList* fl) {
    bool nl = false; // "maybe this is not needed" isNearLargestChunk(fl->head());

    gclog_or_tty->print("%c %4d\t\t" "%7d\t" "%7d\t"
               "%7d\t" "%7d\t" "%7d\t" "%7d\t"
               "%7d\t" "%7d\t" "%7d\t"
               "%7d\t" "\n",
               " n"[nl], fl->size(), fl->bfrSurp(), fl->surplus(),
               fl->desired(), fl->prevSweep(), fl->beforeSweep(), fl->count(),
               fl->coalBirths(), fl->coalDeaths(), fl->splitBirths(),
               fl->splitDeaths());

    increment_totalFree_by(fl->count() * fl->size());
    increment_count_by(fl->count());
    totals()->set_bfrSurp(totals()->bfrSurp() + fl->bfrSurp());
    totals()->set_surplus(totals()->splitDeaths() + fl->surplus());
    totals()->set_prevSweep(totals()->prevSweep() + fl->prevSweep());
    totals()->set_beforeSweep(totals()->beforeSweep() + fl->beforeSweep());
    totals()->set_coalBirths(totals()->coalBirths() + fl->coalBirths());
    totals()->set_coalDeaths(totals()->coalDeaths() + fl->coalDeaths());
    totals()->set_splitBirths(totals()->splitBirths() + fl->splitBirths());
    totals()->set_splitDeaths(totals()->splitDeaths() + fl->splitDeaths());
    if (++_print_line >= 40) {
      FreeList::print_labels_on(gclog_or_tty, "size");
      _print_line = 0;
    }
    fl->print_on(gclog_or_tty);
    _totalFree += fl->count() * fl->size();
    total()->set_count( total()->count() + fl->count() );
    total()->set_bfrSurp( total()->bfrSurp() + fl->bfrSurp() );
    total()->set_surplus( total()->splitDeaths() + fl->surplus() );
    total()->set_desired( total()->desired() + fl->desired() );
    total()->set_prevSweep( total()->prevSweep() + fl->prevSweep() );
    total()->set_beforeSweep(total()->beforeSweep() + fl->beforeSweep());
    total()->set_coalBirths( total()->coalBirths() + fl->coalBirths() );
    total()->set_coalDeaths( total()->coalDeaths() + fl->coalDeaths() );
    total()->set_splitBirths(total()->splitBirths() + fl->splitBirths());
    total()->set_splitDeaths(total()->splitDeaths() + fl->splitDeaths());
  }
};

void BinaryTreeDictionary::printDictCensus(void) const {

  gclog_or_tty->print("\nBinaryTree\n");
  gclog_or_tty->print(
             "%4s\t\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t"
             "%7s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t" "\n",
             "size", "bfrsurp", "surplus", "desired", "prvSwep", "bfrSwep",
             "count", "cBirths", "cDeaths", "sBirths", "sDeaths");

  FreeList::print_labels_on(gclog_or_tty, "size");
  printTreeCensusClosure ptc;
  ptc.do_tree(root());

  FreeList* total = ptc.total();
  FreeList::print_labels_on(gclog_or_tty, " ");
  total->print_on(gclog_or_tty, "TOTAL\t");
  gclog_or_tty->print(
             "\t\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t"
             "%7s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t" "\n",
             "bfrsurp", "surplus", "prvSwep", "bfrSwep",
             "count", "cBirths", "cDeaths", "sBirths", "sDeaths");
  gclog_or_tty->print(
             "%s\t\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t"
             "%7d\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t" "\n",
             "totl",
             ptc.totals()->bfrSurp(),
             ptc.totals()->surplus(),
             ptc.totals()->prevSweep(),
             ptc.totals()->beforeSweep(),
             ptc.count(),
             ptc.totals()->coalBirths(),
             ptc.totals()->coalDeaths(),
             ptc.totals()->splitBirths(),
             ptc.totals()->splitDeaths());
  gclog_or_tty->print("totalFree(words): %7d growth: %8.5f deficit: %8.5f\n",
             "totalFree(words): " SIZE_FORMAT_W(16)
             " growth: %8.5f deficit: %8.5f\n",
             ptc.totalFree(),
             (double)(ptc.totals()->splitBirths()+ptc.totals()->coalBirths()
                      -ptc.totals()->splitDeaths()-ptc.totals()->coalDeaths())
             /(ptc.totals()->prevSweep() != 0 ?
               (double)ptc.totals()->prevSweep() : 1.0),
             (double)(ptc.totals()->desired() - ptc.count())
             /(ptc.totals()->desired() != 0 ?
               (double)ptc.totals()->desired() : 1.0));
             (double)(total->splitBirths() + total->coalBirths()
                      - total->splitDeaths() - total->coalDeaths())
             /(total->prevSweep() != 0 ? (double)total->prevSweep() : 1.0),
             (double)(total->desired() - total->count())
             /(total->desired() != 0 ? (double)total->desired() : 1.0));
}

// Verify the following tree invariants:
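A quick worked reading of the growth/deficit figures printed above (numbers hypothetical, not from the commit): if a sweep records splitBirths = 40, coalBirths = 10, splitDeaths = 20, coalDeaths = 10 and prevSweep = 100, then growth = (40 + 10 - 20 - 10) / 100 = 0.20000; with desired = 50 and count = 45, deficit = (50 - 45) / 50 = 0.10000. Both divisions fall back to a divisor of 1.0 when prevSweep or desired is zero, so the printout never divides by zero.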
@@ -1835,7 +1835,7 @@ void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  guarantee(false, "NYI");
}

bool CompactibleFreeListSpace::linearAllocationWouldFail() {
bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  return _smallLinearAllocBlock._word_size == 0;
}

@@ -1906,6 +1906,13 @@ CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  }
}

// Support for concurrent collection policy decisions.
bool CompactibleFreeListSpace::should_concurrent_collect() const {
  // In the future we might want to add in fragmentation stats --
  // including erosion of the "mountain" into this decision as well.
  return !adaptive_freelists() && linearAllocationWouldFail();
}

// Support for compaction

void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
@@ -2013,11 +2020,11 @@ void CompactibleFreeListSpace::clearFLCensus() {
  }
}

void CompactibleFreeListSpace::endSweepFLCensus(int sweepCt) {
void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  setFLSurplus();
  setFLHints();
  if (PrintGC && PrintFLSCensus > 0) {
    printFLCensus(sweepCt);
    printFLCensus(sweep_count);
  }
  clearFLCensus();
  assert_locked();
@@ -2293,59 +2300,37 @@ void CompactibleFreeListSpace::checkFreeListConsistency() const {
}
#endif

void CompactibleFreeListSpace::printFLCensus(int sweepCt) const {
void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  assert_lock_strong(&_freelistLock);
  ssize_t bfrSurp = 0;
  ssize_t surplus = 0;
  ssize_t desired = 0;
  ssize_t prevSweep = 0;
  ssize_t beforeSweep = 0;
  ssize_t count = 0;
  ssize_t coalBirths = 0;
  ssize_t coalDeaths = 0;
  ssize_t splitBirths = 0;
  ssize_t splitDeaths = 0;
  gclog_or_tty->print("end sweep# %d\n", sweepCt);
  gclog_or_tty->print("%4s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t"
             "%7s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t"
             "%7s\t" "\n",
             "size", "bfrsurp", "surplus", "desired", "prvSwep",
             "bfrSwep", "count", "cBirths", "cDeaths", "sBirths",
             "sDeaths");

  FreeList total;
  gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  FreeList::print_labels_on(gclog_or_tty, "size");
  size_t totalFree = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    const FreeList *fl = &_indexedFreeList[i];
    totalFree += fl->count() * fl->size();

    gclog_or_tty->print("%4d\t" "%7d\t" "%7d\t" "%7d\t"
               "%7d\t" "%7d\t" "%7d\t" "%7d\t"
               "%7d\t" "%7d\t" "%7d\t" "\n",
               fl->size(), fl->bfrSurp(), fl->surplus(), fl->desired(),
               fl->prevSweep(), fl->beforeSweep(), fl->count(), fl->coalBirths(),
               fl->coalDeaths(), fl->splitBirths(), fl->splitDeaths());
    bfrSurp += fl->bfrSurp();
    surplus += fl->surplus();
    desired += fl->desired();
    prevSweep += fl->prevSweep();
    beforeSweep += fl->beforeSweep();
    count += fl->count();
    coalBirths += fl->coalBirths();
    coalDeaths += fl->coalDeaths();
    splitBirths += fl->splitBirths();
    splitDeaths += fl->splitDeaths();
    totalFree += fl->count() * fl->size();
    if (i % (40*IndexSetStride) == 0) {
      FreeList::print_labels_on(gclog_or_tty, "size");
    }
    fl->print_on(gclog_or_tty);
    total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() );
    total.set_surplus( total.surplus() + fl->surplus() );
    total.set_desired( total.desired() + fl->desired() );
    total.set_prevSweep( total.prevSweep() + fl->prevSweep() );
    total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
    total.set_count( total.count() + fl->count() );
    total.set_coalBirths( total.coalBirths() + fl->coalBirths() );
    total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() );
    total.set_splitBirths(total.splitBirths() + fl->splitBirths());
    total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
  }
  gclog_or_tty->print("%4s\t"
             "%7d\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t"
             "%7d\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t" "\n",
             "totl",
             bfrSurp, surplus, desired, prevSweep, beforeSweep,
             count, coalBirths, coalDeaths, splitBirths, splitDeaths);
  gclog_or_tty->print_cr("Total free in indexed lists %d words", totalFree);
  total.print_on(gclog_or_tty, "TOTAL");
  gclog_or_tty->print_cr("Total free in indexed lists "
                         SIZE_FORMAT " words", totalFree);
  gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
             (double)(splitBirths+coalBirths-splitDeaths-coalDeaths)/
             (prevSweep != 0 ? (double)prevSweep : 1.0),
             (double)(desired - count)/(desired != 0 ? (double)desired : 1.0));
             (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
             (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  _dictionary->printDictCensus();
}
@@ -418,7 +418,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  bool adaptive_freelists() { return _adaptive_freelists; }
  bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

@@ -566,7 +566,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
  FreeChunk* allocateScratch(size_t size);

  // returns true if either the small or large linear allocation buffer is empty.
  bool linearAllocationWouldFail();
  bool linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size. This version is called in
  // most cases in CompactibleFreeListSpace methods.
@@ -585,6 +585,9 @@ class CompactibleFreeListSpace: public CompactibleSpace {
  void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
              bool coalesced);

  // Support for decisions regarding concurrent collection policy
  bool should_concurrent_collect() const;

  // Support for compaction
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
@@ -622,7 +625,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(int sweepCt) const;
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
@@ -635,12 +638,11 @@ class CompactibleFreeListSpace: public CompactibleSpace {
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(int sweepCt);
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);


  // Record (for each size):
  //
  //   split-births = #chunks added due to splits in (prev-sweep-end,
@@ -3121,12 +3121,7 @@ ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  size_t adj_word_sz = CompactibleFreeListSpace::adjustObjectSize(word_size);
  if (parallel) {
    return cmsSpace()->par_allocate(adj_word_sz);
  } else {
    return cmsSpace()->allocate(adj_word_sz);
  }
  return have_lock_and_allocate(word_size, tlab);
}

// YSR: All of this generation expansion/shrinking stuff is an exact copy of
@@ -5732,13 +5727,19 @@ void CMSCollector::sweep(bool asynch) {
  // in the perm_gen_verify_bit_map. In order to do that we traverse
  // all blocks in perm gen and mark all dead objects.
  if (verifying() && !cms_should_unload_classes()) {
    CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
                             bitMapLock());
    assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
           "Should have already been allocated");
    MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
                               markBitMap(), perm_gen_verify_bit_map());
    _permGen->cmsSpace()->blk_iterate(&mdo);
    if (asynch) {
      CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
                               bitMapLock());
      _permGen->cmsSpace()->blk_iterate(&mdo);
    } else {
      // In the case of synchronous sweep, we already have
      // the requisite locks/tokens.
      _permGen->cmsSpace()->blk_iterate(&mdo);
    }
  }

  if (asynch) {
@@ -302,3 +302,29 @@ void FreeList::assert_proper_lock_protection_work() const {
#endif
}
#endif

// Print the "label line" for free list stats.
void FreeList::print_labels_on(outputStream* st, const char* c) {
  st->print("%16s\t", c);
  st->print("%14s\t" "%14s\t" "%14s\t" "%14s\t" "%14s\t"
            "%14s\t" "%14s\t" "%14s\t" "%14s\t" "%14s\t" "\n",
            "bfrsurp", "surplus", "desired", "prvSwep", "bfrSwep",
            "count", "cBirths", "cDeaths", "sBirths", "sDeaths");
}

// Print the AllocationStats for the given free list. If the second argument
// to the call is a non-null string, it is printed in the first column;
// otherwise, if the argument is null (the default), then the size of the
// (free list) block is printed in the first column.
void FreeList::print_on(outputStream* st, const char* c) const {
  if (c != NULL) {
    st->print("%16s", c);
  } else {
    st->print(SIZE_FORMAT_W(16), size());
  }
  st->print("\t"
            SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
            SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
            bfrSurp(), surplus(), desired(), prevSweep(), beforeSweep(),
            count(), coalBirths(), coalDeaths(), splitBirths(), splitDeaths());
}
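Taken together, print_labels_on and print_on emit one census table per sweep. A hypothetical fragment of that output (values invented for illustration, middle columns elided with "..."):

    size            bfrsurp        surplus        ...        sBirths        sDeaths
    16                    0              2        ...             12              3

The real format pads every statistic to 14 characters and the first column to 16, so rows and the label line stay aligned.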
@@ -38,6 +38,7 @@ class Mutex;

class FreeList VALUE_OBJ_CLASS_SPEC {
  friend class CompactibleFreeListSpace;
  friend class printTreeCensusClosure;
  FreeChunk* _head;  // List of free chunks
  FreeChunk* _tail;  // Tail of list of free chunks
  size_t     _size;  // Size in Heap words of each chunk
@@ -63,10 +64,11 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
 protected:
  void init_statistics();
  void set_count(ssize_t v) { _count = v;}
  void increment_count() { _count++; }
  void increment_count() { _count++; }
  void decrement_count() {
    _count--;
    assert(_count >= 0, "Count should not be negative"); }
    assert(_count >= 0, "Count should not be negative");
  }

 public:
  // Constructor
@@ -159,6 +161,10 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
  ssize_t desired() const {
    return _allocation_stats.desired();
  }
  void set_desired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_desired(v);
  }
  void compute_desired(float inter_sweep_current,
                       float inter_sweep_estimate) {
    assert_proper_lock_protection();
@@ -298,4 +304,8 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
  // Verify that the chunk is in the list.
  // found. Return NULL if "fc" is not found.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;

  // Printing support
  static void print_labels_on(outputStream* st, const char* c);
  void print_on(outputStream* st, const char* c = NULL) const;
};
@@ -19,15 +19,22 @@
// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
// CA 95054 USA or visit www.sun.com if you need additional information or
// have any questions.
//
//
//

// NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!

gcAdaptivePolicyCounters.hpp            adaptiveSizePolicy.hpp
gcAdaptivePolicyCounters.hpp            gcPolicyCounters.hpp
allocationStats.cpp                     allocationStats.hpp
allocationStats.cpp                     ostream.hpp

gcAdaptivePolicyCounters.cpp            resourceArea.hpp
allocationStats.hpp                     allocation.hpp
allocationStats.hpp                     gcUtil.hpp
allocationStats.hpp                     globalDefinitions.hpp

gcAdaptivePolicyCounters.hpp            adaptiveSizePolicy.hpp
gcAdaptivePolicyCounters.hpp            gcPolicyCounters.hpp

gcAdaptivePolicyCounters.cpp            resourceArea.hpp
gcAdaptivePolicyCounters.cpp            gcAdaptivePolicyCounters.hpp

gSpaceCounters.cpp                      generation.hpp
@@ -44,7 +51,7 @@ immutableSpace.cpp universe.hpp

isGCActiveMark.hpp                      parallelScavengeHeap.hpp

markSweep.inline.hpp                    psParallelCompact.hpp
markSweep.inline.hpp                    psParallelCompact.hpp

mutableNUMASpace.cpp                    mutableNUMASpace.hpp
mutableNUMASpace.cpp                    sharedHeap.hpp
@@ -74,8 +74,8 @@ size_t ASParNewGeneration::available_to_live() const {
#ifdef SHRINKS_AT_END_OF_EDEN
  size_t delta_in_survivor = 0;
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t space_alignment = heap->intra_generation_alignment();
  const size_t gen_alignment = heap->generation_alignment();
  const size_t space_alignment = heap->intra_heap_alignment();
  const size_t gen_alignment = heap->object_heap_alignment();

  MutableSpace* space_shrinking = NULL;
  if (from_space()->end() > to_space()->end()) {
@@ -785,6 +785,9 @@ void ParNewGeneration::collect(bool full,
    swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
@@ -86,7 +86,7 @@ size_t ASPSYoungGen::available_for_contraction() {
  if (eden_space()->is_empty()) {
    // Respect the minimum size for eden and for the young gen as a whole.
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    const size_t eden_alignment = heap->intra_generation_alignment();
    const size_t eden_alignment = heap->intra_heap_alignment();
    const size_t gen_alignment = heap->young_gen_alignment();

    assert(eden_space()->capacity_in_bytes() >= eden_alignment,
@@ -124,7 +124,7 @@ size_t ASPSYoungGen::available_for_contraction() {
// to_space can be.
size_t ASPSYoungGen::available_to_live() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t alignment = heap->intra_generation_alignment();
  const size_t alignment = heap->intra_heap_alignment();

  // Include any space that is committed but is not in eden.
  size_t available = pointer_delta(eden_space()->bottom(),
@@ -275,7 +275,7 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
  assert(eden_start < from_start, "Cannot push into from_space");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t alignment = heap->intra_generation_alignment();
  const size_t alignment = heap->intra_heap_alignment();

  // Check whether from space is below to space
  if (from_start < to_start) {
@@ -39,10 +39,10 @@ class GenerationSizer : public TwoGenerationCollectorPolicy {

    // If the user hasn't explicitly set the number of worker
    // threads, set the count.
    if (ParallelGCThreads == 0) {
      assert(UseParallelGC, "Setting ParallelGCThreads without UseParallelGC");
      ParallelGCThreads = os::active_processor_count();
    }
    assert(UseSerialGC ||
           !FLAG_IS_DEFAULT(ParallelGCThreads) ||
           (ParallelGCThreads > 0),
           "ParallelGCThreads should be set before flag initialization");

    // The survivor ratios are calculated "raw", unlike the
    // default gc, which adds 2 to the ratio value. We need to
@@ -173,7 +173,7 @@ jint ParallelScavengeHeap::initialize() {
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_generation_alignment(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
@@ -58,9 +58,9 @@ class ParallelScavengeHeap : public CollectedHeap {

 public:
  ParallelScavengeHeap() : CollectedHeap() {
    set_alignment(_perm_gen_alignment, intra_generation_alignment());
    set_alignment(_young_gen_alignment, intra_generation_alignment());
    set_alignment(_old_gen_alignment, intra_generation_alignment());
    set_alignment(_perm_gen_alignment, intra_heap_alignment());
    set_alignment(_young_gen_alignment, intra_heap_alignment());
    set_alignment(_old_gen_alignment, intra_heap_alignment());
  }

  // For use by VM operations
@@ -92,14 +92,14 @@ class ParallelScavengeHeap : public CollectedHeap {

  void post_initialize();
  void update_counters();

  // The alignment used for the various generations.
  size_t perm_gen_alignment() const { return _perm_gen_alignment; }
  size_t young_gen_alignment() const { return _young_gen_alignment; }
  size_t old_gen_alignment() const { return _old_gen_alignment; }

  // The alignment used for eden and survivors within the young gen.
  size_t intra_generation_alignment() const { return 64 * K; }
  // The alignment used for eden and survivors within the young gen
  // and for the boundary between young gen and old gen.
  size_t intra_heap_alignment() const { return 64 * K; }

  size_t capacity() const;
  size_t used() const;
@@ -217,6 +217,6 @@ class ParallelScavengeHeap : public CollectedHeap {
inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
{
  assert(is_power_of_2((intptr_t)val), "must be a power of 2");
  var = round_to(val, intra_generation_alignment());
  var = round_to(val, intra_heap_alignment());
  return var;
}
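A quick numeric check of set_alignment (values illustrative): with intra_heap_alignment() == 64K, passing val = 8K satisfies the power-of-two assert, and round_to (which rounds up to a multiple of its second argument) yields round_to(8K, 64K) = 64K, so every generation alignment ends up a non-zero multiple of the intra-heap alignment.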
@@ -88,7 +88,7 @@ void PSYoungGen::initialize_work() {

  // Compute maximum space sizes for performance counters
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  size_t alignment = heap->intra_generation_alignment();
  size_t alignment = heap->intra_heap_alignment();
  size_t size = _virtual_space->reserved_size();

  size_t max_survivor_size;
@@ -141,7 +141,7 @@ void PSYoungGen::compute_initial_space_boundaries() {
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Compute sizes
  size_t alignment = heap->intra_generation_alignment();
  size_t alignment = heap->intra_heap_alignment();
  size_t size = _virtual_space->committed_size();

  size_t survivor_size = size / InitialSurvivorRatio;
@@ -192,7 +192,7 @@ void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
#ifndef PRODUCT
void PSYoungGen::space_invariants() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t alignment = heap->intra_generation_alignment();
  const size_t alignment = heap->intra_heap_alignment();

  // Currently, our eden size cannot shrink to zero
  guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small");
@@ -392,7 +392,7 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
  char* to_end = (char*)to_space()->end();

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t alignment = heap->intra_generation_alignment();
  const size_t alignment = heap->intra_heap_alignment();
  const bool maintain_minimum =
    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

@@ -708,7 +708,7 @@ size_t PSYoungGen::available_to_min_gen() {
size_t PSYoungGen::available_to_live() {
  size_t delta_in_survivor = 0;
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t space_alignment = heap->intra_generation_alignment();
  const size_t space_alignment = heap->intra_heap_alignment();
  const size_t gen_alignment = heap->young_gen_alignment();

  MutableSpace* space_shrinking = NULL;
@@ -98,6 +98,8 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
  }

  ssize_t desired() const { return _desired; }
  void set_desired(ssize_t v) { _desired = v; }

  ssize_t coalDesired() const { return _coalDesired; }
  void set_coalDesired(ssize_t v) { _coalDesired = v; }
@@ -19,7 +19,7 @@
// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
// CA 95054 USA or visit www.sun.com if you need additional information or
// have any questions.
//
//
//

// NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
@@ -46,13 +46,13 @@
// as dependencies. Header files named H.inline.hpp generally contain
// bodies for inline functions declared in H.hpp.
//
// NOTE: Files that use the token "generate_platform_dependent_include"
// NOTE: Files that use the token "generate_platform_dependent_include"
// are expected to contain macro references like <os>, <arch_model>, ... and
// makedeps has a dependency on these platform files looking like:
// foo_<macro>.trailing_string
// foo_<macro>.trailing_string
// (where "trailing_string" can be any legal filename string but typically
// is "hpp" or "inline.hpp").
//
//
// The dependency in makedeps (and enforced) is that an underscore
// will precede the macro invocation. Note that this restriction
// is only enforced on filenames that have the dependency token
@@ -148,12 +148,6 @@ allocation.hpp globals.hpp

allocation.inline.hpp                   os.hpp

allocationStats.cpp                     allocationStats.hpp

allocationStats.hpp                     allocation.hpp
allocationStats.hpp                     gcUtil.hpp
allocationStats.hpp                     globalDefinitions.hpp

aprofiler.cpp                           aprofiler.hpp
aprofiler.cpp                           collectedHeap.inline.hpp
aprofiler.cpp                           oop.inline.hpp
@@ -1935,7 +1929,7 @@ icache_<arch>.hpp generate_platform_dependent_include

init.cpp                                bytecodes.hpp
init.cpp                                collectedHeap.hpp
init.cpp                                handles.inline.hpp
init.cpp                                handles.inline.hpp
init.cpp                                icBuffer.hpp
init.cpp                                icache.hpp
init.cpp                                init.hpp
@@ -196,8 +196,8 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  int ind = find_covering_region_by_base(new_region.start());
  MemRegion old_region = _covered[ind];
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
@@ -205,21 +205,21 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* max_prev_end = largest_prev_committed_end(ind);
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*)align_size_up((uintptr_t)new_end, _page_size);
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* const new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // The guard page is always committed and should not be committed over.
    HeapWord* new_end_for_commit = MIN2(new_end_aligned, _guard_region.start());
    HeapWord* const new_end_for_commit = MIN2(new_end_aligned, _guard_region.start());
    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion new_committed =
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
@@ -233,7 +233,7 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion uncommit_region =
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
@@ -257,7 +257,7 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  }
  assert(index_for(new_region.last()) < (int) _guard_index,
         "The guard card will be overwritten");
  jbyte* end = byte_after(new_region.last());
  jbyte* const end = byte_after(new_region.last());
  // do nothing if we resized downward.
  if (entry < end) {
    memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
@@ -556,10 +556,16 @@ void CardTableRS::verify() {
}


void CardTableRS::verify_empty(MemRegion mr) {
void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
  if (!mr.is_empty()) {
    jbyte* cur_entry = byte_for(mr.start());
    jbyte* limit = byte_after(mr.last());
    // The region mr may not start on a card boundary so
    // the first card may reflect a write to the space
    // just prior to mr.
    if (!is_aligned(mr.start())) {
      cur_entry++;
    }
    for (;cur_entry < limit; cur_entry++) {
      guarantee(*cur_entry == CardTableModRefBS::clean_card,
                "Unexpected dirty card found");
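To see why the first card is skipped, consider a hypothetical layout (cards are 512 bytes in this code base): if mr starts at address 0x1234, byte_for(mr.start()) points at the card covering 0x1200..0x13ff. A store to 0x1210 -- below mr but inside that same card -- would legitimately dirty it, so whenever mr.start() is not card-aligned the check begins at the next card instead.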
@@ -126,7 +126,7 @@ public:
  }

  void verify();
  void verify_empty(MemRegion mr);
  void verify_aligned_region_empty(MemRegion mr);

  void clear(MemRegion mr) { _ct_bs.clear(mr); }
  void clear_into_younger(Generation* gen, bool clear_perm);
@@ -57,45 +57,51 @@ void CollectorPolicy::initialize_size_info() {
  // User inputs from -mx and ms are aligned
  _initial_heap_byte_size = align_size_up(Arguments::initial_heap_size(),
                                          min_alignment());
  _min_heap_byte_size = align_size_up(Arguments::min_heap_size(),
                                      min_alignment());
  _max_heap_byte_size = align_size_up(MaxHeapSize, max_alignment());
  set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(),
                                       min_alignment()));
  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));

  // Check validity of heap parameters from launcher
  if (_initial_heap_byte_size == 0) {
    _initial_heap_byte_size = NewSize + OldSize;
  if (initial_heap_byte_size() == 0) {
    set_initial_heap_byte_size(NewSize + OldSize);
  } else {
    Universe::check_alignment(_initial_heap_byte_size, min_alignment(),
    Universe::check_alignment(initial_heap_byte_size(), min_alignment(),
                              "initial heap");
  }
  if (_min_heap_byte_size == 0) {
    _min_heap_byte_size = NewSize + OldSize;
  if (min_heap_byte_size() == 0) {
    set_min_heap_byte_size(NewSize + OldSize);
  } else {
    Universe::check_alignment(_min_heap_byte_size, min_alignment(),
    Universe::check_alignment(min_heap_byte_size(), min_alignment(),
                              "initial heap");
  }

  // Check heap parameter properties
  if (_initial_heap_byte_size < M) {
  if (initial_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  // Check heap parameter properties
  if (_min_heap_byte_size < M) {
  if (min_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }
  if (_initial_heap_byte_size <= NewSize) {
  if (initial_heap_byte_size() <= NewSize) {
    // make sure there is at least some room in old space
    vm_exit_during_initialization("Too small initial heap for new size specified");
  }
  if (_max_heap_byte_size < _min_heap_byte_size) {
  if (max_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
  }
  if (_initial_heap_byte_size < _min_heap_byte_size) {
  if (initial_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (_max_heap_byte_size < _initial_heap_byte_size) {
  if (max_heap_byte_size() < initial_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
      SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
  }
}

void CollectorPolicy::initialize_perm_generation(PermGen::Name pgnm) {
@@ -128,10 +134,26 @@ GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,

// GenCollectorPolicy methods.

size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  size_t x = base_size / (NewRatio+1);
  size_t new_gen_size = x > min_alignment() ?
                        align_size_down(x, min_alignment()) :
                        min_alignment();
  return new_gen_size;
}
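A worked example of scale_by_NewRatio_aligned with illustrative values: for base_size = 64M, NewRatio = 2 and a 64K min_alignment(), x = 64M / 3 = 22369621 bytes, which exceeds 64K, so the result is align_size_down(22369621, 64K) = 22347776 bytes (341 * 64K). The min_alignment() fallback only applies when the quotient would drop below one alignment unit.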
size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  size_t alignment = min_alignment();
  size_t max_minus = maximum_size - alignment;
  return desired_size < max_minus ? desired_size : max_minus;
}


void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
  const double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
@@ -210,74 +232,260 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
}

// Values set on the command line win over any ergonomically
// set command line parameters.
// Ergonomic choice of parameters is done before this
// method is called. Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generation.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // Minimum sizes of the generations may be different than
  // the initial sizes.
  if (!FLAG_IS_DEFAULT(NewSize)) {
    _min_gen0_size = NewSize;
  } else {
    _min_gen0_size = align_size_down(_min_heap_byte_size / (NewRatio+1),
  // min_alignment() is used for alignment within a generation.
  // There is additional alignment done down stream for some
  // collectors that sometimes causes unwanted rounding up of
  // generation sizes.

  // Determine maximum size of gen0

  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize)) {
    if (MaxNewSize < min_alignment()) {
      max_new_size = min_alignment();
    } else if (MaxNewSize >= max_heap_byte_size()) {
      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
                                     min_alignment());
      // We bound the minimum size by NewSize below (since it historically
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
              "greater than the entire heap (" SIZE_FORMAT "k). A "
              "new generation size of " SIZE_FORMAT "k will be used.",
              MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, min_alignment());
    }

    // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
    // specially at this point to just use an ergonomically set
    // MaxNewSize to set max_new_size. For cases with small
    // heaps such a policy often did not work because the MaxNewSize
    // was larger than the entire heap. The interpretation given
    // to ergonomically set flags is that the flags are set
    // by different collectors for their own special needs but
    // are not allowed to badly shape the heap. This allows the
    // different collectors to decide what's best for themselves
    // without having to factor in the overall heap shape. It
    // can be the case in the future that the collectors would
    // only make "wise" ergonomics choices and this policy could
    // just accept those choices. The choices currently made are
    // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // This is not always best. The NewSize calculated by CMS (which has
    // a fixed minimum of 16m) can sometimes be "too" large. Consider
    // the case where -Xmx32m. The CMS calculated NewSize would be about
    // half the entire heap which seems too large. But the counter
    // example is seen when the client defaults for NewRatio are used.
    // An initial young generation size of 640k was observed
    // with -Xmx128m -XX:MaxNewSize=32m when NewSize was not used
    // as a lower bound as with
    // _min_gen0_size = MIN2(_min_gen0_size, MaxNewSize);
    // and 640k seemed too small a young generation.
    _min_gen0_size = MIN2(MAX2(_min_gen0_size, NewSize), MaxNewSize);
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum sizes.

  if (max_heap_byte_size() == min_heap_byte_size()) {
    // The maximum and minimum heap sizes are the same so
    // the generations minimum and initial must be the
    // same as its maximum.
    set_min_gen0_size(max_new_size);
    set_initial_gen0_size(max_new_size);
    set_max_gen0_size(max_new_size);
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it. If it is used, also use it
      // to set the initial size. Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values. If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
                            NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
             NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    set_initial_gen0_size(desired_new_size);
    set_max_gen0_size(max_new_size);

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    set_min_gen0_size(
      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
    set_initial_gen0_size(
      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
    set_max_gen0_size(
      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
    set_initial_gen0_size(
      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
  }

  // Parameters are valid, compute area sizes.
  size_t max_new_size = align_size_down(_max_heap_byte_size / (NewRatio+1),
                                        min_alignment());
  max_new_size = MIN2(MAX2(max_new_size, _min_gen0_size), MaxNewSize);

  // desired_new_size is used to set the initial size. The
  // initial size must be greater than the minimum size.
  size_t desired_new_size =
    align_size_down(_initial_heap_byte_size / (NewRatio+1),
                    min_alignment());

  size_t new_size = MIN2(MAX2(desired_new_size, _min_gen0_size), max_new_size);

  _initial_gen0_size = new_size;
  _max_gen0_size = max_new_size;
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 "
      SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
      min_gen0_size(), initial_gen0_size(), max_gen0_size());
  }
}

// Call this method during the sizing of the gen1 to make
// adjustments to gen0 because of gen1 sizing policy. gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied. Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments. The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     size_t heap_size,
                                                     size_t min_gen0_size) {
  bool result = false;
  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if (((*gen0_size_ptr + OldSize) > heap_size) &&
        (heap_size - min_gen0_size) >= min_alignment()) {
      // Adjust gen0 down to accommodate OldSize
      *gen0_size_ptr = heap_size - min_gen0_size;
      *gen0_size_ptr =
        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
             min_alignment());
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = heap_size - *gen0_size_ptr;
      *gen1_size_ptr =
        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
             min_alignment());
    }
  }
  return result;
}
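A concrete reading of adjust_gen0_sizes with hypothetical numbers: heap_size = 64M, *gen0_size_ptr = 40M, *gen1_size_ptr = 32M and OldSize = 32M (OldSize is what the call sites below pass as the last argument). The combined 72M exceeds the heap, and gen0 + OldSize = 72M does too, so gen0 is cut back to heap_size minus that last argument, 64M - 32M = 32M (then aligned down and floored at min_alignment()) and the method returns true. Had gen0 + OldSize still fit, only gen1 would have been trimmed to heap_size - gen0 and the method would return false.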
// Minimum sizes of the generations may be different than
// the initial sizes. An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms. Issue a warning
// but allow the values to pass.

void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // Minimum sizes of the generations may be different than
  // the initial sizes. An inconsistency is permitted here
  // in the total size that can be specified explicitly by
  // command line specification of OldSize and NewSize and
  // also a command line specification of -Xms. Issue a warning
  // but allow the values to pass.
  if (!FLAG_IS_DEFAULT(OldSize)) {
    _min_gen1_size = OldSize;
  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flag exists
  // for setting the gen1 maximum.
  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
  _max_gen1_size =
    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
         min_alignment());
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size). In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    assert(min_heap_byte_size() > _min_gen0_size,
           "gen0 has an unexpected minimum size");
    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
    set_min_gen1_size(
      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
           min_alignment()));
    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
    set_initial_gen1_size(
      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
           min_alignment()));

  } else {
    // It's been explicitly set on the command line. Use the
    // OldSize and then determine the consequences.
    set_min_gen1_size(OldSize);
    set_initial_gen1_size(OldSize);

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + max_alignment()) <
        _min_heap_byte_size) {
    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
        min_heap_byte_size()) {
      warning("Inconsistency between minimum heap size and minimum "
              "generation sizes: using min heap = " SIZE_FORMAT,
              _min_heap_byte_size);
              "generation sizes: using minimum heap = " SIZE_FORMAT,
              min_heap_byte_size());
    }
  } else {
    _min_gen1_size = _min_heap_byte_size - _min_gen0_size;
    if ((OldSize > _max_gen1_size)) {
      warning("Inconsistency between maximum heap size and maximum "
              "generation sizes: using maximum heap = " SIZE_FORMAT
              " -XX:OldSize flag is being ignored",
              max_heap_byte_size());
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          min_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 "
          SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          initial_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 "
          SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
  }
  // Enforce the maximum gen1 size.
  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));

  _initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size;
  _max_gen1_size = _max_heap_byte_size - _max_gen0_size;
  // Check that min gen1 <= initial gen1 <= max gen1
  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 "
      SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT,
      min_gen1_size(), initial_gen1_size(), max_gen1_size());
  }
}

HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
@ -82,8 +82,11 @@ class CollectorPolicy : public CHeapObj {
size_t max_alignment() { return _max_alignment; }

size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; }
size_t max_heap_byte_size() { return _max_heap_byte_size; }
void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; }
size_t min_heap_byte_size() { return _min_heap_byte_size; }
void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; }

enum Name {
CollectorPolicyKind,
@ -182,8 +185,24 @@ class GenCollectorPolicy : public CollectorPolicy {
// compute max heap alignment
size_t compute_max_alignment();

// Scale the base_size by NewRatio according to
// result = base_size / (NewRatio + 1)
// and align by min_alignment()
size_t scale_by_NewRatio_aligned(size_t base_size);

// Bound the value by the given maximum minus the
// min_alignment.
size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);

public:
// Accessors
size_t min_gen0_size() { return _min_gen0_size; }
void set_min_gen0_size(size_t v) { _min_gen0_size = v; }
size_t initial_gen0_size() { return _initial_gen0_size; }
void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; }
size_t max_gen0_size() { return _max_gen0_size; }
void set_max_gen0_size(size_t v) { _max_gen0_size = v; }

virtual int number_of_generations() = 0;

virtual GenerationSpec **generations() {
@ -236,6 +255,14 @@ class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
void initialize_generations() { ShouldNotReachHere(); }

public:
// Accessors
size_t min_gen1_size() { return _min_gen1_size; }
void set_min_gen1_size(size_t v) { _min_gen1_size = v; }
size_t initial_gen1_size() { return _initial_gen1_size; }
void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; }
size_t max_gen1_size() { return _max_gen1_size; }
void set_max_gen1_size(size_t v) { _max_gen1_size = v; }

// Inherited methods
TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }

@ -246,6 +273,10 @@ class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
virtual CollectorPolicy::Name kind() {
return CollectorPolicy::TwoGenerationCollectorPolicyKind;
}

// Returns true if gen0 sizes were adjusted
bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
size_t heap_size, size_t min_gen1_size);
};
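Per its comment above, scale_by_NewRatio_aligned computes base_size / (NewRatio + 1) and aligns the result by min_alignment(). A standalone sketch of that computation under assumed values (align_down is a stand-in for the VM's align_size_down helper):

#include <cstddef>
#include <cstdio>

// Stand-in for the VM's align_size_down(): round down to a power-of-two boundary.
static std::size_t align_down(std::size_t size, std::size_t alignment) {
  return size & ~(alignment - 1);
}

int main() {
  const std::size_t M = 1024 * 1024;
  std::size_t base_size     = 100 * M;    // hypothetical size to be split
  long        NewRatio      = 2;          // old:young ratio of 2:1
  std::size_t min_alignment = 64 * 1024;  // hypothetical minimum alignment

  // result = base_size / (NewRatio + 1), aligned by min_alignment:
  std::size_t result = align_down(base_size / (NewRatio + 1), min_alignment);
  std::printf("young gen share = %zu bytes\n", result);  // ~33.3M, aligned down
  return 0;
}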

class MarkSweepPolicy : public TwoGenerationCollectorPolicy {

@ -91,8 +91,15 @@ public:
virtual void verify() = 0;

// Verify that the remembered set has no entries for
// the heap interval denoted by mr.
virtual void verify_empty(MemRegion mr) = 0;
// the heap interval denoted by mr. If there are any
// alignment constraints on the remembered set, only the
// part of the region that is aligned is checked.
//
// alignment boundaries
// +--------+-------+--------+-------+
// [ region mr )
// [ part checked )
virtual void verify_aligned_region_empty(MemRegion mr) = 0;

// If appropriate, print some information about the remset on "tty".
virtual void print() {}

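As the diagram above shows, verify_aligned_region_empty checks only the part of mr that falls on alignment boundaries. A sketch of deriving that aligned sub-interval, with a hypothetical power-of-two boundary standing in for the card size:

#include <cstdint>
#include <cstdio>

int main() {
  const std::uintptr_t boundary = 512;       // hypothetical alignment (e.g. card size)
  std::uintptr_t start = 1000, end = 3000;   // hypothetical region [start, end)

  // Round the start up and the end down to alignment boundaries; only
  // [aligned_start, aligned_end) corresponds to the "part checked" above.
  std::uintptr_t aligned_start = (start + boundary - 1) & ~(boundary - 1);
  std::uintptr_t aligned_end   = end & ~(boundary - 1);
  if (aligned_start < aligned_end) {
    std::printf("check [%lu, %lu)\n", (unsigned long) aligned_start,
                (unsigned long) aligned_end);  // check [1024, 2560)
  }
  return 0;
}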
@ -65,7 +65,7 @@ void KlassInfoEntry::print_on(outputStream* st) const {
name = "<no name>";
}
// simplify the formatting (ILP32 vs LP64) - always cast the numbers to 64-bit
st->print_cr("%13" FORMAT64_MODIFIER "d %13" FORMAT64_MODIFIER "u %s",
st->print_cr(INT64_FORMAT_W(13) " " UINT64_FORMAT_W(13) " %s",
(jlong) _instance_count,
(julong) _instance_words * HeapWordSize,
name);
@ -80,7 +80,10 @@ KlassInfoEntry* KlassInfoBucket::lookup(const klassOop k) {
elt = elt->next();
}
elt = new KlassInfoEntry(k, list());
set_list(elt);
// We may be out of space to allocate the new entry.
if (elt != NULL) {
set_list(elt);
}
return elt;
}

@ -103,21 +106,25 @@ void KlassInfoBucket::empty() {
}

KlassInfoTable::KlassInfoTable(int size, HeapWord* ref) {
_size = size;
_size = 0;
_ref = ref;
_buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, _size);

for (int index = 0; index < _size; index++) {
_buckets[index].initialize();
_buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, size);
if (_buckets != NULL) {
_size = size;
for (int index = 0; index < _size; index++) {
_buckets[index].initialize();
}
}
}

KlassInfoTable::~KlassInfoTable() {
for (int index = 0; index < _size; index++) {
_buckets[index].empty();
if (_buckets != NULL) {
for (int index = 0; index < _size; index++) {
_buckets[index].empty();
}
FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets);
_size = 0;
}
FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets);
_size = 0;
}

uint KlassInfoTable::hash(klassOop p) {
@ -127,19 +134,32 @@ uint KlassInfoTable::hash(klassOop p) {

KlassInfoEntry* KlassInfoTable::lookup(const klassOop k) {
uint idx = hash(k) % _size;
assert(_buckets != NULL, "Allocation failure should have been caught");
KlassInfoEntry* e = _buckets[idx].lookup(k);
assert(k == e->klass(), "must be equal");
// Lookup may fail if this is a new klass for which we
// could not allocate space for a new entry.
assert(e == NULL || k == e->klass(), "must be equal");
return e;
}

void KlassInfoTable::record_instance(const oop obj) {
// Return false if the entry could not be recorded on account
// of running out of space required to create a new entry.
bool KlassInfoTable::record_instance(const oop obj) {
klassOop k = obj->klass();
KlassInfoEntry* elt = lookup(k);
elt->set_count(elt->count() + 1);
elt->set_words(elt->words() + obj->size());
// elt may be NULL if it's a new klass for which we
// could not allocate space for a new entry in the hashtable.
if (elt != NULL) {
elt->set_count(elt->count() + 1);
elt->set_words(elt->words() + obj->size());
return true;
} else {
return false;
}
}

void KlassInfoTable::iterate(KlassInfoClosure* cic) {
assert(_size == 0 || _buckets != NULL, "Allocation failure should have been caught");
for (int index = 0; index < _size; index++) {
_buckets[index].iterate(cic);
}
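record_instance now reports, rather than assumes, success: a NULL entry from lookup means the C-heap allocation for a new entry failed, and the caller keeps iterating and merely counts what was missed. The same degrade-gracefully pattern in miniature (the Recorder type is a hypothetical stand-in for KlassInfoTable):

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in: a recorder whose record() can fail (e.g. out of C-heap).
struct Recorder {
  int budget;                       // pretend allocation budget
  bool record(int /*obj*/) { return budget-- > 0; }
};

int main() {
  Recorder r = { 3 };
  std::size_t missed = 0;
  for (int obj = 0; obj < 10; obj++) {
    if (!r.record(obj)) missed++;   // keep iterating, just count the misses
  }
  if (missed != 0)
    std::printf("WARNING: undercounted %zu instances\n", missed);  // prints 7
  return 0;
}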
@ -176,7 +196,7 @@ void KlassInfoHisto::print_elements(outputStream* st) const {
total += elements()->at(i)->count();
totalw += elements()->at(i)->words();
}
st->print_cr("Total %13" FORMAT64_MODIFIER "d %13" FORMAT64_MODIFIER "u",
st->print_cr("Total " INT64_FORMAT_W(13) " " UINT64_FORMAT_W(13),
total, totalw * HeapWordSize);
}

@ -199,12 +219,18 @@ class HistoClosure : public KlassInfoClosure {
class RecordInstanceClosure : public ObjectClosure {
private:
KlassInfoTable* _cit;
size_t _missed_count;
public:
RecordInstanceClosure(KlassInfoTable* cit) : _cit(cit) {}
RecordInstanceClosure(KlassInfoTable* cit) :
_cit(cit), _missed_count(0) {}

void do_object(oop obj) {
_cit->record_instance(obj);
if (!_cit->record_instance(obj)) {
_missed_count++;
}
}

size_t missed_count() { return _missed_count; }
};

void HeapInspection::heap_inspection(outputStream* st) {
@ -230,21 +256,32 @@ void HeapInspection::heap_inspection(outputStream* st) {
ShouldNotReachHere(); // Unexpected heap kind for this op
}
// Collect klass instance info

// Iterate over objects in the heap
KlassInfoTable cit(KlassInfoTable::cit_size, ref);
RecordInstanceClosure ric(&cit);
Universe::heap()->object_iterate(&ric);
if (!cit.allocation_failed()) {
// Iterate over objects in the heap
RecordInstanceClosure ric(&cit);
Universe::heap()->object_iterate(&ric);

// Sort and print klass instance info
KlassInfoHisto histo("\n"
" num #instances #bytes class name\n"
"----------------------------------------------",
KlassInfoHisto::histo_initial_size);
HistoClosure hc(&histo);
cit.iterate(&hc);
histo.sort();
histo.print_on(st);
// Report if certain classes are not counted because of
// running out of C-heap for the histogram.
size_t missed_count = ric.missed_count();
if (missed_count != 0) {
st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
" total instances in data below",
missed_count);
}
// Sort and print klass instance info
KlassInfoHisto histo("\n"
" num #instances #bytes class name\n"
"----------------------------------------------",
KlassInfoHisto::histo_initial_size);
HistoClosure hc(&histo);
cit.iterate(&hc);
histo.sort();
histo.print_on(st);
} else {
st->print_cr("WARNING: Ran out of C-heap; histogram not generated");
}
st->flush();

if (Universe::heap()->kind() == CollectedHeap::GenCollectedHeap) {

@ -98,8 +98,9 @@ class KlassInfoTable: public StackObj {
};
KlassInfoTable(int size, HeapWord* ref);
~KlassInfoTable();
void record_instance(const oop obj);
bool record_instance(const oop obj);
void iterate(KlassInfoClosure* cic);
bool allocation_failed() { return _buckets == NULL; }
};

class KlassInfoHisto : public StackObj {

@ -85,7 +85,7 @@ ReferenceProcessor* ReferenceProcessor::create_ref_processor(
ReferenceProcessor* rp =
new ReferenceProcessor(span, atomic_discovery,
mt_discovery, mt_degree,
mt_processing);
mt_processing && (parallel_gc_threads > 0));
if (rp == NULL) {
vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
}

@ -409,10 +409,11 @@ void TenuredGeneration::retire_alloc_buffers_before_full_gc() {
void TenuredGeneration::verify_alloc_buffers_clean() {
if (UseParNewGC) {
for (uint i = 0; i < ParallelGCThreads; i++) {
_rs->verify_empty(_alloc_buffers[i]->range());
_rs->verify_aligned_region_empty(_alloc_buffers[i]->range());
}
}
}

#else // SERIALGC
void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
void TenuredGeneration::verify_alloc_buffers_clean() {}

@ -924,10 +924,18 @@ static void no_shared_spaces() {
void Arguments::set_parnew_gc_flags() {
assert(!UseSerialGC && !UseParallelGC, "control point invariant");

// Turn off AdaptiveSizePolicy by default for parnew until it is
// complete.
if (UseParNewGC &&
FLAG_IS_DEFAULT(UseAdaptiveSizePolicy)) {
FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
}

if (FLAG_IS_DEFAULT(UseParNewGC) && ParallelGCThreads > 1) {
FLAG_SET_DEFAULT(UseParNewGC, true);
} else if (UseParNewGC && ParallelGCThreads == 0) {
FLAG_SET_DEFAULT(ParallelGCThreads, nof_parallel_gc_threads());
FLAG_SET_DEFAULT(ParallelGCThreads,
Abstract_VM_Version::parallel_worker_threads());
if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
FLAG_SET_DEFAULT(UseParNewGC, false);
}
@ -956,25 +964,6 @@ void Arguments::set_parnew_gc_flags() {
}
}

// CAUTION: this code is currently shared by UseParallelGC, UseParNewGC and
// UseConcMarkSweepGC. Further tuning of individual collectors might
// dictate refinement on a per-collector basis.
int Arguments::nof_parallel_gc_threads() {
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
// For very large machines, there are diminishing returns
// for large numbers of worker threads. Instead of
// hogging the whole system, use 5/8ths of a worker for every
// processor after the first 8. For example, on a 72 cpu
// machine use 8 + (72 - 8) * (5/8) == 48 worker threads.
// This is just a start and needs further tuning and study in
// Tiger.
int ncpus = os::active_processor_count();
return (ncpus <= 8) ? ncpus : 3 + ((ncpus * 5) / 8);
} else {
return ParallelGCThreads;
}
}

// Adjust some sizes to suit CMS and/or ParNew needs; these work well on
// sparc/solaris for certain applications, but would gain from
// further optimization and tuning efforts, and would almost
@ -984,26 +973,24 @@ void Arguments::set_cms_and_parnew_gc_flags() {
return;
}

assert(UseConcMarkSweepGC, "CMS is expected to be on here");

// If we are using CMS, we prefer to UseParNewGC,
// unless explicitly forbidden.
if (UseConcMarkSweepGC && !UseParNewGC && FLAG_IS_DEFAULT(UseParNewGC)) {
FLAG_SET_DEFAULT(UseParNewGC, true);
if (!UseParNewGC && FLAG_IS_DEFAULT(UseParNewGC)) {
FLAG_SET_ERGO(bool, UseParNewGC, true);
}

// Turn off AdaptiveSizePolicy by default for cms until it is
// complete. Also turn it off in general if the
// parnew collector has been selected.
if ((UseConcMarkSweepGC || UseParNewGC) &&
FLAG_IS_DEFAULT(UseAdaptiveSizePolicy)) {
// complete.
if (FLAG_IS_DEFAULT(UseAdaptiveSizePolicy)) {
FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
}

// In either case, adjust ParallelGCThreads and/or UseParNewGC
// as needed.
set_parnew_gc_flags();

if (!UseConcMarkSweepGC) {
return;
if (UseParNewGC) {
set_parnew_gc_flags();
}

// Now make adjustments for CMS
@ -1013,7 +1000,7 @@ void Arguments::set_cms_and_parnew_gc_flags() {
intx tenuring_default;
if (CMSUseOldDefaults) { // old defaults: "old" as of 6.0
if FLAG_IS_DEFAULT(CMSYoungGenPerWorker) {
FLAG_SET_DEFAULT(CMSYoungGenPerWorker, 4*M);
FLAG_SET_ERGO(intx, CMSYoungGenPerWorker, 4*M);
}
young_gen_per_worker = 4*M;
new_ratio = (intx)15;
@ -1038,16 +1025,20 @@ void Arguments::set_cms_and_parnew_gc_flags() {
// for "short" pauses ~ 4M*ParallelGCThreads
if (FLAG_IS_DEFAULT(MaxNewSize)) { // MaxNewSize not set at command-line
if (!FLAG_IS_DEFAULT(NewSize)) { // NewSize explicitly set at command-line
FLAG_SET_DEFAULT(MaxNewSize, MAX2(NewSize, preferred_max_new_size));
FLAG_SET_ERGO(uintx, MaxNewSize, MAX2(NewSize, preferred_max_new_size));
} else {
FLAG_SET_DEFAULT(MaxNewSize, preferred_max_new_size);
FLAG_SET_ERGO(uintx, MaxNewSize, preferred_max_new_size);
}
if (PrintGCDetails && Verbose) {
// Too early to use gclog_or_tty
tty->print_cr("Ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
}
}
// Unless explicitly requested otherwise, prefer a large
// Old to Young gen size so as to shift the collection load
// to the old generation concurrent collector
if (FLAG_IS_DEFAULT(NewRatio)) {
FLAG_SET_DEFAULT(NewRatio, MAX2(NewRatio, new_ratio));
FLAG_SET_ERGO(intx, NewRatio, MAX2(NewRatio, new_ratio));

size_t min_new = align_size_up(ScaleForWordSize(min_new_default), os::vm_page_size());
size_t prev_initial_size = initial_heap_size();
@ -1065,19 +1056,34 @@ void Arguments::set_cms_and_parnew_gc_flags() {
size_t max_heap = align_size_down(MaxHeapSize,
CardTableRS::ct_max_alignment_constraint());

if (PrintGCDetails && Verbose) {
// Too early to use gclog_or_tty
tty->print_cr("CMS set min_heap_size: " SIZE_FORMAT
" initial_heap_size: " SIZE_FORMAT
" max_heap: " SIZE_FORMAT,
min_heap_size(), initial_heap_size(), max_heap);
}
if (max_heap > min_new) {
// Unless explicitly requested otherwise, make young gen
// at least min_new, and at most preferred_max_new_size.
if (FLAG_IS_DEFAULT(NewSize)) {
FLAG_SET_DEFAULT(NewSize, MAX2(NewSize, min_new));
FLAG_SET_DEFAULT(NewSize, MIN2(preferred_max_new_size, NewSize));
FLAG_SET_ERGO(uintx, NewSize, MAX2(NewSize, min_new));
FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, NewSize));
if (PrintGCDetails && Verbose) {
// Too early to use gclog_or_tty
tty->print_cr("Ergo set NewSize: " SIZE_FORMAT, NewSize);
}
}
// Unless explicitly requested otherwise, size old gen
// so that it's at least 3X of NewSize to begin with;
// later NewRatio will decide how it grows; see above.
if (FLAG_IS_DEFAULT(OldSize)) {
if (max_heap > NewSize) {
FLAG_SET_DEFAULT(OldSize, MIN2(3*NewSize, max_heap - NewSize));
FLAG_SET_ERGO(uintx, OldSize, MIN2(3*NewSize, max_heap - NewSize));
if (PrintGCDetails && Verbose) {
// Too early to use gclog_or_tty
tty->print_cr("Ergo set OldSize: " SIZE_FORMAT, OldSize);
}
}
}
}

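The ergonomic OldSize above is three times NewSize, but never more than what the maximum heap leaves after the young gen. Worked numbers under assumed sizes:

#include <algorithm>
#include <cstdio>

int main() {
  const unsigned long M = 1024 * 1024;
  unsigned long NewSize  = 64 * M;    // hypothetical young gen size
  unsigned long max_heap = 512 * M;   // hypothetical maximum heap

  if (max_heap > NewSize) {
    // OldSize = min(3 * NewSize, max_heap - NewSize) = min(192M, 448M) = 192M
    unsigned long OldSize = std::min(3 * NewSize, max_heap - NewSize);
    std::printf("OldSize = %luM\n", OldSize / M);  // 192
  }
  return 0;
}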
@ -1086,14 +1092,14 @@ void Arguments::set_cms_and_parnew_gc_flags() {
// promote all objects surviving "tenuring_default" scavenges.
if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
FLAG_IS_DEFAULT(SurvivorRatio)) {
FLAG_SET_DEFAULT(MaxTenuringThreshold, tenuring_default);
FLAG_SET_ERGO(intx, MaxTenuringThreshold, tenuring_default);
}
// If we decided above (or user explicitly requested)
// `promote all' (via MaxTenuringThreshold := 0),
// prefer minuscule survivor spaces so as not to waste
// space for (non-existent) survivors
if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
FLAG_SET_DEFAULT(SurvivorRatio, MAX2((intx)1024, SurvivorRatio));
FLAG_SET_ERGO(intx, SurvivorRatio, MAX2((intx)1024, SurvivorRatio));
}
// If OldPLABSize is set and CMSParPromoteBlocksToClaim is not,
// set CMSParPromoteBlocksToClaim equal to OldPLABSize.
@ -1102,7 +1108,11 @@ void Arguments::set_cms_and_parnew_gc_flags() {
// See CR 6362902.
if (!FLAG_IS_DEFAULT(OldPLABSize)) {
if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) {
FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, OldPLABSize);
// OldPLABSize is not the default value but CMSParPromoteBlocksToClaim
// is. In this situation let CMSParPromoteBlocksToClaim follow
// the value (either from the command line or ergonomics) of
// OldPLABSize. Following OldPLABSize is an ergonomics decision.
FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, OldPLABSize);
}
else {
// OldPLABSize and CMSParPromoteBlocksToClaim are both set.
@ -1147,17 +1157,11 @@ void Arguments::set_ergonomics_flags() {
FLAG_IS_DEFAULT(UseParallelGC)) {
if (should_auto_select_low_pause_collector()) {
FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true);
set_cms_and_parnew_gc_flags();
} else {
FLAG_SET_ERGO(bool, UseParallelGC, true);
}
no_shared_spaces();
}

// This is here because the parallel collector could
// have been selected so this initialization should
// still be done.
set_parallel_gc_flags();
}
}

@ -1170,6 +1174,9 @@ void Arguments::set_parallel_gc_flags() {
// If no heap maximum was requested explicitly, use some reasonable fraction
// of the physical memory, up to a maximum of 1GB.
if (UseParallelGC) {
FLAG_SET_ERGO(uintx, ParallelGCThreads,
Abstract_VM_Version::parallel_worker_threads());

if (FLAG_IS_DEFAULT(MaxHeapSize)) {
const uint64_t reasonable_fraction =
os::physical_memory() / DefaultMaxRAMFraction;
@ -1227,12 +1234,13 @@ void Arguments::set_parallel_gc_flags() {

if (UseParallelOldGC) {
// Par compact uses lower default values since they are treated as
// minimums.
// minimums. These are different defaults because of the different
// interpretation and are not ergonomically set.
if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) {
MarkSweepDeadRatio = 1;
FLAG_SET_DEFAULT(MarkSweepDeadRatio, 1);
}
if (FLAG_IS_DEFAULT(PermMarkSweepDeadRatio)) {
PermMarkSweepDeadRatio = 5;
FLAG_SET_DEFAULT(PermMarkSweepDeadRatio, 5);
}
}
}
@ -1312,6 +1320,31 @@ static bool verify_serial_gc_flags() {
UseParallelOldGC));
}

// Check consistency of GC selection
bool Arguments::check_gc_consistency() {
bool status = true;
// Ensure that the user has not selected conflicting sets
// of collectors. [Note: this check is merely a user convenience;
// collectors over-ride each other so that only a non-conflicting
// set is selected; however what the user gets is not what they
// may have expected from the combination they asked for. It's
// better to reduce user confusion by not allowing them to
// select conflicting combinations.
uint i = 0;
if (UseSerialGC) i++;
if (UseConcMarkSweepGC || UseParNewGC) i++;
if (UseParallelGC || UseParallelOldGC) i++;
if (i > 1) {
jio_fprintf(defaultStream::error_stream(),
"Conflicting collector combinations in option list; "
"please refer to the release notes for the combinations "
"allowed\n");
status = false;
}

return status;
}

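check_gc_consistency counts how many of the three mutually exclusive collector families have been enabled; any total above one is rejected. The same check in isolation, with hypothetical flag settings:

#include <cstdio>

int main() {
  // Hypothetical settings; in the VM these are the -XX:+Use... globals.
  bool UseSerialGC = true, UseConcMarkSweepGC = false, UseParNewGC = false,
       UseParallelGC = true, UseParallelOldGC = false;

  unsigned i = 0;
  if (UseSerialGC) i++;
  if (UseConcMarkSweepGC || UseParNewGC) i++;
  if (UseParallelGC || UseParallelOldGC) i++;
  if (i > 1)  // two families selected here: serial and parallel
    std::printf("Conflicting collector combinations in option list\n");
  return 0;
}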
// Check the consistency of vm_init_args
bool Arguments::check_vm_args_consistency() {
// Method for adding checks for flag consistency.
@ -1354,14 +1387,14 @@ bool Arguments::check_vm_args_consistency() {
status = false;
}

status &= verify_percentage(MaxLiveObjectEvacuationRatio,
status = status && verify_percentage(MaxLiveObjectEvacuationRatio,
"MaxLiveObjectEvacuationRatio");
status &= verify_percentage(AdaptiveSizePolicyWeight,
status = status && verify_percentage(AdaptiveSizePolicyWeight,
"AdaptiveSizePolicyWeight");
status &= verify_percentage(AdaptivePermSizeWeight, "AdaptivePermSizeWeight");
status &= verify_percentage(ThresholdTolerance, "ThresholdTolerance");
status &= verify_percentage(MinHeapFreeRatio, "MinHeapFreeRatio");
status &= verify_percentage(MaxHeapFreeRatio, "MaxHeapFreeRatio");
status = status && verify_percentage(AdaptivePermSizeWeight, "AdaptivePermSizeWeight");
status = status && verify_percentage(ThresholdTolerance, "ThresholdTolerance");
status = status && verify_percentage(MinHeapFreeRatio, "MinHeapFreeRatio");
status = status && verify_percentage(MaxHeapFreeRatio, "MaxHeapFreeRatio");

if (MinHeapFreeRatio > MaxHeapFreeRatio) {
jio_fprintf(defaultStream::error_stream(),
@ -1377,14 +1410,14 @@ bool Arguments::check_vm_args_consistency() {
MarkSweepAlwaysCompactCount = 1; // Move objects every gc.
}

status &= verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
status &= verify_percentage(GCTimeLimit, "GCTimeLimit");
status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
status = status && verify_percentage(GCTimeLimit, "GCTimeLimit");
if (GCTimeLimit == 100) {
// Turn off gc-overhead-limit-exceeded checks
FLAG_SET_DEFAULT(UseGCOverheadLimit, false);
}

status &= verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");

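The change from status &= verify_percentage(...) to status = status && verify_percentage(...) is not purely cosmetic: the bitwise form always evaluates its right-hand side, while && short-circuits once status is already false. A minimal demonstration of the difference (verify is a hypothetical stand-in for verify_percentage):

#include <cstdio>

static bool verify(const char* name, bool ok) {
  std::printf("checking %s\n", name);
  return ok;
}

int main() {
  bool status = true;
  status &= verify("first", false);           // runs; status becomes false
  status &= verify("second", true);           // still runs: & does not short-circuit
  status = status && verify("third", true);   // skipped: && short-circuits on false
  std::printf("status = %d\n", status);       // 0
  return 0;
}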
// Check user specified sharing option conflict with Parallel GC
bool cannot_share = (UseConcMarkSweepGC || UseParallelGC ||
@ -1402,24 +1435,7 @@ bool Arguments::check_vm_args_consistency() {
}
}

// Ensure that the user has not selected conflicting sets
// of collectors. [Note: this check is merely a user convenience;
// collectors over-ride each other so that only a non-conflicting
// set is selected; however what the user gets is not what they
// may have expected from the combination they asked for. It's
// better to reduce user confusion by not allowing them to
// select conflicting combinations.
uint i = 0;
if (UseSerialGC) i++;
if (UseConcMarkSweepGC || UseParNewGC) i++;
if (UseParallelGC || UseParallelOldGC) i++;
if (i > 1) {
jio_fprintf(defaultStream::error_stream(),
"Conflicting collector combinations in option list; "
"please refer to the release notes for the combinations "
"allowed\n");
status = false;
}
status = status && check_gc_consistency();

if (_has_alloc_profile) {
if (UseParallelGC || UseParallelOldGC) {
@ -1451,15 +1467,15 @@ bool Arguments::check_vm_args_consistency() {
"allocation buffers\n(-XX:+UseTLAB).\n");
status = false;
} else {
status &= verify_percentage(CMSIncrementalDutyCycle,
status = status && verify_percentage(CMSIncrementalDutyCycle,
"CMSIncrementalDutyCycle");
status &= verify_percentage(CMSIncrementalDutyCycleMin,
status = status && verify_percentage(CMSIncrementalDutyCycleMin,
"CMSIncrementalDutyCycleMin");
status &= verify_percentage(CMSIncrementalSafetyFactor,
status = status && verify_percentage(CMSIncrementalSafetyFactor,
"CMSIncrementalSafetyFactor");
status &= verify_percentage(CMSIncrementalOffset,
status = status && verify_percentage(CMSIncrementalOffset,
"CMSIncrementalOffset");
status &= verify_percentage(CMSExpAvgFactor,
status = status && verify_percentage(CMSExpAvgFactor,
"CMSExpAvgFactor");
// If it was not set on the command line, set
// CMSInitiatingOccupancyFraction to 1 so icms can initiate cycles early.
@ -2064,7 +2080,8 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,

// Enable parallel GC and adaptive generation sizing
FLAG_SET_CMDLINE(bool, UseParallelGC, true);
FLAG_SET_DEFAULT(ParallelGCThreads, nof_parallel_gc_threads());
FLAG_SET_DEFAULT(ParallelGCThreads,
Abstract_VM_Version::parallel_worker_threads());

// Encourage steady state memory management
FLAG_SET_CMDLINE(uintx, ThresholdTolerance, 100);
@ -2451,15 +2468,25 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
no_shared_spaces();
#endif // KERNEL

// Set some flags for ParallelGC if needed.
set_parallel_gc_flags();

// Set some flags for CMS and/or ParNew collectors, as needed.
set_cms_and_parnew_gc_flags();

// Set flags based on ergonomics.
set_ergonomics_flags();

// Check the GC selections again.
if (!check_gc_consistency()) {
return JNI_EINVAL;
}

if (UseParallelGC || UseParallelOldGC) {
// Set some flags for ParallelGC if needed.
set_parallel_gc_flags();
} else if (UseConcMarkSweepGC) {
// Set some flags for CMS
set_cms_and_parnew_gc_flags();
} else if (UseParNewGC) {
// Set some flags for ParNew
set_parnew_gc_flags();
}

#ifdef SERIALGC
assert(verify_serial_gc_flags(), "SerialGC unset");
#endif // SERIALGC
@ -2479,6 +2506,12 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
CommandLineFlags::printSetFlags();
}

#ifdef ASSERT
if (PrintFlagsFinal) {
CommandLineFlags::printFlags();
}
#endif

return JNI_OK;
}

@ -291,8 +291,6 @@ class Arguments : AllStatic {
static bool _CIDynamicCompilePriority;
static intx _Tier2CompileThreshold;

// GC processing
static int nof_parallel_gc_threads();
// CMS/ParNew garbage collectors
static void set_parnew_gc_flags();
static void set_cms_and_parnew_gc_flags();
@ -385,6 +383,8 @@ class Arguments : AllStatic {
public:
// Parses the arguments
static jint parse(const JavaVMInitArgs* args);
// Check for consistency in the selection of the garbage collector.
static bool check_gc_consistency();
// Check consistency or otherwise of VM argument settings
static bool check_vm_args_consistency();
// Used by os_solaris

@ -205,6 +205,18 @@ bool CommandLineFlagsEx::is_default(CommandLineFlag flag) {
return (f->origin == DEFAULT);
}

bool CommandLineFlagsEx::is_ergo(CommandLineFlag flag) {
assert((size_t)flag < Flag::numFlags, "bad command line flag index");
Flag* f = &Flag::flags[flag];
return (f->origin == ERGONOMIC);
}

bool CommandLineFlagsEx::is_cmdline(CommandLineFlag flag) {
assert((size_t)flag < Flag::numFlags, "bad command line flag index");
Flag* f = &Flag::flags[flag];
return (f->origin == COMMAND_LINE);
}

bool CommandLineFlags::wasSetOnCmdline(const char* name, bool* value) {
Flag* result = Flag::find_flag((char*)name, strlen(name));
if (result == NULL) return false;

@ -1794,6 +1794,9 @@ class CommandLineFlags {
"number of times a GC thread (minus the coordinator) " \
"will sleep while yielding before giving up and resuming GC") \
\
notproduct(bool, PrintFlagsFinal, false, \
"Print all command line flags after argument processing") \
\
/* gc tracing */ \
manageable(bool, PrintGC, false, \
"Print message at garbage collect") \

@ -154,6 +154,8 @@ RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_DEVELOP_FLAG_
} CommandLineFlagWithType;

#define FLAG_IS_DEFAULT(name) (CommandLineFlagsEx::is_default(FLAG_MEMBER(name)))
#define FLAG_IS_ERGO(name) (CommandLineFlagsEx::is_ergo(FLAG_MEMBER(name)))
#define FLAG_IS_CMDLINE(name) (CommandLineFlagsEx::is_cmdline(FLAG_MEMBER(name)))

#define FLAG_SET_DEFAULT(name, value) ((name) = (value))

@ -171,4 +173,6 @@ class CommandLineFlagsEx : CommandLineFlags {
static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, FlagValueOrigin origin);

static bool is_default(CommandLineFlag flag);
static bool is_ergo(CommandLineFlag flag);
static bool is_cmdline(CommandLineFlag flag);
};
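FLAG_IS_ERGO completes the origin queries alongside FLAG_IS_DEFAULT and FLAG_IS_CMDLINE: each flag carries a record of where its current value came from, and an ergonomic set both stores a value and stamps that origin. The bookkeeping in miniature (hypothetical, reduced to a single flag):

#include <cstdio>

enum FlagValueOrigin { DEFAULT, COMMAND_LINE, ERGONOMIC };

struct Flag {
  long value;
  FlagValueOrigin origin;
};

// Analogous to FLAG_SET_ERGO: change the value and record the origin.
static void set_ergo(Flag* f, long v) { f->value = v; f->origin = ERGONOMIC; }

int main() {
  Flag ParallelGCThreads = { 0, DEFAULT };
  set_ergo(&ParallelGCThreads, 8);
  std::printf("is_ergo: %d\n", ParallelGCThreads.origin == ERGONOMIC);  // 1
  return 0;
}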

@ -52,6 +52,8 @@ int Abstract_VM_Version::_vm_major_version = 0;
int Abstract_VM_Version::_vm_minor_version = 0;
int Abstract_VM_Version::_vm_build_number = 0;
bool Abstract_VM_Version::_initialized = false;
int Abstract_VM_Version::_parallel_worker_threads = 0;
bool Abstract_VM_Version::_parallel_worker_threads_initialized = false;

void Abstract_VM_Version::initialize() {
if (_initialized) {
@ -210,3 +212,43 @@ void VM_Version_init() {
}
#endif
}

unsigned int Abstract_VM_Version::nof_parallel_worker_threads(
unsigned int num,
unsigned int den,
unsigned int switch_pt) {
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
assert(ParallelGCThreads == 0, "Default ParallelGCThreads is not 0");
// For very large machines, there are diminishing returns
// for large numbers of worker threads. Instead of
// hogging the whole system, use a fraction of the workers for every
// processor after the first 8. For example, on a 72 cpu machine
// and a chosen fraction of 5/8
// use 8 + (72 - 8) * (5/8) == 48 worker threads.
unsigned int ncpus = (unsigned int) os::active_processor_count();
return (ncpus <= switch_pt) ?
ncpus :
(switch_pt + ((ncpus - switch_pt) * num) / den);
} else {
return ParallelGCThreads;
}
}

unsigned int Abstract_VM_Version::calc_parallel_worker_threads() {
return nof_parallel_worker_threads(5, 8, 8);
}
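nof_parallel_worker_threads(num, den, switch_pt) returns ncpus up to the switch point and switch_pt + (ncpus - switch_pt) * num / den beyond it; calc_parallel_worker_threads picks the 5/8 fraction with a switch point of 8. The formula reproduced standalone with two sample machine sizes:

#include <cstdio>

static unsigned workers(unsigned ncpus, unsigned num, unsigned den,
                        unsigned switch_pt) {
  return (ncpus <= switch_pt) ? ncpus
                              : switch_pt + ((ncpus - switch_pt) * num) / den;
}

int main() {
  // 5/8 of a worker per cpu past the first 8, as in calc_parallel_worker_threads().
  std::printf("%u\n", workers(4, 5, 8, 8));    // 4: below the switch point
  std::printf("%u\n", workers(72, 5, 8, 8));   // 48: 8 + (72 - 8) * 5 / 8
  return 0;
}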


// Does not set the _initialized flag since it is
// a global flag.
unsigned int Abstract_VM_Version::parallel_worker_threads() {
if (!_parallel_worker_threads_initialized) {
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
_parallel_worker_threads = VM_Version::calc_parallel_worker_threads();
} else {
_parallel_worker_threads = ParallelGCThreads;
}
_parallel_worker_threads_initialized = true;
}
return _parallel_worker_threads;
}

@ -36,6 +36,12 @@ class Abstract_VM_Version: AllStatic {
static int _vm_minor_version;
static int _vm_build_number;
static bool _initialized;
static int _parallel_worker_threads;
static bool _parallel_worker_threads_initialized;

static unsigned int nof_parallel_worker_threads(unsigned int num,
unsigned int den,
unsigned int switch_pt);
public:
static void initialize();

@ -69,4 +75,13 @@ class Abstract_VM_Version: AllStatic {
// subclasses should define new versions to hide this one as needed. Note
// that the O/S may support more sizes, but at most this many are used.
static uint page_size_count() { return 2; }

// Returns the number of parallel threads to be used for VM
// work. If that number has not been calculated, do so and
// save it. Returns ParallelGCThreads if it is set on the
// command line.
static unsigned int parallel_worker_threads();
// Calculates and returns the number of parallel threads. May
// be VM version specific.
static unsigned int calc_parallel_worker_threads();
};