Merge
commit f5838a0d7c
@@ -253,3 +253,4 @@ d0b525cd31b87abeb6d5b7e3516953eeb13b323c jdk9-b06
db045d8faa0924b7378102d24a1a0d850c1e3834 jdk9-b08
4a21dc7d57d1069a01f68e7182c074cb37349dfb jdk9-b09
fa13f2b926f8426876ec03e7903f3ee0ee150f2e jdk9-b10
ab55a18a95e1990a588929d5d29db3eb9985fea0 jdk9-b11

@@ -4243,7 +4243,7 @@ TOOLCHAIN_DESCRIPTION_xlc="IBM XL C/C++"
#CUSTOM_AUTOCONF_INCLUDE

# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1398196583
DATE_WHEN_GENERATED=1398861894

###############################################################################
#
@@ -43505,9 +43505,10 @@ $as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/$FREETYPE_LIB_NAME
$as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/freetype.lib. Ignoring location." >&6;}
FOUND_FREETYPE=no
fi
elif test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$OPENJDK_TARGET_CPU" = xx86_64 && test -s "$POTENTIAL_FREETYPE_LIB_PATH/amd64/$FREETYPE_LIB_NAME"; then
# On solaris-x86_86, default is (normally) PATH/lib/amd64. Update our guess!
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH/amd64"
elif test "x$OPENJDK_TARGET_OS" = xsolaris \
&& test -s "$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR/$FREETYPE_LIB_NAME"; then
# Found lib in isa dir, use that instead.
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR"
fi
fi
fi
@@ -44093,9 +44094,10 @@ $as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/$FREETYPE_LIB_NAME
$as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/freetype.lib. Ignoring location." >&6;}
FOUND_FREETYPE=no
fi
elif test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$OPENJDK_TARGET_CPU" = xx86_64 && test -s "$POTENTIAL_FREETYPE_LIB_PATH/amd64/$FREETYPE_LIB_NAME"; then
# On solaris-x86_86, default is (normally) PATH/lib/amd64. Update our guess!
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH/amd64"
elif test "x$OPENJDK_TARGET_OS" = xsolaris \
&& test -s "$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR/$FREETYPE_LIB_NAME"; then
# Found lib in isa dir, use that instead.
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR"
fi
fi
fi
@@ -44395,9 +44397,10 @@ $as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/$FREETYPE_LIB_NAME
$as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/freetype.lib. Ignoring location." >&6;}
FOUND_FREETYPE=no
fi
elif test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$OPENJDK_TARGET_CPU" = xx86_64 && test -s "$POTENTIAL_FREETYPE_LIB_PATH/amd64/$FREETYPE_LIB_NAME"; then
# On solaris-x86_86, default is (normally) PATH/lib/amd64. Update our guess!
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH/amd64"
elif test "x$OPENJDK_TARGET_OS" = xsolaris \
&& test -s "$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR/$FREETYPE_LIB_NAME"; then
# Found lib in isa dir, use that instead.
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR"
fi
fi
fi
@@ -44688,9 +44691,10 @@ $as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/$FREETYPE_LIB_NAME
$as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/freetype.lib. Ignoring location." >&6;}
FOUND_FREETYPE=no
fi
elif test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$OPENJDK_TARGET_CPU" = xx86_64 && test -s "$POTENTIAL_FREETYPE_LIB_PATH/amd64/$FREETYPE_LIB_NAME"; then
# On solaris-x86_86, default is (normally) PATH/lib/amd64. Update our guess!
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH/amd64"
elif test "x$OPENJDK_TARGET_OS" = xsolaris \
&& test -s "$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR/$FREETYPE_LIB_NAME"; then
# Found lib in isa dir, use that instead.
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR"
fi
fi
fi
@@ -44981,9 +44985,10 @@ $as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/$FREETYPE_LIB_NAME
$as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/freetype.lib. Ignoring location." >&6;}
FOUND_FREETYPE=no
fi
elif test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$OPENJDK_TARGET_CPU" = xx86_64 && test -s "$POTENTIAL_FREETYPE_LIB_PATH/amd64/$FREETYPE_LIB_NAME"; then
# On solaris-x86_86, default is (normally) PATH/lib/amd64. Update our guess!
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH/amd64"
elif test "x$OPENJDK_TARGET_OS" = xsolaris \
&& test -s "$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR/$FREETYPE_LIB_NAME"; then
# Found lib in isa dir, use that instead.
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR"
fi
fi
fi
@@ -45275,9 +45280,10 @@ $as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/$FREETYPE_LIB_NAME
$as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/freetype.lib. Ignoring location." >&6;}
FOUND_FREETYPE=no
fi
elif test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$OPENJDK_TARGET_CPU" = xx86_64 && test -s "$POTENTIAL_FREETYPE_LIB_PATH/amd64/$FREETYPE_LIB_NAME"; then
# On solaris-x86_86, default is (normally) PATH/lib/amd64. Update our guess!
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH/amd64"
elif test "x$OPENJDK_TARGET_OS" = xsolaris \
&& test -s "$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR/$FREETYPE_LIB_NAME"; then
# Found lib in isa dir, use that instead.
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR"
fi
fi
fi
@@ -45570,9 +45576,10 @@ $as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/$FREETYPE_LIB_NAME
$as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/freetype.lib. Ignoring location." >&6;}
FOUND_FREETYPE=no
fi
elif test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$OPENJDK_TARGET_CPU" = xx86_64 && test -s "$POTENTIAL_FREETYPE_LIB_PATH/amd64/$FREETYPE_LIB_NAME"; then
# On solaris-x86_86, default is (normally) PATH/lib/amd64. Update our guess!
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH/amd64"
elif test "x$OPENJDK_TARGET_OS" = xsolaris \
&& test -s "$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR/$FREETYPE_LIB_NAME"; then
# Found lib in isa dir, use that instead.
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR"
fi
fi
fi
@@ -45861,9 +45868,10 @@ $as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/$FREETYPE_LIB_NAME
$as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/freetype.lib. Ignoring location." >&6;}
FOUND_FREETYPE=no
fi
elif test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$OPENJDK_TARGET_CPU" = xx86_64 && test -s "$POTENTIAL_FREETYPE_LIB_PATH/amd64/$FREETYPE_LIB_NAME"; then
# On solaris-x86_86, default is (normally) PATH/lib/amd64. Update our guess!
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH/amd64"
elif test "x$OPENJDK_TARGET_OS" = xsolaris \
&& test -s "$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR/$FREETYPE_LIB_NAME"; then
# Found lib in isa dir, use that instead.
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR"
fi
fi
fi
@@ -46152,9 +46160,10 @@ $as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/$FREETYPE_LIB_NAME
$as_echo "$as_me: Could not find $POTENTIAL_FREETYPE_LIB_PATH/freetype.lib. Ignoring location." >&6;}
FOUND_FREETYPE=no
fi
elif test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$OPENJDK_TARGET_CPU" = xx86_64 && test -s "$POTENTIAL_FREETYPE_LIB_PATH/amd64/$FREETYPE_LIB_NAME"; then
# On solaris-x86_86, default is (normally) PATH/lib/amd64. Update our guess!
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH/amd64"
elif test "x$OPENJDK_TARGET_OS" = xsolaris \
&& test -s "$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR/$FREETYPE_LIB_NAME"; then
# Found lib in isa dir, use that instead.
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR"
fi
fi
fi

@@ -286,9 +286,10 @@ AC_DEFUN([LIB_CHECK_POTENTIAL_FREETYPE],
AC_MSG_NOTICE([Could not find $POTENTIAL_FREETYPE_LIB_PATH/freetype.lib. Ignoring location.])
FOUND_FREETYPE=no
fi
elif test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$OPENJDK_TARGET_CPU" = xx86_64 && test -s "$POTENTIAL_FREETYPE_LIB_PATH/amd64/$FREETYPE_LIB_NAME"; then
# On solaris-x86_86, default is (normally) PATH/lib/amd64. Update our guess!
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH/amd64"
elif test "x$OPENJDK_TARGET_OS" = xsolaris \
&& test -s "$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR/$FREETYPE_LIB_NAME"; then
# Found lib in isa dir, use that instead.
POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR"
fi
fi
fi

@@ -253,3 +253,4 @@ a4bf701ac316946c2e5e83138ad8e687da6a4b30 jdk9-b06
2da7fead826bc27f193c7d63048c2cf100a8809c jdk9-b08
1a3a4f48515dbf1cff37279691b2fb74f228298d jdk9-b09
3bd4039dfc632fd7fc8418a25a3dcc34d1cd4019 jdk9-b10
77ea0a2503582a28e4e66be7239a49a0d1dd313f jdk9-b11

@@ -413,3 +413,4 @@ bdc5311e1db7598589b77015119b821bf8c828bd jdk9-b05
4dedef5e51ed3a36677a8ba82949fc517ad64162 jdk9-b08
05e8f5242c26ba45d4fa947e4f4f54c058c9b522 jdk9-b09
ebc44d040cd149d2120d69fe183a3dae7840f4b4 jdk9-b10
783309c3a1a629a452673399dcfa83ef7eca94d8 jdk9-b11

@@ -2003,7 +2003,7 @@ void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register
}
} else {
assert(MethodData::profile_return(), "either profile call args or call ret");
update_mdp_by_constant(in_bytes(ReturnTypeEntry::size()));
update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
}

// mdp points right after the end of the

@@ -137,7 +137,7 @@ void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register ca
movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
} else {
assert(MethodData::profile_return(), "either profile call args or call ret");
update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
}

// mdp points right after the end of the

@@ -3188,8 +3188,8 @@ void LIRGenerator::profile_arguments(ProfileCall* x) {
#ifdef ASSERT
Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
int n = x->nb_profiled_args();
assert(MethodData::profile_parameters() && x->inlined() &&
((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)),
assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
(x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
"only at JSR292 bytecodes");
#endif
}

@@ -2831,7 +2831,6 @@ void ClassFileParser::parse_classfile_bootstrap_methods_attribute(u4 attribute_b
}
}

assert(operand_fill_index == operands->length(), "exact fill");
assert(ConstantPool::operand_array_length(operands) == attribute_array_length, "correct decode");

u1* current_end = cfs->current();

@@ -564,11 +564,11 @@ void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
"--------------------------------\n");
size_t total_size = totalSizeInIndexedFreeLists();
size_t free_blocks = numFreeBlocksInIndexedFreeLists();
gclog_or_tty->print("Total Free Space: %d\n", total_size);
gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
gclog_or_tty->print("Number of Blocks: %d\n", free_blocks);
gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
if (free_blocks != 0) {
gclog_or_tty->print("Av. Block Size: %d\n", total_size/free_blocks);
gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", total_size/free_blocks);
}
}

@@ -2152,7 +2152,7 @@ void CompactibleFreeListSpace::beginSweepFLCensus(
for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
if (PrintFLSStatistics > 1) {
gclog_or_tty->print("size[%d] : ", i);
gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
}
fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
@@ -2683,7 +2683,8 @@ void CFLS_LAB::compute_desired_plab_size() {
_global_num_workers[i] = 0;
_global_num_blocks[i] = 0;
if (PrintOldPLAB) {
gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT,
i, (size_t)_blocks_to_claim[i].average());
}
}
}
@@ -2722,7 +2723,7 @@ void CFLS_LAB::retire(int tid) {
}
}
if (PrintOldPLAB) {
gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
}
// Reset stats for next round

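The hunks above replace "%d" with the SIZE_FORMAT macro wherever a size_t value is printed. A minimal standalone sketch of why the width-correct specifier matters; it uses the standard %zu conversion instead of HotSpot's SIZE_FORMAT macro, and the values are made up for illustration:

#include <cstdio>
#include <cstddef>

int main() {
  size_t total_size  = 5000000000ULL;   // does not fit in a 32-bit int
  size_t free_blocks = 3;

  // Wrong on LP64 platforms: %d expects int, but size_t is 64 bits wide,
  // so the argument is mismatched (undefined behavior, typically truncation).
  // std::printf("Total Free Space: %d\n", total_size);

  // Correct: %zu matches size_t. HotSpot uses its SIZE_FORMAT macro for the
  // same purpose, in a form that is portable across its supported compilers.
  std::printf("Total Free Space: %zu\n", total_size);
  std::printf("Number of Blocks: %zu\n", free_blocks);
  std::printf("Av. Block Size:   %zu\n", total_size / free_blocks);
  return 0;
}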
@ -1512,6 +1512,8 @@ bool CMSCollector::shouldConcurrentCollect() {
|
||||
gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
|
||||
gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
|
||||
gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
|
||||
gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
|
||||
gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
|
||||
gclog_or_tty->print_cr("metadata initialized %d",
|
||||
MetaspaceGC::should_concurrent_collect());
|
||||
}
|
||||
@ -1574,6 +1576,28 @@ bool CMSCollector::shouldConcurrentCollect() {
|
||||
return true;
|
||||
}
|
||||
|
||||
// CMSTriggerInterval starts a CMS cycle if enough time has passed.
|
||||
if (CMSTriggerInterval >= 0) {
|
||||
if (CMSTriggerInterval == 0) {
|
||||
// Trigger always
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check the CMS time since begin (we do not check the stats validity
|
||||
// as we want to be able to trigger the first CMS cycle as well)
|
||||
if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
|
||||
if (Verbose && PrintGCDetails) {
|
||||
if (stats().valid()) {
|
||||
gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
|
||||
stats().cms_time_since_begin());
|
||||
} else {
|
||||
gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
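The new CMSTriggerInterval block above starts a concurrent cycle once enough wall-clock time has passed since the last cycle began, even before the occupancy statistics become valid. A simplified, standalone sketch of that decision; the function and parameter names are illustrative, not the HotSpot ones:

#include <cstdint>

// trigger_interval_ms mirrors CMSTriggerInterval (milliseconds; negative
// disables the check, zero means "always trigger").
// seconds_since_begin mirrors stats().cms_time_since_begin().
bool should_trigger_by_interval(int64_t trigger_interval_ms,
                                double seconds_since_begin) {
  if (trigger_interval_ms < 0) {
    return false;                                  // feature disabled
  }
  if (trigger_interval_ms == 0) {
    return true;                                   // trigger every time
  }
  // Compare elapsed time against the interval, converted to seconds.
  return seconds_since_begin >= trigger_interval_ms / 1000.0;
}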
@ -2894,13 +2918,13 @@ bool CMSCollector::is_cms_reachable(HeapWord* addr) {
|
||||
|
||||
// Clear the marking bit map array before starting, but, just
|
||||
// for kicks, first report if the given address is already marked
|
||||
gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
|
||||
gclog_or_tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", addr,
|
||||
_markBitMap.isMarked(addr) ? "" : " not");
|
||||
|
||||
if (verify_after_remark()) {
|
||||
MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
|
||||
bool result = verification_mark_bm()->isMarked(addr);
|
||||
gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
|
||||
gclog_or_tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", addr,
|
||||
result ? "IS" : "is NOT");
|
||||
return result;
|
||||
} else {
|
||||
@ -4569,7 +4593,7 @@ void CMSCollector::abortable_preclean() {
|
||||
}
|
||||
}
|
||||
if (PrintCMSStatistics > 0) {
|
||||
gclog_or_tty->print(" [%d iterations, %d waits, %d cards)] ",
|
||||
gclog_or_tty->print(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards)] ",
|
||||
loops, waited, cumworkdone);
|
||||
}
|
||||
}
|
||||
@ -4721,7 +4745,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
|
||||
numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
|
||||
curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
|
||||
if (Verbose && PrintGCDetails) {
|
||||
gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
|
||||
gclog_or_tty->print(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
|
||||
}
|
||||
// Either there are very few dirty cards, so re-mark
|
||||
// pause will be small anyway, or our pre-cleaning isn't
|
||||
@ -4743,7 +4767,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
|
||||
curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
|
||||
cumNumCards += curNumCards;
|
||||
if (PrintGCDetails && PrintCMSStatistics != 0) {
|
||||
gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
|
||||
gclog_or_tty->print_cr(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
|
||||
curNumCards, cumNumCards, numIter);
|
||||
}
|
||||
return cumNumCards; // as a measure of useful work done
|
||||
@ -8205,7 +8229,7 @@ SweepClosure::~SweepClosure() {
|
||||
void SweepClosure::initialize_free_range(HeapWord* freeFinger,
|
||||
bool freeRangeInFreeLists) {
|
||||
if (CMSTraceSweeper) {
|
||||
gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n",
|
||||
gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n",
|
||||
freeFinger, freeRangeInFreeLists);
|
||||
}
|
||||
assert(!inFreeRange(), "Trampling existing free range");
|
||||
@ -8275,10 +8299,10 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
|
||||
pointer_delta(addr, freeFinger()));
|
||||
if (CMSTraceSweeper) {
|
||||
gclog_or_tty->print("Sweep: last chunk: ");
|
||||
gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") "
|
||||
"[coalesced:"SIZE_FORMAT"]\n",
|
||||
gclog_or_tty->print("put_free_blk " PTR_FORMAT " ("SIZE_FORMAT") "
|
||||
"[coalesced:%d]\n",
|
||||
freeFinger(), pointer_delta(addr, freeFinger()),
|
||||
lastFreeRangeCoalesced());
|
||||
lastFreeRangeCoalesced() ? 1 : 0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -8421,7 +8445,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
|
||||
// the midst of a free range, we are coalescing
|
||||
print_free_block_coalesced(fc);
|
||||
if (CMSTraceSweeper) {
|
||||
gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
|
||||
gclog_or_tty->print(" -- pick up free block " PTR_FORMAT " (" SIZE_FORMAT ")\n", fc, size);
|
||||
}
|
||||
// remove it from the free lists
|
||||
_sp->removeFreeChunkFromFreeLists(fc);
|
||||
@ -8483,7 +8507,7 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
|
||||
// this will be swept up when we hit the end of the
|
||||
// free range
|
||||
if (CMSTraceSweeper) {
|
||||
gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
|
||||
gclog_or_tty->print(" -- pick up garbage " PTR_FORMAT " (" SIZE_FORMAT ")\n", fc, size);
|
||||
}
|
||||
// If the chunk is being coalesced and the current free range is
|
||||
// in the free lists, remove the current free range so that it
|
||||
@ -8576,7 +8600,7 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
|
||||
}
|
||||
|
||||
if (CMSTraceSweeper) {
|
||||
gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
|
||||
gclog_or_tty->print_cr(" -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", fc, chunkSize);
|
||||
}
|
||||
|
||||
HeapWord* const fc_addr = (HeapWord*) fc;
|
||||
@ -8705,7 +8729,7 @@ void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
|
||||
"chunk should not be in free lists yet");
|
||||
}
|
||||
if (CMSTraceSweeper) {
|
||||
gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
|
||||
gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
|
||||
chunk, size);
|
||||
}
|
||||
// A new free range is going to be starting. The current
|
||||
|
@@ -265,7 +265,7 @@ void PromotionInfo::print_statistics(uint worker_id) const {
slots += _spoolHead->bufferSize - 1;
blocks++;
}
gclog_or_tty->print_cr(" [worker %d] promo_blocks = %d, promo_slots = %d ",
gclog_or_tty->print_cr(" [worker %d] promo_blocks = " SIZE_FORMAT ", promo_slots = " SIZE_FORMAT,
worker_id, blocks, slots);
}

@ -29,7 +29,7 @@
|
||||
#include "gc_implementation/g1/g1HotCardCache.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
|
||||
ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
|
||||
ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure) :
|
||||
_threads(NULL), _n_threads(0),
|
||||
_hot_card_cache(g1h)
|
||||
{
|
||||
@ -61,7 +61,7 @@ ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
|
||||
|
||||
ConcurrentG1RefineThread *next = NULL;
|
||||
for (uint i = _n_threads - 1; i != UINT_MAX; i--) {
|
||||
ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
|
||||
ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, refine_closure, worker_id_offset, i);
|
||||
assert(t != NULL, "Conc refine should have been created");
|
||||
if (t->osthread() == NULL) {
|
||||
vm_shutdown_during_initialization("Could not create ConcurrentG1RefineThread");
|
||||
|
@@ -71,7 +71,7 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
void reset_threshold_step();

public:
ConcurrentG1Refine(G1CollectedHeap* g1h);
ConcurrentG1Refine(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure);
~ConcurrentG1Refine();

void init(); // Accomplish some initialization that has to wait.

@ -33,8 +33,10 @@
|
||||
|
||||
ConcurrentG1RefineThread::
|
||||
ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *next,
|
||||
CardTableEntryClosure* refine_closure,
|
||||
uint worker_id_offset, uint worker_id) :
|
||||
ConcurrentGCThread(),
|
||||
_refine_closure(refine_closure),
|
||||
_worker_id_offset(worker_id_offset),
|
||||
_worker_id(worker_id),
|
||||
_active(false),
|
||||
@ -71,6 +73,7 @@ void ConcurrentG1RefineThread::initialize() {
|
||||
}
|
||||
|
||||
void ConcurrentG1RefineThread::sample_young_list_rs_lengths() {
|
||||
SuspendibleThreadSetJoiner sts;
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
G1CollectorPolicy* g1p = g1h->g1_policy();
|
||||
if (g1p->adaptive_young_list_length()) {
|
||||
@ -82,8 +85,8 @@ void ConcurrentG1RefineThread::sample_young_list_rs_lengths() {
|
||||
|
||||
// we try to yield every time we visit 10 regions
|
||||
if (regions_visited == 10) {
|
||||
if (_sts.should_yield()) {
|
||||
_sts.yield("G1 refine");
|
||||
if (sts.should_yield()) {
|
||||
sts.yield();
|
||||
// we just abandon the iteration
|
||||
break;
|
||||
}
|
||||
@ -99,9 +102,7 @@ void ConcurrentG1RefineThread::run_young_rs_sampling() {
|
||||
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
|
||||
_vtime_start = os::elapsedVTime();
|
||||
while(!_should_terminate) {
|
||||
_sts.join();
|
||||
sample_young_list_rs_lengths();
|
||||
_sts.leave();
|
||||
|
||||
if (os::supports_vtime()) {
|
||||
_vtime_accum = (os::elapsedVTime() - _vtime_start);
|
||||
@ -182,37 +183,37 @@ void ConcurrentG1RefineThread::run() {
|
||||
break;
|
||||
}
|
||||
|
||||
_sts.join();
|
||||
{
|
||||
SuspendibleThreadSetJoiner sts;
|
||||
|
||||
do {
|
||||
int curr_buffer_num = (int)dcqs.completed_buffers_num();
|
||||
// If the number of the buffers falls down into the yellow zone,
|
||||
// that means that the transition period after the evacuation pause has ended.
|
||||
if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cg1r()->yellow_zone()) {
|
||||
dcqs.set_completed_queue_padding(0);
|
||||
}
|
||||
do {
|
||||
int curr_buffer_num = (int)dcqs.completed_buffers_num();
|
||||
// If the number of the buffers falls down into the yellow zone,
|
||||
// that means that the transition period after the evacuation pause has ended.
|
||||
if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cg1r()->yellow_zone()) {
|
||||
dcqs.set_completed_queue_padding(0);
|
||||
}
|
||||
|
||||
if (_worker_id > 0 && curr_buffer_num <= _deactivation_threshold) {
|
||||
// If the number of the buffer has fallen below our threshold
|
||||
// we should deactivate. The predecessor will reactivate this
|
||||
// thread should the number of the buffers cross the threshold again.
|
||||
if (_worker_id > 0 && curr_buffer_num <= _deactivation_threshold) {
|
||||
// If the number of the buffer has fallen below our threshold
|
||||
// we should deactivate. The predecessor will reactivate this
|
||||
// thread should the number of the buffers cross the threshold again.
|
||||
deactivate();
|
||||
break;
|
||||
}
|
||||
|
||||
// Check if we need to activate the next thread.
|
||||
if (_next != NULL && !_next->is_active() && curr_buffer_num > _next->_threshold) {
|
||||
_next->activate();
|
||||
}
|
||||
} while (dcqs.apply_closure_to_completed_buffer(_refine_closure, _worker_id + _worker_id_offset, cg1r()->green_zone()));
|
||||
|
||||
// We can exit the loop above while being active if there was a yield request.
|
||||
if (is_active()) {
|
||||
deactivate();
|
||||
break;
|
||||
}
|
||||
|
||||
// Check if we need to activate the next thread.
|
||||
if (_next != NULL && !_next->is_active() && curr_buffer_num > _next->_threshold) {
|
||||
_next->activate();
|
||||
}
|
||||
} while (dcqs.apply_closure_to_completed_buffer(_worker_id + _worker_id_offset, cg1r()->green_zone()));
|
||||
|
||||
// We can exit the loop above while being active if there was a yield request.
|
||||
if (is_active()) {
|
||||
deactivate();
|
||||
}
|
||||
|
||||
_sts.leave();
|
||||
|
||||
if (os::supports_vtime()) {
|
||||
_vtime_accum = (os::elapsedVTime() - _vtime_start);
|
||||
} else {
|
||||
@ -223,17 +224,6 @@ void ConcurrentG1RefineThread::run() {
|
||||
terminate();
|
||||
}
|
||||
|
||||
|
||||
void ConcurrentG1RefineThread::yield() {
|
||||
if (G1TraceConcRefinement) {
|
||||
gclog_or_tty->print_cr("G1-Refine-yield");
|
||||
}
|
||||
_sts.yield("G1 refine");
|
||||
if (G1TraceConcRefinement) {
|
||||
gclog_or_tty->print_cr("G1-Refine-yield-end");
|
||||
}
|
||||
}
|
||||
|
||||
void ConcurrentG1RefineThread::stop() {
|
||||
// it is ok to take late safepoints here, if needed
|
||||
{
|
||||
|
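The run() loop above drops the explicit _sts.join()/_sts.leave() pairs in favor of a scoped SuspendibleThreadSetJoiner, so the leave happens automatically on every exit path, including the early breaks. A minimal standalone sketch of that RAII shape; ThreadSet and ScopedJoiner are illustrative stand-ins, not the HotSpot types:

#include <cstdio>

// Stand-in for the suspendible thread set: join before doing concurrent
// work, leave so safepoints can proceed.
struct ThreadSet {
  static void join()  { std::puts("joined suspendible set"); }
  static void leave() { std::puts("left suspendible set"); }
};

// RAII wrapper in the spirit of SuspendibleThreadSetJoiner.
class ScopedJoiner {
 public:
  ScopedJoiner()  { ThreadSet::join(); }
  ~ScopedJoiner() { ThreadSet::leave(); }
  ScopedJoiner(const ScopedJoiner&) = delete;
  ScopedJoiner& operator=(const ScopedJoiner&) = delete;
};

void refine_step(bool deactivate_requested) {
  ScopedJoiner sts;          // join on entry
  if (deactivate_requested) {
    return;                  // leave() still runs, no explicit call needed
  }
  // ... process completed buffers while joined ...
}                            // leave on normal exit

int main() {
  refine_step(false);
  refine_step(true);
}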
@ -28,6 +28,7 @@
|
||||
#include "gc_implementation/shared/concurrentGCThread.hpp"
|
||||
|
||||
// Forward Decl.
|
||||
class CardTableEntryClosure;
|
||||
class ConcurrentG1Refine;
|
||||
|
||||
// The G1 Concurrent Refinement Thread (could be several in the future).
|
||||
@ -49,6 +50,9 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
|
||||
Monitor* _monitor;
|
||||
ConcurrentG1Refine* _cg1r;
|
||||
|
||||
// The closure applied to completed log buffers.
|
||||
CardTableEntryClosure* _refine_closure;
|
||||
|
||||
int _thread_threshold_step;
|
||||
// This thread activation threshold
|
||||
int _threshold;
|
||||
@ -64,13 +68,11 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
|
||||
void activate();
|
||||
void deactivate();
|
||||
|
||||
// For use by G1CollectedHeap, which is a friend.
|
||||
static SuspendibleThreadSet* sts() { return &_sts; }
|
||||
|
||||
public:
|
||||
virtual void run();
|
||||
// Constructor
|
||||
ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
|
||||
CardTableEntryClosure* refine_closure,
|
||||
uint worker_id_offset, uint worker_id);
|
||||
|
||||
void initialize();
|
||||
@ -84,8 +86,6 @@ public:
|
||||
|
||||
ConcurrentG1Refine* cg1r() { return _cg1r; }
|
||||
|
||||
// Yield for GC
|
||||
void yield();
|
||||
// shutdown
|
||||
void stop();
|
||||
};
|
||||
|
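Both ConcurrentG1Refine and its worker threads now receive the card-refinement closure through their constructors instead of relying on a globally registered closure (the set_closure path removed from the dirty card queue set later in this commit). A small standalone sketch of that constructor-injection shape; the type names are illustrative:

#include <cstdio>

// Stand-in for CardTableEntryClosure: something applied to each card entry.
struct CardClosure {
  virtual bool do_card(void* card) = 0;
  virtual ~CardClosure() {}
};

// The worker keeps the closure it was constructed with; nothing is
// registered on, or looked up from, a shared queue set later.
class RefineWorker {
 public:
  explicit RefineWorker(CardClosure* refine_closure)
      : _refine_closure(refine_closure) {}

  void process(void* card) { _refine_closure->do_card(card); }

 private:
  CardClosure* _refine_closure;   // injected once, never re-registered
};

struct PrintClosure : CardClosure {
  virtual bool do_card(void* card) {
    std::printf("refining card %p\n", card);
    return true;
  }
};

int main() {
  PrintClosure cl;
  RefineWorker worker(&cl);       // closure chosen at construction time
  int dummy = 0;
  worker.process(&dummy);
}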
@ -976,11 +976,11 @@ void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
|
||||
}
|
||||
|
||||
if (concurrent()) {
|
||||
ConcurrentGCThread::stsLeave();
|
||||
SuspendibleThreadSet::leave();
|
||||
}
|
||||
_first_overflow_barrier_sync.enter();
|
||||
if (concurrent()) {
|
||||
ConcurrentGCThread::stsJoin();
|
||||
SuspendibleThreadSet::join();
|
||||
}
|
||||
// at this point everyone should have synced up and not be doing any
|
||||
// more work
|
||||
@ -1024,11 +1024,11 @@ void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
|
||||
}
|
||||
|
||||
if (concurrent()) {
|
||||
ConcurrentGCThread::stsLeave();
|
||||
SuspendibleThreadSet::leave();
|
||||
}
|
||||
_second_overflow_barrier_sync.enter();
|
||||
if (concurrent()) {
|
||||
ConcurrentGCThread::stsJoin();
|
||||
SuspendibleThreadSet::join();
|
||||
}
|
||||
// at this point everything should be re-initialized and ready to go
|
||||
|
||||
@ -1076,7 +1076,7 @@ public:
|
||||
|
||||
double start_vtime = os::elapsedVTime();
|
||||
|
||||
ConcurrentGCThread::stsJoin();
|
||||
SuspendibleThreadSet::join();
|
||||
|
||||
assert(worker_id < _cm->active_tasks(), "invariant");
|
||||
CMTask* the_task = _cm->task(worker_id);
|
||||
@ -1103,9 +1103,9 @@ public:
|
||||
if (!_cm->has_aborted() && the_task->has_aborted()) {
|
||||
sleep_time_ms =
|
||||
(jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
|
||||
ConcurrentGCThread::stsLeave();
|
||||
SuspendibleThreadSet::leave();
|
||||
os::sleep(Thread::current(), sleep_time_ms, false);
|
||||
ConcurrentGCThread::stsJoin();
|
||||
SuspendibleThreadSet::join();
|
||||
}
|
||||
double end_time2_sec = os::elapsedTime();
|
||||
double elapsed_time2_sec = end_time2_sec - start_time_sec;
|
||||
@ -1123,7 +1123,7 @@ public:
|
||||
the_task->record_end_time();
|
||||
guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
|
||||
|
||||
ConcurrentGCThread::stsLeave();
|
||||
SuspendibleThreadSet::leave();
|
||||
|
||||
double end_vtime = os::elapsedVTime();
|
||||
_cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
|
||||
@ -2655,7 +2655,6 @@ public:
|
||||
str = " O";
|
||||
} else {
|
||||
HeapRegion* hr = _g1h->heap_region_containing(obj);
|
||||
guarantee(hr != NULL, "invariant");
|
||||
bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
|
||||
bool marked = _g1h->is_marked(obj, _vo);
|
||||
|
||||
@ -3302,21 +3301,17 @@ void ConcurrentMark::print_on_error(outputStream* st) const {
|
||||
|
||||
// We take a break if someone is trying to stop the world.
|
||||
bool ConcurrentMark::do_yield_check(uint worker_id) {
|
||||
if (should_yield()) {
|
||||
if (SuspendibleThreadSet::should_yield()) {
|
||||
if (worker_id == 0) {
|
||||
_g1h->g1_policy()->record_concurrent_pause();
|
||||
}
|
||||
cmThread()->yield();
|
||||
SuspendibleThreadSet::yield();
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool ConcurrentMark::should_yield() {
|
||||
return cmThread()->should_yield();
|
||||
}
|
||||
|
||||
bool ConcurrentMark::containing_card_is_marked(void* p) {
|
||||
size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
|
||||
return _card_bm.at(offset >> CardTableModRefBS::card_shift);
|
||||
@ -3417,9 +3412,8 @@ G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
|
||||
}
|
||||
|
||||
void CMTask::setup_for_region(HeapRegion* hr) {
|
||||
// Separated the asserts so that we know which one fires.
|
||||
assert(hr != NULL,
|
||||
"claim_region() should have filtered out continues humongous regions");
|
||||
"claim_region() should have filtered out NULL regions");
|
||||
assert(!hr->continuesHumongous(),
|
||||
"claim_region() should have filtered out continues humongous regions");
|
||||
|
||||
@ -3605,7 +3599,7 @@ void CMTask::regular_clock_call() {
|
||||
#endif // _MARKING_STATS_
|
||||
|
||||
// (4) We check whether we should yield. If we have to, then we abort.
|
||||
if (_cm->should_yield()) {
|
||||
if (SuspendibleThreadSet::should_yield()) {
|
||||
// We should yield. To do this we abort the task. The caller is
|
||||
// responsible for yielding.
|
||||
set_has_aborted();
|
||||
@ -3754,7 +3748,7 @@ void CMTask::drain_local_queue(bool partially) {
|
||||
|
||||
if (_task_queue->size() > target_size) {
|
||||
if (_cm->verbose_high()) {
|
||||
gclog_or_tty->print_cr("[%u] draining local queue, target size = %d",
|
||||
gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
|
||||
_worker_id, target_size);
|
||||
}
|
||||
|
||||
@ -3782,7 +3776,7 @@ void CMTask::drain_local_queue(bool partially) {
|
||||
}
|
||||
|
||||
if (_cm->verbose_high()) {
|
||||
gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
|
||||
gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
|
||||
_worker_id, _task_queue->size());
|
||||
}
|
||||
}
|
||||
@ -3810,7 +3804,7 @@ void CMTask::drain_global_stack(bool partially) {
|
||||
|
||||
if (_cm->mark_stack_size() > target_size) {
|
||||
if (_cm->verbose_low()) {
|
||||
gclog_or_tty->print_cr("[%u] draining global_stack, target size %d",
|
||||
gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
|
||||
_worker_id, target_size);
|
||||
}
|
||||
|
||||
@ -3820,7 +3814,7 @@ void CMTask::drain_global_stack(bool partially) {
|
||||
}
|
||||
|
||||
if (_cm->verbose_low()) {
|
||||
gclog_or_tty->print_cr("[%u] drained global stack, size = %d",
|
||||
gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
|
||||
_worker_id, _cm->mark_stack_size());
|
||||
}
|
||||
}
|
||||
|
@@ -814,7 +814,6 @@ public:
}

inline bool do_yield_check(uint worker_i = 0);
inline bool should_yield();

// Called to abort the marking cycle after a Full GC takes place.
void abort();

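Across the marking code above, the per-instance should_yield()/yield() calls are replaced by the static SuspendibleThreadSet::should_yield() and SuspendibleThreadSet::yield(). A standalone sketch of that cooperative yield check; the flag-based namespace here is an illustrative stand-in, not the HotSpot class:

#include <atomic>

// Illustrative stand-in: one global flag that a safepoint sets when it
// wants the concurrent workers to pause.
namespace suspendible {
  std::atomic<bool> yield_requested(false);

  inline bool should_yield() {
    return yield_requested.load(std::memory_order_acquire);
  }

  inline void yield() {
    // The real implementation blocks until the safepoint completes;
    // this sketch just spins until the request is cleared.
    while (should_yield()) { /* wait */ }
  }
}

// Mirrors the shape of ConcurrentMark::do_yield_check(): report whether
// the caller actually yielded.
bool do_yield_check() {
  if (suspendible::should_yield()) {
    suspendible::yield();
    return true;
  }
  return false;
}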
@ -89,6 +89,10 @@ void ConcurrentMarkThread::run() {
|
||||
while (!_should_terminate) {
|
||||
// wait until started is set.
|
||||
sleepBeforeNextCycle();
|
||||
if (_should_terminate) {
|
||||
break;
|
||||
}
|
||||
|
||||
{
|
||||
ResourceMark rm;
|
||||
HandleMark hm;
|
||||
@ -190,9 +194,8 @@ void ConcurrentMarkThread::run() {
|
||||
} else {
|
||||
// We don't want to update the marking status if a GC pause
|
||||
// is already underway.
|
||||
_sts.join();
|
||||
SuspendibleThreadSetJoiner sts;
|
||||
g1h->set_marking_complete();
|
||||
_sts.leave();
|
||||
}
|
||||
|
||||
// Check if cleanup set the free_regions_coming flag. If it
|
||||
@ -262,11 +265,12 @@ void ConcurrentMarkThread::run() {
|
||||
// record_concurrent_mark_cleanup_completed() (and, in fact, it's
|
||||
// not needed any more as the concurrent mark state has been
|
||||
// already reset).
|
||||
_sts.join();
|
||||
if (!cm()->has_aborted()) {
|
||||
g1_policy->record_concurrent_mark_cleanup_completed();
|
||||
{
|
||||
SuspendibleThreadSetJoiner sts;
|
||||
if (!cm()->has_aborted()) {
|
||||
g1_policy->record_concurrent_mark_cleanup_completed();
|
||||
}
|
||||
}
|
||||
_sts.leave();
|
||||
|
||||
if (cm()->has_aborted()) {
|
||||
if (G1Log::fine()) {
|
||||
@ -278,36 +282,43 @@ void ConcurrentMarkThread::run() {
|
||||
|
||||
// We now want to allow clearing of the marking bitmap to be
|
||||
// suspended by a collection pause.
|
||||
_sts.join();
|
||||
_cm->clearNextBitmap();
|
||||
_sts.leave();
|
||||
{
|
||||
SuspendibleThreadSetJoiner sts;
|
||||
_cm->clearNextBitmap();
|
||||
}
|
||||
}
|
||||
|
||||
// Update the number of full collections that have been
|
||||
// completed. This will also notify the FullGCCount_lock in case a
|
||||
// Java thread is waiting for a full GC to happen (e.g., it
|
||||
// called System.gc() with +ExplicitGCInvokesConcurrent).
|
||||
_sts.join();
|
||||
g1h->increment_old_marking_cycles_completed(true /* concurrent */);
|
||||
g1h->register_concurrent_cycle_end();
|
||||
_sts.leave();
|
||||
{
|
||||
SuspendibleThreadSetJoiner sts;
|
||||
g1h->increment_old_marking_cycles_completed(true /* concurrent */);
|
||||
g1h->register_concurrent_cycle_end();
|
||||
}
|
||||
}
|
||||
assert(_should_terminate, "just checking");
|
||||
|
||||
terminate();
|
||||
}
|
||||
|
||||
|
||||
void ConcurrentMarkThread::yield() {
|
||||
_sts.yield("Concurrent Mark");
|
||||
}
|
||||
|
||||
void ConcurrentMarkThread::stop() {
|
||||
// it is ok to take late safepoints here, if needed
|
||||
MutexLockerEx mu(Terminator_lock);
|
||||
_should_terminate = true;
|
||||
while (!_has_terminated) {
|
||||
Terminator_lock->wait();
|
||||
{
|
||||
MutexLockerEx ml(Terminator_lock);
|
||||
_should_terminate = true;
|
||||
}
|
||||
|
||||
{
|
||||
MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
|
||||
CGC_lock->notify_all();
|
||||
}
|
||||
|
||||
{
|
||||
MutexLockerEx ml(Terminator_lock);
|
||||
while (!_has_terminated) {
|
||||
Terminator_lock->wait();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -327,11 +338,14 @@ void ConcurrentMarkThread::sleepBeforeNextCycle() {
|
||||
assert(!in_progress(), "should have been cleared");
|
||||
|
||||
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
|
||||
while (!started()) {
|
||||
while (!started() && !_should_terminate) {
|
||||
CGC_lock->wait(Mutex::_no_safepoint_check_flag);
|
||||
}
|
||||
set_in_progress();
|
||||
clear_started();
|
||||
|
||||
if (started()) {
|
||||
set_in_progress();
|
||||
clear_started();
|
||||
}
|
||||
}
|
||||
|
||||
// Note: As is the case with CMS - this method, although exported
|
||||
|
@@ -89,9 +89,6 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
// that started() is set and set in_progress().
bool during_cycle() { return started() || in_progress(); }

// Yield for GC
void yield();

// shutdown
void stop();
};

@ -70,7 +70,7 @@ bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
|
||||
|
||||
DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
|
||||
PtrQueueSet(notify_when_complete),
|
||||
_closure(NULL),
|
||||
_mut_process_closure(NULL),
|
||||
_shared_dirty_card_queue(this, true /*perm*/),
|
||||
_free_ids(NULL),
|
||||
_processed_buffers_mut(0), _processed_buffers_rs_thread(0)
|
||||
@ -83,10 +83,11 @@ uint DirtyCardQueueSet::num_par_ids() {
|
||||
return (uint)os::processor_count();
|
||||
}
|
||||
|
||||
void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
|
||||
void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl, Monitor* cbl_mon, Mutex* fl_lock,
|
||||
int process_completed_threshold,
|
||||
int max_completed_queue,
|
||||
Mutex* lock, PtrQueueSet* fl_owner) {
|
||||
_mut_process_closure = cl;
|
||||
PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold,
|
||||
max_completed_queue, fl_owner);
|
||||
set_buffer_size(G1UpdateBufferSize);
|
||||
@ -98,18 +99,15 @@ void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
|
||||
t->dirty_card_queue().handle_zero_index();
|
||||
}
|
||||
|
||||
void DirtyCardQueueSet::set_closure(CardTableEntryClosure* closure) {
|
||||
_closure = closure;
|
||||
}
|
||||
|
||||
void DirtyCardQueueSet::iterate_closure_all_threads(bool consume,
|
||||
void DirtyCardQueueSet::iterate_closure_all_threads(CardTableEntryClosure* cl,
|
||||
bool consume,
|
||||
uint worker_i) {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
|
||||
for(JavaThread* t = Threads::first(); t; t = t->next()) {
|
||||
bool b = t->dirty_card_queue().apply_closure(_closure, consume);
|
||||
bool b = t->dirty_card_queue().apply_closure(cl, consume);
|
||||
guarantee(b, "Should not be interrupted.");
|
||||
}
|
||||
bool b = shared_dirty_card_queue()->apply_closure(_closure,
|
||||
bool b = shared_dirty_card_queue()->apply_closure(cl,
|
||||
consume,
|
||||
worker_i);
|
||||
guarantee(b, "Should not be interrupted.");
|
||||
@ -143,7 +141,7 @@ bool DirtyCardQueueSet::mut_process_buffer(void** buf) {
|
||||
|
||||
bool b = false;
|
||||
if (worker_i != UINT_MAX) {
|
||||
b = DirtyCardQueue::apply_closure_to_buffer(_closure, buf, 0,
|
||||
b = DirtyCardQueue::apply_closure_to_buffer(_mut_process_closure, buf, 0,
|
||||
_sz, true, worker_i);
|
||||
if (b) Atomic::inc(&_processed_buffers_mut);
|
||||
|
||||
@ -218,18 +216,11 @@ bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure*
|
||||
return res;
|
||||
}
|
||||
|
||||
bool DirtyCardQueueSet::apply_closure_to_completed_buffer(uint worker_i,
|
||||
int stop_at,
|
||||
bool during_pause) {
|
||||
return apply_closure_to_completed_buffer(_closure, worker_i,
|
||||
stop_at, during_pause);
|
||||
}
|
||||
|
||||
void DirtyCardQueueSet::apply_closure_to_all_completed_buffers() {
|
||||
void DirtyCardQueueSet::apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
|
||||
BufferNode* nd = _completed_buffers_head;
|
||||
while (nd != NULL) {
|
||||
bool b =
|
||||
DirtyCardQueue::apply_closure_to_buffer(_closure,
|
||||
DirtyCardQueue::apply_closure_to_buffer(cl,
|
||||
BufferNode::make_buffer_from_node(nd),
|
||||
0, _sz, false);
|
||||
guarantee(b, "Should not stop early.");
|
||||
@ -237,6 +228,24 @@ void DirtyCardQueueSet::apply_closure_to_all_completed_buffers() {
|
||||
}
|
||||
}
|
||||
|
||||
void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
|
||||
BufferNode* nd = _cur_par_buffer_node;
|
||||
while (nd != NULL) {
|
||||
BufferNode* next = (BufferNode*)nd->next();
|
||||
BufferNode* actual = (BufferNode*)Atomic::cmpxchg_ptr((void*)next, (volatile void*)&_cur_par_buffer_node, (void*)nd);
|
||||
if (actual == nd) {
|
||||
bool b =
|
||||
DirtyCardQueue::apply_closure_to_buffer(cl,
|
||||
BufferNode::make_buffer_from_node(actual),
|
||||
0, _sz, false);
|
||||
guarantee(b, "Should not stop early.");
|
||||
nd = next;
|
||||
} else {
|
||||
nd = actual;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Deallocates any completed log buffers
|
||||
void DirtyCardQueueSet::clear() {
|
||||
BufferNode* buffers_to_delete = NULL;
|
||||
|
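par_apply_closure_to_all_completed_buffers above lets several workers walk one shared list of completed buffers: each worker claims a node by advancing a shared cursor with compare-and-swap, and only the winner of that race processes the node. A standalone sketch of the claiming loop using std::atomic (the HotSpot code uses Atomic::cmpxchg_ptr); the node layout is simplified:

#include <atomic>
#include <cstdio>

struct BufferNode {
  BufferNode* next;
  int         id;
};

// Shared cursor: the next node that has not been claimed by any worker.
std::atomic<BufferNode*> cur_par_buffer_node(NULL);

// Each worker tries to move the cursor from nd to nd->next. Whoever wins
// the compare-and-swap owns nd; losers continue from the value they saw.
void par_apply_all(void (*apply)(BufferNode*)) {
  BufferNode* nd = cur_par_buffer_node.load();
  while (nd != NULL) {
    BufferNode* next = nd->next;
    if (cur_par_buffer_node.compare_exchange_strong(nd, next)) {
      apply(nd);     // this worker claimed nd
      nd = next;
    }
    // On failure, compare_exchange_strong has reloaded nd with the
    // current cursor value, so the loop resumes from there.
  }
}

int main() {
  BufferNode c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
  cur_par_buffer_node.store(&a);
  par_apply_all([](BufferNode* n) { std::printf("buffer %d\n", n->id); });
}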
@ -73,7 +73,8 @@ public:
|
||||
|
||||
|
||||
class DirtyCardQueueSet: public PtrQueueSet {
|
||||
CardTableEntryClosure* _closure;
|
||||
// The closure used in mut_process_buffer().
|
||||
CardTableEntryClosure* _mut_process_closure;
|
||||
|
||||
DirtyCardQueue _shared_dirty_card_queue;
|
||||
|
||||
@ -88,10 +89,12 @@ class DirtyCardQueueSet: public PtrQueueSet {
|
||||
jint _processed_buffers_mut;
|
||||
jint _processed_buffers_rs_thread;
|
||||
|
||||
// Current buffer node used for parallel iteration.
|
||||
BufferNode* volatile _cur_par_buffer_node;
|
||||
public:
|
||||
DirtyCardQueueSet(bool notify_when_complete = true);
|
||||
|
||||
void initialize(Monitor* cbl_mon, Mutex* fl_lock,
|
||||
void initialize(CardTableEntryClosure* cl, Monitor* cbl_mon, Mutex* fl_lock,
|
||||
int process_completed_threshold,
|
||||
int max_completed_queue,
|
||||
Mutex* lock, PtrQueueSet* fl_owner = NULL);
|
||||
@ -102,32 +105,14 @@ public:
|
||||
|
||||
static void handle_zero_index_for_thread(JavaThread* t);
|
||||
|
||||
// Register "blk" as "the closure" for all queues. Only one such closure
|
||||
// is allowed. The "apply_closure_to_completed_buffer" method will apply
|
||||
// this closure to a completed buffer, and "iterate_closure_all_threads"
|
||||
// applies it to partially-filled buffers (the latter should only be done
|
||||
// with the world stopped).
|
||||
void set_closure(CardTableEntryClosure* closure);
|
||||
|
||||
// If there is a registered closure for buffers, apply it to all entries
|
||||
// in all currently-active buffers. This should only be applied at a
|
||||
// safepoint. (Currently must not be called in parallel; this should
|
||||
// change in the future.) If "consume" is true, processed entries are
|
||||
// discarded.
|
||||
void iterate_closure_all_threads(bool consume = true,
|
||||
// Apply the given closure to all entries in all currently-active buffers.
|
||||
// This should only be applied at a safepoint. (Currently must not be called
|
||||
// in parallel; this should change in the future.) If "consume" is true,
|
||||
// processed entries are discarded.
|
||||
void iterate_closure_all_threads(CardTableEntryClosure* cl,
|
||||
bool consume = true,
|
||||
uint worker_i = 0);
|
||||
|
||||
// If there exists some completed buffer, pop it, then apply the
|
||||
// registered closure to all its elements, nulling out those elements
|
||||
// processed. If all elements are processed, returns "true". If no
|
||||
// completed buffers exist, returns false. If a completed buffer exists,
|
||||
// but is only partially completed before a "yield" happens, the
|
||||
// partially completed buffer (with its processed elements set to NULL)
|
||||
// is returned to the completed buffer set, and this call returns false.
|
||||
bool apply_closure_to_completed_buffer(uint worker_i = 0,
|
||||
int stop_at = 0,
|
||||
bool during_pause = false);
|
||||
|
||||
// If there exists some completed buffer, pop it, then apply the
|
||||
// specified closure to all its elements, nulling out those elements
|
||||
// processed. If all elements are processed, returns "true". If no
|
||||
@ -149,7 +134,12 @@ public:
|
||||
|
||||
// Applies the current closure to all completed buffers,
|
||||
// non-consumptively.
|
||||
void apply_closure_to_all_completed_buffers();
|
||||
void apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl);
|
||||
|
||||
void reset_for_par_iteration() { _cur_par_buffer_node = _completed_buffers_head; }
|
||||
// Applies the current closure to all completed buffers, non-consumptively.
|
||||
// Parallel version.
|
||||
void par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl);
|
||||
|
||||
DirtyCardQueue* shared_dirty_card_queue() {
|
||||
return &_shared_dirty_card_queue;
|
||||
|
@ -45,32 +45,27 @@ void G1CodeRootChunk::nmethods_do(CodeBlobClosure* cl) {
|
||||
}
|
||||
}
|
||||
|
||||
FreeList<G1CodeRootChunk> G1CodeRootSet::_free_list;
|
||||
size_t G1CodeRootSet::_num_chunks_handed_out = 0;
|
||||
|
||||
G1CodeRootChunk* G1CodeRootSet::new_chunk() {
|
||||
G1CodeRootChunk* result = _free_list.get_chunk_at_head();
|
||||
if (result == NULL) {
|
||||
result = new G1CodeRootChunk();
|
||||
}
|
||||
G1CodeRootSet::_num_chunks_handed_out++;
|
||||
result->reset();
|
||||
return result;
|
||||
G1CodeRootChunkManager::G1CodeRootChunkManager() : _free_list(), _num_chunks_handed_out(0) {
|
||||
_free_list.initialize();
|
||||
_free_list.set_size(G1CodeRootChunk::word_size());
|
||||
}
|
||||
|
||||
void G1CodeRootSet::free_chunk(G1CodeRootChunk* chunk) {
|
||||
_free_list.return_chunk_at_head(chunk);
|
||||
G1CodeRootSet::_num_chunks_handed_out--;
|
||||
size_t G1CodeRootChunkManager::fl_mem_size() {
|
||||
return _free_list.count() * _free_list.size();
|
||||
}
|
||||
|
||||
void G1CodeRootSet::free_all_chunks(FreeList<G1CodeRootChunk>* list) {
|
||||
G1CodeRootSet::_num_chunks_handed_out -= list->count();
|
||||
void G1CodeRootChunkManager::free_all_chunks(FreeList<G1CodeRootChunk>* list) {
|
||||
_num_chunks_handed_out -= list->count();
|
||||
_free_list.prepend(list);
|
||||
}
|
||||
|
||||
void G1CodeRootSet::purge_chunks(size_t keep_ratio) {
|
||||
size_t keep = G1CodeRootSet::_num_chunks_handed_out * keep_ratio / 100;
|
||||
void G1CodeRootChunkManager::free_chunk(G1CodeRootChunk* chunk) {
|
||||
_free_list.return_chunk_at_head(chunk);
|
||||
_num_chunks_handed_out--;
|
||||
}
|
||||
|
||||
void G1CodeRootChunkManager::purge_chunks(size_t keep_ratio) {
|
||||
size_t keep = _num_chunks_handed_out * keep_ratio / 100;
|
||||
if (keep >= (size_t)_free_list.count()) {
|
||||
return;
|
||||
}
|
||||
@ -88,20 +83,51 @@ void G1CodeRootSet::purge_chunks(size_t keep_ratio) {
|
||||
}
|
||||
}
|
||||
|
||||
size_t G1CodeRootSet::static_mem_size() {
|
||||
return sizeof(_free_list) + sizeof(_num_chunks_handed_out);
|
||||
size_t G1CodeRootChunkManager::static_mem_size() {
|
||||
return sizeof(G1CodeRootChunkManager);
|
||||
}
|
||||
|
||||
size_t G1CodeRootSet::fl_mem_size() {
|
||||
return _free_list.count() * _free_list.size();
|
||||
|
||||
G1CodeRootChunk* G1CodeRootChunkManager::new_chunk() {
|
||||
G1CodeRootChunk* result = _free_list.get_chunk_at_head();
|
||||
if (result == NULL) {
|
||||
result = new G1CodeRootChunk();
|
||||
}
|
||||
_num_chunks_handed_out++;
|
||||
result->reset();
|
||||
return result;
|
||||
}
|
||||
|
||||
void G1CodeRootSet::initialize() {
|
||||
_free_list.initialize();
|
||||
_free_list.set_size(G1CodeRootChunk::word_size());
|
||||
#ifndef PRODUCT
|
||||
|
||||
size_t G1CodeRootChunkManager::num_chunks_handed_out() const {
|
||||
return _num_chunks_handed_out;
|
||||
}
|
||||
|
||||
G1CodeRootSet::G1CodeRootSet() : _list(), _length(0) {
|
||||
size_t G1CodeRootChunkManager::num_free_chunks() const {
|
||||
return (size_t)_free_list.count();
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
G1CodeRootChunkManager G1CodeRootSet::_default_chunk_manager;
|
||||
|
||||
void G1CodeRootSet::purge_chunks(size_t keep_ratio) {
|
||||
_default_chunk_manager.purge_chunks(keep_ratio);
|
||||
}
|
||||
|
||||
size_t G1CodeRootSet::free_chunks_static_mem_size() {
|
||||
return _default_chunk_manager.static_mem_size();
|
||||
}
|
||||
|
||||
size_t G1CodeRootSet::free_chunks_mem_size() {
|
||||
return _default_chunk_manager.fl_mem_size();
|
||||
}
|
||||
|
||||
G1CodeRootSet::G1CodeRootSet(G1CodeRootChunkManager* manager) : _manager(manager), _list(), _length(0) {
|
||||
if (_manager == NULL) {
|
||||
_manager = &_default_chunk_manager;
|
||||
}
|
||||
_list.initialize();
|
||||
_list.set_size(G1CodeRootChunk::word_size());
|
||||
}
|
||||
@ -187,28 +213,38 @@ void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
|
||||
}
|
||||
}
|
||||
|
||||
size_t G1CodeRootSet::static_mem_size() {
|
||||
return sizeof(G1CodeRootSet);
|
||||
}
|
||||
|
||||
size_t G1CodeRootSet::mem_size() {
|
||||
return sizeof(this) + _list.count() * _list.size();
|
||||
return G1CodeRootSet::static_mem_size() + _list.count() * _list.size();
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
void G1CodeRootSet::test() {
|
||||
initialize();
|
||||
G1CodeRootChunkManager mgr;
|
||||
|
||||
assert(_free_list.count() == 0, "Free List must be empty");
|
||||
assert(_num_chunks_handed_out == 0, "No elements must have been handed out yet");
|
||||
assert(mgr.num_chunks_handed_out() == 0, "Must not have handed out chunks yet");
|
||||
|
||||
assert(G1CodeRootChunkManager::static_mem_size() > sizeof(void*),
|
||||
err_msg("The chunk manager's static memory usage seems too small, is only "SIZE_FORMAT" bytes.", G1CodeRootChunkManager::static_mem_size()));
|
||||
|
||||
// The number of chunks that we allocate for purge testing.
|
||||
size_t const num_chunks = 10;
|
||||
|
||||
{
|
||||
G1CodeRootSet set1;
|
||||
G1CodeRootSet set1(&mgr);
|
||||
assert(set1.is_empty(), "Code root set must be initially empty but is not.");
|
||||
|
||||
assert(G1CodeRootSet::static_mem_size() > sizeof(void*),
|
||||
err_msg("The code root set's static memory usage seems too small, is only "SIZE_FORMAT" bytes", G1CodeRootSet::static_mem_size()));
|
||||
|
||||
set1.add((nmethod*)1);
|
||||
assert(_num_chunks_handed_out == 1,
|
||||
assert(mgr.num_chunks_handed_out() == 1,
|
||||
err_msg("Must have allocated and handed out one chunk, but handed out "
|
||||
SIZE_FORMAT" chunks", _num_chunks_handed_out));
|
||||
SIZE_FORMAT" chunks", mgr.num_chunks_handed_out()));
|
||||
assert(set1.length() == 1, err_msg("Added exactly one element, but set contains "
|
||||
SIZE_FORMAT" elements", set1.length()));
|
||||
|
||||
@ -217,19 +253,19 @@ void G1CodeRootSet::test() {
|
||||
for (uint i = 0; i < G1CodeRootChunk::word_size() + 1; i++) {
|
||||
set1.add((nmethod*)1);
|
||||
}
|
||||
assert(_num_chunks_handed_out == 1,
|
||||
assert(mgr.num_chunks_handed_out() == 1,
|
||||
err_msg("Duplicate detection must have prevented allocation of further "
|
||||
"chunks but contains "SIZE_FORMAT, _num_chunks_handed_out));
|
||||
"chunks but allocated "SIZE_FORMAT, mgr.num_chunks_handed_out()));
|
||||
assert(set1.length() == 1,
|
||||
err_msg("Duplicate detection should not have increased the set size but "
|
||||
"is "SIZE_FORMAT, set1.length()));
|
||||
|
||||
size_t num_total_after_add = G1CodeRootChunk::word_size() + 1;
|
||||
for (size_t i = 0; i < num_total_after_add - 1; i++) {
|
||||
set1.add((nmethod*)(2 + i));
|
||||
set1.add((nmethod*)(uintptr_t)(2 + i));
|
||||
}
|
||||
assert(_num_chunks_handed_out > 1,
|
||||
"After adding more code roots, more than one chunks should have been handed out");
|
||||
assert(mgr.num_chunks_handed_out() > 1,
|
||||
"After adding more code roots, more than one additional chunk should have been handed out");
|
||||
assert(set1.length() == num_total_after_add,
|
||||
err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they "
|
||||
"need to be in the set, but there are only "SIZE_FORMAT,
|
||||
@ -242,27 +278,27 @@ void G1CodeRootSet::test() {
|
||||
assert(num_popped == num_total_after_add,
|
||||
err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" "
|
||||
"were added", num_popped, num_total_after_add));
|
||||
assert(_num_chunks_handed_out == 0,
|
||||
assert(mgr.num_chunks_handed_out() == 0,
|
||||
err_msg("After popping all elements, all chunks must have been returned "
|
||||
"but are still "SIZE_FORMAT, _num_chunks_handed_out));
|
||||
"but there are still "SIZE_FORMAT" additional", mgr.num_chunks_handed_out()));
|
||||
|
||||
purge_chunks(0);
|
||||
assert(_free_list.count() == 0,
|
||||
mgr.purge_chunks(0);
|
||||
assert(mgr.num_free_chunks() == 0,
|
||||
err_msg("After purging everything, the free list must be empty but still "
|
||||
"contains "SIZE_FORMAT" chunks", _free_list.count()));
|
||||
"contains "SIZE_FORMAT" chunks", mgr.num_free_chunks()));
|
||||
|
||||
// Add some more handed out chunks.
|
||||
size_t i = 0;
|
||||
while (_num_chunks_handed_out < num_chunks) {
|
||||
while (mgr.num_chunks_handed_out() < num_chunks) {
|
||||
set1.add((nmethod*)i);
|
||||
i++;
|
||||
}
|
||||
|
||||
{
|
||||
// Generate chunks on the free list.
|
||||
G1CodeRootSet set2;
|
||||
G1CodeRootSet set2(&mgr);
|
||||
size_t i = 0;
|
||||
while (_num_chunks_handed_out < num_chunks * 2) {
|
||||
while (mgr.num_chunks_handed_out() < (num_chunks * 2)) {
|
||||
set2.add((nmethod*)i);
|
||||
i++;
|
||||
}
|
||||
@ -270,45 +306,45 @@ void G1CodeRootSet::test() {
// num_chunks elements on the free list.
}

assert(_num_chunks_handed_out == num_chunks,
assert(mgr.num_chunks_handed_out() == num_chunks,
err_msg("Deletion of the second set must have resulted in giving back "
"those, but there is still "SIZE_FORMAT" handed out, expecting "
SIZE_FORMAT, _num_chunks_handed_out, num_chunks));
assert((size_t)_free_list.count() == num_chunks,
"those, but there are still "SIZE_FORMAT" additional handed out, expecting "
SIZE_FORMAT, mgr.num_chunks_handed_out(), num_chunks));
assert(mgr.num_free_chunks() == num_chunks,
err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list "
"but there are only "SIZE_FORMAT, num_chunks, _free_list.count()));
"but there are only "SIZE_FORMAT, num_chunks, mgr.num_free_chunks()));

size_t const test_percentage = 50;
purge_chunks(test_percentage);
assert(_num_chunks_handed_out == num_chunks,
mgr.purge_chunks(test_percentage);
assert(mgr.num_chunks_handed_out() == num_chunks,
err_msg("Purging must not hand out chunks but there are "SIZE_FORMAT,
_num_chunks_handed_out));
assert((size_t)_free_list.count() == (ssize_t)(num_chunks * test_percentage / 100),
mgr.num_chunks_handed_out()));
assert(mgr.num_free_chunks() == (size_t)(mgr.num_chunks_handed_out() * test_percentage / 100),
err_msg("Must have purged "SIZE_FORMAT" percent of "SIZE_FORMAT" chunks"
"but there are "SSIZE_FORMAT, test_percentage, num_chunks,
_free_list.count()));
"but there are "SIZE_FORMAT, test_percentage, num_chunks,
mgr.num_free_chunks()));
// Purge the remainder of the chunks on the free list.
purge_chunks(0);
assert(_free_list.count() == 0, "Free List must be empty");
assert(_num_chunks_handed_out == num_chunks,
mgr.purge_chunks(0);
assert(mgr.num_free_chunks() == 0, "Free List must be empty");
assert(mgr.num_chunks_handed_out() == num_chunks,
err_msg("Expected to be "SIZE_FORMAT" chunks handed out from the first set "
"but there are "SIZE_FORMAT, num_chunks, _num_chunks_handed_out));
"but there are "SIZE_FORMAT, num_chunks, mgr.num_chunks_handed_out()));

// Exit of the scope of the set1 object will call the destructor that generates
// num_chunks additional elements on the free list.
}
}

assert(_num_chunks_handed_out == 0,
assert(mgr.num_chunks_handed_out() == 0,
err_msg("Deletion of the only set must have resulted in no chunks handed "
"out, but there is still "SIZE_FORMAT" handed out", _num_chunks_handed_out));
assert((size_t)_free_list.count() == num_chunks,
"out, but there is still "SIZE_FORMAT" handed out", mgr.num_chunks_handed_out()));
assert(mgr.num_free_chunks() == num_chunks,
err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list "
"but there are only "SSIZE_FORMAT, num_chunks, _free_list.count()));
"but there are only "SIZE_FORMAT, num_chunks, mgr.num_free_chunks()));

// Restore initial state.
purge_chunks(0);
assert(_free_list.count() == 0, "Free List must be empty");
assert(_num_chunks_handed_out == 0, "No elements must have been handed out yet");
mgr.purge_chunks(0);
assert(mgr.num_free_chunks() == 0, "Free List must be empty");
assert(mgr.num_chunks_handed_out() == 0, "No additional elements must have been handed out yet");
}

void TestCodeCacheRemSet_test() {
|
@ -128,19 +128,45 @@ class G1CodeRootChunk : public CHeapObj<mtGC> {
}
};

// Manages free chunks.
class G1CodeRootChunkManager VALUE_OBJ_CLASS_SPEC {
private:
// Global free chunk list management
FreeList<G1CodeRootChunk> _free_list;
// Total number of chunks handed out
size_t _num_chunks_handed_out;

public:
G1CodeRootChunkManager();

G1CodeRootChunk* new_chunk();
void free_chunk(G1CodeRootChunk* chunk);
// Free all elements of the given list.
void free_all_chunks(FreeList<G1CodeRootChunk>* list);

void initialize();
void purge_chunks(size_t keep_ratio);

static size_t static_mem_size();
size_t fl_mem_size();

#ifndef PRODUCT
size_t num_chunks_handed_out() const;
size_t num_free_chunks() const;
#endif
};

// Implements storage for a set of code roots.
// All methods that modify the set are not thread-safe except if otherwise noted.
class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
private:
// Global free chunk list management
static FreeList<G1CodeRootChunk> _free_list;
// Total number of chunks handed out
static size_t _num_chunks_handed_out;
// Global default free chunk manager instance.
static G1CodeRootChunkManager _default_chunk_manager;

static G1CodeRootChunk* new_chunk();
static void free_chunk(G1CodeRootChunk* chunk);
G1CodeRootChunk* new_chunk() { return _manager->new_chunk(); }
void free_chunk(G1CodeRootChunk* chunk) { _manager->free_chunk(chunk); }
// Free all elements of the given list.
static void free_all_chunks(FreeList<G1CodeRootChunk>* list);
void free_all_chunks(FreeList<G1CodeRootChunk>* list) { _manager->free_all_chunks(list); }

// Return the chunk that contains the given nmethod, NULL otherwise.
// Scans the list of chunks backwards, as this method is used to add new
@ -150,16 +176,18 @@ class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {

size_t _length;
FreeList<G1CodeRootChunk> _list;
G1CodeRootChunkManager* _manager;

public:
G1CodeRootSet();
// If an instance is initialized with a chunk manager of NULL, use the global
// default one.
G1CodeRootSet(G1CodeRootChunkManager* manager = NULL);
~G1CodeRootSet();

static void initialize();
static void purge_chunks(size_t keep_ratio);

static size_t static_mem_size();
static size_t fl_mem_size();
static size_t free_chunks_static_mem_size();
static size_t free_chunks_mem_size();

// Search for the code blob from the recently allocated ones to find duplicates more quickly, as this
// method is likely to be repeatedly called with the same nmethod.
@ -179,6 +207,8 @@ class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
// Length in elements
size_t length() const { return _length; }

// Static data memory size in bytes of this set.
static size_t static_mem_size();
// Memory size in bytes taken by this set.
size_t mem_size();

|
@ -57,6 +57,7 @@
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "oops/oop.pcgc.inline.hpp"
|
||||
#include "runtime/vmThread.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/ticks.hpp"
|
||||
|
||||
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
|
||||
@ -92,56 +93,54 @@ size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
|
||||
// Local to this file.
|
||||
|
||||
class RefineCardTableEntryClosure: public CardTableEntryClosure {
|
||||
SuspendibleThreadSet* _sts;
|
||||
G1RemSet* _g1rs;
|
||||
ConcurrentG1Refine* _cg1r;
|
||||
bool _concurrent;
|
||||
public:
|
||||
RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
|
||||
G1RemSet* g1rs,
|
||||
ConcurrentG1Refine* cg1r) :
|
||||
_sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
|
||||
{}
|
||||
RefineCardTableEntryClosure() : _concurrent(true) { }
|
||||
|
||||
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
|
||||
bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
|
||||
bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
|
||||
// This path is executed by the concurrent refine or mutator threads,
|
||||
// concurrently, and so we do not care if card_ptr contains references
|
||||
// that point into the collection set.
|
||||
assert(!oops_into_cset, "should be");
|
||||
|
||||
if (_concurrent && _sts->should_yield()) {
|
||||
if (_concurrent && SuspendibleThreadSet::should_yield()) {
|
||||
// Caller will actually yield.
|
||||
return false;
|
||||
}
|
||||
// Otherwise, we finished successfully; return true.
|
||||
return true;
|
||||
}
|
||||
|
||||
void set_concurrent(bool b) { _concurrent = b; }
|
||||
};
|
||||
|
||||
|
||||
class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
|
||||
int _calls;
|
||||
G1CollectedHeap* _g1h;
|
||||
size_t _num_processed;
|
||||
CardTableModRefBS* _ctbs;
|
||||
int _histo[256];
|
||||
public:
|
||||
|
||||
public:
|
||||
ClearLoggedCardTableEntryClosure() :
|
||||
_calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set())
|
||||
_num_processed(0), _ctbs(G1CollectedHeap::heap()->g1_barrier_set())
|
||||
{
|
||||
for (int i = 0; i < 256; i++) _histo[i] = 0;
|
||||
}
|
||||
|
||||
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
|
||||
if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
|
||||
_calls++;
|
||||
unsigned char* ujb = (unsigned char*)card_ptr;
|
||||
int ind = (int)(*ujb);
|
||||
_histo[ind]++;
|
||||
*card_ptr = -1;
|
||||
}
|
||||
unsigned char* ujb = (unsigned char*)card_ptr;
|
||||
int ind = (int)(*ujb);
|
||||
_histo[ind]++;
|
||||
|
||||
*card_ptr = (jbyte)CardTableModRefBS::clean_card_val();
|
||||
_num_processed++;
|
||||
|
||||
return true;
|
||||
}
|
||||
int calls() { return _calls; }
|
||||
|
||||
size_t num_processed() { return _num_processed; }
|
||||
|
||||
void print_histo() {
|
||||
gclog_or_tty->print_cr("Card table value histogram:");
|
||||
for (int i = 0; i < 256; i++) {
|
||||
@ -152,22 +151,20 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
|
||||
int _calls;
|
||||
G1CollectedHeap* _g1h;
|
||||
CardTableModRefBS* _ctbs;
|
||||
public:
|
||||
RedirtyLoggedCardTableEntryClosure() :
|
||||
_calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}
|
||||
class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
|
||||
private:
|
||||
size_t _num_processed;
|
||||
|
||||
public:
|
||||
RedirtyLoggedCardTableEntryClosure() : CardTableEntryClosure(), _num_processed(0) { }
|
||||
|
||||
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
|
||||
if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
|
||||
_calls++;
|
||||
*card_ptr = 0;
|
||||
}
|
||||
*card_ptr = CardTableModRefBS::dirty_card_val();
|
||||
_num_processed++;
|
||||
return true;
|
||||
}
|
||||
int calls() { return _calls; }
|
||||
|
||||
size_t num_processed() const { return _num_processed; }
|
||||
};
|
||||
|
||||
YoungList::YoungList(G1CollectedHeap* g1h) :
|
||||
@ -431,6 +428,9 @@ HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
|
||||
void G1CollectedHeap::stop_conc_gc_threads() {
|
||||
_cg1r->stop();
|
||||
_cmThread->stop();
|
||||
if (G1StringDedup::is_enabled()) {
|
||||
G1StringDedup::stop();
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
@ -445,24 +445,18 @@ void G1CollectedHeap::stop_conc_gc_threads() {
|
||||
// implementation of is_scavengable() for G1 will indicate that
|
||||
// all nmethods must be scanned during a partial collection.
|
||||
bool G1CollectedHeap::is_in_partial_collection(const void* p) {
|
||||
HeapRegion* hr = heap_region_containing(p);
|
||||
return hr != NULL && hr->in_collection_set();
|
||||
if (p == NULL) {
|
||||
return false;
|
||||
}
|
||||
return heap_region_containing(p)->in_collection_set();
|
||||
}
|
||||
#endif
|
||||
|
||||
// Returns true if the reference points to an object that
|
||||
// can move in an incremental collection.
|
||||
bool G1CollectedHeap::is_scavengable(const void* p) {
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
G1CollectorPolicy* g1p = g1h->g1_policy();
|
||||
HeapRegion* hr = heap_region_containing(p);
|
||||
if (hr == NULL) {
|
||||
// null
|
||||
assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p));
|
||||
return false;
|
||||
} else {
|
||||
return !hr->isHumongous();
|
||||
}
|
||||
return !hr->isHumongous();
|
||||
}
|
||||
|
||||
void G1CollectedHeap::check_ct_logs_at_safepoint() {
|
||||
@ -476,9 +470,8 @@ void G1CollectedHeap::check_ct_logs_at_safepoint() {
|
||||
|
||||
// First clear the logged cards.
|
||||
ClearLoggedCardTableEntryClosure clear;
|
||||
dcqs.set_closure(&clear);
|
||||
dcqs.apply_closure_to_all_completed_buffers();
|
||||
dcqs.iterate_closure_all_threads(false);
|
||||
dcqs.apply_closure_to_all_completed_buffers(&clear);
|
||||
dcqs.iterate_closure_all_threads(&clear, false);
|
||||
clear.print_histo();
|
||||
|
||||
// Now ensure that there's no dirty cards.
|
||||
@ -491,13 +484,13 @@ void G1CollectedHeap::check_ct_logs_at_safepoint() {
|
||||
guarantee(count2.n() == 0, "Card table should be clean.");
|
||||
|
||||
RedirtyLoggedCardTableEntryClosure redirty;
|
||||
JavaThread::dirty_card_queue_set().set_closure(&redirty);
|
||||
dcqs.apply_closure_to_all_completed_buffers();
|
||||
dcqs.iterate_closure_all_threads(false);
|
||||
dcqs.apply_closure_to_all_completed_buffers(&redirty);
|
||||
dcqs.iterate_closure_all_threads(&redirty, false);
|
||||
gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
|
||||
clear.calls(), orig_count);
|
||||
guarantee(redirty.calls() == clear.calls(),
|
||||
"Or else mechanism is broken.");
|
||||
clear.num_processed(), orig_count);
|
||||
guarantee(redirty.num_processed() == clear.num_processed(),
|
||||
err_msg("Redirtied "SIZE_FORMAT" cards, bug cleared "SIZE_FORMAT,
|
||||
redirty.num_processed(), clear.num_processed()));
|
||||
|
||||
CountNonCleanMemRegionClosure count3(this);
|
||||
ct_bs->mod_card_iterate(&count3);
|
||||
@ -506,8 +499,6 @@ void G1CollectedHeap::check_ct_logs_at_safepoint() {
|
||||
orig_count, count3.n());
|
||||
guarantee(count3.n() >= orig_count, "Should have restored them all.");
|
||||
}
|
||||
|
||||
JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
|
||||
}
|
||||
|
||||
// Private class members.
|
||||
@ -1512,9 +1503,6 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
|
||||
assert(g1_policy()->collection_set() == NULL, "must be");
|
||||
g1_policy()->start_incremental_cset_building();
|
||||
|
||||
// Clear the _cset_fast_test bitmap in anticipation of adding
|
||||
// regions to the incremental collection set for the next
|
||||
// evacuation pause.
|
||||
clear_cset_fast_test();
|
||||
|
||||
init_mutator_alloc_region();
|
||||
@ -1934,8 +1922,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
|
||||
_old_marking_cycles_started(0),
|
||||
_old_marking_cycles_completed(0),
|
||||
_concurrent_cycle_started(false),
|
||||
_in_cset_fast_test(NULL),
|
||||
_in_cset_fast_test_base(NULL),
|
||||
_in_cset_fast_test(),
|
||||
_dirty_cards_region_list(NULL),
|
||||
_worker_cset_start_region(NULL),
|
||||
_worker_cset_start_region_time_stamp(NULL),
|
||||
@ -2005,7 +1992,9 @@ jint G1CollectedHeap::initialize() {
|
||||
Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
|
||||
Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
|
||||
|
||||
_cg1r = new ConcurrentG1Refine(this);
|
||||
_refine_cte_cl = new RefineCardTableEntryClosure();
|
||||
|
||||
_cg1r = new ConcurrentG1Refine(this, _refine_cte_cl);
|
||||
|
||||
// Reserve the maximum.
|
||||
|
||||
@ -2077,20 +2066,7 @@ jint G1CollectedHeap::initialize() {
|
||||
|
||||
_g1h = this;
|
||||
|
||||
_in_cset_fast_test_length = max_regions();
|
||||
_in_cset_fast_test_base =
|
||||
NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
|
||||
|
||||
// We're biasing _in_cset_fast_test to avoid subtracting the
|
||||
// beginning of the heap every time we want to index; basically
|
||||
// it's the same with what we do with the card table.
|
||||
_in_cset_fast_test = _in_cset_fast_test_base -
|
||||
((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
|
||||
|
||||
// Clear the _cset_fast_test bitmap in anticipation of adding
|
||||
// regions to the incremental collection set for the first
|
||||
// evacuation pause.
|
||||
clear_cset_fast_test();
|
||||
_in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
|
||||
|
||||
// Create the ConcurrentMark data structure and thread.
|
||||
// (Must do this late, so that "max_regions" is defined.)
|
||||
@ -2113,25 +2089,21 @@ jint G1CollectedHeap::initialize() {
|
||||
// Perform any initialization actions delegated to the policy.
|
||||
g1_policy()->init();
|
||||
|
||||
_refine_cte_cl =
|
||||
new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
|
||||
g1_rem_set(),
|
||||
concurrent_g1_refine());
|
||||
JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
|
||||
|
||||
JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
|
||||
SATB_Q_FL_lock,
|
||||
G1SATBProcessCompletedThreshold,
|
||||
Shared_SATB_Q_lock);
|
||||
|
||||
JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
|
||||
JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
|
||||
DirtyCardQ_CBL_mon,
|
||||
DirtyCardQ_FL_lock,
|
||||
concurrent_g1_refine()->yellow_zone(),
|
||||
concurrent_g1_refine()->red_zone(),
|
||||
Shared_DirtyCardQ_lock);
|
||||
|
||||
if (G1DeferredRSUpdate) {
|
||||
dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
|
||||
dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
|
||||
DirtyCardQ_CBL_mon,
|
||||
DirtyCardQ_FL_lock,
|
||||
-1, // never trigger processing
|
||||
-1, // no limit on length
|
||||
@ -2141,7 +2113,8 @@ jint G1CollectedHeap::initialize() {
|
||||
|
||||
// Initialize the card queue set used to hold cards containing
|
||||
// references into the collection set.
|
||||
_into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
|
||||
_into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
|
||||
DirtyCardQ_CBL_mon,
|
||||
DirtyCardQ_FL_lock,
|
||||
-1, // never trigger processing
|
||||
-1, // no limit on length
|
||||
@ -2178,6 +2151,23 @@ jint G1CollectedHeap::initialize() {
|
||||
return JNI_OK;
|
||||
}
|
||||
|
||||
void G1CollectedHeap::stop() {
|
||||
#if 0
|
||||
// Stopping concurrent worker threads is currently disabled until
|
||||
// some bugs in concurrent mark have been resolved. Without fixing
|
||||
// those bugs first we risk hanging during VM exit when trying to
|
||||
// stop these threads.
|
||||
|
||||
// Abort any ongoing concurrent root region scanning and stop all
|
||||
// concurrent threads. We do this to make sure these threads do
|
||||
// not continue to execute and access resources (e.g. gclog_or_tty)
|
||||
// that are destroyed during shutdown.
|
||||
_cm->root_regions()->abort();
|
||||
_cm->root_regions()->wait_until_scan_finished();
|
||||
stop_conc_gc_threads();
|
||||
#endif
|
||||
}
|
||||
|
||||
size_t G1CollectedHeap::conservative_max_heap_alignment() {
|
||||
return HeapRegion::max_region_size();
|
||||
}
|
||||
@ -2963,21 +2953,16 @@ CompactibleSpace* G1CollectedHeap::first_compactible_space() {
|
||||
|
||||
|
||||
Space* G1CollectedHeap::space_containing(const void* addr) const {
|
||||
Space* res = heap_region_containing(addr);
|
||||
return res;
|
||||
return heap_region_containing(addr);
|
||||
}
|
||||
|
||||
HeapWord* G1CollectedHeap::block_start(const void* addr) const {
|
||||
Space* sp = space_containing(addr);
|
||||
if (sp != NULL) {
|
||||
return sp->block_start(addr);
|
||||
}
|
||||
return NULL;
|
||||
return sp->block_start(addr);
|
||||
}
|
||||
|
||||
size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
|
||||
Space* sp = space_containing(addr);
|
||||
assert(sp != NULL, "block_size of address outside of heap");
|
||||
return sp->block_size(addr);
|
||||
}
|
||||
|
||||
@ -3212,7 +3197,7 @@ class VerifyKlassClosure: public KlassClosure {
|
||||
_young_ref_counter_closure.reset_count();
|
||||
k->oops_do(&_young_ref_counter_closure);
|
||||
if (_young_ref_counter_closure.count() > 0) {
|
||||
guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
|
||||
guarantee(k->has_modified_oops(), err_msg("Klass " PTR_FORMAT ", has young refs but is not dirty.", k));
|
||||
}
|
||||
}
|
||||
};
|
||||
@ -3296,7 +3281,7 @@ public:
|
||||
int *val;
|
||||
for (cur = start; cur < end; cur++) {
|
||||
val = (int *) cur;
|
||||
gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
|
||||
gclog_or_tty->print("\t "PTR_FORMAT":%d\n", val, *val);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -4125,9 +4110,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
// Start a new incremental collection set for the next pause.
|
||||
g1_policy()->start_incremental_cset_building();
|
||||
|
||||
// Clear the _cset_fast_test bitmap in anticipation of adding
|
||||
// regions to the incremental collection set for the next
|
||||
// evacuation pause.
|
||||
clear_cset_fast_test();
|
||||
|
||||
_young_list->reset_sampled_info();
|
||||
@ -4304,7 +4286,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
// this point does not assume that we are the only GC thread
|
||||
// running. Note: of course, the actual marking work will
|
||||
// not start until the safepoint itself is released in
|
||||
// ConcurrentGCThread::safepoint_desynchronize().
|
||||
// SuspendibleThreadSet::desynchronize().
|
||||
doConcurrentMark();
|
||||
}
|
||||
|
||||
@ -4571,7 +4553,7 @@ HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
|
||||
}
|
||||
|
||||
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
|
||||
ParGCAllocBuffer(gclab_word_size), _retired(false) { }
|
||||
ParGCAllocBuffer(gclab_word_size), _retired(true) { }
|
||||
|
||||
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
|
||||
: _g1h(g1h),
|
||||
@ -4694,30 +4676,19 @@ G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
|
||||
_worker_id(par_scan_state->queue_num()) { }
|
||||
|
||||
void G1ParCopyHelper::mark_object(oop obj) {
|
||||
#ifdef ASSERT
|
||||
HeapRegion* hr = _g1->heap_region_containing(obj);
|
||||
assert(hr != NULL, "sanity");
|
||||
assert(!hr->in_collection_set(), "should not mark objects in the CSet");
|
||||
#endif // ASSERT
|
||||
assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
|
||||
|
||||
// We know that the object is not moving so it's safe to read its size.
|
||||
_cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
|
||||
}
|
||||
|
||||
void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
|
||||
#ifdef ASSERT
|
||||
assert(from_obj->is_forwarded(), "from obj should be forwarded");
|
||||
assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
|
||||
assert(from_obj != to_obj, "should not be self-forwarded");
|
||||
|
||||
HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
|
||||
assert(from_hr != NULL, "sanity");
|
||||
assert(from_hr->in_collection_set(), "from obj should be in the CSet");
|
||||
|
||||
HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
|
||||
assert(to_hr != NULL, "sanity");
|
||||
assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
|
||||
#endif // ASSERT
|
||||
assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
|
||||
assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
|
||||
|
||||
// The object might be in the process of being copied by another
|
||||
// worker so we cannot trust that its to-space image is
|
||||
@ -4935,8 +4906,6 @@ void G1ParEvacuateFollowersClosure::do_void() {
|
||||
pss->trim_queue();
|
||||
}
|
||||
} while (!offer_termination());
|
||||
|
||||
pss->retire_alloc_buffers();
|
||||
}
|
||||
|
||||
class G1KlassScanClosure : public KlassClosure {
|
||||
@ -5273,11 +5242,25 @@ void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive
|
||||
}
|
||||
}
|
||||
|
||||
class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
|
||||
public:
|
||||
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
|
||||
*card_ptr = CardTableModRefBS::dirty_card_val();
|
||||
return true;
|
||||
class G1RedirtyLoggedCardsTask : public AbstractGangTask {
|
||||
private:
|
||||
DirtyCardQueueSet* _queue;
|
||||
public:
|
||||
G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
|
||||
|
||||
virtual void work(uint worker_id) {
|
||||
double start_time = os::elapsedTime();
|
||||
|
||||
RedirtyLoggedCardTableEntryClosure cl;
|
||||
if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
|
||||
_queue->par_apply_closure_to_all_completed_buffers(&cl);
|
||||
} else {
|
||||
_queue->apply_closure_to_all_completed_buffers(&cl);
|
||||
}
|
||||
|
||||
G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
|
||||
timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
|
||||
timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
|
||||
}
|
||||
};
|
||||
|
||||
@ -5285,9 +5268,18 @@ void G1CollectedHeap::redirty_logged_cards() {
|
||||
guarantee(G1DeferredRSUpdate, "Must only be called when using deferred RS updates.");
|
||||
double redirty_logged_cards_start = os::elapsedTime();
|
||||
|
||||
RedirtyLoggedCardTableEntryFastClosure redirty;
|
||||
dirty_card_queue_set().set_closure(&redirty);
|
||||
dirty_card_queue_set().apply_closure_to_all_completed_buffers();
|
||||
uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
|
||||
_g1h->workers()->active_workers() : 1);
|
||||
|
||||
G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
|
||||
dirty_card_queue_set().reset_for_par_iteration();
|
||||
if (use_parallel_gc_threads()) {
|
||||
set_par_threads(n_workers);
|
||||
workers()->run_task(&redirty_task);
|
||||
set_par_threads(0);
|
||||
} else {
|
||||
redirty_task.work(0);
|
||||
}
|
||||
|
||||
DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
|
||||
dcq.merge_bufferlists(&dirty_card_queue_set());
|
||||
@ -5762,10 +5754,8 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
|
||||
}
|
||||
|
||||
_gc_tracer_stw->report_gc_reference_stats(stats);
|
||||
// We have completed copying any necessary live referent objects
|
||||
// (that were not copied during the actual pause) so we can
|
||||
// retire any active alloc buffers
|
||||
pss.retire_alloc_buffers();
|
||||
|
||||
// We have completed copying any necessary live referent objects.
|
||||
assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
|
||||
|
||||
double ref_proc_time = os::elapsedTime() - ref_proc_start;
|
||||
@ -6456,11 +6446,7 @@ void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
|
||||
|
||||
bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
|
||||
HeapRegion* hr = heap_region_containing(p);
|
||||
if (hr == NULL) {
|
||||
return false;
|
||||
} else {
|
||||
return hr->is_in(p);
|
||||
}
|
||||
return hr->is_in(p);
|
||||
}
|
||||
|
||||
// Methods for the mutator alloc region
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include "gc_implementation/g1/concurrentMark.hpp"
|
||||
#include "gc_implementation/g1/evacuationInfo.hpp"
|
||||
#include "gc_implementation/g1/g1AllocRegion.hpp"
|
||||
#include "gc_implementation/g1/g1BiasedArray.hpp"
|
||||
#include "gc_implementation/g1/g1HRPrinter.hpp"
|
||||
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.hpp"
|
||||
@ -197,6 +198,16 @@ public:
|
||||
bool do_object_b(oop p);
|
||||
};
|
||||
|
||||
// Instances of this class are used for quick tests on whether a reference points
|
||||
// into the collection set. Each of the array's elements denotes whether the
|
||||
// corresponding region is in the collection set.
|
||||
class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<bool> {
|
||||
protected:
|
||||
bool default_value() const { return false; }
|
||||
public:
|
||||
void clear() { G1BiasedMappedArray<bool>::clear(); }
|
||||
};
|
||||
|
||||
class RefineCardTableEntryClosure;
|
||||
|
||||
class G1CollectedHeap : public SharedHeap {
|
||||
@ -353,26 +364,10 @@ private:
|
||||
// than the current allocation region.
|
||||
size_t _summary_bytes_used;
|
||||
|
||||
// This is used for a quick test on whether a reference points into
|
||||
// the collection set or not. Basically, we have an array, with one
|
||||
// byte per region, and that byte denotes whether the corresponding
|
||||
// region is in the collection set or not. The entry corresponding
|
||||
// the bottom of the heap, i.e., region 0, is pointed to by
|
||||
// _in_cset_fast_test_base. The _in_cset_fast_test field has been
|
||||
// biased so that it actually points to address 0 of the address
|
||||
// space, to make the test as fast as possible (we can simply shift
|
||||
// the address to address into it, instead of having to subtract the
|
||||
// bottom of the heap from the address before shifting it; basically
|
||||
// it works in the same way the card table works).
|
||||
bool* _in_cset_fast_test;
|
||||
|
||||
// The allocated array used for the fast test on whether a reference
|
||||
// points into the collection set or not. This field is also used to
|
||||
// free the array.
|
||||
bool* _in_cset_fast_test_base;
|
||||
|
||||
// The length of the _in_cset_fast_test_base array.
|
||||
uint _in_cset_fast_test_length;
|
||||
// This array is used for a quick test on whether a reference points into
|
||||
// the collection set or not. Each of the array's elements denotes whether the
|
||||
// corresponding region is in the collection set or not.
|
||||
G1FastCSetBiasedMappedArray _in_cset_fast_test;
|
||||
|
||||
volatile unsigned _gc_time_stamp;
|
||||
|
||||
@ -695,12 +690,7 @@ public:
|
||||
// We register a region with the fast "in collection set" test. We
|
||||
// simply set to true the array slot corresponding to this region.
|
||||
void register_region_with_in_cset_fast_test(HeapRegion* r) {
|
||||
assert(_in_cset_fast_test_base != NULL, "sanity");
|
||||
assert(r->in_collection_set(), "invariant");
|
||||
uint index = r->hrs_index();
|
||||
assert(index < _in_cset_fast_test_length, "invariant");
|
||||
assert(!_in_cset_fast_test_base[index], "invariant");
|
||||
_in_cset_fast_test_base[index] = true;
|
||||
_in_cset_fast_test.set_by_index(r->hrs_index(), true);
|
||||
}
|
||||
|
||||
// This is a fast test on whether a reference points into the
|
||||
@ -709,9 +699,7 @@ public:
|
||||
inline bool in_cset_fast_test(oop obj);
|
||||
|
||||
void clear_cset_fast_test() {
|
||||
assert(_in_cset_fast_test_base != NULL, "sanity");
|
||||
memset(_in_cset_fast_test_base, false,
|
||||
(size_t) _in_cset_fast_test_length * sizeof(bool));
|
||||
_in_cset_fast_test.clear();
|
||||
}
|
||||
|
||||
// This is called at the start of either a concurrent cycle or a Full
|
||||
@ -1077,6 +1065,8 @@ public:
|
||||
// specified by the policy object.
|
||||
jint initialize();
|
||||
|
||||
virtual void stop();
|
||||
|
||||
// Return the (conservative) maximum heap alignment for any G1 heap
|
||||
static size_t conservative_max_heap_alignment();
|
||||
|
||||
@ -1390,17 +1380,15 @@ public:
|
||||
// space containing a given address, or else returns NULL.
|
||||
virtual Space* space_containing(const void* addr) const;
|
||||
|
||||
// A G1CollectedHeap will contain some number of heap regions. This
|
||||
// finds the region containing a given address, or else returns NULL.
|
||||
template <class T>
|
||||
inline HeapRegion* heap_region_containing(const T addr) const;
|
||||
|
||||
// Like the above, but requires "addr" to be in the heap (to avoid a
|
||||
// null-check), and unlike the above, may return an continuing humongous
|
||||
// region.
|
||||
// Returns the HeapRegion that contains addr. addr must not be NULL.
|
||||
template <class T>
|
||||
inline HeapRegion* heap_region_containing_raw(const T addr) const;
|
||||
|
||||
// Returns the HeapRegion that contains addr. addr must not be NULL.
|
||||
// If addr is within a humongous continues region, it returns its humongous start region.
|
||||
template <class T>
|
||||
inline HeapRegion* heap_region_containing(const T addr) const;
|
||||
|
||||
// A CollectedHeap is divided into a dense sequence of "blocks"; that is,
|
||||
// each address in the (reserved) heap is a member of exactly
|
||||
// one block. The defining characteristic of a block is that it is
|
||||
@ -1542,7 +1530,6 @@ public:
|
||||
// the region to which the object belongs. An object is dead
|
||||
// iff a) it was not allocated since the last mark and b) it
|
||||
// is not marked.
|
||||
|
||||
bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
|
||||
return
|
||||
!hr->obj_allocated_since_prev_marking(obj) &&
|
||||
@ -1552,7 +1539,6 @@ public:
|
||||
// This function returns true when an object has been
|
||||
// around since the previous marking and hasn't yet
|
||||
// been marked during this marking.
|
||||
|
||||
bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
|
||||
return
|
||||
!hr->obj_allocated_since_next_marking(obj) &&
|
||||
@ -1698,15 +1684,19 @@ private:
|
||||
|
||||
public:
|
||||
G1ParGCAllocBuffer(size_t gclab_word_size);
|
||||
virtual ~G1ParGCAllocBuffer() {
|
||||
guarantee(_retired, "Allocation buffer has not been retired");
|
||||
}
|
||||
|
||||
void set_buf(HeapWord* buf) {
|
||||
virtual void set_buf(HeapWord* buf) {
|
||||
ParGCAllocBuffer::set_buf(buf);
|
||||
_retired = false;
|
||||
}
|
||||
|
||||
void retire(bool end_of_gc, bool retain) {
|
||||
if (_retired)
|
||||
virtual void retire(bool end_of_gc, bool retain) {
|
||||
if (_retired) {
|
||||
return;
|
||||
}
|
||||
ParGCAllocBuffer::retire(end_of_gc, retain);
|
||||
_retired = true;
|
||||
}
|
||||
@ -1776,6 +1766,7 @@ public:
|
||||
G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
|
||||
|
||||
~G1ParScanThreadState() {
|
||||
retire_alloc_buffers();
|
||||
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
|
||||
}
|
||||
|
||||
@ -1886,6 +1877,7 @@ public:
|
||||
return _surviving_young_words;
|
||||
}
|
||||
|
||||
private:
|
||||
void retire_alloc_buffers() {
|
||||
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
|
||||
size_t waste = _alloc_buffers[ap]->words_remaining();
|
||||
@ -1895,8 +1887,8 @@ public:
|
||||
false /* retain */);
|
||||
}
|
||||
}
|
||||
private:
|
||||
#define G1_PARTIAL_ARRAY_MASK 0x2
|
||||
|
||||
#define G1_PARTIAL_ARRAY_MASK 0x2
|
||||
|
||||
inline bool has_partial_array_mask(oop* ref) const {
|
||||
return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
|
||||
|
@ -42,21 +42,22 @@ inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at
|
||||
|
||||
template <class T>
|
||||
inline HeapRegion*
|
||||
G1CollectedHeap::heap_region_containing(const T addr) const {
|
||||
HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
|
||||
// hr can be null if addr in perm_gen
|
||||
if (hr != NULL && hr->continuesHumongous()) {
|
||||
hr = hr->humongous_start_region();
|
||||
}
|
||||
return hr;
|
||||
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
|
||||
assert(addr != NULL, "invariant");
|
||||
assert(_g1_reserved.contains((const void*) addr),
|
||||
err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
|
||||
(void*)addr, _g1_reserved.start(), _g1_reserved.end()));
|
||||
return _hrs.addr_to_region((HeapWord*) addr);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
inline HeapRegion*
|
||||
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
|
||||
assert(_g1_reserved.contains((const void*) addr), "invariant");
|
||||
HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
|
||||
return res;
|
||||
G1CollectedHeap::heap_region_containing(const T addr) const {
|
||||
HeapRegion* hr = heap_region_containing_raw(addr);
|
||||
if (hr->continuesHumongous()) {
|
||||
return hr->humongous_start_region();
|
||||
}
|
||||
return hr;
|
||||
}
|
||||
|
||||
inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
|
||||
@ -134,8 +135,7 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
|
||||
// have to keep calling heap_region_containing_raw() in the
|
||||
// asserts below.
|
||||
DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
|
||||
assert(containing_hr != NULL && start != NULL && word_size > 0,
|
||||
"pre-condition");
|
||||
assert(word_size > 0, "pre-condition");
|
||||
assert(containing_hr->is_in(start), "it should contain start");
|
||||
assert(containing_hr->is_young(), "it should be young");
|
||||
assert(!containing_hr->isHumongous(), "it should not be humongous");
|
||||
@ -164,12 +164,7 @@ inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
|
||||
// collection set or not. Assume that the reference
|
||||
// points into the heap.
|
||||
inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
|
||||
assert(_in_cset_fast_test != NULL, "sanity");
|
||||
assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
|
||||
// no need to subtract the bottom of the heap from obj,
|
||||
// _in_cset_fast_test is biased
|
||||
uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
|
||||
bool ret = _in_cset_fast_test[index];
|
||||
bool ret = _in_cset_fast_test.get_by_address((HeapWord*)obj);
|
||||
// let's make sure the result is consistent with what the slower
|
||||
// test returns
|
||||
assert( ret || !obj_in_cs(obj), "sanity");
|
||||
@ -251,8 +246,10 @@ inline void G1CollectedHeap::reset_evacuation_should_fail() {
|
||||
#endif // #ifndef PRODUCT
|
||||
|
||||
inline bool G1CollectedHeap::is_in_young(const oop obj) {
|
||||
HeapRegion* hr = heap_region_containing(obj);
|
||||
return hr != NULL && hr->is_young();
|
||||
if (obj == NULL) {
|
||||
return false;
|
||||
}
|
||||
return heap_region_containing(obj)->is_young();
|
||||
}
|
||||
|
||||
// We don't need barriers for initializing stores to objects
|
||||
@ -265,21 +262,17 @@ inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
|
||||
}
|
||||
|
||||
inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
|
||||
const HeapRegion* hr = heap_region_containing(obj);
|
||||
if (hr == NULL) {
|
||||
if (obj == NULL) return false;
|
||||
else return true;
|
||||
if (obj == NULL) {
|
||||
return false;
|
||||
}
|
||||
else return is_obj_dead(obj, hr);
|
||||
return is_obj_dead(obj, heap_region_containing(obj));
|
||||
}
|
||||
|
||||
inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
|
||||
const HeapRegion* hr = heap_region_containing(obj);
|
||||
if (hr == NULL) {
|
||||
if (obj == NULL) return false;
|
||||
else return true;
|
||||
if (obj == NULL) {
|
||||
return false;
|
||||
}
|
||||
else return is_obj_ill(obj, hr);
|
||||
return is_obj_ill(obj, heap_region_containing(obj));
|
||||
}
|
||||
|
||||
template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
|
||||
|
@ -170,6 +170,8 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
|
||||
_last_gc_worker_end_times_ms(_max_gc_threads, "%.1lf", false),
|
||||
_last_gc_worker_times_ms(_max_gc_threads, "%.1lf"),
|
||||
_last_gc_worker_other_times_ms(_max_gc_threads, "%.1lf"),
|
||||
_last_redirty_logged_cards_time_ms(_max_gc_threads, "%.1lf"),
|
||||
_last_redirty_logged_cards_processed_cards(_max_gc_threads, SIZE_FORMAT),
|
||||
_cur_string_dedup_queue_fixup_worker_times_ms(_max_gc_threads, "%.1lf"),
|
||||
_cur_string_dedup_table_fixup_worker_times_ms(_max_gc_threads, "%.1lf")
|
||||
{
|
||||
@ -195,6 +197,10 @@ void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
|
||||
_last_gc_worker_end_times_ms.reset();
|
||||
_last_gc_worker_times_ms.reset();
|
||||
_last_gc_worker_other_times_ms.reset();
|
||||
|
||||
_last_redirty_logged_cards_time_ms.reset();
|
||||
_last_redirty_logged_cards_processed_cards.reset();
|
||||
|
||||
}
|
||||
|
||||
void G1GCPhaseTimes::note_gc_end() {
|
||||
@ -230,6 +236,9 @@ void G1GCPhaseTimes::note_gc_end() {
|
||||
|
||||
_last_gc_worker_times_ms.verify();
|
||||
_last_gc_worker_other_times_ms.verify();
|
||||
|
||||
_last_redirty_logged_cards_time_ms.verify();
|
||||
_last_redirty_logged_cards_processed_cards.verify();
|
||||
}
|
||||
|
||||
void G1GCPhaseTimes::note_string_dedup_fixup_start() {
|
||||
@ -349,6 +358,10 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
|
||||
print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
|
||||
if (G1DeferredRSUpdate) {
|
||||
print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
|
||||
if (G1Log::finest()) {
|
||||
_last_redirty_logged_cards_time_ms.print(3, "Parallel Redirty");
|
||||
_last_redirty_logged_cards_processed_cards.print(3, "Redirtied Cards");
|
||||
}
|
||||
}
|
||||
print_stats(2, "Free CSet",
|
||||
(_recorded_young_free_cset_time_ms +
|
||||
|
@ -151,6 +151,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
double _recorded_young_cset_choice_time_ms;
|
||||
double _recorded_non_young_cset_choice_time_ms;
|
||||
|
||||
WorkerDataArray<double> _last_redirty_logged_cards_time_ms;
|
||||
WorkerDataArray<size_t> _last_redirty_logged_cards_processed_cards;
|
||||
double _recorded_redirty_logged_cards_time_ms;
|
||||
|
||||
double _recorded_young_free_cset_time_ms;
|
||||
@ -293,6 +295,14 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
_recorded_non_young_cset_choice_time_ms = time_ms;
|
||||
}
|
||||
|
||||
void record_redirty_logged_cards_time_ms(uint worker_i, double time_ms) {
|
||||
_last_redirty_logged_cards_time_ms.set(worker_i, time_ms);
|
||||
}
|
||||
|
||||
void record_redirty_logged_cards_processed_cards(uint worker_i, size_t processed_buffers) {
|
||||
_last_redirty_logged_cards_processed_cards.set(worker_i, processed_buffers);
|
||||
}
|
||||
|
||||
void record_redirty_logged_cards_time_ms(double time_ms) {
|
||||
_recorded_redirty_logged_cards_time_ms = time_ms;
|
||||
}
|
||||
|
@ -125,9 +125,7 @@ inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
|
||||
if (hr != NULL) {
|
||||
_cm->grayRoot(obj, obj->size(), _worker_id, hr);
|
||||
}
|
||||
_cm->grayRoot(obj, obj->size(), _worker_id, hr);
|
||||
}
|
||||
}
|
||||
|
||||
@ -154,57 +152,63 @@ inline void G1InvokeIfNotTriggeredClosure::do_oop_nv(T* p) {
|
||||
template <class T>
|
||||
inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
|
||||
oop obj = oopDesc::load_decode_heap_oop(p);
|
||||
if (obj == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
// can't do because of races
|
||||
// assert(obj == NULL || obj->is_oop(), "expected an oop");
|
||||
|
||||
// Do the safe subset of is_oop
|
||||
if (obj != NULL) {
|
||||
#ifdef CHECK_UNHANDLED_OOPS
|
||||
oopDesc* o = obj.obj();
|
||||
oopDesc* o = obj.obj();
|
||||
#else
|
||||
oopDesc* o = obj;
|
||||
oopDesc* o = obj;
|
||||
#endif // CHECK_UNHANDLED_OOPS
|
||||
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
|
||||
assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
|
||||
}
|
||||
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
|
||||
assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
|
||||
#endif // ASSERT
|
||||
|
||||
assert(_from != NULL, "from region must be non-NULL");
|
||||
assert(_from->is_in_reserved(p), "p is not in from");
|
||||
|
||||
HeapRegion* to = _g1->heap_region_containing(obj);
|
||||
if (to != NULL && _from != to) {
|
||||
// The _record_refs_into_cset flag is true during the RSet
|
||||
// updating part of an evacuation pause. It is false at all
|
||||
// other times:
|
||||
// * rebuilding the remembered sets after a full GC
|
||||
// * during concurrent refinement.
|
||||
// * updating the remembered sets of regions in the collection
|
||||
// set in the event of an evacuation failure (when deferred
|
||||
// updates are enabled).
|
||||
if (_from == to) {
|
||||
// Normally this closure should only be called with cross-region references.
|
||||
// But since Java threads are manipulating the references concurrently and we
|
||||
// reload the values things may have changed.
|
||||
return;
|
||||
}
|
||||
|
||||
if (_record_refs_into_cset && to->in_collection_set()) {
|
||||
// We are recording references that point into the collection
|
||||
// set and this particular reference does exactly that...
|
||||
// If the referenced object has already been forwarded
|
||||
// to itself, we are handling an evacuation failure and
|
||||
// we have already visited/tried to copy this object
|
||||
// there is no need to retry.
|
||||
if (!self_forwarded(obj)) {
|
||||
assert(_push_ref_cl != NULL, "should not be null");
|
||||
// Push the reference in the refs queue of the G1ParScanThreadState
|
||||
// instance for this worker thread.
|
||||
_push_ref_cl->do_oop(p);
|
||||
}
|
||||
// The _record_refs_into_cset flag is true during the RSet
|
||||
// updating part of an evacuation pause. It is false at all
|
||||
// other times:
|
||||
// * rebuilding the remembered sets after a full GC
|
||||
// * during concurrent refinement.
|
||||
// * updating the remembered sets of regions in the collection
|
||||
// set in the event of an evacuation failure (when deferred
|
||||
// updates are enabled).
|
||||
|
||||
// Deferred updates to the CSet are either discarded (in the normal case),
|
||||
// or processed (if an evacuation failure occurs) at the end
|
||||
// of the collection.
|
||||
// See G1RemSet::cleanup_after_oops_into_collection_set_do().
|
||||
return;
|
||||
if (_record_refs_into_cset && to->in_collection_set()) {
|
||||
// We are recording references that point into the collection
|
||||
// set and this particular reference does exactly that...
|
||||
// If the referenced object has already been forwarded
|
||||
// to itself, we are handling an evacuation failure and
|
||||
// we have already visited/tried to copy this object
|
||||
// there is no need to retry.
|
||||
if (!self_forwarded(obj)) {
|
||||
assert(_push_ref_cl != NULL, "should not be null");
|
||||
// Push the reference in the refs queue of the G1ParScanThreadState
|
||||
// instance for this worker thread.
|
||||
_push_ref_cl->do_oop(p);
|
||||
}
|
||||
|
||||
// Deferred updates to the CSet are either discarded (in the normal case),
|
||||
// or processed (if an evacuation failure occurs) at the end
|
||||
// of the collection.
|
||||
// See G1RemSet::cleanup_after_oops_into_collection_set_do().
|
||||
} else {
|
||||
// We either don't care about pushing references that point into the
|
||||
// collection set (i.e. we're not during an evacuation pause) _or_
|
||||
// the reference doesn't point into the collection set. Either way
|
||||
|
@ -36,6 +36,7 @@
|
||||
#include "gc_implementation/g1/heapRegionRemSet.hpp"
|
||||
#include "memory/iterator.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/intHisto.hpp"
|
||||
|
||||
#define CARD_REPEAT_HISTO 0
|
||||
@ -163,7 +164,7 @@ public:
|
||||
void printCard(HeapRegion* card_region, size_t card_index,
|
||||
HeapWord* card_start) {
|
||||
gclog_or_tty->print_cr("T %u Region [" PTR_FORMAT ", " PTR_FORMAT ") "
|
||||
"RS names card %p: "
|
||||
"RS names card " SIZE_FORMAT_HEX ": "
|
||||
"[" PTR_FORMAT ", " PTR_FORMAT ")",
|
||||
_worker_i,
|
||||
card_region->bottom(), card_region->end(),
|
||||
@ -209,7 +210,6 @@ public:
|
||||
#endif
|
||||
|
||||
HeapRegion* card_region = _g1h->heap_region_containing(card_start);
|
||||
assert(card_region != NULL, "Yielding cards not in the heap?");
|
||||
_cards++;
|
||||
|
||||
if (!card_region->is_on_dirty_cards_region_list()) {
|
||||
@ -404,7 +404,6 @@ public:
|
||||
HeapWord* start = _ct_bs->addr_for(card_ptr);
|
||||
// And find the region containing it.
|
||||
HeapRegion* r = _g1->heap_region_containing(start);
|
||||
assert(r != NULL, "unexpected null");
|
||||
|
||||
// Scan oops in the card looking for references into the collection set
|
||||
// Don't use addr_for(card_ptr + 1) which can ask for
|
||||
@ -566,11 +565,6 @@ bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
|
||||
HeapWord* start = _ct_bs->addr_for(card_ptr);
|
||||
// And find the region containing it.
|
||||
HeapRegion* r = _g1->heap_region_containing(start);
|
||||
if (r == NULL) {
|
||||
// Again no need to return that this card contains refs that
|
||||
// point into the collection set.
|
||||
return false; // Not in the G1 heap (might be in perm, for example.)
|
||||
}
|
||||
|
||||
// Why do we have to check here whether a card is on a young region,
|
||||
// given that we dirty young regions and, as a result, the
|
||||
@ -623,10 +617,6 @@ bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
|
||||
|
||||
start = _ct_bs->addr_for(card_ptr);
|
||||
r = _g1->heap_region_containing(start);
|
||||
if (r == NULL) {
|
||||
// Not in the G1 heap
|
||||
return false;
|
||||
}
|
||||
|
||||
// Checking whether the region we got back from the cache
|
||||
// is young here is inappropriate. The region could have been
|
||||
|
@ -45,26 +45,28 @@ inline void G1RemSet::write_ref(HeapRegion* from, T* p) {
|
||||
template <class T>
|
||||
inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
|
||||
oop obj = oopDesc::load_decode_heap_oop(p);
|
||||
if (obj == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
// can't do because of races
|
||||
// assert(obj == NULL || obj->is_oop(), "expected an oop");
|
||||
|
||||
// Do the safe subset of is_oop
|
||||
if (obj != NULL) {
|
||||
#ifdef CHECK_UNHANDLED_OOPS
|
||||
oopDesc* o = obj.obj();
|
||||
oopDesc* o = obj.obj();
|
||||
#else
|
||||
oopDesc* o = obj;
|
||||
oopDesc* o = obj;
|
||||
#endif // CHECK_UNHANDLED_OOPS
|
||||
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
|
||||
assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
|
||||
}
|
||||
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
|
||||
assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
|
||||
#endif // ASSERT
|
||||
|
||||
assert(from == NULL || from->is_in_reserved(p), "p is not in from");
|
||||
|
||||
HeapRegion* to = _g1->heap_region_containing(obj);
|
||||
if (to != NULL && from != to) {
|
||||
if (from != to) {
|
||||
assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
|
||||
to->rem_set()->add_reference(p, tid);
|
||||
}
|
||||
|
@ -44,6 +44,11 @@ void G1StringDedup::initialize() {
|
||||
}
|
||||
}
|
||||
|
||||
void G1StringDedup::stop() {
|
||||
assert(is_enabled(), "String deduplication not enabled");
|
||||
G1StringDedupThread::stop();
|
||||
}
|
||||
|
||||
bool G1StringDedup::is_candidate_from_mark(oop obj) {
|
||||
if (java_lang_String::is_instance(obj)) {
|
||||
bool from_young = G1CollectedHeap::heap()->heap_region_containing_raw(obj)->is_young();
|
||||
|
@ -110,8 +110,12 @@ public:
|
||||
return _enabled;
|
||||
}
|
||||
|
||||
// Initialize string deduplication.
|
||||
static void initialize();
|
||||
|
||||
// Stop the deduplication thread.
|
||||
static void stop();
|
||||
|
||||
// Immediately deduplicates the given String object, bypassing the
|
||||
// the deduplication queue.
|
||||
static void deduplicate(oop java_string);
|
||||
|
@ -35,6 +35,7 @@ const size_t G1StringDedupQueue::_max_cache_size = 0; // Max cache size p
|
||||
|
||||
G1StringDedupQueue::G1StringDedupQueue() :
|
||||
_cursor(0),
|
||||
_cancel(false),
|
||||
_empty(true),
|
||||
_dropped(0) {
|
||||
_nqueues = MAX2(ParallelGCThreads, (size_t)1);
|
||||
@ -55,11 +56,17 @@ void G1StringDedupQueue::create() {
|
||||
|
||||
void G1StringDedupQueue::wait() {
|
||||
MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
|
||||
while (_queue->_empty) {
|
||||
while (_queue->_empty && !_queue->_cancel) {
|
||||
ml.wait(Mutex::_no_safepoint_check_flag);
|
||||
}
|
||||
}
|
||||
|
||||
void G1StringDedupQueue::cancel_wait() {
|
||||
MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
|
||||
_queue->_cancel = true;
|
||||
ml.notify();
|
||||
}
|
||||
|
||||
void G1StringDedupQueue::push(uint worker_id, oop java_string) {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
|
||||
assert(worker_id < _queue->_nqueues, "Invalid queue");
|
||||
|
@ -65,6 +65,7 @@ private:
|
||||
G1StringDedupWorkerQueue* _queues;
|
||||
size_t _nqueues;
|
||||
size_t _cursor;
|
||||
bool _cancel;
|
||||
volatile bool _empty;
|
||||
|
||||
// Statistics counter, only used for logging.
|
||||
@ -81,6 +82,9 @@ public:
|
||||
// Blocks and waits for the queue to become non-empty.
|
||||
static void wait();
|
||||
|
||||
// Wakes up any thread blocked waiting for the queue to become non-empty.
|
||||
static void cancel_wait();
|
||||
|
||||
// Pushes a deduplication candidate onto a specific GC worker queue.
|
||||
static void push(uint worker_id, oop java_string);
|
||||
|
||||
|
@ -73,42 +73,60 @@ void G1StringDedupThread::run() {
|
||||
|
||||
// Wait for the queue to become non-empty
|
||||
G1StringDedupQueue::wait();
|
||||
|
||||
// Include this thread in safepoints
|
||||
stsJoin();
|
||||
|
||||
stat.mark_exec();
|
||||
|
||||
// Process the queue
|
||||
for (;;) {
|
||||
oop java_string = G1StringDedupQueue::pop();
|
||||
if (java_string == NULL) {
|
||||
break;
|
||||
}
|
||||
|
||||
G1StringDedupTable::deduplicate(java_string, stat);
|
||||
|
||||
// Safepoint this thread if needed
|
||||
if (stsShouldYield()) {
|
||||
stat.mark_block();
|
||||
stsYield(NULL);
|
||||
stat.mark_unblock();
|
||||
}
|
||||
if (_should_terminate) {
|
||||
break;
|
||||
}
|
||||
|
||||
G1StringDedupTable::trim_entry_cache();
|
||||
{
|
||||
// Include thread in safepoints
|
||||
SuspendibleThreadSetJoiner sts;
|
||||
|
||||
stat.mark_done();
|
||||
stat.mark_exec();
|
||||
|
||||
// Print statistics
|
||||
total_stat.add(stat);
|
||||
print(gclog_or_tty, stat, total_stat);
|
||||
// Process the queue
|
||||
for (;;) {
|
||||
oop java_string = G1StringDedupQueue::pop();
|
||||
if (java_string == NULL) {
|
||||
break;
|
||||
}
|
||||
|
||||
// Exclude this thread from safepoints
|
||||
stsLeave();
|
||||
G1StringDedupTable::deduplicate(java_string, stat);
|
||||
|
||||
// Safepoint this thread if needed
|
||||
if (sts.should_yield()) {
|
||||
stat.mark_block();
|
||||
sts.yield();
|
||||
stat.mark_unblock();
|
||||
}
|
||||
}
|
||||
|
||||
G1StringDedupTable::trim_entry_cache();
|
||||
|
||||
stat.mark_done();
|
||||
|
||||
// Print statistics
|
||||
total_stat.add(stat);
|
||||
print(gclog_or_tty, stat, total_stat);
|
||||
}
|
||||
}
|
||||
|
||||
ShouldNotReachHere();
|
||||
terminate();
|
||||
}
|
||||
|
||||
void G1StringDedupThread::stop() {
|
||||
{
|
||||
MonitorLockerEx ml(Terminator_lock);
|
||||
_thread->_should_terminate = true;
|
||||
}
|
||||
|
||||
G1StringDedupQueue::cancel_wait();
|
||||
|
||||
{
|
||||
MonitorLockerEx ml(Terminator_lock);
|
||||
while (!_thread->_has_terminated) {
|
||||
ml.wait();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void G1StringDedupThread::print(outputStream* st, const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
|
||||
|
@ -47,6 +47,8 @@ private:
|
||||
|
||||
public:
|
||||
static void create();
|
||||
static void stop();
|
||||
|
||||
static G1StringDedupThread* thread();
|
||||
|
||||
virtual void run();
|
||||
|
@ -167,7 +167,7 @@ public:
|
||||
|
||||
// Mem size in bytes.
|
||||
size_t mem_size() const {
|
||||
return sizeof(this) + _bm.size_in_words() * HeapWordSize;
|
||||
return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
|
||||
}
|
||||
|
||||
// Requires "from" to be in "hr()".
|
||||
@ -491,7 +491,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
|
||||
} else {
|
||||
if (G1TraceHeapRegionRememberedSet) {
|
||||
gclog_or_tty->print_cr(" [tid %d] sparse table entry "
|
||||
"overflow(f: %d, t: %d)",
|
||||
"overflow(f: %d, t: %u)",
|
||||
tid, from_hrs_ind, cur_hrs_ind);
|
||||
}
|
||||
}
|
||||
@ -610,7 +610,7 @@ PerRegionTable* OtherRegionsTable::delete_region_table() {
|
||||
_n_coarse_entries++;
|
||||
if (G1TraceHeapRegionRememberedSet) {
|
||||
gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
|
||||
"for region [" PTR_FORMAT "...] (%d coarse entries).\n",
|
||||
"for region [" PTR_FORMAT "...] (" SIZE_FORMAT " coarse entries).\n",
|
||||
hr()->bottom(),
|
||||
max->hr()->bottom(),
|
||||
_n_coarse_entries);
|
||||
@ -733,7 +733,7 @@ size_t OtherRegionsTable::mem_size() const {
|
||||
sum += (sizeof(PerRegionTable*) * _max_fine_entries);
|
||||
sum += (_coarse_map.size_in_words() * HeapWordSize);
|
||||
sum += (_sparse_table.mem_size());
|
||||
sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above.
|
||||
sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
|
||||
return sum;
|
||||
}
|
||||
|
||||
@ -768,30 +768,6 @@ void OtherRegionsTable::clear() {
|
||||
clear_fcc();
|
||||
}
|
||||
|
||||
void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
|
||||
MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
|
||||
size_t hrs_ind = (size_t) from_hr->hrs_index();
|
||||
size_t ind = hrs_ind & _mod_max_fine_entries_mask;
|
||||
if (del_single_region_table(ind, from_hr)) {
|
||||
assert(!_coarse_map.at(hrs_ind), "Inv");
|
||||
} else {
|
||||
_coarse_map.par_at_put(hrs_ind, 0);
|
||||
}
|
||||
// Check to see if any of the fcc entries come from here.
|
||||
uint hr_ind = hr()->hrs_index();
|
||||
for (uint tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
|
||||
int fcc_ent = FromCardCache::at(tid, hr_ind);
|
||||
if (fcc_ent != FromCardCache::InvalidCard) {
|
||||
HeapWord* card_addr = (HeapWord*)
|
||||
(uintptr_t(fcc_ent) << CardTableModRefBS::card_shift);
|
||||
if (hr()->is_in_reserved(card_addr)) {
|
||||
// Clear the from card cache.
|
||||
FromCardCache::set(tid, hr_ind, FromCardCache::InvalidCard);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool OtherRegionsTable::del_single_region_table(size_t ind,
|
||||
HeapRegion* hr) {
|
||||
assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
|
||||
@ -821,7 +797,6 @@ bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
|
||||
|
||||
bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
|
||||
HeapRegion* hr = _g1h->heap_region_containing_raw(from);
|
||||
if (hr == NULL) return false;
|
||||
RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
|
||||
// Is this region in the coarse map?
|
||||
if (_coarse_map.at(hr_ind)) return true;
|
||||
@ -903,10 +878,12 @@ void HeapRegionRemSet::print() {
|
||||
}
|
||||
if (iter.n_yielded() != occupied()) {
|
||||
gclog_or_tty->print_cr("Yielded disagrees with occupied:");
|
||||
gclog_or_tty->print_cr(" %6d yielded (%6d coarse, %6d fine).",
|
||||
gclog_or_tty->print_cr(" " SIZE_FORMAT_W(6) " yielded (" SIZE_FORMAT_W(6)
|
||||
" coarse, " SIZE_FORMAT_W(6) " fine).",
|
||||
iter.n_yielded(),
|
||||
iter.n_yielded_coarse(), iter.n_yielded_fine());
|
||||
gclog_or_tty->print_cr(" %6d occ (%6d coarse, %6d fine).",
|
||||
gclog_or_tty->print_cr(" " SIZE_FORMAT_W(6) " occ (" SIZE_FORMAT_W(6)
|
||||
" coarse, " SIZE_FORMAT_W(6) " fine).",
|
||||
occupied(), occ_coarse(), occ_fine());
|
||||
}
|
||||
guarantee(iter.n_yielded() == occupied(),
|
||||
@ -1046,20 +1023,16 @@ size_t HeapRegionRemSet::strong_code_roots_mem_size() {
|
||||
return _code_roots.mem_size();
|
||||
}
|
||||
|
||||
//-------------------- Iteration --------------------
|
||||
|
||||
HeapRegionRemSetIterator:: HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
|
||||
_hrrs(hrrs),
|
||||
_g1h(G1CollectedHeap::heap()),
|
||||
_coarse_map(&hrrs->_other_regions._coarse_map),
|
||||
_fine_grain_regions(hrrs->_other_regions._fine_grain_regions),
|
||||
_bosa(hrrs->bosa()),
|
||||
_is(Sparse),
|
||||
// Set these values so that we increment to the first region.
|
||||
_coarse_cur_region_index(-1),
|
||||
_coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
|
||||
_cur_region_cur_card(0),
|
||||
_fine_array_index(-1),
|
||||
_cur_card_in_prt(HeapRegion::CardsPerRegion),
|
||||
_fine_cur_prt(NULL),
|
||||
_n_yielded_coarse(0),
|
||||
_n_yielded_fine(0),
|
||||
@ -1091,58 +1064,59 @@ bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
|
||||
return true;
|
||||
}
|
||||
|
||||
void HeapRegionRemSetIterator::fine_find_next_non_null_prt() {
|
||||
// Otherwise, find the next bucket list in the array.
|
||||
_fine_array_index++;
|
||||
while (_fine_array_index < (int) OtherRegionsTable::_max_fine_entries) {
|
||||
_fine_cur_prt = _fine_grain_regions[_fine_array_index];
|
||||
if (_fine_cur_prt != NULL) return;
|
||||
else _fine_array_index++;
|
||||
}
|
||||
assert(_fine_cur_prt == NULL, "Loop post");
|
||||
}
|
||||
|
||||
bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
|
||||
if (fine_has_next()) {
|
||||
_cur_region_cur_card =
|
||||
_fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1);
|
||||
_cur_card_in_prt =
|
||||
_fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
|
||||
}
|
||||
while (!fine_has_next()) {
|
||||
if (_cur_region_cur_card == (size_t) HeapRegion::CardsPerRegion) {
|
||||
_cur_region_cur_card = 0;
|
||||
_fine_cur_prt = _fine_cur_prt->collision_list_next();
|
||||
if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
|
||||
// _fine_cur_prt may still be NULL in case if there are not PRTs at all for
|
||||
// the remembered set.
|
||||
if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
|
||||
return false;
|
||||
}
|
||||
if (_fine_cur_prt == NULL) {
|
||||
fine_find_next_non_null_prt();
|
||||
if (_fine_cur_prt == NULL) return false;
|
||||
}
|
||||
assert(_fine_cur_prt != NULL && _cur_region_cur_card == 0,
|
||||
"inv.");
|
||||
HeapWord* r_bot =
|
||||
_fine_cur_prt->hr()->bottom();
|
||||
_cur_region_card_offset = _bosa->index_for(r_bot);
|
||||
_cur_region_cur_card = _fine_cur_prt->_bm.get_next_one_offset(0);
|
||||
PerRegionTable* next_prt = _fine_cur_prt->next();
|
||||
switch_to_prt(next_prt);
|
||||
_cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
|
||||
}
|
||||
assert(fine_has_next(), "Or else we exited the loop via the return.");
|
||||
card_index = _cur_region_card_offset + _cur_region_cur_card;
|
||||
|
||||
card_index = _cur_region_card_offset + _cur_card_in_prt;
|
||||
guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
|
||||
err_msg("Card index "SIZE_FORMAT" must be within the region", _cur_card_in_prt));
|
||||
return true;
|
||||
}
|
||||
|
||||
bool HeapRegionRemSetIterator::fine_has_next() {
|
||||
return
|
||||
_fine_cur_prt != NULL &&
|
||||
_cur_region_cur_card < HeapRegion::CardsPerRegion;
|
||||
return _cur_card_in_prt != HeapRegion::CardsPerRegion;
|
||||
}
|
||||
|
||||
void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
|
||||
assert(prt != NULL, "Cannot switch to NULL prt");
|
||||
_fine_cur_prt = prt;
|
||||
|
||||
HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
|
||||
_cur_region_card_offset = _bosa->index_for(r_bot);
|
||||
|
||||
// The bitmap scan for the PRT always scans from _cur_region_cur_card + 1.
|
||||
// To avoid special-casing this start case, and not miss the first bitmap
|
||||
// entry, initialize _cur_region_cur_card with -1 instead of 0.
|
||||
_cur_card_in_prt = (size_t)-1;
|
||||
}
|
||||
|
||||
bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
|
||||
switch (_is) {
|
||||
case Sparse:
|
||||
case Sparse: {
|
||||
if (_sparse_iter.has_next(card_index)) {
|
||||
_n_yielded_sparse++;
|
||||
return true;
|
||||
}
|
||||
// Otherwise, deliberate fall-through
|
||||
_is = Fine;
|
||||
PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts;
|
||||
if (initial_fine_prt != NULL) {
|
||||
switch_to_prt(_hrrs->_other_regions._first_all_fine_prts);
|
||||
}
|
||||
}
|
||||
case Fine:
|
||||
if (fine_has_next(card_index)) {
|
||||
_n_yielded_fine++;
|
||||
@ -1274,6 +1248,11 @@ HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
|
||||
#ifndef PRODUCT
|
||||
void PerRegionTable::test_fl_mem_size() {
|
||||
PerRegionTable* dummy = alloc(NULL);
|
||||
|
||||
size_t min_prt_size = sizeof(void*) + dummy->bm()->size_in_words() * HeapWordSize;
|
||||
assert(dummy->mem_size() > min_prt_size,
|
||||
err_msg("PerRegionTable memory usage is suspiciously small, only has "SIZE_FORMAT" bytes. "
|
||||
"Should be at least "SIZE_FORMAT" bytes.", dummy->mem_size(), min_prt_size));
|
||||
free(dummy);
|
||||
guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
|
||||
// try to reset the state
|
||||
|
@ -206,9 +206,6 @@ public:
|
||||
// Specifically clear the from_card_cache.
|
||||
void clear_fcc();
|
||||
|
||||
// "from_hr" is being cleared; remove any entries from it.
|
||||
void clear_incoming_entry(HeapRegion* from_hr);
|
||||
|
||||
void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
|
||||
|
||||
// Declare the heap size (in # of regions) to the OtherRegionsTable.
|
||||
@ -338,20 +335,20 @@ public:
|
||||
return _other_regions.mem_size()
|
||||
// This correction is necessary because the above includes the second
|
||||
// part.
|
||||
+ (sizeof(this) - sizeof(OtherRegionsTable))
|
||||
+ (sizeof(HeapRegionRemSet) - sizeof(OtherRegionsTable))
|
||||
+ strong_code_roots_mem_size();
|
||||
}
|
||||
|
||||
// Returns the memory occupancy of all static data structures associated
|
||||
// with remembered sets.
|
||||
static size_t static_mem_size() {
|
||||
return OtherRegionsTable::static_mem_size() + G1CodeRootSet::static_mem_size();
|
||||
return OtherRegionsTable::static_mem_size() + G1CodeRootSet::free_chunks_static_mem_size();
|
||||
}
|
||||
|
||||
// Returns the memory occupancy of all free_list data structures associated
|
||||
// with remembered sets.
|
||||
static size_t fl_mem_size() {
|
||||
return OtherRegionsTable::fl_mem_size() + G1CodeRootSet::fl_mem_size();
|
||||
return OtherRegionsTable::fl_mem_size() + G1CodeRootSet::free_chunks_mem_size();
|
||||
}
|
||||
|
||||
bool contains_reference(OopOrNarrowOopStar from) const {
|
||||
@ -396,7 +393,6 @@ public:
|
||||
// Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
|
||||
// (Uses it to initialize from_card_cache).
|
||||
static void init_heap(uint max_regions) {
|
||||
G1CodeRootSet::initialize();
|
||||
OtherRegionsTable::init_from_card_cache(max_regions);
|
||||
}
|
||||
|
||||
@ -429,26 +425,24 @@ public:
|
||||
};
|
||||
|
||||
class HeapRegionRemSetIterator : public StackObj {
|
||||
|
||||
// The region RSet over which we're iterating.
|
||||
private:
|
||||
// The region RSet over which we are iterating.
|
||||
HeapRegionRemSet* _hrrs;
|
||||
|
||||
// Local caching of HRRS fields.
|
||||
const BitMap* _coarse_map;
|
||||
PerRegionTable** _fine_grain_regions;
|
||||
|
||||
G1BlockOffsetSharedArray* _bosa;
|
||||
G1CollectedHeap* _g1h;
|
||||
|
||||
// The number yielded since initialization.
|
||||
// The number of cards yielded since initialization.
|
||||
size_t _n_yielded_fine;
|
||||
size_t _n_yielded_coarse;
|
||||
size_t _n_yielded_sparse;
|
||||
|
||||
// Indicates what granularity of table that we're currently iterating over.
|
||||
// Indicates what granularity of table that we are currently iterating over.
|
||||
// We start iterating over the sparse table, progress to the fine grain
|
||||
// table, and then finish with the coarse table.
|
||||
// See HeapRegionRemSetIterator::has_next().
|
||||
enum IterState {
|
||||
Sparse,
|
||||
Fine,
|
||||
@ -456,38 +450,30 @@ class HeapRegionRemSetIterator : public StackObj {
|
||||
};
|
||||
IterState _is;
|
||||
|
||||
// In both kinds of iteration, heap offset of first card of current
|
||||
// region.
|
||||
// For both Coarse and Fine remembered set iteration this contains the
|
||||
// first card number of the heap region we currently iterate over.
|
||||
size_t _cur_region_card_offset;
|
||||
// Card offset within cur region.
|
||||
size_t _cur_region_cur_card;
|
||||
|
||||
// Coarse table iteration fields:
|
||||
|
||||
// Current region index;
|
||||
// Current region index for the Coarse remembered set iteration.
|
||||
int _coarse_cur_region_index;
|
||||
size_t _coarse_cur_region_cur_card;
|
||||
|
||||
bool coarse_has_next(size_t& card_index);
|
||||
|
||||
// Fine table iteration fields:
|
||||
|
||||
// Index of bucket-list we're working on.
|
||||
int _fine_array_index;
|
||||
|
||||
// Per Region Table we're doing within current bucket list.
|
||||
// The PRT we are currently iterating over.
|
||||
PerRegionTable* _fine_cur_prt;
|
||||
// Card offset within the current PRT.
|
||||
size_t _cur_card_in_prt;
|
||||
|
||||
/* SparsePRT::*/ SparsePRTIter _sparse_iter;
|
||||
|
||||
void fine_find_next_non_null_prt();
|
||||
|
||||
// Update internal variables when switching to the given PRT.
|
||||
void switch_to_prt(PerRegionTable* prt);
|
||||
bool fine_has_next();
|
||||
bool fine_has_next(size_t& card_index);
|
||||
|
||||
public:
|
||||
// We require an iterator to be initialized before use, so the
|
||||
// constructor does little.
|
||||
// The Sparse remembered set iterator.
|
||||
SparsePRTIter _sparse_iter;
|
||||
|
||||
public:
|
||||
HeapRegionRemSetIterator(HeapRegionRemSet* hrrs);
|
||||
|
||||
// If there remains one or more cards to be yielded, returns true and
|
||||
|
@ -240,7 +240,6 @@ void HeapRegionSeq::verify_optional() {
|
||||
// Asserts will fire if i is >= _length
|
||||
HeapWord* addr = hr->bottom();
|
||||
guarantee(addr_to_region(addr) == hr, "sanity");
|
||||
guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
|
||||
} else {
|
||||
guarantee(hr->is_empty(), "sanity");
|
||||
guarantee(!hr->isHumongous(), "sanity");
|
||||
|
@ -110,10 +110,6 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
|
||||
// HeapRegion, otherwise return NULL.
|
||||
inline HeapRegion* addr_to_region(HeapWord* addr) const;
|
||||
|
||||
// Return the HeapRegion that corresponds to the given
|
||||
// address. Assume the address is valid.
|
||||
inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
|
||||
|
||||
// Return the number of regions that have been committed in the heap.
|
||||
uint length() const { return _committed_length; }
|
||||
|
||||
|
@ -28,21 +28,17 @@
|
||||
#include "gc_implementation/g1/heapRegion.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSeq.hpp"
|
||||
|
||||
inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
|
||||
inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
|
||||
assert(addr < heap_end(),
|
||||
err_msg("addr: "PTR_FORMAT" end: "PTR_FORMAT, addr, heap_end()));
|
||||
assert(addr >= heap_bottom(),
|
||||
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom()));
|
||||
|
||||
HeapRegion* hr = _regions.get_by_address(addr);
|
||||
assert(hr != NULL, "invariant");
|
||||
return hr;
|
||||
}
|
||||
|
||||
inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
|
||||
if (addr != NULL && addr < heap_end()) {
|
||||
assert(addr >= heap_bottom(),
|
||||
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom()));
|
||||
return addr_to_region_unsafe(addr);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
inline HeapRegion* HeapRegionSeq::at(uint index) const {
|
||||
assert(index < length(), "pre-condition");
|
||||
HeapRegion* hr = _regions.get_by_index(index);
|
||||
|
@ -370,7 +370,7 @@ bool RSHashTable::contains_card(RegionIdx_t region_index, CardIdx_t card_index)
|
||||
}
|
||||
|
||||
size_t RSHashTable::mem_size() const {
|
||||
return sizeof(this) +
|
||||
return sizeof(RSHashTable) +
|
||||
capacity() * (SparsePRTEntry::size() + sizeof(int));
|
||||
}
|
||||
|
||||
@ -472,7 +472,7 @@ SparsePRT::~SparsePRT() {
|
||||
size_t SparsePRT::mem_size() const {
|
||||
// We ignore "_cur" here, because it either = _next, or else it is
|
||||
// on the deleted list.
|
||||
return sizeof(this) + _next->mem_size();
|
||||
return sizeof(SparsePRT) + _next->mem_size();
|
||||
}
|
||||
|
||||
bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
|
||||
|
@ -187,10 +187,10 @@ SurvRateGroup::all_surviving_words_recorded(bool propagate) {
|
||||
#ifndef PRODUCT
|
||||
void
|
||||
SurvRateGroup::print() {
|
||||
gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries)",
|
||||
gclog_or_tty->print_cr("Surv Rate Group: %s (" SIZE_FORMAT " entries)",
|
||||
_name, _region_num);
|
||||
for (size_t i = 0; i < _region_num; ++i) {
|
||||
gclog_or_tty->print_cr(" age %4d surv rate %6.2lf %% pred %6.2lf %%",
|
||||
gclog_or_tty->print_cr(" age " SIZE_FORMAT_W(4) " surv rate %6.2lf %% pred %6.2lf %%",
|
||||
i, _surv_rate[i] * 100.0,
|
||||
_g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0);
|
||||
}
|
||||
@ -203,14 +203,15 @@ SurvRateGroup::print_surv_rate_summary() {
|
||||
return;
|
||||
|
||||
gclog_or_tty->print_cr("");
|
||||
gclog_or_tty->print_cr("%s Rate Summary (for up to age %d)", _name, length-1);
|
||||
gclog_or_tty->print_cr("%s Rate Summary (for up to age " SIZE_FORMAT ")", _name, length-1);
|
||||
gclog_or_tty->print_cr(" age range survival rate (avg) samples (avg)");
|
||||
gclog_or_tty->print_cr(" ---------------------------------------------------------");
|
||||
|
||||
size_t index = 0;
|
||||
size_t limit = MIN2((int) length, 10);
|
||||
while (index < limit) {
|
||||
gclog_or_tty->print_cr(" %4d %6.2lf%% %6.2lf",
|
||||
gclog_or_tty->print_cr(" " SIZE_FORMAT_W(4)
|
||||
" %6.2lf%% %6.2lf",
|
||||
index, _summary_surv_rates[index]->avg() * 100.0,
|
||||
(double) _summary_surv_rates[index]->num());
|
||||
++index;
|
||||
@ -228,7 +229,8 @@ SurvRateGroup::print_surv_rate_summary() {
|
||||
++index;
|
||||
|
||||
if (index == length || num % 10 == 0) {
|
||||
gclog_or_tty->print_cr(" %4d .. %4d %6.2lf%% %6.2lf",
|
||||
gclog_or_tty->print_cr(" " SIZE_FORMAT_W(4) " .. " SIZE_FORMAT_W(4)
|
||||
" %6.2lf%% %6.2lf",
|
||||
(index-1) / 10 * 10, index-1, sum / (double) num,
|
||||
(double) samples / (double) num);
|
||||
sum = 0.0;
|
||||
|
@ -143,7 +143,8 @@ void AdjoiningGenerations::request_old_gen_expansion(size_t expand_in_bytes) {
|
||||
|
||||
if (TraceAdaptiveGCBoundary) {
|
||||
gclog_or_tty->print_cr("Before expansion of old gen with boundary move");
|
||||
gclog_or_tty->print_cr(" Requested change: 0x%x Attempted change: 0x%x",
|
||||
gclog_or_tty->print_cr(" Requested change: " SIZE_FORMAT_HEX
|
||||
" Attempted change: " SIZE_FORMAT_HEX,
|
||||
expand_in_bytes, change_in_bytes);
|
||||
if (!PrintHeapAtGC) {
|
||||
Universe::print_on(gclog_or_tty);
|
||||
@ -201,7 +202,7 @@ bool AdjoiningGenerations::request_young_gen_expansion(size_t expand_in_bytes) {
|
||||
|
||||
if (TraceAdaptiveGCBoundary) {
|
||||
gclog_or_tty->print_cr("Before expansion of young gen with boundary move");
|
||||
gclog_or_tty->print_cr(" Requested change: 0x%x Attempted change: 0x%x",
|
||||
gclog_or_tty->print_cr(" Requested change: " SIZE_FORMAT_HEX " Attempted change: " SIZE_FORMAT_HEX,
|
||||
expand_in_bytes, change_in_bytes);
|
||||
if (!PrintHeapAtGC) {
|
||||
Universe::print_on(gclog_or_tty);
|
||||
|
@ -127,22 +127,22 @@ size_t ASPSOldGen::available_for_contraction() {
|
||||
size_t result_aligned = align_size_down(result, gen_alignment);
|
||||
if (PrintAdaptiveSizePolicy && Verbose) {
|
||||
gclog_or_tty->print_cr("\nASPSOldGen::available_for_contraction:"
|
||||
" %d K / 0x%x", result_aligned/K, result_aligned);
|
||||
gclog_or_tty->print_cr(" reserved().byte_size() %d K / 0x%x ",
|
||||
" " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, result_aligned/K, result_aligned);
|
||||
gclog_or_tty->print_cr(" reserved().byte_size() " SIZE_FORMAT " K / " SIZE_FORMAT_HEX,
|
||||
reserved().byte_size()/K, reserved().byte_size());
|
||||
size_t working_promoted = (size_t) policy->avg_promoted()->padded_average();
|
||||
gclog_or_tty->print_cr(" padded promoted %d K / 0x%x",
|
||||
gclog_or_tty->print_cr(" padded promoted " SIZE_FORMAT " K / " SIZE_FORMAT_HEX,
|
||||
working_promoted/K, working_promoted);
|
||||
gclog_or_tty->print_cr(" used %d K / 0x%x",
|
||||
gclog_or_tty->print_cr(" used " SIZE_FORMAT " K / " SIZE_FORMAT_HEX,
|
||||
used_in_bytes()/K, used_in_bytes());
|
||||
gclog_or_tty->print_cr(" min_gen_size() %d K / 0x%x",
|
||||
gclog_or_tty->print_cr(" min_gen_size() " SIZE_FORMAT " K / " SIZE_FORMAT_HEX,
|
||||
min_gen_size()/K, min_gen_size());
|
||||
gclog_or_tty->print_cr(" max_contraction %d K / 0x%x",
|
||||
gclog_or_tty->print_cr(" max_contraction " SIZE_FORMAT " K / " SIZE_FORMAT_HEX,
|
||||
max_contraction/K, max_contraction);
|
||||
gclog_or_tty->print_cr(" without alignment %d K / 0x%x",
|
||||
gclog_or_tty->print_cr(" without alignment " SIZE_FORMAT " K / " SIZE_FORMAT_HEX,
|
||||
policy->promo_increment(max_contraction)/K,
|
||||
policy->promo_increment(max_contraction));
|
||||
gclog_or_tty->print_cr(" alignment 0x%x", gen_alignment);
|
||||
gclog_or_tty->print_cr(" alignment " SIZE_FORMAT_HEX, gen_alignment);
|
||||
}
|
||||
assert(result_aligned <= max_contraction, "arithmetic is wrong");
|
||||
return result_aligned;
|
||||
|
@ -112,11 +112,11 @@ size_t ASPSYoungGen::available_for_contraction() {
|
||||
size_t result = policy->eden_increment_aligned_down(max_contraction);
|
||||
size_t result_aligned = align_size_down(result, gen_alignment);
|
||||
if (PrintAdaptiveSizePolicy && Verbose) {
|
||||
gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: %d K",
|
||||
gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: " SIZE_FORMAT " K",
|
||||
result_aligned/K);
|
||||
gclog_or_tty->print_cr(" max_contraction %d K", max_contraction/K);
|
||||
gclog_or_tty->print_cr(" eden_avail %d K", eden_avail/K);
|
||||
gclog_or_tty->print_cr(" gen_avail %d K", gen_avail/K);
|
||||
gclog_or_tty->print_cr(" max_contraction " SIZE_FORMAT " K", max_contraction/K);
|
||||
gclog_or_tty->print_cr(" eden_avail " SIZE_FORMAT " K", eden_avail/K);
|
||||
gclog_or_tty->print_cr(" gen_avail " SIZE_FORMAT " K", gen_avail/K);
|
||||
}
|
||||
return result_aligned;
|
||||
}
|
||||
|
@ -487,7 +487,7 @@ void GCTaskManager::set_active_gang() {
|
||||
if (TraceDynamicGCThreads) {
|
||||
gclog_or_tty->print_cr("GCTaskManager::set_active_gang(): "
|
||||
"all_workers_active() %d workers %d "
|
||||
"active %d ParallelGCThreads %d ",
|
||||
"active %d ParallelGCThreads " UINTX_FORMAT,
|
||||
all_workers_active(), workers(), active_workers(),
|
||||
ParallelGCThreads);
|
||||
}
|
||||
|
@ -128,8 +128,6 @@ class ObjectStartArray : public CHeapObj<mtGC> {
|
||||
// When doing MT offsets, we can't assert this.
|
||||
//assert(offset > *block, "Found backwards allocation");
|
||||
*block = (jbyte)offset;
|
||||
|
||||
// tty->print_cr("[%p]", p);
|
||||
}
|
||||
|
||||
// Optimized for finding the first object that crosses into
|
||||
|
@ -264,7 +264,7 @@ void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
|
||||
cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
|
||||
if (TraceDynamicGCThreads) {
|
||||
gclog_or_tty->print_cr("StealRegionCompactionTask::do_it "
|
||||
"region_stack_index %d region_stack = 0x%x "
|
||||
"region_stack_index %d region_stack = " PTR_FORMAT " "
|
||||
" empty (%d) use all workers %d",
|
||||
which_stack_index, ParCompactionManager::region_list(which_stack_index),
|
||||
cm->region_stack()->is_empty(),
|
||||
@ -366,7 +366,7 @@ void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
|
||||
if (TraceDynamicGCThreads) {
|
||||
void* old_region_stack = (void*) cm->region_stack();
|
||||
int old_region_stack_index = cm->region_stack_index();
|
||||
gclog_or_tty->print_cr("Pushing region stack 0x%x/%d",
|
||||
gclog_or_tty->print_cr("Pushing region stack " PTR_FORMAT "/%d",
|
||||
old_region_stack, old_region_stack_index);
|
||||
}
|
||||
|
||||
|
@ -379,7 +379,7 @@ void PSAdaptiveSizePolicy::compute_eden_space_size(
|
||||
gclog_or_tty->print_cr(
|
||||
"PSAdaptiveSizePolicy::compute_eden_space_size: gc time limit"
|
||||
" gc_cost: %f "
|
||||
" GCTimeLimit: %d",
|
||||
" GCTimeLimit: " UINTX_FORMAT,
|
||||
gc_cost(), GCTimeLimit);
|
||||
}
|
||||
}
|
||||
@ -586,7 +586,7 @@ void PSAdaptiveSizePolicy::compute_old_gen_free_space(
|
||||
gclog_or_tty->print_cr(
|
||||
"PSAdaptiveSizePolicy::compute_old_gen_free_space: gc time limit"
|
||||
" gc_cost: %f "
|
||||
" GCTimeLimit: %d",
|
||||
" GCTimeLimit: " UINTX_FORMAT,
|
||||
gc_cost(), GCTimeLimit);
|
||||
}
|
||||
}
|
||||
|
@ -270,7 +270,8 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
|
||||
gclog_or_tty->print_cr(" collection: %d ",
|
||||
heap->total_collections());
|
||||
if (Verbose) {
|
||||
gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d",
|
||||
gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
|
||||
" young_gen_capacity: " SIZE_FORMAT,
|
||||
old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
|
||||
}
|
||||
}
|
||||
|
@ -1428,7 +1428,7 @@ PSParallelCompact::compute_dense_prefix(const SpaceId id,
|
||||
"space_cap=" SIZE_FORMAT,
|
||||
space_live, space_used,
|
||||
space_capacity);
|
||||
tty->print_cr("dead_wood_limiter(%6.4f, %d)=%6.4f "
|
||||
tty->print_cr("dead_wood_limiter(%6.4f, " SIZE_FORMAT ")=%6.4f "
|
||||
"dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
|
||||
density, min_percent_free, limiter,
|
||||
dead_wood_max, dead_wood_limit);
|
||||
@ -2106,7 +2106,8 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
|
||||
gclog_or_tty->print_cr(" collection: %d ",
|
||||
heap->total_collections());
|
||||
if (Verbose) {
|
||||
gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d",
|
||||
gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
|
||||
" young_gen_capacity: " SIZE_FORMAT,
|
||||
old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
|
||||
}
|
||||
}
|
||||
@ -2559,7 +2560,7 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
|
||||
|
||||
if (TraceParallelOldGCCompactionPhase) {
|
||||
if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr();
|
||||
gclog_or_tty->print_cr("%u initially fillable regions", fillable_regions);
|
||||
gclog_or_tty->print_cr(SIZE_FORMAT " initially fillable regions", fillable_regions);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -330,7 +330,7 @@ oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (TraceScavenge) {
|
||||
gclog_or_tty->print_cr("{%s %s 0x%x (%d)}",
|
||||
gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " (%d)}",
|
||||
"promotion-failure",
|
||||
obj->klass()->internal_name(),
|
||||
(void *)obj, obj->size());
|
||||
|
@ -510,7 +510,8 @@ bool PSScavenge::invoke_no_policy() {
|
||||
heap->total_collections());
|
||||
|
||||
if (Verbose) {
|
||||
gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d",
|
||||
gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
|
||||
" young_gen_capacity: " SIZE_FORMAT,
|
||||
old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
|
||||
}
|
||||
}
|
||||
@ -728,7 +729,7 @@ void PSScavenge::clean_up_failed_promotion() {
|
||||
young_gen->object_iterate(&unforward_closure);
|
||||
|
||||
if (PrintGC && Verbose) {
|
||||
gclog_or_tty->print_cr("Restoring %d marks", _preserved_oop_stack.size());
|
||||
gclog_or_tty->print_cr("Restoring " SIZE_FORMAT " marks", _preserved_oop_stack.size());
|
||||
}
|
||||
|
||||
// Restore any saved marks.
|
||||
|
@ -31,6 +31,7 @@
|
||||
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
|
||||
#include "memory/iterator.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
inline void PSScavenge::save_to_space_top_before_gc() {
|
||||
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
|
||||
@ -178,7 +179,7 @@ class PSScavengeKlassClosure: public KlassClosure {
|
||||
#ifndef PRODUCT
|
||||
if (TraceScavenge) {
|
||||
ResourceMark rm;
|
||||
gclog_or_tty->print_cr("PSScavengeKlassClosure::do_klass %p, %s, dirty: %s",
|
||||
gclog_or_tty->print_cr("PSScavengeKlassClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
|
||||
klass,
|
||||
klass->external_name(),
|
||||
klass->has_modified_oops() ? "true" : "false");
|
||||
|
@ -168,9 +168,9 @@ int AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
|
||||
|
||||
if (TraceDynamicGCThreads) {
|
||||
gclog_or_tty->print_cr("GCTaskManager::calc_default_active_workers() : "
|
||||
"active_workers(): %d new_active_workers: %d "
|
||||
"prev_active_workers: %d\n"
|
||||
" active_workers_by_JT: %d active_workers_by_heap_size: %d",
|
||||
"active_workers(): " UINTX_FORMAT " new_active_workers: " UINTX_FORMAT " "
|
||||
"prev_active_workers: " UINTX_FORMAT "\n"
|
||||
" active_workers_by_JT: " UINTX_FORMAT " active_workers_by_heap_size: " UINTX_FORMAT,
|
||||
active_workers, new_active_workers, prev_active_workers,
|
||||
active_workers_by_JT, active_workers_by_heap_size);
|
||||
}
|
||||
@ -545,12 +545,12 @@ void AdaptiveSizePolicy::check_gc_overhead_limit(
|
||||
if (UseGCOverheadLimit && PrintGCDetails && Verbose) {
|
||||
if (gc_overhead_limit_exceeded()) {
|
||||
gclog_or_tty->print_cr(" GC is exceeding overhead limit "
|
||||
"of %d%%", GCTimeLimit);
|
||||
"of " UINTX_FORMAT "%%", GCTimeLimit);
|
||||
reset_gc_overhead_limit_count();
|
||||
} else if (print_gc_overhead_limit_would_be_exceeded) {
|
||||
assert(gc_overhead_limit_count() > 0, "Should not be printing");
|
||||
gclog_or_tty->print_cr(" GC would exceed overhead limit "
|
||||
"of %d%% %d consecutive time(s)",
|
||||
"of " UINTX_FORMAT "%% %d consecutive time(s)",
|
||||
GCTimeLimit, gc_overhead_limit_count());
|
||||
}
|
||||
}
|
||||
|
@ -120,8 +120,9 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
|
||||
float delta_ise = (CMSExtrapolateSweep ? intra_sweep_estimate : 0.0);
|
||||
_desired = (ssize_t)(new_rate * (inter_sweep_estimate + delta_ise));
|
||||
if (PrintFLSStatistics > 1) {
|
||||
gclog_or_tty->print_cr("demand: %d, old_rate: %f, current_rate: %f, new_rate: %f, old_desired: %d, new_desired: %d",
|
||||
demand, old_rate, rate, new_rate, old_desired, _desired);
|
||||
gclog_or_tty->print_cr("demand: " SSIZE_FORMAT ", old_rate: %f, current_rate: %f, "
|
||||
"new_rate: %f, old_desired: " SSIZE_FORMAT ", new_desired: " SSIZE_FORMAT,
|
||||
demand, old_rate, rate, new_rate, old_desired, _desired);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -37,21 +37,10 @@
|
||||
|
||||
int ConcurrentGCThread::_CGC_flag = CGC_nil;
|
||||
|
||||
SuspendibleThreadSet ConcurrentGCThread::_sts;
|
||||
|
||||
ConcurrentGCThread::ConcurrentGCThread() :
|
||||
_should_terminate(false), _has_terminated(false) {
|
||||
_sts.initialize();
|
||||
};
|
||||
|
||||
void ConcurrentGCThread::safepoint_synchronize() {
|
||||
_sts.suspend_all();
|
||||
}
|
||||
|
||||
void ConcurrentGCThread::safepoint_desynchronize() {
|
||||
_sts.resume_all();
|
||||
}
|
||||
|
||||
void ConcurrentGCThread::create_and_start() {
|
||||
if (os::create_thread(this, os::cgc_thread)) {
|
||||
// XXX: need to set this to low priority
|
||||
@ -92,78 +81,6 @@ void ConcurrentGCThread::terminate() {
|
||||
ThreadLocalStorage::set_thread(NULL);
|
||||
}
|
||||
|
||||
|
||||
void SuspendibleThreadSet::initialize_work() {
|
||||
MutexLocker x(STS_init_lock);
|
||||
if (!_initialized) {
|
||||
_m = new Monitor(Mutex::leaf,
|
||||
"SuspendibleThreadSetLock", true);
|
||||
_async = 0;
|
||||
_async_stop = false;
|
||||
_async_stopped = 0;
|
||||
_initialized = true;
|
||||
}
|
||||
}
|
||||
|
||||
void SuspendibleThreadSet::join() {
|
||||
initialize();
|
||||
MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
|
||||
while (_async_stop) _m->wait(Mutex::_no_safepoint_check_flag);
|
||||
_async++;
|
||||
assert(_async > 0, "Huh.");
|
||||
}
|
||||
|
||||
void SuspendibleThreadSet::leave() {
|
||||
assert(_initialized, "Must be initialized.");
|
||||
MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
|
||||
_async--;
|
||||
assert(_async >= 0, "Huh.");
|
||||
if (_async_stop) _m->notify_all();
|
||||
}
|
||||
|
||||
void SuspendibleThreadSet::yield(const char* id) {
|
||||
assert(_initialized, "Must be initialized.");
|
||||
if (_async_stop) {
|
||||
MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
|
||||
if (_async_stop) {
|
||||
_async_stopped++;
|
||||
assert(_async_stopped > 0, "Huh.");
|
||||
if (_async_stopped == _async) {
|
||||
if (ConcGCYieldTimeout > 0) {
|
||||
double now = os::elapsedTime();
|
||||
guarantee((now - _suspend_all_start) * 1000.0 <
|
||||
(double)ConcGCYieldTimeout,
|
||||
"Long delay; whodunit?");
|
||||
}
|
||||
}
|
||||
_m->notify_all();
|
||||
while (_async_stop) _m->wait(Mutex::_no_safepoint_check_flag);
|
||||
_async_stopped--;
|
||||
assert(_async >= 0, "Huh");
|
||||
_m->notify_all();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void SuspendibleThreadSet::suspend_all() {
|
||||
initialize(); // If necessary.
|
||||
if (ConcGCYieldTimeout > 0) {
|
||||
_suspend_all_start = os::elapsedTime();
|
||||
}
|
||||
MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
|
||||
assert(!_async_stop, "Only one at a time.");
|
||||
_async_stop = true;
|
||||
while (_async_stopped < _async) _m->wait(Mutex::_no_safepoint_check_flag);
|
||||
}
|
||||
|
||||
void SuspendibleThreadSet::resume_all() {
|
||||
assert(_initialized, "Must be initialized.");
|
||||
MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
|
||||
assert(_async_stopped == _async, "Huh.");
|
||||
_async_stop = false;
|
||||
_m->notify_all();
|
||||
}
|
||||
|
||||
static void _sltLoop(JavaThread* thread, TRAPS) {
|
||||
SurrogateLockerThread* slt = (SurrogateLockerThread*)thread;
|
||||
slt->loop();
|
||||
@ -283,30 +200,3 @@ void SurrogateLockerThread::loop() {
|
||||
}
|
||||
assert(!_monitor.owned_by_self(), "Should unlock before exit.");
|
||||
}
|
||||
|
||||
|
||||
// ===== STS Access From Outside CGCT =====
|
||||
|
||||
void ConcurrentGCThread::stsYield(const char* id) {
|
||||
assert( Thread::current()->is_ConcurrentGC_thread(),
|
||||
"only a conc GC thread can call this" );
|
||||
_sts.yield(id);
|
||||
}
|
||||
|
||||
bool ConcurrentGCThread::stsShouldYield() {
|
||||
assert( Thread::current()->is_ConcurrentGC_thread(),
|
||||
"only a conc GC thread can call this" );
|
||||
return _sts.should_yield();
|
||||
}
|
||||
|
||||
void ConcurrentGCThread::stsJoin() {
|
||||
assert( Thread::current()->is_ConcurrentGC_thread(),
|
||||
"only a conc GC thread can call this" );
|
||||
_sts.join();
|
||||
}
|
||||
|
||||
void ConcurrentGCThread::stsLeave() {
|
||||
assert( Thread::current()->is_ConcurrentGC_thread(),
|
||||
"only a conc GC thread can call this" );
|
||||
_sts.leave();
|
||||
}
|
||||
|
@ -26,55 +26,8 @@
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_SHARED_CONCURRENTGCTHREAD_HPP
|
||||
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc_implementation/shared/suspendibleThreadSet.hpp"
|
||||
#include "runtime/thread.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
class VoidClosure;
|
||||
|
||||
// A SuspendibleThreadSet is (obviously) a set of threads that can be
|
||||
// suspended. A thread can join and later leave the set, and periodically
|
||||
// yield. If some thread (not in the set) requests, via suspend_all, that
|
||||
// the threads be suspended, then the requesting thread is blocked until
|
||||
// all the threads in the set have yielded or left the set. (Threads may
|
||||
// not enter the set when an attempted suspension is in progress.) The
|
||||
// suspending thread later calls resume_all, allowing the suspended threads
|
||||
// to continue.
|
||||
|
||||
class SuspendibleThreadSet {
|
||||
Monitor* _m;
|
||||
int _async;
|
||||
bool _async_stop;
|
||||
int _async_stopped;
|
||||
bool _initialized;
|
||||
double _suspend_all_start;
|
||||
|
||||
void initialize_work();
|
||||
|
||||
public:
|
||||
SuspendibleThreadSet() : _initialized(false) {}
|
||||
|
||||
// Add the current thread to the set. May block if a suspension
|
||||
// is in progress.
|
||||
void join();
|
||||
// Removes the current thread from the set.
|
||||
void leave();
|
||||
// Returns "true" iff an suspension is in progress.
|
||||
bool should_yield() { return _async_stop; }
|
||||
// Suspends the current thread if a suspension is in progress (for
|
||||
// the duration of the suspension.)
|
||||
void yield(const char* id);
|
||||
// Return when all threads in the set are suspended.
|
||||
void suspend_all();
|
||||
// Allow suspended threads to resume.
|
||||
void resume_all();
|
||||
// Redundant initializations okay.
|
||||
void initialize() {
|
||||
// Double-check dirty read idiom.
|
||||
if (!_initialized) initialize_work();
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
class ConcurrentGCThread: public NamedThread {
|
||||
friend class VMStructs;
|
||||
@ -96,9 +49,6 @@ protected:
|
||||
static int set_CGC_flag(int b) { return _CGC_flag |= b; }
|
||||
static int reset_CGC_flag(int b) { return _CGC_flag &= ~b; }
|
||||
|
||||
// All instances share this one set.
|
||||
static SuspendibleThreadSet _sts;
|
||||
|
||||
// Create and start the thread (setting it's priority high.)
|
||||
void create_and_start();
|
||||
|
||||
@ -121,25 +71,6 @@ public:
|
||||
|
||||
// Tester
|
||||
bool is_ConcurrentGC_thread() const { return true; }
|
||||
|
||||
static void safepoint_synchronize();
|
||||
static void safepoint_desynchronize();
|
||||
|
||||
// All overridings should probably do _sts::yield, but we allow
|
||||
// overriding for distinguished debugging messages. Default is to do
|
||||
// nothing.
|
||||
virtual void yield() {}
|
||||
|
||||
bool should_yield() { return _sts.should_yield(); }
|
||||
|
||||
// they are prefixed by sts since there are already yield() and
|
||||
// should_yield() (non-static) methods in this class and it was an
|
||||
// easy way to differentiate them.
|
||||
static void stsYield(const char* id);
|
||||
static bool stsShouldYield();
|
||||
static void stsJoin();
|
||||
static void stsLeave();
|
||||
|
||||
};
|
||||
|
||||
// The SurrogateLockerThread is used by concurrent GC threads for
|
||||
|
@ -131,7 +131,7 @@ void MarkSweep::restore_marks() {
|
||||
assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
|
||||
"inconsistent preserved oop stacks");
|
||||
if (PrintGC && Verbose) {
|
||||
gclog_or_tty->print_cr("Restoring %d marks",
|
||||
gclog_or_tty->print_cr("Restoring " SIZE_FORMAT " marks",
|
||||
_preserved_count + _preserved_oop_stack.size());
|
||||
}
|
||||
|
||||
|
@ -888,7 +888,9 @@ void MutableNUMASpace::print_on(outputStream* st) const {
|
||||
for (int i = 0; i < lgrp_spaces()->length(); i++) {
|
||||
lgrp_spaces()->at(i)->accumulate_statistics(page_size());
|
||||
}
|
||||
st->print(" local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n",
|
||||
st->print(" local/remote/unbiased/uncommitted: " SIZE_FORMAT "K/"
|
||||
SIZE_FORMAT "K/" SIZE_FORMAT "K/" SIZE_FORMAT
|
||||
"K, large/small pages: " SIZE_FORMAT "/" SIZE_FORMAT "\n",
|
||||
ls->space_stats()->_local_space / K,
|
||||
ls->space_stats()->_remote_space / K,
|
||||
ls->space_stats()->_unbiased_space / K,
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include "memory/sharedHeap.hpp"
|
||||
#include "oops/arrayOop.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
|
||||
_word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
|
||||
@ -112,7 +113,7 @@ void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
|
||||
}
|
||||
_used = _allocated - _wasted - _unused;
|
||||
size_t plab_sz = _used/(target_refills*no_of_gc_workers);
|
||||
if (PrintPLAB) gclog_or_tty->print(" (plab_sz = %d ", plab_sz);
|
||||
if (PrintPLAB) gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " ", plab_sz);
|
||||
// Take historical weighted average
|
||||
_filter.sample(plab_sz);
|
||||
// Clip from above and below, and align to object boundary
|
||||
@ -120,7 +121,7 @@ void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
|
||||
plab_sz = MIN2(max_size(), plab_sz);
|
||||
plab_sz = align_object_size(plab_sz);
|
||||
// Latch the result
|
||||
if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = %d) ", plab_sz);
|
||||
if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = " SIZE_FORMAT ") ", plab_sz);
|
||||
_desired_plab_sz = plab_sz;
|
||||
// Now clear the accumulators for next round:
|
||||
// note this needs to be fixed in the case where we
|
||||
@ -132,8 +133,9 @@ void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
|
||||
|
||||
#ifndef PRODUCT
|
||||
void ParGCAllocBuffer::print() {
|
||||
gclog_or_tty->print("parGCAllocBuffer: _bottom: %p _top: %p _end: %p _hard_end: %p"
|
||||
"_retained: %c _retained_filler: [%p,%p)\n",
|
||||
gclog_or_tty->print("parGCAllocBuffer: _bottom: " PTR_FORMAT " _top: " PTR_FORMAT
|
||||
" _end: " PTR_FORMAT " _hard_end: " PTR_FORMAT " _retained: %c"
|
||||
" _retained_filler: [" PTR_FORMAT "," PTR_FORMAT ")\n",
|
||||
_bottom, _top, _end, _hard_end,
|
||||
"FT"[_retained], _retained_filler.start(), _retained_filler.end());
|
||||
}
|
||||
|
@ -60,6 +60,7 @@ public:
|
||||
// Initializes the buffer to be empty, but with the given "word_sz".
|
||||
// Must get initialized with "set_buf" for an allocation to succeed.
|
||||
ParGCAllocBuffer(size_t word_sz);
|
||||
virtual ~ParGCAllocBuffer() {}
|
||||
|
||||
static const size_t min_size() {
|
||||
return ThreadLocalAllocBuffer::min_size();
|
||||
@ -113,7 +114,7 @@ public:
|
||||
}
|
||||
|
||||
// Sets the space of the buffer to be [buf, space+word_sz()).
|
||||
void set_buf(HeapWord* buf) {
|
||||
virtual void set_buf(HeapWord* buf) {
|
||||
_bottom = buf;
|
||||
_top = _bottom;
|
||||
_hard_end = _bottom + word_sz();
|
||||
@ -158,7 +159,7 @@ public:
|
||||
// Fills in the unallocated portion of the buffer with a garbage object.
|
||||
// If "end_of_gc" is TRUE, is after the last use in the GC. IF "retain"
|
||||
// is true, attempt to re-use the unused portion in the next GC.
|
||||
void retire(bool end_of_gc, bool retain);
|
||||
virtual void retire(bool end_of_gc, bool retain);
|
||||
|
||||
void print() PRODUCT_RETURN;
|
||||
};
|
||||
@ -238,14 +239,14 @@ public:
|
||||
|
||||
void undo_allocation(HeapWord* obj, size_t word_sz);
|
||||
|
||||
void set_buf(HeapWord* buf_start) {
|
||||
virtual void set_buf(HeapWord* buf_start) {
|
||||
ParGCAllocBuffer::set_buf(buf_start);
|
||||
_true_end = _hard_end;
|
||||
_bt.set_region(MemRegion(buf_start, word_sz()));
|
||||
_bt.initialize_threshold();
|
||||
}
|
||||
|
||||
void retire(bool end_of_gc, bool retain);
|
||||
virtual void retire(bool end_of_gc, bool retain);
|
||||
|
||||
MemRegion range() {
|
||||
return MemRegion(_top, _true_end);
|
||||
|
@ -84,7 +84,7 @@ void SpaceMangler::mangle_region(MemRegion mr) {
|
||||
assert(ZapUnusedHeapArea, "Mangling should not be in use");
|
||||
#ifdef ASSERT
|
||||
if(TraceZapUnusedHeapArea) {
|
||||
gclog_or_tty->print("Mangling [0x%x to 0x%x)", mr.start(), mr.end());
|
||||
gclog_or_tty->print("Mangling [" PTR_FORMAT " to " PTR_FORMAT ")", mr.start(), mr.end());
|
||||
}
|
||||
Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord);
|
||||
if(TraceZapUnusedHeapArea) {
|
||||
|
@ -0,0 +1,93 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/shared/suspendibleThreadSet.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/thread.inline.hpp"
|
||||
|
||||
uint SuspendibleThreadSet::_nthreads = 0;
|
||||
uint SuspendibleThreadSet::_nthreads_stopped = 0;
|
||||
bool SuspendibleThreadSet::_suspend_all = false;
|
||||
double SuspendibleThreadSet::_suspend_all_start = 0.0;
|
||||
|
||||
void SuspendibleThreadSet::join() {
|
||||
MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
|
||||
while (_suspend_all) {
|
||||
ml.wait(Mutex::_no_safepoint_check_flag);
|
||||
}
|
||||
_nthreads++;
|
||||
}
|
||||
|
||||
void SuspendibleThreadSet::leave() {
|
||||
MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
|
||||
assert(_nthreads > 0, "Invalid");
|
||||
_nthreads--;
|
||||
if (_suspend_all) {
|
||||
ml.notify_all();
|
||||
}
|
||||
}
|
||||
|
||||
void SuspendibleThreadSet::yield() {
|
||||
if (_suspend_all) {
|
||||
MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
|
||||
if (_suspend_all) {
|
||||
_nthreads_stopped++;
|
||||
if (_nthreads_stopped == _nthreads) {
|
||||
if (ConcGCYieldTimeout > 0) {
|
||||
double now = os::elapsedTime();
|
||||
guarantee((now - _suspend_all_start) * 1000.0 < (double)ConcGCYieldTimeout, "Long delay");
|
||||
}
|
||||
}
|
||||
ml.notify_all();
|
||||
while (_suspend_all) {
|
||||
ml.wait(Mutex::_no_safepoint_check_flag);
|
||||
}
|
||||
assert(_nthreads_stopped > 0, "Invalid");
|
||||
_nthreads_stopped--;
|
||||
ml.notify_all();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void SuspendibleThreadSet::synchronize() {
|
||||
assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
|
||||
if (ConcGCYieldTimeout > 0) {
|
||||
_suspend_all_start = os::elapsedTime();
|
||||
}
|
||||
MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
|
||||
assert(!_suspend_all, "Only one at a time");
|
||||
_suspend_all = true;
|
||||
while (_nthreads_stopped < _nthreads) {
|
||||
ml.wait(Mutex::_no_safepoint_check_flag);
|
||||
}
|
||||
}
|
||||
|
||||
void SuspendibleThreadSet::desynchronize() {
|
||||
assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
|
||||
MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
|
||||
assert(_nthreads_stopped == _nthreads, "Invalid");
|
||||
_suspend_all = false;
|
||||
ml.notify_all();
|
||||
}
|
@ -0,0 +1,84 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_SUSPENDIBLETHREADSET_HPP
#define SHARE_VM_GC_IMPLEMENTATION_SHARED_SUSPENDIBLETHREADSET_HPP

#include "memory/allocation.hpp"

// A SuspendibleThreadSet is a set of threads that can be suspended.
// A thread can join and later leave the set, and periodically yield.
// If some thread (not in the set) requests, via synchronize(), that
// the threads be suspended, then the requesting thread is blocked
// until all the threads in the set have yielded or left the set. Threads
// may not enter the set when an attempted suspension is in progress. The
// suspending thread later calls desynchronize(), allowing the suspended
// threads to continue.
class SuspendibleThreadSet : public AllStatic {
private:
static uint _nthreads;
static uint _nthreads_stopped;
static bool _suspend_all;
static double _suspend_all_start;

public:
// Add the current thread to the set. May block if a suspension is in progress.
static void join();

// Removes the current thread from the set.
static void leave();

// Returns true if an suspension is in progress.
static bool should_yield() { return _suspend_all; }

// Suspends the current thread if a suspension is in progress.
static void yield();

// Returns when all threads in the set are suspended.
static void synchronize();

// Resumes all suspended threads in the set.
static void desynchronize();
};

class SuspendibleThreadSetJoiner : public StackObj {
public:
SuspendibleThreadSetJoiner() {
SuspendibleThreadSet::join();
}

~SuspendibleThreadSetJoiner() {
SuspendibleThreadSet::leave();
}

bool should_yield() {
return SuspendibleThreadSet::should_yield();
}

void yield() {
SuspendibleThreadSet::yield();
}
};

#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_SUSPENDIBLETHREADSET_HPP
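As a usage note, a minimal sketch of how a concurrent GC worker loop is expected to use SuspendibleThreadSetJoiner, modeled on the G1StringDedupThread::run() loop earlier in this change; the worker class and its helpers are placeholders, not HotSpot API:

// Illustrative worker loop (ExampleConcurrentWorker, do_unit_of_work and
// should_terminate are placeholder names).
void ExampleConcurrentWorker::run() {
  while (!should_terminate()) {
    // Scoped join: this thread now participates in safepoints and will be
    // waited for by SuspendibleThreadSet::synchronize().
    SuspendibleThreadSetJoiner sts;

    do_unit_of_work();

    // Block here if the VM thread has requested a safepoint synchronization.
    if (sts.should_yield()) {
      sts.yield();
    }
  }
}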
|
@ -208,6 +208,9 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// This is the correct place to place such initialization methods.
virtual void post_initialize() = 0;

// Stop any onging concurrent work and prepare for exit.
virtual void stop() {}

MemRegion reserved_region() const { return _reserved; }
address base() const { return (address)reserved_region().start(); }
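A collector that owns concurrent worker threads is expected to override the new CollectedHeap::stop() hook so that before_exit() can halt them via Universe::heap()->stop() (added later in this changeset). A minimal sketch under assumed names; this is a guess at the shape of such an override, not the verbatim change:

// Sketch only: ExampleCollectedHeap is a placeholder for a concrete heap.
// The call inside is the kind made in this change, e.g. shutting down the
// G1 string deduplication thread whose stop() protocol appears earlier.
void ExampleCollectedHeap::stop() {
  // Ask each concurrent worker thread to terminate and wait for it.
  G1StringDedupThread::stop();
}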
|
||||
|
||||
|
@ -1205,13 +1205,13 @@ void BinaryTreeDictionary<Chunk_t, FreeList_t>::report_statistics() const {
|
||||
"------------------------------------\n");
|
||||
size_t total_size = total_chunk_size(debug_only(NULL));
|
||||
size_t free_blocks = num_free_blocks();
|
||||
gclog_or_tty->print("Total Free Space: %d\n", total_size);
|
||||
gclog_or_tty->print("Max Chunk Size: %d\n", max_chunk_size());
|
||||
gclog_or_tty->print("Number of Blocks: %d\n", free_blocks);
|
||||
gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
|
||||
gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", max_chunk_size());
|
||||
gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
|
||||
if (free_blocks > 0) {
|
||||
gclog_or_tty->print("Av. Block Size: %d\n", total_size/free_blocks);
|
||||
gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", total_size/free_blocks);
|
||||
}
|
||||
gclog_or_tty->print("Tree Height: %d\n", tree_height());
|
||||
gclog_or_tty->print("Tree Height: " SIZE_FORMAT "\n", tree_height());
|
||||
}
|
||||
|
||||
// Print census information - counts, births, deaths, etc.
|
||||
|
@ -44,6 +44,7 @@
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/thread.inline.hpp"
|
||||
#include "utilities/copy.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/stack.inline.hpp"
|
||||
|
||||
//
|
||||
@ -131,7 +132,7 @@ void KlassScanClosure::do_klass(Klass* klass) {
|
||||
#ifndef PRODUCT
|
||||
if (TraceScavenge) {
|
||||
ResourceMark rm;
|
||||
gclog_or_tty->print_cr("KlassScanClosure::do_klass %p, %s, dirty: %s",
|
||||
gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
|
||||
klass,
|
||||
klass->external_name(),
|
||||
klass->has_modified_oops() ? "true" : "false");
|
||||
@ -511,7 +512,7 @@ void DefNewGeneration::space_iterate(SpaceClosure* blk,
|
||||
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
|
||||
HeapWord* result = NULL;
|
||||
if (Verbose && PrintGCDetails) {
|
||||
gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
|
||||
gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
|
||||
" will_fail: %s"
|
||||
" heap_lock: %s"
|
||||
" free: " SIZE_FORMAT,
|
||||
@ -756,7 +757,7 @@ void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
|
||||
|
||||
void DefNewGeneration::handle_promotion_failure(oop old) {
|
||||
if (PrintPromotionFailure && !_promotion_failed) {
|
||||
gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
|
||||
gclog_or_tty->print(" (promotion failure size = %d) ",
|
||||
old->size());
|
||||
}
|
||||
_promotion_failed = true;
|
||||
|
@ -573,8 +573,8 @@ void CardGeneration::compute_new_size() {
|
||||
maximum_desired_capacity / (double) K);
|
||||
gclog_or_tty->print_cr(" "
|
||||
" shrink_bytes: %.1fK"
|
||||
" current_shrink_factor: %d"
|
||||
" new shrink factor: %d"
|
||||
" current_shrink_factor: " SIZE_FORMAT
|
||||
" new shrink factor: " SIZE_FORMAT
|
||||
" _min_heap_delta_bytes: %.1fK",
|
||||
shrink_bytes / (double) K,
|
||||
current_shrink_factor,
|
||||
|
@ -257,7 +257,7 @@ void SharedHeap::print_size_transition(outputStream* out,
|
||||
size_t bytes_before,
|
||||
size_t bytes_after,
|
||||
size_t capacity) {
|
||||
out->print(" %d%s->%d%s(%d%s)",
|
||||
out->print(" " SIZE_FORMAT "%s->" SIZE_FORMAT "%s(" SIZE_FORMAT "%s)",
|
||||
byte_size_in_proper_unit(bytes_before),
|
||||
proper_unit_for_byte_size(bytes_before),
|
||||
byte_size_in_proper_unit(bytes_after),
|
||||
|
@ -1012,6 +1012,11 @@ public:
|
||||
static ByteSize argument_type_offset(int i) {
|
||||
return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size);
|
||||
}
|
||||
|
||||
static ByteSize return_only_size() {
|
||||
return ReturnTypeEntry::size() + in_ByteSize(header_cell_count() * DataLayout::cell_size);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
// CallTypeData
|
||||
@ -2143,7 +2148,6 @@ private:
|
||||
|
||||
static bool profile_jsr292(methodHandle m, int bci);
|
||||
static int profile_arguments_flag();
|
||||
static bool profile_arguments_jsr292_only();
|
||||
static bool profile_all_arguments();
|
||||
static bool profile_arguments_for_invoke(methodHandle m, int bci);
|
||||
static int profile_return_flag();
|
||||
@ -2442,6 +2446,7 @@ public:
|
||||
|
||||
static bool profile_parameters_for_method(methodHandle m);
|
||||
static bool profile_arguments();
|
||||
static bool profile_arguments_jsr292_only();
|
||||
static bool profile_return();
|
||||
static bool profile_parameters();
|
||||
static bool profile_return_jsr292_only();
|
||||
|
@ -1266,8 +1266,9 @@ void SuperWord::co_locate_pack(Node_List* pk) {
|
||||
memops.clear();
|
||||
for (DUIterator i = upper_insert_pt->outs(); upper_insert_pt->has_out(i); i++) {
|
||||
Node* use = upper_insert_pt->out(i);
|
||||
if (!use->is_Store())
|
||||
if (use->is_Mem() && !use->is_Store()) {
|
||||
memops.push(use);
|
||||
}
|
||||
}
|
||||
|
||||
MemNode* lower_insert_pt = last;
|
||||
|
@ -1931,6 +1931,10 @@ class CommandLineFlags {
|
||||
"not just one of the generations (e.g., G1). A value of 0 " \
|
||||
"denotes 'do constant GC cycles'.") \
|
||||
\
|
||||
manageable(intx, CMSTriggerInterval, -1, \
|
||||
"Commence a CMS collection cycle (at least) every so many " \
|
||||
"milliseconds (0 permanently, -1 disabled)") \
|
||||
\
|
||||
product(bool, UseCMSInitiatingOccupancyOnly, false, \
|
||||
"Only use occupancy as a criterion for starting a CMS collection")\
|
||||
\
|
||||
|
@ -499,6 +499,9 @@ void before_exit(JavaThread * thread) {
os::infinite_sleep();
}

// Stop any ongoing concurrent GC work
Universe::heap()->stop();

// Terminate watcher thread - must before disenrolling any periodic task
if (PeriodicTask::num_tasks() > 0)
WatcherThread::stop();
|
||||
|
@ -69,7 +69,7 @@ Monitor* Safepoint_lock = NULL;
|
||||
Monitor* SerializePage_lock = NULL;
|
||||
Monitor* Threads_lock = NULL;
|
||||
Monitor* CGC_lock = NULL;
|
||||
Mutex* STS_init_lock = NULL;
|
||||
Monitor* STS_lock = NULL;
|
||||
Monitor* SLT_lock = NULL;
|
||||
Monitor* iCMS_lock = NULL;
|
||||
Monitor* FullGCCount_lock = NULL;
|
||||
@ -173,7 +173,7 @@ void mutex_init() {
|
||||
def(tty_lock , Mutex , event, true ); // allow to lock in VM
|
||||
|
||||
def(CGC_lock , Monitor, special, true ); // coordinate between fore- and background GC
|
||||
def(STS_init_lock , Mutex, leaf, true );
|
||||
def(STS_lock , Monitor, leaf, true );
|
||||
if (UseConcMarkSweepGC) {
|
||||
def(iCMS_lock , Monitor, special, true ); // CMS incremental mode start/stop notification
|
||||
}
|
||||
|
@ -79,7 +79,7 @@ extern Monitor* Threads_lock; // a lock on the Threads table
// (also used by Safepoints too to block threads creation/destruction)
extern Monitor* CGC_lock; // used for coordination between
// fore- & background GC threads.
extern Mutex* STS_init_lock; // coordinate initialization of SuspendibleThreadSets.
extern Monitor* STS_lock; // used for joining/leaving SuspendibleThreadSet.
extern Monitor* SLT_lock; // used in CMS GC for acquiring PLL
extern Monitor* iCMS_lock; // CMS incremental mode start/stop notification
extern Monitor* FullGCCount_lock; // in support of "concurrent" full gc

@ -75,7 +75,7 @@
#endif
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/shared/concurrentGCThread.hpp"
#include "gc_implementation/shared/suspendibleThreadSet.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
@ -110,7 +110,7 @@ void SafepointSynchronize::begin() {
// more-general mechanism below. DLD (01/05).
ConcurrentMarkSweepThread::synchronize(false);
} else if (UseG1GC) {
ConcurrentGCThread::safepoint_synchronize();
SuspendibleThreadSet::synchronize();
}
#endif // INCLUDE_ALL_GCS

@ -486,7 +486,7 @@ void SafepointSynchronize::end() {
if (UseConcMarkSweepGC) {
ConcurrentMarkSweepThread::desynchronize(false);
} else if (UseG1GC) {
ConcurrentGCThread::safepoint_desynchronize();
SuspendibleThreadSet::desynchronize();
}
#endif // INCLUDE_ALL_GCS
// record this time so VMThread can keep track how much time has elapsed

@ -1324,10 +1324,12 @@ inline int build_int_from_shorts( jushort low, jushort high ) {
#define PTR_FORMAT "0x%08" PRIxPTR
#endif // _LP64

#define SSIZE_FORMAT "%" PRIdPTR
#define SIZE_FORMAT "%" PRIuPTR
#define SSIZE_FORMAT_W(width) "%" #width PRIdPTR
#define SIZE_FORMAT_W(width) "%" #width PRIuPTR
#define SSIZE_FORMAT "%" PRIdPTR
#define SIZE_FORMAT "%" PRIuPTR
#define SIZE_FORMAT_HEX "0x%" PRIxPTR
#define SSIZE_FORMAT_W(width) "%" #width PRIdPTR
#define SIZE_FORMAT_W(width) "%" #width PRIuPTR
#define SIZE_FORMAT_HEX_W(width) "0x%" #width PRIxPTR

#define INTX_FORMAT "%" PRIdPTR
#define UINTX_FORMAT "%" PRIuPTR

@ -1,5 +1,5 @@
#
# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -127,10 +127,12 @@ needs_compact3 = \
gc/6581734/Test6581734.java \
gc/7072527/TestFullGCCount.java \
gc/g1/TestHumongousAllocInitialMark.java \
gc/g1/TestHumongousShrinkHeap.java \
gc/arguments/TestG1HeapRegionSize.java \
gc/metaspace/TestMetaspaceMemoryPool.java \
gc/arguments/TestDynMinHeapFreeRatio.java \
gc/arguments/TestDynMaxHeapFreeRatio.java \
gc/parallelScavenge/TestDynShrinkHeap.java \
runtime/InternalApi/ThreadCpuTimesDeadlock.java \
serviceability/threads/TestFalseDeadLock.java \
compiler/tiered/NonTieredLevelsTest.java \

@ -23,7 +23,7 @@

/*
* @test TestPrintGCDetails
* @bug 8035406 8027295 8035398
* @bug 8035406 8027295 8035398 8019342
* @summary Ensure that the PrintGCDetails output for a minor GC with G1
* includes the expected necessary messages.
* @key gc
@ -48,6 +48,8 @@ public class TestGCLogMessages {
OutputAnalyzer output = new OutputAnalyzer(pb.start());

output.shouldNotContain("[Redirty Cards");
output.shouldNotContain("[Parallel Redirty");
output.shouldNotContain("[Redirtied Cards");
output.shouldNotContain("[Code Root Purge");
output.shouldNotContain("[String Dedup Fixup");
output.shouldNotContain("[Young Free CSet");
@ -63,6 +65,8 @@ public class TestGCLogMessages {
output = new OutputAnalyzer(pb.start());

output.shouldContain("[Redirty Cards");
output.shouldNotContain("[Parallel Redirty");
output.shouldNotContain("[Redirtied Cards");
output.shouldContain("[Code Root Purge");
output.shouldContain("[String Dedup Fixup");
output.shouldNotContain("[Young Free CSet");
@ -80,6 +84,8 @@ public class TestGCLogMessages {
output = new OutputAnalyzer(pb.start());

output.shouldContain("[Redirty Cards");
output.shouldContain("[Parallel Redirty");
output.shouldContain("[Redirtied Cards");
output.shouldContain("[Code Root Purge");
output.shouldContain("[String Dedup Fixup");
output.shouldContain("[Young Free CSet");

131 hotspot/test/gc/g1/TestHumongousShrinkHeap.java (new file)
@ -0,0 +1,131 @@
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/**
 * @test TestHumongousShrinkHeap
 * @bug 8036025
 * @summary Verify that heap shrinks after GC in the presence of fragmentation due to humongous objects
 * @library /testlibrary
 * @run main/othervm -XX:MinHeapFreeRatio=10 -XX:MaxHeapFreeRatio=50 -XX:+UseG1GC -XX:G1HeapRegionSize=1M -verbose:gc TestHumongousShrinkHeap
 */

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;
import java.util.ArrayList;
import java.util.List;
import sun.management.ManagementFactoryHelper;
import static com.oracle.java.testlibrary.Asserts.*;

public class TestHumongousShrinkHeap {

    public static final String MIN_FREE_RATIO_FLAG_NAME = "MinHeapFreeRatio";
    public static final String MAX_FREE_RATIO_FLAG_NAME = "MaxHeapFreeRatio";

    private static final ArrayList<ArrayList<byte[]>> garbage = new ArrayList<>();
    private static final int PAGE_SIZE = 1024 * 1024; // 1M
    private static final int PAGES_NUM = 5;


    public static void main(String[] args) {
        new TestHumongousShrinkHeap().test();
    }

    private final void test() {
        System.gc();
        MemoryUsagePrinter.printMemoryUsage("init");

        eat();
        MemoryUsagePrinter.printMemoryUsage("eaten");
        MemoryUsage muFull = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();

        free();
        MemoryUsagePrinter.printMemoryUsage("free");
        MemoryUsage muFree = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();

        assertLessThan(muFree.getCommitted(), muFull.getCommitted(), String.format(
            "committed free heap size is not less than committed full heap size, heap hasn't been shrunk?%n"
            + "%s = %s%n%s = %s",
            MIN_FREE_RATIO_FLAG_NAME,
            ManagementFactoryHelper.getDiagnosticMXBean().getVMOption(MIN_FREE_RATIO_FLAG_NAME).getValue(),
            MAX_FREE_RATIO_FLAG_NAME,
            ManagementFactoryHelper.getDiagnosticMXBean().getVMOption(MAX_FREE_RATIO_FLAG_NAME).getValue()
        ));
    }

    private void eat() {
        int HumongousObjectSize = Math.round(.9f * PAGE_SIZE);
        System.out.println("Will allocate objects of size=" +
            MemoryUsagePrinter.humanReadableByteCount(HumongousObjectSize, true));

        for (int i = 0; i < PAGES_NUM; i++) {
            ArrayList<byte[]> stuff = new ArrayList<>();
            eatList(stuff, 100, HumongousObjectSize);
            MemoryUsagePrinter.printMemoryUsage("eat #" + i);
            garbage.add(stuff);
        }
    }

    private void free() {
        // do not free last one list
        garbage.subList(0, garbage.size() - 1).clear();

        // do not free last one element from last list
        ArrayList stuff = garbage.get(garbage.size() - 1);
        stuff.subList(0, stuff.size() - 1).clear();
        System.gc();
    }

    private static void eatList(List garbage, int count, int size) {
        for (int i = 0; i < count; i++) {
            garbage.add(new byte[size]);
        }
    }
}

/**
 * Prints memory usage to standard output
 */
class MemoryUsagePrinter {

    public static String humanReadableByteCount(long bytes, boolean si) {
        int unit = si ? 1000 : 1024;
        if (bytes < unit) {
            return bytes + " B";
        }
        int exp = (int) (Math.log(bytes) / Math.log(unit));
        String pre = (si ? "kMGTPE" : "KMGTPE").charAt(exp - 1) + (si ? "" : "i");
        return String.format("%.1f %sB", bytes / Math.pow(unit, exp), pre);
    }

    public static void printMemoryUsage(String label) {
        MemoryUsage memusage = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
        float freeratio = 1f - (float) memusage.getUsed() / memusage.getCommitted();
        System.out.format("[%-24s] init: %-7s, used: %-7s, comm: %-7s, freeRatio ~= %.1f%%%n",
            label,
            humanReadableByteCount(memusage.getInit(), true),
            humanReadableByteCount(memusage.getUsed(), true),
            humanReadableByteCount(memusage.getCommitted(), true),
            freeratio * 100
        );
    }
}

125
hotspot/test/gc/parallelScavenge/TestDynShrinkHeap.java
Normal file
125
hotspot/test/gc/parallelScavenge/TestDynShrinkHeap.java
Normal file
@ -0,0 +1,125 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @test TestDynShrinkHeap
|
||||
* @bug 8016479
|
||||
* @summary Verify that the heap shrinks after full GC according to the current values of the Min/MaxHeapFreeRatio flags
|
||||
* @library /testlibrary
|
||||
* @run main/othervm -XX:+UseAdaptiveSizePolicyWithSystemGC -XX:+UseParallelGC -XX:MinHeapFreeRatio=0 -XX:MaxHeapFreeRatio=100 -verbose:gc TestDynShrinkHeap
|
||||
*/
|
||||
|
||||
import com.oracle.java.testlibrary.TestDynamicVMOption;
|
||||
import java.lang.management.ManagementFactory;
|
||||
import java.lang.management.MemoryUsage;
|
||||
import java.util.ArrayList;
|
||||
import sun.management.ManagementFactoryHelper;
|
||||
import static com.oracle.java.testlibrary.Asserts.*;
|
||||
|
||||
public class TestDynShrinkHeap {
|
||||
|
||||
public static final String MIN_FREE_RATIO_FLAG_NAME = "MinHeapFreeRatio";
|
||||
public static final String MAX_FREE_RATIO_FLAG_NAME = "MaxHeapFreeRatio";
|
||||
|
||||
private static ArrayList<byte[]> list = new ArrayList<>(0);
|
||||
private static final int M = 1024 * 1024; // to make heap more manageable by test code
|
||||
|
||||
private final TestDynamicVMOption maxRatioOption;
|
||||
private final TestDynamicVMOption minRatioOption;
|
||||
|
||||
public TestDynShrinkHeap() {
|
||||
minRatioOption = new TestDynamicVMOption(MIN_FREE_RATIO_FLAG_NAME);
|
||||
maxRatioOption = new TestDynamicVMOption(MAX_FREE_RATIO_FLAG_NAME);
|
||||
}
|
||||
|
||||
private final void test() {
|
||||
System.gc();
|
||||
MemoryUsagePrinter.printMemoryUsage("init");
|
||||
|
||||
eat();
|
||||
MemoryUsagePrinter.printMemoryUsage("eaten");
|
||||
MemoryUsage muFull = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
|
||||
|
||||
free();
|
||||
MemoryUsagePrinter.printMemoryUsage("free");
|
||||
MemoryUsage muFree = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
|
||||
|
||||
assertLessThan(muFree.getCommitted(), muFull.getCommitted(), String.format(
|
||||
"committed free heap size is not less than committed full heap size, heap hasn't been shrunk?%n"
|
||||
+ "%s = %s%n%s = %s",
|
||||
MIN_FREE_RATIO_FLAG_NAME,
|
||||
ManagementFactoryHelper.getDiagnosticMXBean().getVMOption(MIN_FREE_RATIO_FLAG_NAME).getValue(),
|
||||
MAX_FREE_RATIO_FLAG_NAME,
|
||||
ManagementFactoryHelper.getDiagnosticMXBean().getVMOption(MAX_FREE_RATIO_FLAG_NAME).getValue()
|
||||
));
|
||||
}
|
||||
|
||||
private void eat() {
|
||||
for (int i = 0; i < M; i++) {
|
||||
list.add(new byte[1024]);
|
||||
}
|
||||
MemoryUsagePrinter.printMemoryUsage("allocated " + M + " arrays");
|
||||
|
||||
list.subList(0, M / 2).clear();
|
||||
System.gc();
|
||||
MemoryUsagePrinter.printMemoryUsage("array halved");
|
||||
}
|
||||
|
||||
private void free() {
|
||||
maxRatioOption.setIntValue(minRatioOption.getIntValue() + 1);
|
||||
System.gc();
|
||||
MemoryUsagePrinter.printMemoryUsage("under pressure");
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
new TestDynShrinkHeap().test();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Prints memory usage to standard output
|
||||
*/
|
||||
class MemoryUsagePrinter {
|
||||
|
||||
public static String humanReadableByteCount(long bytes, boolean si) {
|
||||
int unit = si ? 1000 : 1024;
|
||||
if (bytes < unit) {
|
||||
return bytes + " B";
|
||||
}
|
||||
int exp = (int) (Math.log(bytes) / Math.log(unit));
|
||||
String pre = (si ? "kMGTPE" : "KMGTPE").charAt(exp - 1) + (si ? "" : "i");
|
||||
return String.format("%.1f %sB", bytes / Math.pow(unit, exp), pre);
|
||||
}
|
||||
|
||||
public static void printMemoryUsage(String label) {
|
||||
MemoryUsage memusage = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
|
||||
float freeratio = 1f - (float) memusage.getUsed() / memusage.getCommitted();
|
||||
System.out.format("[%-24s] init: %-7s, used: %-7s, comm: %-7s, freeRatio ~= %.1f%%%n",
|
||||
label,
|
||||
humanReadableByteCount(memusage.getInit(), true),
|
||||
humanReadableByteCount(memusage.getUsed(), true),
|
||||
humanReadableByteCount(memusage.getCommitted(), true),
|
||||
freeratio * 100
|
||||
);
|
||||
}
|
||||
}
|
@ -1,191 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.io.PrintWriter;
|
||||
import java.io.StringWriter;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.net.URI;
|
||||
import java.util.Arrays;
|
||||
import java.util.Vector;
|
||||
|
||||
import javax.tools.Diagnostic;
|
||||
import javax.tools.DiagnosticCollector;
|
||||
import javax.tools.FileObject;
|
||||
import javax.tools.ForwardingJavaFileManager;
|
||||
import javax.tools.JavaCompiler;
|
||||
import javax.tools.JavaCompiler.CompilationTask;
|
||||
import javax.tools.JavaFileManager;
|
||||
import javax.tools.JavaFileObject;
|
||||
import javax.tools.JavaFileObject.Kind;
|
||||
import javax.tools.SimpleJavaFileObject;
|
||||
import javax.tools.StandardJavaFileManager;
|
||||
import javax.tools.ToolProvider;
|
||||
|
||||
/*
|
||||
* @ignore 6959423
|
||||
* @test SortMethodsTest
|
||||
* @bug 6925573
|
||||
* @summary verify that class loading does not need quadratic time with regard to the number of class
|
||||
methods.
|
||||
* @run main SortMethodsTest
|
||||
* @author volker.simonis@gmail.com
|
||||
*/
|
||||
|
||||
public class SortMethodsTest {
|
||||
|
||||
static String createClass(String name, int nrOfMethods) {
|
||||
StringWriter sw = new StringWriter();
|
||||
PrintWriter pw = new PrintWriter(sw);
|
||||
pw.println("public class " + name + "{");
|
||||
for (int i = 0; i < nrOfMethods; i++) {
|
||||
pw.println(" public void m" + i + "() {}");
|
||||
}
|
||||
pw.println(" public static String sayHello() {");
|
||||
pw.println(" return \"Hello from class \" + " + name +
|
||||
".class.getName() + \" with \" + " + name +
|
||||
".class.getDeclaredMethods().length + \" methods\";");
|
||||
pw.println(" }");
|
||||
pw.println("}");
|
||||
pw.close();
|
||||
return sw.toString();
|
||||
}
|
||||
|
||||
public static void main(String args[]) {
|
||||
|
||||
JavaCompiler comp = ToolProvider.getSystemJavaCompiler();
|
||||
DiagnosticCollector<JavaFileObject> diags = new DiagnosticCollector<JavaFileObject>();
|
||||
final String cName = new String("ManyMethodsClass");
|
||||
Vector<Long> results = new Vector<Long>();
|
||||
|
||||
for (int i = 6; i < 600000; i*=10) {
|
||||
String klass = createClass(cName, i);
|
||||
JavaMemoryFileObject file = new JavaMemoryFileObject(cName, klass);
|
||||
MemoryFileManager mfm = new MemoryFileManager(comp.getStandardFileManager(diags, null, null), file);
|
||||
CompilationTask task = comp.getTask(null, mfm, diags, null, null, Arrays.asList(file));
|
||||
|
||||
if (task.call()) {
|
||||
try {
|
||||
MemoryClassLoader mcl = new MemoryClassLoader(file);
|
||||
long start = System.nanoTime();
|
||||
Class<? extends Object> c = Class.forName(cName, true, mcl);
|
||||
long end = System.nanoTime();
|
||||
results.add(end - start);
|
||||
Method m = c.getDeclaredMethod("sayHello", new Class[0]);
|
||||
String ret = (String)m.invoke(null, new Object[0]);
|
||||
System.out.println(ret + " (loaded and resloved in " + (end - start) + "ns)");
|
||||
} catch (Exception e) {
|
||||
System.err.println(e);
|
||||
}
|
||||
}
|
||||
else {
|
||||
System.out.println(klass);
|
||||
System.out.println();
|
||||
for (Diagnostic diag : diags.getDiagnostics()) {
|
||||
System.out.println(diag.getCode() + "\n" + diag.getKind() + "\n" + diag.getPosition());
|
||||
System.out.println(diag.getSource() + "\n" + diag.getMessage(null));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
long lastRatio = 0;
|
||||
for (int i = 2; i < results.size(); i++) {
|
||||
long normalized1 = Math.max(results.get(i-1) - results.get(0), 1);
|
||||
long normalized2 = Math.max(results.get(i) - results.get(0), 1);
|
||||
long ratio = normalized2/normalized1;
|
||||
lastRatio = ratio;
|
||||
System.out.println("10 x more methods requires " + ratio + " x more time");
|
||||
}
|
||||
// The following is just vague estimation but seems to work on current x86_64 and sparcv9 machines
|
||||
if (lastRatio > 80) {
|
||||
throw new RuntimeException("ATTENTION: it seems that class loading needs quadratic time with regard to the number of class methods!!!");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class JavaMemoryFileObject extends SimpleJavaFileObject {
|
||||
|
||||
private final String code;
|
||||
private ByteArrayOutputStream byteCode;
|
||||
|
||||
JavaMemoryFileObject(String name, String code) {
|
||||
super(URI.create("string:///" + name.replace('.','/') + Kind.SOURCE.extension), Kind.SOURCE);
|
||||
this.code = code;
|
||||
}
|
||||
|
||||
@Override
|
||||
public CharSequence getCharContent(boolean ignoreEncodingErrors) {
|
||||
return code;
|
||||
}
|
||||
|
||||
@Override
|
||||
public OutputStream openOutputStream() {
|
||||
byteCode = new ByteArrayOutputStream();
|
||||
return byteCode;
|
||||
}
|
||||
|
||||
byte[] getByteCode() {
|
||||
return byteCode.toByteArray();
|
||||
}
|
||||
}
|
||||
|
||||
class MemoryClassLoader extends ClassLoader {
|
||||
|
||||
private final JavaMemoryFileObject jfo;
|
||||
|
||||
public MemoryClassLoader(JavaMemoryFileObject jfo) {
|
||||
this.jfo = jfo;
|
||||
}
|
||||
|
||||
public Class findClass(String name) {
|
||||
byte[] b = jfo.getByteCode();
|
||||
return defineClass(name, b, 0, b.length);
|
||||
}
|
||||
}
|
||||
|
||||
class MemoryFileManager extends ForwardingJavaFileManager<JavaFileManager> {
|
||||
|
||||
private final JavaFileObject jfo;
|
||||
|
||||
public MemoryFileManager(StandardJavaFileManager jfm, JavaFileObject jfo) {
|
||||
super(jfm);
|
||||
this.jfo = jfo;
|
||||
}
|
||||
|
||||
@Override
|
||||
public FileObject getFileForInput(Location location, String packageName,
|
||||
String relativeName) throws IOException {
|
||||
return jfo;
|
||||
}
|
||||
|
||||
@Override
|
||||
public JavaFileObject getJavaFileForOutput(Location location, String qualifiedName,
|
||||
Kind kind, FileObject outputFile) throws IOException {
|
||||
return jfo;
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,46 @@
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @bug 8040018
 * @library /testlibrary
 * @summary Check for exception instead of assert.
 * @run main ClassFileParserBug
 */

import java.io.File;
import com.oracle.java.testlibrary.*;

public class ClassFileParserBug {
    public static void main(String args[]) throws Throwable {

        System.out.println("Regression test for bug 8040018");
        String testsrc = System.getProperty("test.src") + "/";
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
            "-jar", testsrc + File.separator + "test.jar");
        OutputAnalyzer output = new OutputAnalyzer(pb.start());
        output.shouldContain("java.lang.ClassFormatError: Bad length on BootstrapMethods");
        output.shouldHaveExitValue(1);
    }
}

609
hotspot/test/runtime/classFileParserBug/LambdaMath.jcod
Normal file
609
hotspot/test/runtime/classFileParserBug/LambdaMath.jcod
Normal file
@ -0,0 +1,609 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This test contains a BootstrapMethods attribute with a fuzzied
|
||||
* attribute_length field that is larger than it should be. This
|
||||
* should cause a java.lang.ClassFormatError exception to be thrown.
|
||||
*/
|
||||
class LambdaMath {
|
||||
0xCAFEBABE;
|
||||
0; // minor version
|
||||
52; // version
|
||||
[162] { // Constant Pool
|
||||
; // first element is empty
|
||||
Method #31 #69; // #1 at 0x0A
|
||||
class #70; // #2 at 0x0F
|
||||
Method #2 #71; // #3 at 0x12
|
||||
Method #72 #73; // #4 at 0x17
|
||||
Field #74 #75; // #5 at 0x1C
|
||||
String #76; // #6 at 0x21
|
||||
Method #77 #78; // #7 at 0x24
|
||||
InvokeDynamic 0s #84; // #8 at 0x29
|
||||
Method #30 #85; // #9 at 0x2E
|
||||
String #86; // #10 at 0x33
|
||||
InvokeDynamic 1s #84; // #11 at 0x36
|
||||
String #88; // #12 at 0x3B
|
||||
InvokeDynamic 2s #84; // #13 at 0x3E
|
||||
String #90; // #14 at 0x43
|
||||
InvokeDynamic 3s #84; // #15 at 0x46
|
||||
String #92; // #16 at 0x4B
|
||||
InvokeDynamic 4s #84; // #17 at 0x4E
|
||||
InterfaceMethod #94 #95; // #18 at 0x53
|
||||
InterfaceMethod #96 #97; // #19 at 0x58
|
||||
InterfaceMethod #96 #98; // #20 at 0x5D
|
||||
InterfaceMethod #99 #100; // #21 at 0x62
|
||||
class #101; // #22 at 0x67
|
||||
Method #22 #69; // #23 at 0x6A
|
||||
Method #22 #102; // #24 at 0x6F
|
||||
String #103; // #25 at 0x74
|
||||
Method #22 #104; // #26 at 0x77
|
||||
Method #22 #105; // #27 at 0x7C
|
||||
class #106; // #28 at 0x81
|
||||
Method #2 #107; // #29 at 0x84
|
||||
class #108; // #30 at 0x89
|
||||
class #109; // #31 at 0x8C
|
||||
Utf8 "<init>"; // #32 at 0x8F
|
||||
Utf8 "()V"; // #33 at 0x98
|
||||
Utf8 "Code"; // #34 at 0x9E
|
||||
Utf8 "LineNumberTable"; // #35 at 0xA5
|
||||
Utf8 "LocalVariableTable"; // #36 at 0xB7
|
||||
Utf8 "this"; // #37 at 0xCC
|
||||
Utf8 "LLambdaMath;"; // #38 at 0xD3
|
||||
Utf8 "main"; // #39 at 0xE2
|
||||
Utf8 "([Ljava/lang/String;)V"; // #40 at 0xE9
|
||||
Utf8 "a"; // #41 at 0x0102
|
||||
Utf8 "[Ljava/lang/String;"; // #42 at 0x0106
|
||||
Utf8 "list"; // #43 at 0x011C
|
||||
Utf8 "Ljava/util/List;"; // #44 at 0x0123
|
||||
Utf8 "LocalVariableTypeTable"; // #45 at 0x0136
|
||||
Utf8 "Ljava/util/List<Ljava/lang/Integer;>;"; // #46 at 0x014F
|
||||
Utf8 "evaluate"; // #47 at 0x0177
|
||||
Utf8 "(Ljava/util/List;Ljava/util/function/Predicate;)V"; // #48 at 0x0182
|
||||
Utf8 "n"; // #49 at 0x01B6
|
||||
Utf8 "Ljava/lang/Integer;"; // #50 at 0x01BA
|
||||
Utf8 "e"; // #51 at 0x01D0
|
||||
Utf8 "Ljava/lang/Throwable;"; // #52 at 0x01D4
|
||||
Utf8 "predicate"; // #53 at 0x01EC
|
||||
Utf8 "Ljava/util/function/PrediCate;"; // #54 at 0x01F8
|
||||
Utf8 "Ljava/util/function/Predicate<Ljava/lang/Integer;>;"; // #55 at 0x0219
|
||||
Utf8 "StackMapTable"; // #56 at 0x024F
|
||||
class #110; // #57 at 0x025F
|
||||
class #106; // #58 at 0x0262
|
||||
Utf8 "Signature"; // #59 at 0x0265
|
||||
Utf8 "(Ljava/util/List<Ljava/lang/Integer;>;Ljava/util/function/Predicate<Ljava/lang/Integer;>;)V"; // #60 at 0x0271
|
||||
Utf8 "lambda$main$4"; // #61 at 0x02CF
|
||||
Utf8 "(Ljava/lang/Integer;)Z"; // #62 at 0x02DF
|
||||
Utf8 "lambda$main$3"; // #63 at 0x02F8
|
||||
Utf8 "lambda$main$2"; // #64 at 0x0308
|
||||
Utf8 "lambda$main$1"; // #65 at 0x0318
|
||||
Utf8 "lambda$main$0"; // #66 at 0x0328
|
||||
Utf8 "SourceFile"; // #67 at 0x0338
|
||||
Utf8 "LambdaMath.java"; // #68 at 0x0345
|
||||
NameAndType #32 #33; // #69 at 0x0357
|
||||
Utf8 "java/lang/Integer"; // #70 at 0x035C
|
||||
NameAndType #111 #112; // #71 at 0x0370
|
||||
class #113; // #72 at 0x0375
|
||||
NameAndType #114 #115; // #73 at 0x0378
|
||||
class #116; // #74 at 0x037D
|
||||
NameAndType #117 #118; // #75 at 0x0380
|
||||
Utf8 "Print all numbers:"; // #76 at 0x0385
|
||||
class #119; // #77 at 0x039A
|
||||
NameAndType #120 #121; // #78 at 0x039D
|
||||
Utf8 "BootstrapMethods"; // #79 at 0x03A2
|
||||
MethodHandle 6b #122; // #80 at 0x03B5
|
||||
MethodType #123; // #81 at 0x03B9
|
||||
MethodHandle 6b #124; // #82 at 0x03BC
|
||||
MethodType #62; // #83 at 0x03C0
|
||||
NameAndType #125 #126; // #84 at 0x03C3
|
||||
NameAndType #47 #48; // #85 at 0x03C8
|
||||
Utf8 "Print no numbers:"; // #86 at 0x03CD
|
||||
MethodHandle 6b #127; // #87 at 0x03E1
|
||||
Utf8 "Print even numbers:"; // #88 at 0x03E5
|
||||
MethodHandle 6b #128; // #89 at 0x03FB
|
||||
Utf8 "Print odd numbers:"; // #90 at 0x03FF
|
||||
MethodHandle 6b #129; // #91 at 0x0414
|
||||
Utf8 "Print numbers greater than 5:"; // #92 at 0x0418
|
||||
MethodHandle 6b #130; // #93 at 0x0438
|
||||
class #131; // #94 at 0x043C
|
||||
NameAndType #132 #133; // #95 at 0x043F
|
||||
class #110; // #96 at 0x0444
|
||||
NameAndType #134 #135; // #97 at 0x0447
|
||||
NameAndType #136 #137; // #98 at 0x044C
|
||||
class #138; // #99 at 0x0451
|
||||
NameAndType #125 #123; // #100 at 0x0454
|
||||
Utf8 "java/lang/StringFuilder"; // #101 at 0x0459
|
||||
NameAndType #139 #140; // #102 at 0x0473
|
||||
Utf8 " "; // #103 at 0x0478
|
||||
NameAndType #139 #141; // #104 at 0x047C
|
||||
NameAndType #142 #143; // #105 at 0x0481
|
||||
Utf8 "java/lang/Throwable"; // #106 at 0x0486
|
||||
NameAndType #144 #145; // #107 at 0x049C
|
||||
Utf8 "LambdaMath"; // #108 at 0x04A1
|
||||
Utf8 "java/lang/Object"; // #109 at 0x04AE
|
||||
Utf8 "java/util/Iterator"; // #110 at 0x04C1
|
||||
Utf8 "valueOf"; // #111 at 0x04D6
|
||||
Utf8 "(I)Ljava/lang/Integer;"; // #112 at 0x04E0
|
||||
Utf8 "java/util/Arrays"; // #113 at 0x04F9
|
||||
Utf8 "asList"; // #114 at 0x050C
|
||||
Utf8 "([Ljava/lang/Object;)Ljava/util/List;"; // #115 at 0x0515
|
||||
Utf8 "java/lang/System"; // #116 at 0x053D
|
||||
Utf8 "out"; // #117 at 0x0550
|
||||
Utf8 "Ljava/io/PrintStream;"; // #118 at 0x0556
|
||||
Utf8 "java/io/PrintStream"; // #119 at 0x056E
|
||||
Utf8 "println"; // #120 at 0x0584
|
||||
Utf8 "(Ljava/lang/String;)V"; // #121 at 0x058E
|
||||
Method #146 #147; // #122 at 0x05A6
|
||||
Utf8 "(Ljava/lang/Object;)Z"; // #123 at 0x05AB
|
||||
Method #30 #148; // #124 at 0x05C3
|
||||
Utf8 "test"; // #125 at 0x05C8
|
||||
Utf8 "()Ljava/util/function/Predicate;"; // #126 at 0x05CF
|
||||
Method #30 #149; // #127 at 0x05F2
|
||||
Method #30 #150; // #128 at 0x05F7
|
||||
Method #30 #151; // #129 at 0x05FC
|
||||
Method #30 #152; // #130 at 0x0601
|
||||
Utf8 "java/util/List"; // #131 at 0x0606
|
||||
Utf8 "iterator"; // #132 at 0x0617
|
||||
Utf8 "()Ljava/util/Iterator;"; // #133 at 0x0622
|
||||
Utf8 "hasNext"; // #134 at 0x063B
|
||||
Utf8 "()Z"; // #135 at 0x0645
|
||||
Utf8 "next"; // #136 at 0x064B
|
||||
Utf8 "()Ljava/lang/Object;"; // #137 at 0x0652
|
||||
Utf8 "java/util/function/Predicate"; // #138 at 0x0669
|
||||
Utf8 "append"; // #139 at 0x0688
|
||||
Utf8 "(Ljava/lang/Object;)Ljava/lang/StringBuilder;"; // #140 at 0x0691
|
||||
Utf8 "(Ljava/lang/String;)Ljava/lang/StringBuilder;"; // #141 at 0x06C1
|
||||
Utf8 "toString"; // #142 at 0x06F1
|
||||
Utf8 "()Ljava/lang/String;"; // #143 at 0x06FC
|
||||
Utf8 "intValue"; // #144 at 0x0713
|
||||
Utf8 "()I"; // #145 at 0x071E
|
||||
class #153; // #146 at 0x0724
|
||||
NameAndType #154 #158; // #147 at 0x0727
|
||||
NameAndType #66 #62; // #148 at 0x072C
|
||||
NameAndType #65 #62; // #149 at 0x0731
|
||||
NameAndType #64 #62; // #150 at 0x0736
|
||||
NameAndType #63 #62; // #151 at 0x073B
|
||||
NameAndType #61 #62; // #152 at 0x0740
|
||||
Utf8 "java/lang/invoke/LambdaMetafactory"; // #153 at 0x0745
|
||||
Utf8 "metafactory"; // #154 at 0x076A
|
||||
class #160; // #155 at 0x0778
|
||||
Utf8 "Lookup"; // #156 at 0x077B
|
||||
Utf8 "InnerClasses"; // #157 at 0x0784
|
||||
Utf8 "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/invoke/MethodType;Ljava/lang/invoke/MethodHandle;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/CallSite;"; // #158 at 0x0793
|
||||
class #161; // #159 at 0x0862
|
||||
Utf8 "java/lang/invoke/MethodHandles$Lookup"; // #160 at 0x0865
|
||||
Utf8 "java/lang/invoke/MethodHandles"; // #161 at 0x088D
|
||||
} // Constant Pool
|
||||
|
||||
0x0021; // access
|
||||
#30;// this_cpx
|
||||
#31;// super_cpx
|
||||
|
||||
[0] { // Interfaces
|
||||
} // Interfaces
|
||||
|
||||
[0] { // fields
|
||||
} // fields
|
||||
|
||||
[8] { // methods
|
||||
{ // Member at 0x08BA
|
||||
0x0001; // access
|
||||
#32; // name_cpx
|
||||
#33; // sig_cpx
|
||||
[1] { // Attributes
|
||||
Attr(#34, 47) { // Code at 0x08C2
|
||||
1; // max_stack
|
||||
1; // max_locals
|
||||
Bytes[5]{
|
||||
0x2AB70001B1;
|
||||
};
|
||||
[0] { // Traps
|
||||
} // end Traps
|
||||
[2] { // Attributes
|
||||
Attr(#35, 6) { // LineNumberTable at 0x08D9
|
||||
[1] { // LineNumberTable
|
||||
0 5; // at 0x08E5
|
||||
}
|
||||
} // end LineNumberTable
|
||||
;
|
||||
Attr(#36, 12) { // LocalVariableTable at 0x08E5
|
||||
[1] { // LocalVariableTable
|
||||
0 5 37 38 0; // at 0x08F7
|
||||
}
|
||||
} // end LocalVariableTable
|
||||
} // Attributes
|
||||
} // end Code
|
||||
} // Attributes
|
||||
} // Member
|
||||
;
|
||||
{ // Member at 0x08F7
|
||||
0x0009; // access
|
||||
#39; // name_cpx
|
||||
#40; // sig_cpx
|
||||
[1] { // Attributes
|
||||
Attr(#34, 261) { // Code at 0x08FF
|
||||
4; // max_stack
|
||||
2; // max_locals
|
||||
Bytes[147]{
|
||||
0x1007BD0002590304;
|
||||
0xB8000353590405B8;
|
||||
0x000353590506B800;
|
||||
0x0353590607B80003;
|
||||
0x53590708B8000353;
|
||||
0x59081006B8000353;
|
||||
0x5910061007B80003;
|
||||
0x53B800044CB20005;
|
||||
0x1206B600072BBA00;
|
||||
0x080000B80009B200;
|
||||
0x05120AB600072BBA;
|
||||
0x000B0000B80009B2;
|
||||
0x0005120CB600072B;
|
||||
0xBA000D0000B80009;
|
||||
0xB20005120EB60007;
|
||||
0x2BBA000F0000B800;
|
||||
0x09B200051210B600;
|
||||
0x072BBA00110000B8;
|
||||
0x0009B1;
|
||||
};
|
||||
[0] { // Traps
|
||||
} // end Traps
|
||||
[3] { // Attributes
|
||||
Attr(#35, 50) { // LineNumberTable at 0x09A4
|
||||
[12] { // LineNumberTable
|
||||
0 9; // at 0x09B0
|
||||
61 11; // at 0x09B4
|
||||
69 12; // at 0x09B8
|
||||
78 14; // at 0x09BC
|
||||
86 15; // at 0x09C0
|
||||
95 17; // at 0x09C4
|
||||
103 18; // at 0x09C8
|
||||
112 20; // at 0x09CC
|
||||
120 21; // at 0x09D0
|
||||
129 23; // at 0x09D4
|
||||
137 24; // at 0x09D8
|
||||
146 26; // at 0x09DC
|
||||
}
|
||||
} // end LineNumberTable
|
||||
;
|
||||
Attr(#36, 22) { // LocalVariableTable at 0x09DC
|
||||
[2] { // LocalVariableTable
|
||||
0 147 41 42 0; // at 0x09EE
|
||||
61 86 43 44 1; // at 0x09F8
|
||||
}
|
||||
} // end LocalVariableTable
|
||||
;
|
||||
Attr(#45, 12) { // LocalVariableTypeTable at 0x09F8
|
||||
[1] { // LocalVariableTypeTable
|
||||
61 86 43 46 1; // at 0x0A0A
|
||||
}
|
||||
} // end LocalVariableTypeTable
|
||||
} // Attributes
|
||||
} // end Code
|
||||
} // Attributes
|
||||
} // Member
|
||||
;
|
||||
{ // Member at 0x0A0A
|
||||
0x0009; // access
|
||||
#47; // name_cpx
|
||||
#48; // sig_cpx
|
||||
[2] { // Attributes
|
||||
Attr(#34, 224) { // Code at 0x0A12
|
||||
3; // max_stack
|
||||
4; // max_locals
|
||||
Bytes[69]{
|
||||
0x2AB9001201004D2C;
|
||||
0xB900130100990033;
|
||||
0x2CB900140100C200;
|
||||
0x024E2B2DB9001502;
|
||||
0x0099001CB20005BB;
|
||||
0x001659B700172DB6;
|
||||
0x00181219B6001AB6;
|
||||
0x001BB60007A7FFCA;
|
||||
0xA700044DB1;
|
||||
};
|
||||
[1] { // Traps
|
||||
0 64 67 28; // at 0x0A6F
|
||||
} // end Traps
|
||||
[4] { // Attributes
|
||||
Attr(#35, 30) { // LineNumberTable at 0x0A71
|
||||
[7] { // LineNumberTable
|
||||
0 30; // at 0x0A7D
|
||||
26 31; // at 0x0A81
|
||||
36 32; // at 0x0A85
|
||||
61 34; // at 0x0A89
|
||||
64 38; // at 0x0A8D
|
||||
67 37; // at 0x0A91
|
||||
68 39; // at 0x0A95
|
||||
}
|
||||
} // end LineNumberTable
|
||||
;
|
||||
Attr(#36, 42) { // LocalVariableTable at 0x0A95
|
||||
[4] { // LocalVariableTable
|
||||
26 35 49 50 3; // at 0x0AA7
|
||||
68 0 51 52 2; // at 0x0AB1
|
||||
0 69 43 44 0; // at 0x0ABB
|
||||
0 69 53 54 1; // at 0x0AC5
|
||||
}
|
||||
} // end LocalVariableTable
|
||||
;
|
||||
Attr(#45, 22) { // LocalVariableTypeTable at 0x0AC5
|
||||
[2] { // LocalVariableTypeTable
|
||||
0 69 43 46 0; // at 0x0AD7
|
||||
0 69 53 55 1; // at 0x0AE1
|
||||
}
|
||||
} // end LocalVariableTypeTable
|
||||
;
|
||||
Attr(#56, 17) { // StackMapTable at 0x0AE1
|
||||
[5] { //
|
||||
252b, 7, [1]z{7b,57}; // append_frame 1
|
||||
53b; // same_frame
|
||||
250b, 2; // chop_frame 1
|
||||
66b, [1]z{7b,58}; // same_locals_1_stack_item_frame
|
||||
0b; // same_frame
|
||||
}
|
||||
} // end StackMapTable
|
||||
} // Attributes
|
||||
} // end Code
|
||||
;
|
||||
Attr(#59, 2) { // Signature at 0x0AF8
|
||||
#60;
|
||||
} // end Signature
|
||||
} // Attributes
|
||||
} // Member
|
||||
;
|
||||
{ // Member at 0x0B00
|
||||
0x100A; // access
|
||||
#61; // name_cpx
|
||||
#62; // sig_cpx
|
||||
[1] { // Attributes
|
||||
Attr(#34, 67) { // Code at 0x0B08
|
||||
2; // max_stack
|
||||
1; // max_locals
|
||||
Bytes[14]{
|
||||
0x2AB6001D08A40007;
|
||||
0x04A7000403AC;
|
||||
};
|
||||
[0] { // Traps
|
||||
} // end Traps
|
||||
[3] { // Attributes
|
||||
Attr(#35, 6) { // LineNumberTable at 0x0B28
|
||||
[1] { // LineNumberTable
|
||||
0 24; // at 0x0B34
|
||||
}
|
||||
} // end LineNumberTable
|
||||
;
|
||||
Attr(#36, 12) { // LocalVariableTable at 0x0B34
|
||||
[1] { // LocalVariableTable
|
||||
0 14 49 50 0; // at 0x0B46
|
||||
}
|
||||
} // end LocalVariableTable
|
||||
;
|
||||
Attr(#56, 5) { // StackMapTable at 0x0B46
|
||||
[2] { //
|
||||
12b; // same_frame
|
||||
64b, [1]z{1b}; // same_locals_1_stack_item_frame
|
||||
}
|
||||
} // end StackMapTable
|
||||
} // Attributes
|
||||
} // end Code
|
||||
} // Attributes
|
||||
} // Member
|
||||
;
|
||||
{ // Member at 0x0B51
|
||||
0x100A; // access
|
||||
#63; // name_cpx
|
||||
#62; // sig_cpx
|
||||
[1] { // Attributes
|
||||
Attr(#34, 69) { // Code at 0x0B59
|
||||
2; // max_stack
|
||||
1; // max_locals
|
||||
Bytes[16]{
|
||||
0x2AB6001D057004A0;
|
||||
0x000704A7000403AC;
|
||||
};
|
||||
[0] { // Traps
|
||||
} // end Traps
|
||||
[3] { // Attributes
|
||||
Attr(#35, 6) { // LineNumberTable at 0x0B7B
|
||||
[1] { // LineNumberTable
|
||||
0 21; // at 0x0B87
|
||||
}
|
||||
} // end LineNumberTable
|
||||
;
|
||||
Attr(#36, 12) { // LocalVariableTable at 0x0B87
|
||||
[1] { // LocalVariableTable
|
||||
0 16 49 50 0; // at 0x0B99
|
||||
}
|
||||
} // end LocalVariableTable
|
||||
;
|
||||
Attr(#56, 5) { // StackMapTable at 0x0B99
|
||||
[2] { //
|
||||
14b; // same_frame
|
||||
64b, [1]z{1b}; // same_locals_1_stack_item_frame
|
||||
}
|
||||
} // end StackMapTable
|
||||
} // Attributes
|
||||
} // end Code
|
||||
} // Attributes
|
||||
} // Member
|
||||
;
|
||||
{ // Member at 0x0BA4
|
||||
0x100A; // access
|
||||
#64; // name_cpx
|
||||
#62; // sig_cpx
|
||||
[1] { // Attributes
|
||||
Attr(#34, 68) { // Code at 0x0BAC
|
||||
2; // max_stack
|
||||
1; // max_locals
|
||||
Bytes[15]{
|
||||
0x2AB6001D05709A00;
|
||||
0x0704A7000403AC;
|
||||
};
|
||||
[0] { // Traps
|
||||
} // end Traps
|
||||
[3] { // Attributes
|
||||
Attr(#35, 6) { // LineNumberTable at 0x0BCD
|
||||
[1] { // LineNumberTable
|
||||
0 18; // at 0x0BD9
|
||||
}
|
||||
} // end LineNumberTable
|
||||
;
|
||||
Attr(#36, 12) { // LocalVariableTable at 0x0BD9
|
||||
[1] { // LocalVariableTable
|
||||
0 15 49 50 0; // at 0x0BEB
|
||||
}
|
||||
} // end LocalVariableTable
|
||||
;
|
||||
Attr(#56, 5) { // StackMapTable at 0x0BEB
|
||||
[2] { //
|
||||
13b; // same_frame
|
||||
64b, [1]z{1b}; // same_locals_1_stack_item_frame
|
||||
}
|
||||
} // end StackMapTable
|
||||
} // Attributes
|
||||
} // end Code
|
||||
} // Attributes
|
||||
} // Member
|
||||
;
|
||||
{ // Member at 0x0BF6
|
||||
0x100A; // access
|
||||
#65; // name_cpx
|
||||
#62; // sig_cpx
|
||||
[1] { // Attributes
|
||||
Attr(#34, 44) { // Code at 0x0BFE
|
||||
1; // max_stack
|
||||
1; // max_locals
|
||||
Bytes[2]{
|
||||
0x03AC;
|
||||
};
|
||||
[0] { // Traps
|
||||
} // end Traps
|
||||
[2] { // Attributes
|
||||
Attr(#35, 6) { // LineNumberTable at 0x0C12
|
||||
[1] { // LineNumberTable
|
||||
0 15; // at 0x0C1E
|
||||
}
|
||||
} // end LineNumberTable
|
||||
;
|
||||
Attr(#36, 12) { // LocalVariableTable at 0x0C1E
|
||||
[1] { // LocalVariableTable
|
||||
0 2 49 50 0; // at 0x0C30
|
||||
}
|
||||
} // end LocalVariableTable
|
||||
} // Attributes
|
||||
} // end Code
|
||||
} // Attributes
|
||||
} // Member
|
||||
;
|
||||
{ // Member at 0x0C30
|
||||
0x100A; // access
|
||||
#66; // name_cpx
|
||||
#62; // sig_cpx
|
||||
[1] { // Attributes
|
||||
Attr(#34, 44) { // Code at 0x0C38
|
||||
1; // max_stack
|
||||
1; // max_locals
|
||||
Bytes[2]{
|
||||
0x04AC;
|
||||
};
|
||||
[0] { // Traps
|
||||
} // end Traps
|
||||
[2] { // Attributes
|
||||
Attr(#35, 6) { // LineNumberTable at 0x0C4C
|
||||
[1] { // LineNumberTable
|
||||
0 12; // at 0x0C58
|
||||
}
|
||||
} // end LineNumberTable
|
||||
;
|
||||
Attr(#36, 12) { // LocalVariableTable at 0x0C58
|
||||
[1] { // LocalVariableTable
|
||||
0 2 49 50 0; // at 0x0C6A
|
||||
}
|
||||
} // end LocalVariableTable
|
||||
} // Attributes
|
||||
} // end Code
|
||||
} // Attributes
|
||||
} // Member
|
||||
} // methods
|
||||
|
||||
[3] { // Attributes
|
||||
Attr(#67, 2) { // SourceFile at 0x0C6C
|
||||
#68;
|
||||
} // end SourceFile
|
||||
;
|
||||
Attr(#157, 10) { // InnerClasses at 0x0C74
|
||||
[1] { // InnerClasses
|
||||
#155 #159 #156 25; // at 0x0C84
|
||||
}
|
||||
} // end InnerClasses
|
||||
;
|
||||
Attr(#79, 52) { // BootstrapMethods at 0x0C84
|
||||
[5] { // bootstrap_methods
|
||||
{ // bootstrap_method
|
||||
#80; // bootstrap_method_ref
|
||||
[3] { // bootstrap_arguments
|
||||
#81; // at 0x0C92
|
||||
#82; // at 0x0C94
|
||||
#83; // at 0x0C96
|
||||
} // bootstrap_arguments
|
||||
} // bootstrap_method
|
||||
;
|
||||
{ // bootstrap_method
|
||||
#80; // bootstrap_method_ref
|
||||
[3] { // bootstrap_arguments
|
||||
#81; // at 0x0C9C
|
||||
#87; // at 0x0C9E
|
||||
#83; // at 0x0CA0
|
||||
} // bootstrap_arguments
|
||||
} // bootstrap_method
|
||||
;
|
||||
{ // bootstrap_method
|
||||
#80; // bootstrap_method_ref
|
||||
[3] { // bootstrap_arguments
|
||||
#81; // at 0x0CA6
|
||||
#89; // at 0x0CA8
|
||||
#83; // at 0x0CAA
|
||||
} // bootstrap_arguments
|
||||
} // bootstrap_method
|
||||
;
|
||||
{ // bootstrap_method
|
||||
#80; // bootstrap_method_ref
|
||||
[3] { // bootstrap_arguments
|
||||
#81; // at 0x0CB0
|
||||
#91; // at 0x0CB2
|
||||
#83; // at 0x0CB4
|
||||
} // bootstrap_arguments
|
||||
} // bootstrap_method
|
||||
;
|
||||
{ // bootstrap_method
|
||||
#80; // bootstrap_method_ref
|
||||
[1] { // bootstrap_arguments
|
||||
#81; // at 0x0CBA
|
||||
} // bootstrap_arguments
|
||||
} // bootstrap_method
|
||||
}
|
||||
// ======== attribute array started at 0x0C84 has 4 bytes more:
|
||||
0x005D0053;
|
||||
} // end BootstrapMethods
|
||||
} // Attributes
|
||||
} // end class LambdaMath
|
BIN hotspot/test/runtime/classFileParserBug/test.jar (new file)
Binary file not shown.

@ -253,3 +253,4 @@ f4e624447514f12dd7c51f1e5b0cb97efcd15be2 jdk9-b07
9e7bd44ea85c72318130379c34b98716b9c7c248 jdk9-b08
2cef452ba711b17950da275fd15931925799f07c jdk9-b09
ab06ba2894313a47e4969ca37792ff119c49e711 jdk9-b10
47feccd164b7187a0147693a922ee47c6629643c jdk9-b11

@ -1147,7 +1147,8 @@ JDWP "Java(tm) Debug Wire Protocol"
(ErrorSet
(Error INVALID_CLASS "clazz is not the ID of a class.")
(Error INVALID_OBJECT "clazz is not a known ID.")
(Error INVALID_METHODID "methodID is not the ID of a method.")
(Error INVALID_METHODID "methodID is not the ID of a static method in "
"this class type or one of its superclasses.")
(Error INVALID_THREAD)
(Error THREAD_NOT_SUSPENDED)
(Error VM_DEAD)
@ -1250,6 +1251,83 @@ JDWP "Java(tm) Debug Wire Protocol"
)
)
(CommandSet InterfaceType=5
(Command InvokeMethod=1
"Invokes a static method. "
"The method must not be a static initializer. "
"The method must be a member of the interface type. "
"<p>Since JDWP version 1.8 "
"<p>"
"The method invocation will occur in the specified thread. "
"Method invocation can occur only if the specified thread "
"has been suspended by an event. "
"Method invocation is not supported "
"when the target VM has been suspended by the front-end. "
"<p>"
"The specified method is invoked with the arguments in the specified "
"argument list. "
"The method invocation is synchronous; the reply packet is not "
"sent until the invoked method returns in the target VM. "
"The return value (possibly the void value) is "
"included in the reply packet. "
"If the invoked method throws an exception, the "
"exception object ID is set in the reply packet; otherwise, the "
"exception object ID is null. "
"<p>"
"For primitive arguments, the argument value's type must match the "
"argument's type exactly. For object arguments, there must exist a "
"widening reference conversion from the argument value's type to the "
"argument's type and the argument's type must be loaded. "
"<p>"
"By default, all threads in the target VM are resumed while "
"the method is being invoked if they were previously "
"suspended by an event or by a command. "
"This is done to prevent the deadlocks "
"that will occur if any of the threads own monitors "
"that will be needed by the invoked method. It is possible that "
"breakpoints or other events might occur during the invocation. "
"Note, however, that this implicit resume acts exactly like "
"the ThreadReference resume command, so if the thread's suspend "
"count is greater than 1, it will remain in a suspended state "
"during the invocation. By default, when the invocation completes, "
"all threads in the target VM are suspended, regardless their state "
"before the invocation. "
"<p>"
"The resumption of other threads during the invoke can be prevented "
"by specifying the INVOKE_SINGLE_THREADED "
"bit flag in the <code>options</code> field; however, "
"there is no protection against or recovery from the deadlocks "
"described above, so this option should be used with great caution. "
"Only the specified thread will be resumed (as described for all "
"threads above). Upon completion of a single threaded invoke, the invoking thread "
"will be suspended once again. Note that any threads started during "
"the single threaded invocation will not be suspended when the "
"invocation completes. "
"<p>"
"If the target VM is disconnected during the invoke (for example, through "
"the VirtualMachine dispose command) the method invocation continues. "
(Out
(interfaceType clazz "The interface type ID.")
(threadObject thread "The thread in which to invoke.")
(method methodID "The method to invoke.")
(Repeat arguments
(value arg "The argument value.")
)
(int options "Invocation <a href=\"#JDWP_InvokeOptions\">options</a>")
)
(Reply
(value returnValue "The returned value.")
(tagged-object exception "The thrown exception.")
)
(ErrorSet
(Error INVALID_CLASS "clazz is not the ID of an interface.")
(Error INVALID_OBJECT "clazz is not a known ID.")
(Error INVALID_METHODID "methodID is not the ID of a static method in this "
"interface type or is the ID of a static initializer.")
(Error INVALID_THREAD)
(Error THREAD_NOT_SUSPENDED)
(Error VM_DEAD)
)
)
)
(CommandSet Method=6
(Command LineTable=1
@ -1543,7 +1621,7 @@ JDWP "Java(tm) Debug Wire Protocol"
"<p>"
"By default, all threads in the target VM are resumed while "
"the method is being invoked if they were previously "
"suspended by an event or by command. "
"suspended by an event or by a command. "
"This is done to prevent the deadlocks "
"that will occur if any of the threads own monitors "
"that will be needed by the invoked method. It is possible that "
@ -1586,7 +1664,9 @@ JDWP "Java(tm) Debug Wire Protocol"
(Error INVALID_OBJECT)
(Error INVALID_CLASS "clazz is not the ID of a reference "
"type.")
(Error INVALID_METHODID "methodID is not the ID of a method.")
(Error INVALID_METHODID "methodID is not the ID of an instance method "
"in this object's type or one of its superclasses, "
"superinterfaces, or implemented interfaces.")
(Error INVALID_THREAD)
(Error THREAD_NOT_SUSPENDED)
(Error VM_DEAD)

@ -798,6 +798,10 @@ ifeq ($(OPENJDK_TARGET_OS), linux)
BUILD_LIBFONTMANAGER_ExtensionSubtables.cpp_CXXFLAGS := -fno-strict-aliasing
endif

# Libfontmanager doesn't actually need X_LIBS to link, but if building
# on a Solaris machine without X installed, using a devkit, linking
# to libawt_xawt will fail without the -L parameters from X_LIBS. Filter
# out the -R parameters since they aren't needed.
$(eval $(call SetupNativeCompilation,BUILD_LIBFONTMANAGER, \
LIBRARY := fontmanager, \
OUTPUT_DIR := $(INSTALL_LIBRARIES_HERE), \
@ -816,7 +820,8 @@ $(eval $(call SetupNativeCompilation,BUILD_LIBFONTMANAGER, \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LDFLAGS_SUFFIX := $(BUILD_LIBFONTMANAGER_FONTLIB), \
LDFLAGS_SUFFIX_linux := -lawt $(LIBM) $(LIBCXX) -ljava -ljvm -lc, \
LDFLAGS_SUFFIX_solaris := -lawt -lawt_xawt -lc $(LIBM) $(LIBCXX) -ljava -ljvm, \
LDFLAGS_SUFFIX_solaris := $(filter-out -R%, $(X_LIBS)) \
-lawt -lawt_xawt -lc $(LIBM) $(LIBCXX) -ljava -ljvm, \
LDFLAGS_SUFFIX_aix := -lawt -lawt_xawt $(LIBM) $(LIBCXX) -ljava -ljvm,\
LDFLAGS_SUFFIX_macosx := -lawt $(LIBM) $(LIBCXX) -undefined dynamic_lookup \
-ljava -ljvm, \

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,13 @@ import java.nio.ByteBuffer;
import java.lang.annotation.Native;

public final class JRSUIConstants {

/**
* There is no way to get width of focus border, so it is hardcoded here.
* All components, which can be focused should take care about it.
*/
public static final int FOCUS_SIZE = 4;

private static native long getPtrForConstant(final int constant);

static class Key {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,6 +36,8 @@ import apple.laf.JRSUIConstants.*;
import com.apple.laf.AquaUtilControlSize.*;
import com.apple.laf.AquaUtils.RecyclableSingleton;

import static apple.laf.JRSUIConstants.FOCUS_SIZE;

/**
* All the "magic numbers" in this class should go away once
* <rdar://problem/4613866> "default font" and sizes for controls in Java Aqua Look and Feel
@ -145,7 +147,8 @@ public class AquaButtonExtendedTypes {
protected static Map<String, TypeSpecifier> getAllTypes() {
final Map<String, TypeSpecifier> specifiersByName = new HashMap<String, TypeSpecifier>();

final Insets focusInsets = new Insets(4, 4, 4, 4);
final Insets focusInsets = new Insets(FOCUS_SIZE, FOCUS_SIZE,
FOCUS_SIZE, FOCUS_SIZE);

final TypeSpecifier[] specifiers = {
new TypeSpecifier("toolbar", true) {

@ -44,7 +44,8 @@ public class AquaIcon {
|
||||
}
|
||||
|
||||
static UIResource getIconFor(final JRSUIControlSpec spec, final int width, final int height) {
|
||||
return new CachableJRSUIIcon(width, height) {
|
||||
return new ScalingJRSUIIcon(width, height) {
|
||||
@Override
|
||||
public void initIconPainter(final AquaPainter<JRSUIState> painter) {
|
||||
spec.initIconPainter(painter);
|
||||
}
|
||||
@ -128,35 +129,12 @@ public class AquaIcon {
|
||||
if (image != null) return image;
|
||||
|
||||
if (!GraphicsEnvironment.isHeadless()) {
|
||||
image = getOptimizedImage();
|
||||
image = createImage();
|
||||
}
|
||||
|
||||
return image;
|
||||
}
|
||||
|
||||
private Image getOptimizedImage() {
|
||||
final Image img = createImage();
|
||||
// TODO: no RuntimeOptions for now
|
||||
//if (RuntimeOptions.getRenderer(null) != RuntimeOptions.Sun) return img;
|
||||
return getProgressiveOptimizedImage(img, getIconWidth(), getIconHeight());
|
||||
}
|
||||
|
||||
static Image getProgressiveOptimizedImage(final Image img, final int w, final int h) {
|
||||
if (img == null) return null;
|
||||
|
||||
final int halfImgW = img.getWidth(null) / 2;
|
||||
final int halfImgH = img.getHeight(null) / 2;
|
||||
if (w * 2 > halfImgW && h * 2 > halfImgH) return img;
|
||||
|
||||
final BufferedImage halfImage = new BufferedImage(halfImgW, halfImgH, BufferedImage.TYPE_INT_ARGB);
|
||||
final Graphics g = halfImage.getGraphics();
|
||||
((Graphics2D)g).setRenderingHint(RenderingHints.KEY_INTERPOLATION, RenderingHints.VALUE_INTERPOLATION_BILINEAR);
|
||||
g.drawImage(img, 0, 0, halfImgW, halfImgH, null);
|
||||
g.dispose();
|
||||
|
||||
return getProgressiveOptimizedImage(halfImage, w, h);
|
||||
}
|
||||
|
||||
abstract Image createImage();
|
||||
|
||||
public boolean hasIconRef() {
|
||||
@ -189,24 +167,50 @@ public class AquaIcon {

    }

    static abstract class CachableJRSUIIcon extends CachingScalingIcon implements UIResource {
        public CachableJRSUIIcon(final int width, final int height) {
            super(width, height);
    static abstract class ScalingJRSUIIcon implements Icon, UIResource {
        final int width;
        final int height;

        public ScalingJRSUIIcon(final int width, final int height) {
            this.width = width;
            this.height = height;
        }

        Image createImage() {
            final AquaPainter<JRSUIState> painter = AquaPainter.create(JRSUIState.getInstance());
        @Override
        public void paintIcon(final Component c, Graphics g,
                final int x, final int y) {
            if (GraphicsEnvironment.isHeadless()) {
                return;
            }

            g = g.create();

            if (g instanceof Graphics2D) {
                // improves icon rendering quality in Quartz
                ((Graphics2D) g).setRenderingHint(RenderingHints.KEY_RENDERING,
                        RenderingHints.VALUE_RENDER_QUALITY);
            }

            final AquaPainter<JRSUIState> painter =
                    AquaPainter.create(JRSUIState.getInstance());
            initIconPainter(painter);

            final BufferedImage img = new BufferedImage(getIconWidth(), getIconHeight(), BufferedImage.TYPE_INT_ARGB_PRE);
            final Graphics g = img.getGraphics();
            g.setClip(new Rectangle(0, 0, getIconWidth(), getIconHeight()));
            painter.paint(g, null, 0, 0, getIconWidth(), getIconHeight());
            g.setClip(new Rectangle(x, y, width, height));
            painter.paint(g, c, x, y, width, height);
            g.dispose();
            return img;
        }

        public abstract void initIconPainter(final AquaPainter<JRSUIState> painter);

        @Override
        public int getIconWidth() {
            return width;
        }

        @Override
        public int getIconHeight() {
            return height;
        }
    }

    static class FileIcon extends CachingScalingIcon {
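The rewritten icon above no longer rasterizes into a cached, pre-scaled image; it paints straight through the destination Graphics, so the destination surface (for example a HiDPI one) can be respected. A standalone sketch of that pattern follows; the class and the oval it draws are invented stand-ins for the native JRSUI painter, not JDK code.

import java.awt.Color;
import java.awt.Component;
import java.awt.Graphics;
import java.awt.Graphics2D;
import java.awt.RenderingHints;
import javax.swing.Icon;

class DirectPaintIcon implements Icon {
    private final int width;
    private final int height;

    DirectPaintIcon(final int width, final int height) {
        this.width = width;
        this.height = height;
    }

    @Override
    public void paintIcon(final Component c, final Graphics g, final int x, final int y) {
        final Graphics2D g2 = (Graphics2D) g.create();
        try {
            // Same rendering hint the patch sets before delegating to the painter.
            g2.setRenderingHint(RenderingHints.KEY_RENDERING,
                                RenderingHints.VALUE_RENDER_QUALITY);
            g2.setClip(x, y, width, height);
            g2.setColor(Color.DARK_GRAY);
            g2.fillOval(x, y, width, height);   // stand-in for painter.paint(g, c, x, y, w, h)
        } finally {
            g2.dispose();
        }
    }

    @Override
    public int getIconWidth() { return width; }

    @Override
    public int getIconHeight() { return height; }
}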
@ -787,8 +787,9 @@ public class AquaInternalFrameUI extends BasicInternalFrameUI implements SwingCo
    }

    static final RecyclableSingleton<Icon> RESIZE_ICON = new RecyclableSingleton<Icon>() {
        @Override
        protected Icon getInstance() {
            return new AquaIcon.CachableJRSUIIcon(11, 11) {
            return new AquaIcon.ScalingJRSUIIcon(11, 11) {
                public void initIconPainter(final AquaPainter<JRSUIState> iconState) {
                    iconState.state.set(Widget.GROW_BOX_TEXTURED);
                    iconState.state.set(WindowType.UTILITY);
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -141,40 +141,71 @@ abstract class AquaPainter <T extends JRSUIState> {
        paintFromSingleCachedImage(g, control, stateToPaint, boundsRect);
    }

    /**
     * Paints a native control, which is identified by its size and a set of
     * additional arguments, using a cached image.
     *
     * @param g Graphics to draw the control
     * @param control the reference to the native control
     * @param controlState the state of the native control
     * @param bounds the rectangle where the native part should be drawn;
     *        note that the focus can/will be drawn outside of these bounds
     */
    static void paintFromSingleCachedImage(final Graphics2D g,
            final JRSUIControl control, final JRSUIState controlState,
            final Rectangle bounds) {
            final JRSUIControl control,
            final JRSUIState controlState,
            final Rectangle bounds) {
        if (bounds.width <= 0 || bounds.height <= 0) {
            return;
        }

        int scale = 1;
        if (g instanceof SunGraphics2D) {
            scale = ((SunGraphics2D) g).surfaceData.getDefaultScale();
        int focus = 0;
        if (controlState.is(JRSUIConstants.Focused.YES)) {
            focus = JRSUIConstants.FOCUS_SIZE;
        }

        final int imgX = bounds.x - focus;
        final int imgY = bounds.y - focus;
        final int imgW = bounds.width + (focus << 1);
        final int imgH = bounds.height + (focus << 1);
        final GraphicsConfiguration config = g.getDeviceConfiguration();
        final ImageCache cache = ImageCache.getInstance();
        final int imgW = bounds.width * scale;
        final int imgH = bounds.height * scale;
        AquaPixelsKey key = new AquaPixelsKey(config,
                imgW, imgH, scale, controlState);
        BufferedImage img = (BufferedImage) cache.getImage(key);
        final AquaPixelsKey key = new AquaPixelsKey(config, imgW, imgH,
                bounds, controlState);
        Image img = cache.getImage(key);
        if (img == null) {
            img = new BufferedImage(imgW, imgH, BufferedImage.TYPE_INT_ARGB_PRE);

            Image baseImage = createImage(imgX, imgY, imgW, imgH, bounds,
                    control, controlState);

            img = new MultiResolutionBufferedImage(baseImage,
                    (rvWidth, rvHeight) -> createImage(imgX, imgY,
                            rvWidth, rvHeight, bounds, control, controlState));

            if (!controlState.is(JRSUIConstants.Animating.YES)) {
                cache.setImage(key, img);
            }

            final WritableRaster raster = img.getRaster();
            final DataBufferInt buffer = (DataBufferInt) raster.getDataBuffer();

            control.set(controlState);
            control.paint(SunWritableRaster.stealData(buffer, 0),
                    imgW, imgH, 0, 0, bounds.width, bounds.height);
            SunWritableRaster.markDirty(buffer);
        }

        g.drawImage(img, bounds.x, bounds.y, bounds.width, bounds.height, null);
        g.drawImage(img, imgX, imgY, imgW, imgH, null);
    }

    private static Image createImage(int imgX, int imgY, int imgW, int imgH,
            final Rectangle bounds,
            final JRSUIControl control,
            JRSUIState controlState) {
        BufferedImage img = new BufferedImage(imgW, imgH,
                BufferedImage.TYPE_INT_ARGB_PRE);

        final WritableRaster raster = img.getRaster();
        final DataBufferInt buffer = (DataBufferInt) raster.getDataBuffer();

        control.set(controlState);
        control.paint(SunWritableRaster.stealData(buffer, 0), imgW, imgH,
                bounds.x - imgX, bounds.y - imgY, bounds.width,
                bounds.height);
        SunWritableRaster.markDirty(buffer);
        return img;
    }
}
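A small, self-contained sketch of the padding arithmetic used above (the concrete numbers are made up): when the control is focused, the cached image is grown by FOCUS_SIZE on each side so the focus ring can land outside the control's bounds.

import java.awt.Rectangle;

final class FocusPadding {
    static final int FOCUS_SIZE = 4;   // mirrors JRSUIConstants.FOCUS_SIZE

    // Mirrors the imgX/imgY/imgW/imgH computation in paintFromSingleCachedImage.
    static Rectangle imageBounds(final Rectangle bounds, final boolean focused) {
        final int focus = focused ? FOCUS_SIZE : 0;
        return new Rectangle(bounds.x - focus, bounds.y - focus,
                             bounds.width + (focus << 1), bounds.height + (focus << 1));
    }

    public static void main(String[] args) {
        // A focused 20x20 control at (10, 10) is cached as a 28x28 image at (6, 6).
        System.out.println(imageBounds(new Rectangle(10, 10, 20, 20), true));
    }
}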
@ -187,21 +218,22 @@ abstract class AquaPainter <T extends JRSUIState> {
        private final GraphicsConfiguration config;
        private final int w;
        private final int h;
        private final int scale;
        private final Rectangle bounds;
        private final JRSUIState state;

        AquaPixelsKey(final GraphicsConfiguration config,
                final int w, final int h, final int scale,
                final int w, final int h, final Rectangle bounds,
                final JRSUIState state) {
            this.pixelCount = w * h;
            this.config = config;
            this.w = w;
            this.h = h;
            this.scale = scale;
            this.bounds = bounds;
            this.state = state;
            this.hash = hash();
        }

        @Override
        public int getPixelCount() {
            return pixelCount;
        }
@ -210,7 +242,7 @@ abstract class AquaPainter <T extends JRSUIState> {
            int hash = config != null ? config.hashCode() : 0;
            hash = 31 * hash + w;
            hash = 31 * hash + h;
            hash = 31 * hash + scale;
            hash = 31 * hash + bounds.hashCode();
            hash = 31 * hash + state.hashCode();
            return hash;
        }
@ -225,7 +257,7 @@ abstract class AquaPainter <T extends JRSUIState> {
            if (obj instanceof AquaPixelsKey) {
                AquaPixelsKey key = (AquaPixelsKey) obj;
                return config == key.config && w == key.w && h == key.h
                        && scale == key.scale && state.equals(key.state);
                        && bounds.equals(key.bounds) && state.equals(key.state);
            }
            return false;
        }
@ -44,6 +44,7 @@ import java.net.MalformedURLException;

import sun.awt.*;
import sun.awt.datatransfer.DataTransferer;
import sun.java2d.opengl.OGLRenderQueue;
import sun.lwawt.*;
import sun.lwawt.LWWindowPeer.PeerType;
import sun.security.action.GetBooleanAction;
@ -410,7 +411,11 @@ public final class LWCToolkit extends LWToolkit {

    @Override
    public void sync() {
        // TODO Auto-generated method stub
        // flush the OGL pipeline (this is a no-op if OGL is not enabled)
        OGLRenderQueue.sync();
        // setNeedsDisplay() selector was sent to the appropriate CALayer so now
        // we have to flush the native selectors queue.
        flushNativeSelectors();
    }

    @Override
@ -813,6 +818,11 @@ public final class LWCToolkit extends LWToolkit {

    private native boolean nativeSyncQueue(long timeout);

    /**
     * Just spin a single empty block synchronously.
     */
    private static native void flushNativeSelectors();

    @Override
    public Clipboard createPlatformClipboard() {
        return new CClipboard("System");
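Roughly, the new sync() first flushes any batched rendering work and then drains the native selector queue by running one empty block on it. The sketch below imitates that shape with a plain single-threaded executor; it is an analogy, not the JDK implementation, and flushRenderingPipeline() is a placeholder.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class SyncPattern {
    private final ExecutorService nativeQueue = Executors.newSingleThreadExecutor();

    // Blocks until everything submitted before this call has been processed.
    void sync() throws InterruptedException, ExecutionException {
        flushRenderingPipeline();             // plays the role of OGLRenderQueue.sync()
        nativeQueue.submit(() -> { }).get();  // "spin a single empty block synchronously"
    }

    private void flushRenderingPipeline() {
        // placeholder: no batched rendering work in this sketch
    }
}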
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -199,7 +199,7 @@ static inline jint doPaintImage
    CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceRGB();
    CGContextRef cgRef = CGBitmapContextCreate(rawPixelData, imgW, imgH, 8, imgW * 4, colorspace, kCGImageAlphaPremultipliedFirst | kCGBitmapByteOrder32Host);
    CGColorSpaceRelease(colorspace);
    CGContextScaleCTM(cgRef, imgW/w , imgH/h);
    CGContextScaleCTM(cgRef, imgW/(w + x + x) , imgH/(h + y + y));

    jint status = doPaintCGContext(cgRef, controlPtr, oldProperties, newProperties, x, y, w, h);
    CGContextRelease(cgRef);
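To see why the CTM change matters, here is a tiny arithmetic check with made-up numbers that match the Java side of this patch: a 20x20 control offset by FOCUS_SIZE inside a 28x28 bitmap. Only the new formula, imgW/(w + 2x), keeps the control at its intended size; dividing by w alone over-scales once the bitmap includes the focus padding.

class ScaleCheck {
    public static void main(String[] args) {
        int w = 20, h = 20;          // control size passed to the native painter
        int x = 4, y = 4;            // control offset inside the bitmap (FOCUS_SIZE)
        int imgW = w + 2 * x;        // 28: bitmap width including the focus padding
        System.out.println((double) imgW / (w + x + x));  // 1.0 -> new formula
        System.out.println((double) imgW / w);            // 1.4 -> old formula, too big
    }
}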
@ -762,6 +762,10 @@ AWT_ASSERT_APPKIT_THREAD;
    return lastKeyWindow;
}

- (BOOL)windowShouldZoom:(NSWindow *)window toFrame:(NSRect)newFrame {
    return !NSEqualSizes(self.nsWindow.frame.size, newFrame.size);
}


@end // AWTWindow
Some files were not shown because too many files have changed in this diff.