Merge
commit 1736e104a1
@@ -141,18 +141,6 @@ SUNWprivate_1.1 {
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
@@ -139,18 +139,6 @@ SUNWprivate_1.1 {
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
@@ -139,18 +139,6 @@
                 _JVM_Halt
                 _JVM_HoldsLock
                 _JVM_IHashCode
-                _JVM_ImageAttributeOffsets
-                _JVM_ImageAttributeOffsetsLength
-                _JVM_ImageClose
-                _JVM_ImageFindAttributes
-                _JVM_ImageGetAttributes
-                _JVM_ImageGetAttributesCount
-                _JVM_ImageGetDataAddress
-                _JVM_ImageGetIndexAddress
-                _JVM_ImageGetStringBytes
-                _JVM_ImageOpen
-                _JVM_ImageRead
-                _JVM_ImageReadCompressed
                 _JVM_InitAgentProperties
                 _JVM_InitProperties
                 _JVM_InternString
@@ -139,18 +139,6 @@
                 _JVM_Halt
                 _JVM_HoldsLock
                 _JVM_IHashCode
-                _JVM_ImageAttributeOffsets
-                _JVM_ImageAttributeOffsetsLength
-                _JVM_ImageClose
-                _JVM_ImageFindAttributes
-                _JVM_ImageGetAttributes
-                _JVM_ImageGetAttributesCount
-                _JVM_ImageGetDataAddress
-                _JVM_ImageGetIndexAddress
-                _JVM_ImageGetStringBytes
-                _JVM_ImageOpen
-                _JVM_ImageRead
-                _JVM_ImageReadCompressed
                 _JVM_InitAgentProperties
                 _JVM_InitProperties
                 _JVM_InternString
@@ -141,18 +141,6 @@ SUNWprivate_1.1 {
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
@@ -141,18 +141,6 @@ SUNWprivate_1.1 {
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
@@ -141,18 +141,6 @@ SUNWprivate_1.1 {
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
@@ -141,18 +141,6 @@ SUNWprivate_1.1 {
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
@@ -76,6 +76,11 @@ endif
 ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
   CFLAGS_WARN = +w -errwarn
 endif
+# When using compiler version 5.13 (Solaris Studio 12.4), calls to explicitly
+# instantiated template functions trigger this warning when +w is active.
+ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 513), 1)
+  CFLAGS_WARN += -erroff=notemsource
+endif
 CFLAGS += $(CFLAGS_WARN)

 ifeq ("${Platform_compiler}", "sparcWorks")
@@ -141,18 +141,6 @@ SUNWprivate_1.1 {
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
@@ -3043,7 +3043,9 @@ void MacroAssembler::store_check(Register obj) {
   // register obj is destroyed afterwards.

   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
+         bs->kind() == BarrierSet::CardTableExtension,
+         "Wrong barrier set kind");

   CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
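This hunk, and the many case-label hunks that follow, split the old single CardTableModRef barrier-set kind into CardTableForRS and CardTableExtension; the generated code treats both the same way because they share the card-table write barrier. For orientation, the mechanism behind these asserts is card marking: every reference store dirties one byte in a side table covering a fixed-size heap window. A minimal sketch of the idea, with hypothetical names rather than HotSpot's real declarations:

    #include <cstdint>

    // Hypothetical stand-ins; HotSpot derives the equivalents from CardTableModRefBS.
    static const int kCardShift = 9;            // 512-byte cards, as in HotSpot
    static volatile int8_t* byte_map_base;      // biased base: addr >> kCardShift indexes it
    static const int8_t kDirtyCard = 0;

    // After a reference store to 'addr', dirty the card covering 'addr' so the
    // collector knows to rescan that 512-byte window for cross-generation pointers.
    inline void post_write_barrier(const void* addr) {
      byte_map_base[reinterpret_cast<uintptr_t>(addr) >> kCardShift] = kDirtyCard;
    }

Both new kinds emit exactly this store, which is why the switch statements below list them on shared fall-through labels.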
@@ -691,7 +691,7 @@ class StubGenerator: public StubCodeGenerator {
         __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
         __ pop(RegSet::range(r0, r29), sp);  // integer registers except lr & sp
       }
       break;
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
     case BarrierSet::ModRef:
       break;
@@ -731,7 +731,7 @@ class StubGenerator: public StubCodeGenerator {
         __ pop(RegSet::range(r0, r29), sp);  // integer registers except lr & sp
       }
     }
     break;
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         CardTableModRefBS* ct = (CardTableModRefBS*)bs;
@@ -186,7 +186,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         if (val == noreg) {
@@ -2614,7 +2614,7 @@ void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register t
 void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
   CardTableModRefBS* bs =
     barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  assert(bs->kind() == BarrierSet::CardTableModRef ||
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
 #ifdef ASSERT
   cmpdi(CCR0, Rnew_val, 0);
@@ -656,7 +656,7 @@ class StubGenerator: public StubCodeGenerator {
         __ bind(filtered);
       }
       break;
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
     case BarrierSet::ModRef:
       break;
@@ -697,7 +697,7 @@ class StubGenerator: public StubCodeGenerator {
       }
     }
     break;
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         Label Lskip_loop, Lstore_loop;
@@ -105,7 +105,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         Label Lnull, Ldone;
@@ -3958,7 +3958,7 @@ void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_v
   if (new_val == G0) return;
   CardTableModRefBS* bs =
     barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  assert(bs->kind() == BarrierSet::CardTableModRef ||
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
   card_table_write(bs->byte_map_base, tmp, store_addr);
 }
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/memset_with_concurrent_readers.hpp"
+#include "runtime/prefetch.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+#if INCLUDE_ALL_GCS
+
+// An implementation of memset, for use when there may be concurrent
+// readers of the region being stored into.
+//
+// We can't use the standard library memset if it is implemented using
+// block initializing stores.  Doing so can result in concurrent readers
+// seeing spurious zeros.
+//
+// We can't use the obvious C/C++ for-loop, because the compiler may
+// recognize the idiomatic loop and optimize it into a call to the
+// standard library memset; we've seen exactly this happen with, for
+// example, Solaris Studio 12.3.  Hence the use of inline assembly
+// code, hiding loops from the compiler's optimizer.
+//
+// We don't attempt to use the standard library memset when it is safe
+// to do so.  We could conservatively do so by detecting the presence
+// of block initializing stores (VM_Version::has_blk_init()), but the
+// implementation provided here should be sufficient.
+
+inline void fill_subword(void* start, void* end, int value) {
+  STATIC_ASSERT(BytesPerWord == 8);
+  assert(pointer_delta(end, start, 1) < BytesPerWord, "precondition");
+  // Dispatch on (end - start).
+  void* pc;
+  __asm__ volatile(
+    // offset := (7 - (end - start)) + 3
+    //   3 instructions from rdpc to DISPATCH
+    " sub %[offset], %[end], %[offset]\n\t" // offset := start - end
+    " sllx %[offset], 2, %[offset]\n\t"     // scale offset for instruction size of 4
+    " add %[offset], 40, %[offset]\n\t"     // offset += 10 * instruction size
+    " rd %pc, %[pc]\n\t"                    // dispatch on scaled offset
+    " jmpl %[pc]+%[offset], %g0\n\t"
+    "  nop\n\t"
+    // DISPATCH: no direct reference, but without it the store block may be elided.
+    "1:\n\t"
+    " stb %[value], [%[end]-7]\n\t" // end[-7] = value
+    " stb %[value], [%[end]-6]\n\t"
+    " stb %[value], [%[end]-5]\n\t"
+    " stb %[value], [%[end]-4]\n\t"
+    " stb %[value], [%[end]-3]\n\t"
+    " stb %[value], [%[end]-2]\n\t"
+    " stb %[value], [%[end]-1]\n\t" // end[-1] = value
+    : /* no outputs */
+      [pc] "&=r" (pc)               // temp
+    : [offset] "&+r" (start),
+      [end] "r" (end),
+      [value] "r" (value)
+    : "memory");
+}
+
+void memset_with_concurrent_readers(void* to, int value, size_t size) {
+  Prefetch::write(to, 0);
+  void* end = static_cast<char*>(to) + size;
+  if (size >= BytesPerWord) {
+    // Fill any partial word prefix.
+    uintx* aligned_to = static_cast<uintx*>(align_ptr_up(to, BytesPerWord));
+    fill_subword(to, aligned_to, value);
+
+    // Compute fill word.
+    STATIC_ASSERT(BitsPerByte == 8);
+    STATIC_ASSERT(BitsPerWord == 64);
+    uintx xvalue = value & 0xff;
+    xvalue |= (xvalue << 8);
+    xvalue |= (xvalue << 16);
+    xvalue |= (xvalue << 32);
+
+    uintx* aligned_end = static_cast<uintx*>(align_ptr_down(end, BytesPerWord));
+    assert(aligned_to <= aligned_end, "invariant");
+
+    // for ( ; aligned_to < aligned_end; ++aligned_to) {
+    //   *aligned_to = xvalue;
+    // }
+    uintptr_t temp;
+    __asm__ volatile(
+      // Unroll loop x8.
+      " sub %[aend], %[ato], %[temp]\n\t"
+      " cmp %[temp], 56\n\t"            // cc := (aligned_end - aligned_to) > 7 words
+      " ba %xcc, 2f\n\t"                // goto TEST always
+      " sub %[aend], 56, %[temp]\n\t"   // limit := aligned_end - 7 words
+      // LOOP:
+      "1:\n\t"                          // unrolled x8 store loop top
+      " cmp %[temp], %[ato]\n\t"        // cc := limit > (next) aligned_to
+      " stx %[xvalue], [%[ato]-64]\n\t" // store 8 words, aligned_to pre-incremented
+      " stx %[xvalue], [%[ato]-56]\n\t"
+      " stx %[xvalue], [%[ato]-48]\n\t"
+      " stx %[xvalue], [%[ato]-40]\n\t"
+      " stx %[xvalue], [%[ato]-32]\n\t"
+      " stx %[xvalue], [%[ato]-24]\n\t"
+      " stx %[xvalue], [%[ato]-16]\n\t"
+      " stx %[xvalue], [%[ato]-8]\n\t"
+      // TEST:
+      "2:\n\t"
+      " bgu,a %xcc, 1b\n\t"             // goto LOOP if more than 7 words remaining
+      " add %[ato], 64, %[ato]\n\t"     // aligned_to += 8, for next iteration
+      // Fill remaining < 8 full words.
+      // Dispatch on (aligned_end - aligned_to).
+      // offset := (7 - (aligned_end - aligned_to)) + 3
+      //   3 instructions from rdpc to DISPATCH
+      " sub %[ato], %[aend], %[ato]\n\t" // offset := aligned_to - aligned_end
+      " srax %[ato], 1, %[ato]\n\t"      // scale offset for instruction size of 4
+      " add %[ato], 40, %[ato]\n\t"      // offset += 10 * instruction size
+      " rd %pc, %[temp]\n\t"             // dispatch on scaled offset
+      " jmpl %[temp]+%[ato], %g0\n\t"
+      "  nop\n\t"
+      // DISPATCH: no direct reference, but without it the store block may be elided.
+      "3:\n\t"
+      " stx %[xvalue], [%[aend]-56]\n\t" // aligned_end[-7] = xvalue
+      " stx %[xvalue], [%[aend]-48]\n\t"
+      " stx %[xvalue], [%[aend]-40]\n\t"
+      " stx %[xvalue], [%[aend]-32]\n\t"
+      " stx %[xvalue], [%[aend]-24]\n\t"
+      " stx %[xvalue], [%[aend]-16]\n\t"
+      " stx %[xvalue], [%[aend]-8]\n\t"  // aligned_end[-1] = xvalue
+      : /* no outputs */
+        [temp] "&=r" (temp)
+      : [ato] "&+r" (aligned_to),
+        [aend] "r" (aligned_end),
+        [xvalue] "r" (xvalue)
+      : "cc", "memory");
+    to = aligned_end; // setup for suffix
+  }
+  // Fill any partial word suffix.  Also the prefix if size < BytesPerWord.
+  fill_subword(to, end, value);
+}
+
+#endif // INCLUDE_ALL_GCS
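The long comment at the top of this new file states the two constraints: the fill must not use block-initializing stores (concurrent readers could observe transient zeros), and it must not look like a loop the compiler will rewrite into a library memset call. A portable sketch of the word-fill core, for readers who don't speak SPARC assembly — the volatile access here merely stands in for the opacity the inline assembly provides above, and is not how the file achieves it:

    #include <cstdint>
    #include <cstddef>

    // Replicate the low byte across a 64-bit word: 0x2a -> 0x2a2a2a2a2a2a2a2a,
    // exactly the xvalue computation in the file above.
    inline uint64_t replicate_byte(int value) {
      uint64_t x = static_cast<uint64_t>(value & 0xff);
      x |= (x << 8);
      x |= (x << 16);
      x |= (x << 32);
      return x;
    }

    // Word-at-a-time fill using plain (non-block-initializing) stores.  The
    // volatile pointer is a portable trick to keep the compiler from pattern-
    // matching the loop into a memset call; the file above instead uses inline
    // assembly, which also lets it dispatch into an unrolled store block.
    inline void fill_words(uint64_t* to, uint64_t* end, int value) {
      const uint64_t xvalue = replicate_byte(value);
      for (volatile uint64_t* p = to; p < end; ++p) {
        *p = xvalue;
      }
    }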
@@ -981,7 +981,7 @@ class StubGenerator: public StubCodeGenerator {
         __ restore();
       }
       break;
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
     case BarrierSet::ModRef:
       break;
@@ -1014,7 +1014,7 @@ class StubGenerator: public StubCodeGenerator {
         __ restore();
       }
       break;
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
@@ -91,7 +91,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         if (index == noreg ) {
@@ -85,27 +85,6 @@ void VM_Version::initialize() {
   _supports_cx8 = has_v9();
   _supports_atomic_getset4 = true; // swap instruction

-  // There are Fujitsu Sparc64 CPUs which support blk_init as well so
-  // we have to take this check out of the 'is_niagara()' block below.
-  if (has_blk_init()) {
-    // When using CMS or G1, we cannot use memset() in BOT updates
-    // because the sun4v/CMT version in libc_psr uses BIS which
-    // exposes "phantom zeros" to concurrent readers. See 6948537.
-    if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) {
-      FLAG_SET_DEFAULT(UseMemSetInBOT, false);
-    }
-    // Issue a stern warning if the user has explicitly set
-    // UseMemSetInBOT (it is known to cause issues), but allow
-    // use for experimentation and debugging.
-    if (UseConcMarkSweepGC || UseG1GC) {
-      if (UseMemSetInBOT) {
-        assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
-        warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
-                " on sun4v; please understand that you are using at your own risk!");
-      }
-    }
-  }
-
   if (is_niagara()) {
     // Indirect branch is the same cost as direct
     if (FLAG_IS_DEFAULT(UseInlineCaches)) {
@@ -4320,7 +4320,9 @@ void MacroAssembler::store_check(Register obj) {
   // register obj is destroyed afterwards.

   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
+         bs->kind() == BarrierSet::CardTableExtension,
+         "Wrong barrier set kind");

   CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
@@ -722,7 +722,7 @@ class StubGenerator: public StubCodeGenerator {
         __ popa();
       }
       break;
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
     case BarrierSet::ModRef:
       break;
@@ -754,7 +754,7 @@ class StubGenerator: public StubCodeGenerator {
       }
       break;

-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
@@ -367,16 +367,20 @@ class StubGenerator: public StubCodeGenerator {
 #ifdef ASSERT
     // verify that threads correspond
     {
-      Label L, S;
+      Label L1, L2, L3;
       __ cmpptr(r15_thread, thread);
-      __ jcc(Assembler::notEqual, S);
+      __ jcc(Assembler::equal, L1);
+      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
+      __ bind(L1);
       __ get_thread(rbx);
+      __ cmpptr(r15_thread, thread);
+      __ jcc(Assembler::equal, L2);
+      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
+      __ bind(L2);
       __ cmpptr(r15_thread, rbx);
-      __ jcc(Assembler::equal, L);
-      __ bind(S);
-      __ jcc(Assembler::equal, L);
+      __ jcc(Assembler::equal, L3);
       __ stop("StubRoutines::call_stub: threads must correspond");
-      __ bind(L);
+      __ bind(L3);
     }
 #endif

@@ -450,15 +454,20 @@ class StubGenerator: public StubCodeGenerator {
 #ifdef ASSERT
     // verify that threads correspond
     {
-      Label L, S;
+      Label L1, L2, L3;
       __ cmpptr(r15_thread, thread);
-      __ jcc(Assembler::notEqual, S);
+      __ jcc(Assembler::equal, L1);
+      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
+      __ bind(L1);
       __ get_thread(rbx);
+      __ cmpptr(r15_thread, thread);
+      __ jcc(Assembler::equal, L2);
+      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
+      __ bind(L2);
       __ cmpptr(r15_thread, rbx);
-      __ jcc(Assembler::equal, L);
-      __ bind(S);
+      __ jcc(Assembler::equal, L3);
       __ stop("StubRoutines::catch_exception: threads must correspond");
-      __ bind(L);
+      __ bind(L3);
     }
 #endif

@@ -1244,7 +1253,7 @@ class StubGenerator: public StubCodeGenerator {
         __ popa();
       }
       break;
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
     case BarrierSet::ModRef:
       break;
@@ -1284,7 +1293,7 @@ class StubGenerator: public StubCodeGenerator {
         __ popa();
       }
       break;
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
@@ -200,7 +200,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         if (val == noreg) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,10 @@ static bool detect_niagara() {
   return cpuinfo_field_contains("cpu", "Niagara");
 }

+static bool detect_M_family() {
+  return cpuinfo_field_contains("cpu", "SPARC-M");
+}
+
 static bool detect_blkinit() {
   return cpuinfo_field_contains("cpucaps", "blkinit");
 }
@@ -66,6 +70,11 @@ int VM_Version::platform_features(int features) {
     features = niagara1_m | T_family_m;
   }

+  if (detect_M_family()) {
+    NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on M family");)
+    features = sun4v_m | generic_v9_m | M_family_m | T_family_m;
+  }
+
   if (detect_blkinit()) {
     features |= blk_init_instructions_m;
   }
@@ -1425,7 +1425,7 @@ void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       // No pre barriers
       break;
@@ -1445,7 +1445,7 @@ void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
       G1SATBCardTableModRef_post_barrier(addr, new_val);
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       CardTableModRef_post_barrier(addr, new_val);
       break;
@@ -28,8 +28,8 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/classLoaderExt.hpp"
-#include "classfile/imageFile.hpp"
 #include "classfile/javaClasses.hpp"
+#include "classfile/jimage.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
@@ -58,6 +58,7 @@
 #include "runtime/os.hpp"
 #include "runtime/threadCritical.hpp"
 #include "runtime/timer.hpp"
+#include "runtime/vm_version.hpp"
 #include "services/management.hpp"
 #include "services/threadService.hpp"
 #include "utilities/events.hpp"
@@ -68,7 +69,7 @@
 #include "classfile/sharedPathsMiscInfo.hpp"
 #endif

-// Entry points in zip.dll for loading zip/jar file entries and image file entries
+// Entry points in zip.dll for loading zip/jar file entries

 typedef void * * (JNICALL *ZipOpen_t)(const char *name, char **pmsg);
 typedef void (JNICALL *ZipClose_t)(jzfile *zip);
@@ -89,6 +90,15 @@ static canonicalize_fn_t CanonicalizeEntry = NULL;
 static ZipInflateFully_t ZipInflateFully = NULL;
 static Crc32_t Crc32 = NULL;

+// Entry points for jimage.dll for loading jimage file entries
+
+static JImageOpen_t JImageOpen = NULL;
+static JImageClose_t JImageClose = NULL;
+static JImagePackageToModule_t JImagePackageToModule = NULL;
+static JImageFindResource_t JImageFindResource = NULL;
+static JImageGetResource_t JImageGetResource = NULL;
+static JImageResourceIterator_t JImageResourceIterator = NULL;
+
 // Globals

 PerfCounter* ClassLoader::_perf_accumulated_time = NULL;
@@ -141,6 +151,15 @@ bool string_starts_with(const char* str, const char* str_to_find) {
   return (strncmp(str, str_to_find, str_to_find_len) == 0);
 }

+static const char* get_jimage_version_string() {
+  static char version_string[10] = "";
+  if (version_string[0] == '\0') {
+    jio_snprintf(version_string, sizeof(version_string), "%d.%d",
+                 Abstract_VM_Version::vm_minor_version(), Abstract_VM_Version::vm_micro_version());
+  }
+  return (const char*)version_string;
+}
+
 bool string_ends_with(const char* str, const char* str_to_find) {
   size_t str_len = strlen(str);
   size_t str_to_find_len = strlen(str_to_find);
@@ -272,97 +291,113 @@ void ClassPathZipEntry::contents_do(void f(const char* name, void* context), voi
   }
 }

-ClassPathImageEntry::ClassPathImageEntry(ImageFileReader* image) :
+ClassPathImageEntry::ClassPathImageEntry(JImageFile* jimage, const char* name) :
   ClassPathEntry(),
-  _image(image),
-  _module_data(NULL) {
-  guarantee(image != NULL, "image file is null");
-
-  char module_data_name[JVM_MAXPATHLEN];
-  ImageModuleData::module_data_name(module_data_name, _image->name());
-  _module_data = new ImageModuleData(_image, module_data_name);
+  _jimage(jimage) {
+  guarantee(jimage != NULL, "jimage file is null");
+  guarantee(name != NULL, "jimage file name is null");
+  size_t len = strlen(name) + 1;
+  _name = NEW_C_HEAP_ARRAY(const char, len, mtClass);
+  strncpy((char *)_name, name, len);
 }

 ClassPathImageEntry::~ClassPathImageEntry() {
-  if (_module_data != NULL) {
-    delete _module_data;
-    _module_data = NULL;
+  if (_name != NULL) {
+    FREE_C_HEAP_ARRAY(const char, _name);
+    _name = NULL;
   }

-  if (_image != NULL) {
-    ImageFileReader::close(_image);
-    _image = NULL;
+  if (_jimage != NULL) {
+    (*JImageClose)(_jimage);
+    _jimage = NULL;
   }
 }

-const char* ClassPathImageEntry::name() {
-  return _image ? _image->name() : "";
+void ClassPathImageEntry::name_to_package(const char* name, char* buffer, int length) {
+  const char *pslash = strrchr(name, '/');
+  if (pslash == NULL) {
+    buffer[0] = '\0';
+    return;
+  }
+  int len = pslash - name;
+#if INCLUDE_CDS
+  if (len <= 0 && DumpSharedSpaces) {
+    buffer[0] = '\0';
+    return;
+  }
+#endif
+  assert(len > 0, "Bad length for package name");
+  if (len >= length) {
+    buffer[0] = '\0';
+    return;
+  }
+  // drop name after last slash (including slash)
+  // Ex., "java/lang/String.class" => "java/lang"
+  strncpy(buffer, name, len);
+  // ensure string termination (strncpy does not guarantee)
+  buffer[len] = '\0';
 }

+// For a class in a named module, look it up in the jimage file using this syntax:
+//    /<module-name>/<package-name>/<base-class>
+//
+// Assumptions:
+//     1. There are no unnamed modules in the jimage file.
+//     2. A package is in at most one module in the jimage file.
+//
 ClassFileStream* ClassPathImageEntry::open_stream(const char* name, TRAPS) {
-  ImageLocation location;
-  bool found = _image->find_location(name, location);
+  jlong size;
+  JImageLocationRef location = (*JImageFindResource)(_jimage, "", get_jimage_version_string(), name, &size);

-  if (!found) {
-    const char *pslash = strrchr(name, '/');
-    int len = pslash - name;
-
-    // NOTE: IMAGE_MAX_PATH is used here since this path is internal to the jimage
-    // (effectively unlimited.)  There are several JCK tests that use paths over
-    // 1024 characters long, the limit on Windows systems.
-    if (pslash && 0 < len && len < IMAGE_MAX_PATH) {
-
-      char path[IMAGE_MAX_PATH];
-      strncpy(path, name, len);
-      path[len] = '\0';
-      const char* moduleName = _module_data->package_to_module(path);
-
-      if (moduleName != NULL && (len + strlen(moduleName) + 2) < IMAGE_MAX_PATH) {
-        jio_snprintf(path, IMAGE_MAX_PATH - 1, "/%s/%s", moduleName, name);
-        location.clear_data();
-        found = _image->find_location(path, location);
-      }
+  if (location == 0) {
+    char package[JIMAGE_MAX_PATH];
+    name_to_package(name, package, JIMAGE_MAX_PATH);
+    if (package[0] != '\0') {
+      const char* module = (*JImagePackageToModule)(_jimage, package);
+      if (module == NULL) {
+        module = "java.base";
+      }
+      location = (*JImageFindResource)(_jimage, module, get_jimage_version_string(), name, &size);
     }
   }

-  if (found) {
-    u8 size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
+  if (location != 0) {
    if (UsePerfData) {
       ClassLoader::perf_sys_classfile_bytes_read()->inc(size);
     }
-    u1* data = NEW_RESOURCE_ARRAY(u1, size);
-    _image->get_resource(location, data);
-    return new ClassFileStream(data, (int)size, _image->name());  // Resource allocated
+    char* data = NEW_RESOURCE_ARRAY(char, size);
+    (*JImageGetResource)(_jimage, location, data, size);
+    return new ClassFileStream((u1*)data, (int)size, _name);  // Resource allocated
   }

   return NULL;
 }

 #ifndef PRODUCT
+bool ctw_visitor(JImageFile* jimage,
+                 const char* module_name, const char* version, const char* package,
+                 const char* name, const char* extension, void* arg) {
+  if (strcmp(extension, "class") == 0) {
+    Thread* THREAD = Thread::current();
+    char path[JIMAGE_MAX_PATH];
+    jio_snprintf(path, JIMAGE_MAX_PATH - 1, "%s/%s.class", package, name);
+    ClassLoader::compile_the_world_in(path, *(Handle*)arg, THREAD);
+    return !HAS_PENDING_EXCEPTION;
+  }
+  return true;
+}
+
 void ClassPathImageEntry::compile_the_world(Handle loader, TRAPS) {
   tty->print_cr("CompileTheWorld : Compiling all classes in %s", name());
   tty->cr();
-  const ImageStrings strings = _image->get_strings();
-  // Retrieve each path component string.
-  u4 length = _image->table_length();
-  for (u4 i = 0; i < length; i++) {
-    u1* location_data = _image->get_location_data(i);
-
-    if (location_data != NULL) {
-      ImageLocation location(location_data);
-      char path[IMAGE_MAX_PATH];
-      _image->location_path(location, path, IMAGE_MAX_PATH);
-      ClassLoader::compile_the_world_in(path, loader, CHECK);
-    }
-  }
+  (*JImageResourceIterator)(_jimage, (JImageResourceVisitor_t)ctw_visitor, (void *)&loader);
   if (HAS_PENDING_EXCEPTION) {
-    if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
-      CLEAR_PENDING_EXCEPTION;
-      tty->print_cr("\nCompileTheWorld : Ran out of memory\n");
-      tty->print_cr("Increase class metadata storage if a limit was set");
-    } else {
-      tty->print_cr("\nCompileTheWorld : Unexpected exception occurred\n");
-    }
+    if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
+      CLEAR_PENDING_EXCEPTION;
+      tty->print_cr("\nCompileTheWorld : Ran out of memory\n");
+      tty->print_cr("Increase class metadata storage if a limit was set");
+    } else {
+      tty->print_cr("\nCompileTheWorld : Unexpected exception occurred\n");
+    }
   }
 }

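The new open_stream above performs a two-step lookup: first probe the resource under the empty module name, and on a miss derive the package from the class-file path, ask JImagePackageToModule for the owning module (defaulting to java.base), and retry. The package derivation itself is a prefix cut at the last slash; a standalone sketch of that step, with a hypothetical helper name mirroring name_to_package:

    #include <cstring>
    #include <cstdio>

    // Hypothetical standalone mirror of name_to_package above:
    // "java/lang/String.class" -> "java/lang"; empty result when there is
    // no slash or the buffer is too small.
    static void path_to_package(const char* name, char* buffer, size_t length) {
      const char* pslash = strrchr(name, '/');
      size_t len = (pslash != NULL) ? (size_t)(pslash - name) : 0;
      if (len == 0 || len >= length) {
        buffer[0] = '\0';
        return;
      }
      memcpy(buffer, name, len);
      buffer[len] = '\0';  // strncpy-style copies do not guarantee termination
    }

    int main() {
      char pkg[64];
      path_to_package("java/lang/String.class", pkg, sizeof pkg);
      printf("%s\n", pkg);  // prints: java/lang
      return 0;
    }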
@@ -490,7 +525,7 @@ ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const str
   JavaThread* thread = JavaThread::current();
   ClassPathEntry* new_entry = NULL;
   if ((st->st_mode & S_IFREG) == S_IFREG) {
-    // Regular file, should be a zip or image file
+    // Regular file, should be a zip or jimage file
     // Canonicalized filename
     char canonical_path[JVM_MAXPATHLEN];
     if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
@@ -501,9 +536,10 @@ ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const str
         return NULL;
       }
     }
-    ImageFileReader* image = ImageFileReader::open(canonical_path);
-    if (image != NULL) {
-      new_entry = new ClassPathImageEntry(image);
+    jint error;
+    JImageFile* jimage = (*JImageOpen)(canonical_path, &error);
+    if (jimage != NULL) {
+      new_entry = new ClassPathImageEntry(jimage, canonical_path);
     } else {
       char* error_msg = NULL;
       jzfile* zip;
@@ -682,6 +718,35 @@ void ClassLoader::load_zip_library() {
   // This lookup only works on 1.3. Do not check for non-null here
 }

+void ClassLoader::load_jimage_library() {
+  // First make sure native library is loaded
+  os::native_java_library();
+  // Load jimage library
+  char path[JVM_MAXPATHLEN];
+  char ebuf[1024];
+  void* handle = NULL;
+  if (os::dll_build_name(path, sizeof(path), Arguments::get_dll_dir(), "jimage")) {
+    handle = os::dll_load(path, ebuf, sizeof ebuf);
+  }
+  if (handle == NULL) {
+    vm_exit_during_initialization("Unable to load jimage library", path);
+  }
+
+  // Lookup jimage entry points
+  JImageOpen = CAST_TO_FN_PTR(JImageOpen_t, os::dll_lookup(handle, "JIMAGE_Open"));
+  guarantee(JImageOpen != NULL, "function JIMAGE_Open not found");
+  JImageClose = CAST_TO_FN_PTR(JImageClose_t, os::dll_lookup(handle, "JIMAGE_Close"));
+  guarantee(JImageClose != NULL, "function JIMAGE_Close not found");
+  JImagePackageToModule = CAST_TO_FN_PTR(JImagePackageToModule_t, os::dll_lookup(handle, "JIMAGE_PackageToModule"));
+  guarantee(JImagePackageToModule != NULL, "function JIMAGE_PackageToModule not found");
+  JImageFindResource = CAST_TO_FN_PTR(JImageFindResource_t, os::dll_lookup(handle, "JIMAGE_FindResource"));
+  guarantee(JImageFindResource != NULL, "function JIMAGE_FindResource not found");
+  JImageGetResource = CAST_TO_FN_PTR(JImageGetResource_t, os::dll_lookup(handle, "JIMAGE_GetResource"));
+  guarantee(JImageGetResource != NULL, "function JIMAGE_GetResource not found");
+  JImageResourceIterator = CAST_TO_FN_PTR(JImageResourceIterator_t, os::dll_lookup(handle, "JIMAGE_ResourceIterator"));
+  guarantee(JImageResourceIterator != NULL, "function JIMAGE_ResourceIterator not found");
+}
+
 jboolean ClassLoader::decompress(void *in, u8 inSize, void *out, u8 outSize, char **pmsg) {
   return (*ZipInflateFully)(in, inSize, out, outSize, pmsg);
 }
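load_jimage_library follows the same late-binding pattern as load_zip_library: load the shared library from the JDK's library directory, then resolve each JIMAGE_* entry point by its exported name and fail VM initialization if any is missing. The same technique in plain POSIX terms, using dlopen/dlsym rather than HotSpot's os:: wrappers — the path and function signature here are illustrative, not the real jimage.hpp declarations:

    #include <dlfcn.h>
    #include <cstdio>

    // Hypothetical signature for illustration only.
    typedef void* (*JImageOpen_fn)(const char* name, int* error);

    int main() {
      // Path is illustrative; the real code builds it with os::dll_build_name
      // from the JDK's dll directory.
      void* handle = dlopen("libjimage.so", RTLD_NOW);
      if (handle == NULL) {
        fprintf(stderr, "unable to load jimage library: %s\n", dlerror());
        return 1;
      }
      // Resolve an exported entry point by name, as os::dll_lookup does above;
      // a missing symbol is a hard failure in the VM (guarantee != NULL).
      JImageOpen_fn jimage_open =
          reinterpret_cast<JImageOpen_fn>(dlsym(handle, "JIMAGE_Open"));
      if (jimage_open == NULL) {
        fprintf(stderr, "function JIMAGE_Open not found\n");
        dlclose(handle);
        return 1;
      }
      // ... call through jimage_open, then:
      dlclose(handle);
      return 0;
    }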
@@ -1086,6 +1151,8 @@ void ClassLoader::initialize() {

   // lookup zip library entry points
   load_zip_library();
+  // lookup jimage library entry points
+  load_jimage_library();
 #if INCLUDE_CDS
   // initialize search path
   if (DumpSharedSpaces) {
@@ -37,8 +37,7 @@

 // Class path entry (directory or zip file)

-class ImageFileReader;
-class ImageModuleData;
+class JImageFile;

 class ClassPathEntry: public CHeapObj<mtClass> {
  private:
@@ -52,7 +51,7 @@ class ClassPathEntry: public CHeapObj<mtClass> {
   }
   virtual bool is_jar_file() = 0;
   virtual const char* name() = 0;
-  virtual ImageFileReader* image() = 0;
+  virtual JImageFile* jimage() = 0;
   // Constructor
   ClassPathEntry();
   // Attempt to locate file_name through this class path entry.
@@ -70,7 +69,7 @@ class ClassPathDirEntry: public ClassPathEntry {
  public:
   bool is_jar_file() { return false; }
   const char* name() { return _dir; }
-  ImageFileReader* image() { return NULL; }
+  JImageFile* jimage() { return NULL; }
   ClassPathDirEntry(const char* dir);
   ClassFileStream* open_stream(const char* name, TRAPS);
   // Debugging
@@ -100,7 +99,7 @@ class ClassPathZipEntry: public ClassPathEntry {
  public:
   bool is_jar_file() { return true; }
   const char* name() { return _zip_name; }
-  ImageFileReader* image() { return NULL; }
+  JImageFile* jimage() { return NULL; }
   ClassPathZipEntry(jzfile* zip, const char* zip_name);
   ~ClassPathZipEntry();
   u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
@@ -115,16 +114,16 @@ class ClassPathZipEntry: public ClassPathEntry {
 // For java image files
 class ClassPathImageEntry: public ClassPathEntry {
  private:
-  ImageFileReader* _image;
-  ImageModuleData* _module_data;
+  JImageFile* _jimage;
+  const char* _name;
  public:
   bool is_jar_file() { return false; }
-  bool is_open() { return _image != NULL; }
-  const char* name();
-  ImageFileReader* image() { return _image; }
-  ImageModuleData* module_data() { return _module_data; }
-  ClassPathImageEntry(ImageFileReader* image);
+  bool is_open() { return _jimage != NULL; }
+  const char* name() { return _name == NULL ? "" : _name; }
+  JImageFile* jimage() { return _jimage; }
+  ClassPathImageEntry(JImageFile* jimage, const char* name);
   ~ClassPathImageEntry();
+  static void name_to_package(const char* name, char* buffer, int length);
   ClassFileStream* open_stream(const char* name, TRAPS);

   // Debugging
@@ -206,6 +205,7 @@ class ClassLoader: AllStatic {
   static void setup_search_path(const char *class_path);

   static void load_zip_library();
+  static void load_jimage_library();
   static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st,
                                                  bool throw_exception, TRAPS);

@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "classfile/imageDecompressor.hpp"
-#include "runtime/thread.hpp"
-#include "utilities/bytes.hpp"
-
-/*
- * Allocate in C Heap not in resource area, otherwise JVM crashes.
- * This array life time is the VM life time. Array is never freed and
- * is not expected to contain more than few references.
- */
-GrowableArray<ImageDecompressor*>* ImageDecompressor::_decompressors =
-  new(ResourceObj::C_HEAP, mtInternal) GrowableArray<ImageDecompressor*>(2, true);
-
-static Symbol* createSymbol(const char* str) {
-  Thread* THREAD = Thread::current();
-  Symbol* sym = SymbolTable::lookup(str, (int) strlen(str), THREAD);
-  if (HAS_PENDING_EXCEPTION) {
-    warning("can't create symbol\n");
-    CLEAR_PENDING_EXCEPTION;
-    return NULL;
-  }
-  return sym;
-}
-
-/*
- * Initialize the array of decompressors.
- */
-bool image_decompressor_init() {
-  Symbol* zipSymbol = createSymbol("zip");
-  if (zipSymbol == NULL) {
-    return false;
-  }
-  ImageDecompressor::add_decompressor(new ZipDecompressor(zipSymbol));
-
-  return true;
-}
-
-/*
- * Decompression entry point. Called from ImageFileReader::get_resource.
- */
-void ImageDecompressor::decompress_resource(u1* compressed, u1* uncompressed,
-        u4 uncompressed_size, const ImageStrings* strings, bool is_C_heap) {
-  bool has_header = false;
-  u1* decompressed_resource = compressed;
-  u1* compressed_resource = compressed;
-
-  // Resource could have been transformed by a stack of decompressors.
-  // Iterate and decompress resources until there is no more header.
-  do {
-    ResourceHeader _header;
-    memcpy(&_header, compressed_resource, sizeof (ResourceHeader));
-    has_header = _header._magic == ResourceHeader::resource_header_magic;
-    if (has_header) {
-      // decompressed_resource array contains the result of decompression
-      // when a resource content is terminal, it means that it is an actual resource,
-      // not an intermediate not fully uncompressed content. In this case
-      // the resource is allocated as an mtClass, otherwise as an mtOther
-      decompressed_resource = is_C_heap && _header._is_terminal ?
-        NEW_C_HEAP_ARRAY(u1, _header._uncompressed_size, mtClass) :
-        NEW_C_HEAP_ARRAY(u1, _header._uncompressed_size, mtOther);
-      // Retrieve the decompressor name
-      const char* decompressor_name = strings->get(_header._decompressor_name_offset);
-      if (decompressor_name == NULL) warning("image decompressor not found\n");
-      guarantee(decompressor_name, "image decompressor not found");
-      // Retrieve the decompressor instance
-      ImageDecompressor* decompressor = get_decompressor(decompressor_name);
-      if (decompressor == NULL) {
-        warning("image decompressor %s not found\n", decompressor_name);
-      }
-      guarantee(decompressor, "image decompressor not found");
-      u1* compressed_resource_base = compressed_resource;
-      compressed_resource += ResourceHeader::resource_header_length;
-      // Ask the decompressor to decompress the compressed content
-      decompressor->decompress_resource(compressed_resource, decompressed_resource,
-        &_header, strings);
-      if (compressed_resource_base != compressed) {
-        FREE_C_HEAP_ARRAY(char, compressed_resource_base);
-      }
-      compressed_resource = decompressed_resource;
-    }
-  } while (has_header);
-  memcpy(uncompressed, decompressed_resource, uncompressed_size);
-}
-
-// Zip decompressor
-
-void ZipDecompressor::decompress_resource(u1* data, u1* uncompressed,
-        ResourceHeader* header, const ImageStrings* strings) {
-  char* msg = NULL;
-  jboolean res = ClassLoader::decompress(data, header->_size, uncompressed,
-        header->_uncompressed_size, &msg);
-  if (!res) warning("decompression failed due to %s\n", msg);
-  guarantee(res, "decompression failed");
-}
-
-// END Zip Decompressor
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_CLASSFILE_IMAGEDECOMPRESSOR_HPP
-#define SHARE_VM_CLASSFILE_IMAGEDECOMPRESSOR_HPP
-
-#include "runtime/thread.inline.hpp"
-#include "classfile/classLoader.hpp"
-#include "classfile/imageFile.hpp"
-#include "classfile/symbolTable.hpp"
-#include "oops/symbol.hpp"
-#include "utilities/growableArray.hpp"
-
-/*
- * Compressed resources located in image have an header.
- * This header contains:
- * - _magic: A magic u4, required to retrieved the header in the compressed content
- * - _size: The size of the compressed resource.
- * - _uncompressed_size: The uncompressed size of the compressed resource.
- * - _decompressor_name_offset: The ImageDecompressor instance name StringsTable offset.
- * - _decompressor_config_offset: StringsTable offset of configuration that could be needed by
- *   the decompressor in order to decompress.
- * - _is_terminal: 1: the compressed content is terminal. Uncompressing it would
- *   create the actual resource. 0: the compressed content is not terminal. Uncompressing it
- *   will result in a compressed content to be decompressed (This occurs when a stack of compressors
- *   have been used to compress the resource.
- */
-struct ResourceHeader {
-  /* Length of header, needed to retrieve content offset */
-  static const u1 resource_header_length = 21;
-  /* magic bytes that identifies a compressed resource header*/
-  static const u4 resource_header_magic = 0xCAFEFAFA;
-  u4 _magic; // Resource header
-  u4 _size;  // Resource size
-  u4 _uncompressed_size;  // Expected uncompressed size
-  u4 _decompressor_name_offset;  // Strings table decompressor offset
-  u4 _decompressor_config_offset; // Strings table config offset
-  u1 _is_terminal; // Last decompressor 1, otherwise 0.
-};
-
-/*
- * Resources located in jimage file can be compressed. Compression occurs at
- * jimage file creation time. When compressed a resource is added an header that
- * contains the name of the compressor that compressed it.
- * Various compression strategies can be applied to compress a resource.
- * The same resource can even be compressed multiple time by a stack of compressors.
- * At runtime, a resource is decompressed in a loop until there is no more header
- * meaning that the resource is equivalent to the not compressed resource.
- * In each iteration, the name of the compressor located in the current header
- * is used to retrieve the associated instance of ImageDecompressor.
- * For example “zip” is the name of the compressor that compresses resources
- * using the zip algorithm. The ZipDecompressor class name is also “zip”.
- * ImageDecompressor instances are retrieved from a static array in which
- * they are registered.
- */
-class ImageDecompressor: public CHeapObj<mtClass> {
-
- private:
-  const Symbol* _name;
-
-  /*
-   * Array of concrete decompressors. This array is used to retrieve the decompressor
-   * that can handle resource decompression.
-   */
-  static GrowableArray<ImageDecompressor*>* _decompressors;
-
-  /*
-   * Identifier of a decompressor. This name is the identification key to retrieve
-   * decompressor from a resource header.
-   */
-  inline const Symbol* get_name() const { return _name; }
-
- protected:
-  ImageDecompressor(const Symbol* name) : _name(name) {
-  }
-  virtual void decompress_resource(u1* data, u1* uncompressed,
-    ResourceHeader* header, const ImageStrings* strings) = 0;
-
- public:
-  inline static void add_decompressor(ImageDecompressor* decompressor) {
-    _decompressors->append(decompressor);
-  }
-  inline static ImageDecompressor* get_decompressor(const char * decompressor_name) {
-    Thread* THREAD = Thread::current();
-    TempNewSymbol sym = SymbolTable::new_symbol(decompressor_name,
-      (int) strlen(decompressor_name), CHECK_NULL);
-    if (HAS_PENDING_EXCEPTION) {
-      warning("can't create symbol\n");
-      CLEAR_PENDING_EXCEPTION;
-      return NULL;
-    }
-    for (int i = 0; i < _decompressors->length(); i++) {
-      ImageDecompressor* decompressor = _decompressors->at(i);
-      if (decompressor->get_name()->fast_compare(sym) == 0) {
-        return decompressor;
-      }
-    }
-    guarantee(false, "No decompressor found.");
-    return NULL;
-  }
-  static void decompress_resource(u1* compressed, u1* uncompressed,
-    u4 uncompressed_size, const ImageStrings* strings, bool is_C_heap);
-};
-
-/**
- * Zip decompressor.
- */
-class ZipDecompressor : public ImageDecompressor {
- public:
-  ZipDecompressor(const Symbol* sym) : ImageDecompressor(sym) { }
-  void decompress_resource(u1* data, u1* uncompressed, ResourceHeader* header,
-                           const ImageStrings* strings);
-};
-
-#endif // SHARE_VM_CLASSFILE_IMAGEDECOMPRESSOR_HPP
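The deleted header documents the compressor-stacking scheme: each compression pass prepends a ResourceHeader carrying a magic word, sizes, and the compressor's name, and at read time the resource is peeled one pass at a time until no header remains. The control flow reduces to a small loop; a sketch with hypothetical helpers (the real code also freed each intermediate buffer and dispatched to a registered decompressor by the name stored in the header):

    #include <cstdint>
    #include <cstring>

    // Stand-in for the deleted ResourceHeader (name/config offsets and the
    // terminal flag omitted for brevity).
    struct Header {
      static const uint32_t kMagic = 0xCAFEFAFA;
      uint32_t magic;
      uint32_t size;               // compressed payload size
      uint32_t uncompressed_size;  // size after undoing this one pass
    };

    // Placeholder for a real codec (zip in the deleted code).
    static uint8_t* undo_one_pass(const Header& h, const uint8_t* payload) {
      uint8_t* out = new uint8_t[h.uncompressed_size];
      // ... inflate h.size payload bytes into out ...
      return out;
    }

    // Peel headers in a loop until no magic marker remains; only then is the
    // buffer the actual resource (mirrors ImageDecompressor::decompress_resource).
    uint8_t* undo_all_passes(uint8_t* data) {
      for (;;) {
        Header h;
        memcpy(&h, data, sizeof(h));
        if (h.magic != Header::kMagic) {
          return data;  // terminal content: fully decompressed
        }
        data = undo_one_pass(h, data + sizeof(h));
      }
    }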
@ -1,546 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "classfile/imageDecompressor.hpp"
|
||||
#include "classfile/imageFile.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "runtime/mutex.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/os.inline.hpp"
|
||||
#include "utilities/endian.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
|
||||
// Image files are an alternate file format for storing classes and resources. The
|
||||
// goal is to supply file access which is faster and smaller than the jar format.
|
||||
//
|
||||
// (More detailed nodes in the header.)
|
||||
//
|
||||
|
||||
// Compute the Perfect Hashing hash code for the supplied UTF-8 string.
|
||||
s4 ImageStrings::hash_code(const char* string, s4 seed) {
|
||||
// Access bytes as unsigned.
|
||||
u1* bytes = (u1*)string;
|
||||
// Compute hash code.
|
||||
for (u1 byte = *bytes++; byte; byte = *bytes++) {
|
||||
seed = (seed * HASH_MULTIPLIER) ^ byte;
|
||||
}
|
||||
// Ensure the result is not signed.
|
||||
return seed & 0x7FFFFFFF;
|
||||
}
|
||||
|
||||
// Match up a string in a perfect hash table. Result still needs validation
|
||||
// for precise match (false positive.)
|
||||
s4 ImageStrings::find(Endian* endian, const char* name, s4* redirect, u4 length) {
|
||||
// If the table is empty, then short cut.
|
||||
if (redirect == NULL || length == 0) {
|
||||
return NOT_FOUND;
|
||||
}
|
||||
// Compute the basic perfect hash for name.
|
||||
s4 hash_code = ImageStrings::hash_code(name);
|
||||
// Modulo table size.
|
||||
s4 index = hash_code % length;
|
||||
// Get redirect entry.
|
||||
// value == 0 then not found
|
||||
// value < 0 then -1 - value is true index
|
||||
// value > 0 then value is seed for recomputing hash.
|
||||
s4 value = endian->get(redirect[index]);
|
||||
// if recompute is required.
|
||||
if (value > 0) {
|
||||
// Entry collision value, need to recompute hash.
|
||||
hash_code = ImageStrings::hash_code(name, value);
|
||||
// Modulo table size.
|
||||
return hash_code % length;
|
||||
} else if (value < 0) {
|
||||
// Compute direct index.
|
||||
return -1 - value;
|
||||
}
|
||||
// No entry found.
|
||||
return NOT_FOUND;
|
||||
}
|
||||
|
||||
// Test to see if UTF-8 string begins with the start UTF-8 string. If so,
|
||||
// return non-NULL address of remaining portion of string. Otherwise, return
|
||||
// NULL. Used to test sections of a path without copying from image string
|
||||
// table.
|
||||
const char* ImageStrings::starts_with(const char* string, const char* start) {
|
||||
char ch1, ch2;
|
||||
// Match up the strings the best we can.
|
||||
while ((ch1 = *string) && (ch2 = *start)) {
|
||||
if (ch1 != ch2) {
|
||||
// Mismatch, return NULL.
|
||||
return NULL;
|
||||
}
|
||||
// Next characters.
|
||||
string++, start++;
|
||||
}
|
||||
// Return remainder of string.
|
||||
return string;
|
||||
}

// Inflates the attribute stream into individual values stored in the long
// array _attributes. This allows an attribute value to be quickly accessed by
// direct indexing. Unspecified values default to zero (from constructor.)
void ImageLocation::set_data(u1* data) {
  // Inflate the attribute stream into an array of attributes.
  u1 byte;
  // Repeat until end header is found.
  while ((byte = *data)) {
    // Extract kind from header byte.
    u1 kind = attribute_kind(byte);
    guarantee(kind < ATTRIBUTE_COUNT, "invalid image location attribute");
    // Extract length of data (in bytes).
    u1 n = attribute_length(byte);
    // Read value (most significant first.)
    _attributes[kind] = attribute_value(data + 1, n);
    // Position to next attribute by skipping attribute header and data bytes.
    data += n + 1;
  }
}

// Zero all attribute values.
void ImageLocation::clear_data() {
  // Set defaults to zero.
  memset(_attributes, 0, sizeof(_attributes));
}

// ImageModuleData constructor maps out sub-tables for faster access.
ImageModuleData::ImageModuleData(const ImageFileReader* image_file,
                                 const char* module_data_name) :
  _image_file(image_file),
  _endian(image_file->endian()),
  _strings(image_file->get_strings()) {
  // Retrieve the resource containing the module data for the image file.
  ImageLocation location;
  bool found = image_file->find_location(module_data_name, location);
  guarantee(found, "missing module data");
  u8 data_size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
  _data = (u1*)NEW_C_HEAP_ARRAY(char, data_size, mtClass);
  _image_file->get_resource(location, _data);
  // Map out the header.
  _header = (Header*)_data;
  // Get the package to module entry count.
  u4 ptm_count = _header->ptm_count(_endian);
  // Get the module to package entry count.
  u4 mtp_count = _header->mtp_count(_endian);
  // Compute the offset of the package to module perfect hash redirect.
  u4 ptm_redirect_offset = sizeof(Header);
  // Compute the offset of the package to module data.
  u4 ptm_data_offset = ptm_redirect_offset + ptm_count * sizeof(s4);
  // Compute the offset of the module to package perfect hash redirect.
  u4 mtp_redirect_offset = ptm_data_offset + ptm_count * sizeof(PTMData);
  // Compute the offset of the module to package data.
  u4 mtp_data_offset = mtp_redirect_offset + mtp_count * sizeof(s4);
  // Compute the offset of the module to package tables.
  u4 mtp_packages_offset = mtp_data_offset + mtp_count * sizeof(MTPData);
  // Compute the address of the package to module perfect hash redirect.
  _ptm_redirect = (s4*)(_data + ptm_redirect_offset);
  // Compute the address of the package to module data.
  _ptm_data = (PTMData*)(_data + ptm_data_offset);
  // Compute the address of the module to package perfect hash redirect.
  _mtp_redirect = (s4*)(_data + mtp_redirect_offset);
  // Compute the address of the module to package data.
  _mtp_data = (MTPData*)(_data + mtp_data_offset);
  // Compute the address of the module to package tables.
  _mtp_packages = (s4*)(_data + mtp_packages_offset);
}

// Release module data resource.
ImageModuleData::~ImageModuleData() {
  if (_data != NULL) {
    FREE_C_HEAP_ARRAY(u1, _data);
  }
}

// Return the name of the module data resource. Ex. "./lib/modules/file.jimage"
// yields "file.jdata"
void ImageModuleData::module_data_name(char* buffer, const char* image_file_name) {
  // Locate the last slash in the file name path.
  const char* slash = strrchr(image_file_name, os::file_separator()[0]);
  // Trim the path to name and extension.
  const char* name = slash != NULL ? slash + 1 : (char *)image_file_name;
  // Locate the extension period.
  const char* dot = strrchr(name, '.');
  guarantee(dot, "missing extension on jimage name");
  // Trim to only base name.
  int length = dot - name;
  strncpy(buffer, name, length);
  buffer[length] = '\0';
  // Append extension.
  strcat(buffer, ".jdata");
}

// Return the module in which a package resides. Returns NULL if not found.
const char* ImageModuleData::package_to_module(const char* package_name) {
  // Search the package to module table.
  s4 index = ImageStrings::find(_endian, package_name, _ptm_redirect,
                                _header->ptm_count(_endian));
  // If entry is found.
  if (index != ImageStrings::NOT_FOUND) {
    // Retrieve the package to module entry.
    PTMData* data = _ptm_data + index;
    // Verify that it is the correct data.
    if (strcmp(package_name, get_string(data->name_offset(_endian))) != 0) {
      return NULL;
    }
    // Return the module name.
    return get_string(data->module_name_offset(_endian));
  }
  return NULL;
}

// Returns all the package names in a module. Returns NULL if module not found.
GrowableArray<const char*>* ImageModuleData::module_to_packages(const char* module_name) {
  // Search the module to package table.
  s4 index = ImageStrings::find(_endian, module_name, _mtp_redirect,
                                _header->mtp_count(_endian));
  // If entry is found.
  if (index != ImageStrings::NOT_FOUND) {
    // Retrieve the module to package entry.
    MTPData* data = _mtp_data + index;
    // Verify that it is the correct data.
    if (strcmp(module_name, get_string(data->name_offset(_endian))) != 0) {
      return NULL;
    }
    // Construct an array of all the package entries.
    GrowableArray<const char*>* packages = new GrowableArray<const char*>();
    s4 package_offset = data->package_offset(_endian);
    for (u4 i = 0; i < data->package_count(_endian); i++) {
      u4 package_name_offset = mtp_package(package_offset + i);
      const char* package_name = get_string(package_name_offset);
      packages->append(package_name);
    }
    return packages;
  }
  return NULL;
}
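
Taken together, a hypothetical caller might use the module data like this (the image path, module and package names are illustrative, and `reader` is assumed to be an already-open ImageFileReader*; error handling elided):

  // Derive the module data resource name, e.g. "bootmodules.jimage" -> "bootmodules.jdata".
  char data_name[IMAGE_MAX_PATH];
  ImageModuleData::module_data_name(data_name, "lib/modules/bootmodules.jimage");
  ImageModuleData module_data(reader, data_name);
  // Map a package to its module, then list all packages of that module.
  const char* module = module_data.package_to_module("java/lang");  // e.g. "java.base"
  GrowableArray<const char*>* packages = module_data.module_to_packages("java.base");
  if (packages != NULL) {
    for (int i = 0; i < packages->length(); i++) {
      tty->print_cr("%s", packages->at(i));
    }
  }
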

// Table to manage multiple opens of an image file.
GrowableArray<ImageFileReader*>* ImageFileReader::_reader_table =
  new(ResourceObj::C_HEAP, mtInternal) GrowableArray<ImageFileReader*>(2, true);

// Open an image file, reuse structure if file already open.
ImageFileReader* ImageFileReader::open(const char* name, bool big_endian) {
  // Lock out _reader_table.
  MutexLocker ml(ImageFileReaderTable_lock);
  ImageFileReader* reader;
  // Search for an existing image file.
  for (int i = 0; i < _reader_table->length(); i++) {
    // Retrieve table entry.
    reader = _reader_table->at(i);
    // If name matches, then reuse (bump up use count.)
    if (strcmp(reader->name(), name) == 0) {
      reader->inc_use();
      return reader;
    }
  }
  // Need a new image reader.
  reader = new ImageFileReader(name, big_endian);
  bool opened = reader->open();
  // If failed to open.
  if (!opened) {
    delete reader;
    return NULL;
  }
  // Bump use count and add to table.
  reader->inc_use();
  _reader_table->append(reader);
  return reader;
}

// Close an image file if the file is not in use elsewhere.
void ImageFileReader::close(ImageFileReader *reader) {
  // Lock out _reader_table.
  MutexLocker ml(ImageFileReaderTable_lock);
  // If last use then remove from table and then close.
  if (reader->dec_use()) {
    _reader_table->remove(reader);
    delete reader;
  }
}

// Return an id for the specified ImageFileReader.
u8 ImageFileReader::readerToID(ImageFileReader *reader) {
  // ID is just the cloaked reader address.
  return (u8)reader;
}

// Validate the image id.
bool ImageFileReader::idCheck(u8 id) {
  // Make sure the ID is a managed (_reader_table) reader.
  MutexLocker ml(ImageFileReaderTable_lock);
  return _reader_table->contains((ImageFileReader*)id);
}

// Return the ImageFileReader for the specified id.
ImageFileReader* ImageFileReader::idToReader(u8 id) {
#ifdef PRODUCT
  // Fast convert.
  return (ImageFileReader*)id;
#else
  // Do a slow check before fast convert.
  return idCheck(id) ? (ImageFileReader*)id : NULL;
#endif
}

// Constructor initializes to a closed state.
ImageFileReader::ImageFileReader(const char* name, bool big_endian) {
  // Copy the image file name.
  _name = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtClass);
  strcpy(_name, name);
  // Initialize for a closed file.
  _fd = -1;
  _endian = Endian::get_handler(big_endian);
  _index_data = NULL;
}

// Close image and free up data structures.
ImageFileReader::~ImageFileReader() {
  // Ensure file is closed.
  close();
  // Free up name.
  if (_name != NULL) {
    FREE_C_HEAP_ARRAY(char, _name);
    _name = NULL;
  }
}

// Open image file for read access.
bool ImageFileReader::open() {
  // If file exists open for reading.
  struct stat st;
  if (os::stat(_name, &st) != 0 ||
      (st.st_mode & S_IFREG) != S_IFREG ||
      (_fd = os::open(_name, 0, O_RDONLY)) == -1) {
    return false;
  }
  // Retrieve the file size.
  _file_size = (u8)st.st_size;
  // Read image file header and verify it has a valid header.
  size_t header_size = sizeof(ImageHeader);
  if (_file_size < header_size ||
      !read_at((u1*)&_header, header_size, 0) ||
      _header.magic(_endian) != IMAGE_MAGIC ||
      _header.major_version(_endian) != MAJOR_VERSION ||
      _header.minor_version(_endian) != MINOR_VERSION) {
    close();
    return false;
  }
  // Size of image index.
  _index_size = index_size();
  // Make sure file is large enough to contain the index.
  if (_file_size < _index_size) {
    return false;
  }
  // Determine how much of the image is memory mapped.
  off_t map_size = (off_t)(MemoryMapImage ? _file_size : _index_size);
  // Memory map image (minimally the index.)
  _index_data = (u1*)os::map_memory(_fd, _name, 0, NULL, map_size, true, false);
  guarantee(_index_data, "image file not memory mapped");
  // Retrieve length of index perfect hash table.
  u4 length = table_length();
  // Compute offset of the perfect hash table redirect table.
  u4 redirect_table_offset = (u4)header_size;
  // Compute offset of index attribute offsets.
  u4 offsets_table_offset = redirect_table_offset + length * sizeof(s4);
  // Compute offset of index location attribute data.
  u4 location_bytes_offset = offsets_table_offset + length * sizeof(u4);
  // Compute offset of index string table.
  u4 string_bytes_offset = location_bytes_offset + locations_size();
  // Compute address of the perfect hash table redirect table.
  _redirect_table = (s4*)(_index_data + redirect_table_offset);
  // Compute address of index attribute offsets.
  _offsets_table = (u4*)(_index_data + offsets_table_offset);
  // Compute address of index location attribute data.
  _location_bytes = _index_data + location_bytes_offset;
  // Compute address of index string table.
  _string_bytes = _index_data + string_bytes_offset;
  // Successful open.
  return true;
}

// Close image file.
void ImageFileReader::close() {
  // Deallocate the index.
  if (_index_data != NULL) {
    os::unmap_memory((char*)_index_data, _index_size);
    _index_data = NULL;
  }
  // Close file.
  if (_fd != -1) {
    os::close(_fd);
    _fd = -1;
  }
}

// Read directly from the file.
bool ImageFileReader::read_at(u1* data, u8 size, u8 offset) const {
  return os::read_at(_fd, data, size, offset) == size;
}

// Find the location attributes associated with the path. Returns true if
// the location is found, false otherwise.
bool ImageFileReader::find_location(const char* path, ImageLocation& location) const {
  // Locate the entry in the index perfect hash table.
  s4 index = ImageStrings::find(_endian, path, _redirect_table, table_length());
  // If found.
  if (index != ImageStrings::NOT_FOUND) {
    // Get address of first byte of location attribute stream.
    u1* data = get_location_data(index);
    // Expand location attributes.
    location.set_data(data);
    // Make sure result is not a false positive.
    return verify_location(location, path);
  }
  return false;
}

// Assemble the location path from the string fragments indicated in the location attributes.
void ImageFileReader::location_path(ImageLocation& location, char* path, size_t max) const {
  // Manage the image string table.
  ImageStrings strings(_string_bytes, _header.strings_size(_endian));
  // Position to first character of the path buffer.
  char* next = path;
  // Temp for string length.
  size_t length;
  // Get module string.
  const char* module = location.get_attribute(ImageLocation::ATTRIBUTE_MODULE, strings);
  // If module string is not empty string.
  if (*module != '\0') {
    // Get length of module name.
    length = strlen(module);
    // Make sure there is no buffer overflow.
    guarantee(next - path + length + 2 < max, "buffer overflow");
    // Append '/module/'.
    *next++ = '/';
    strcpy(next, module); next += length;
    *next++ = '/';
  }
  // Get parent (package) string.
  const char* parent = location.get_attribute(ImageLocation::ATTRIBUTE_PARENT, strings);
  // If parent string is not empty string.
  if (*parent != '\0') {
    // Get length of parent string.
    length = strlen(parent);
    // Make sure there is no buffer overflow.
    guarantee(next - path + length + 1 < max, "buffer overflow");
    // Append 'parent/'.
    strcpy(next, parent); next += length;
    *next++ = '/';
  }
  // Get base name string.
  const char* base = location.get_attribute(ImageLocation::ATTRIBUTE_BASE, strings);
  // Get length of base name.
  length = strlen(base);
  // Make sure there is no buffer overflow.
  guarantee(next - path + length < max, "buffer overflow");
  // Append base name.
  strcpy(next, base); next += length;
  // Get extension string.
  const char* extension = location.get_attribute(ImageLocation::ATTRIBUTE_EXTENSION, strings);
  // If extension string is not empty string.
  if (*extension != '\0') {
    // Get length of extension string.
    length = strlen(extension);
    // Make sure there is no buffer overflow.
    guarantee(next - path + length + 1 < max, "buffer overflow");
    // Append '.extension'.
    *next++ = '.';
    strcpy(next, extension); next += length;
  }
  // Make sure there is no buffer overflow.
  guarantee((size_t)(next - path) < max, "buffer overflow");
  // Terminate string.
  *next = '\0';
}

// Verify that a found location matches the supplied path (without copying.)
bool ImageFileReader::verify_location(ImageLocation& location, const char* path) const {
  // Manage the image string table.
  ImageStrings strings(_string_bytes, _header.strings_size(_endian));
  // Position to first character of the path string.
  const char* next = path;
  // Get module name string.
  const char* module = location.get_attribute(ImageLocation::ATTRIBUTE_MODULE, strings);
  // If module string is not empty.
  if (*module != '\0') {
    // Compare '/module/'.
    if (*next++ != '/') return false;
    if (!(next = ImageStrings::starts_with(next, module))) return false;
    if (*next++ != '/') return false;
  }
  // Get parent (package) string.
  const char* parent = location.get_attribute(ImageLocation::ATTRIBUTE_PARENT, strings);
  // If parent string is not empty string.
  if (*parent != '\0') {
    // Compare 'parent/'.
    if (!(next = ImageStrings::starts_with(next, parent))) return false;
    if (*next++ != '/') return false;
  }
  // Get base name string.
  const char* base = location.get_attribute(ImageLocation::ATTRIBUTE_BASE, strings);
  // Compare with base name.
  if (!(next = ImageStrings::starts_with(next, base))) return false;
  // Get extension string.
  const char* extension = location.get_attribute(ImageLocation::ATTRIBUTE_EXTENSION, strings);
  // If extension is not empty.
  if (*extension != '\0') {
    // Compare '.extension'.
    if (*next++ != '.') return false;
    if (!(next = ImageStrings::starts_with(next, extension))) return false;
  }
  // True only if complete match and no more characters.
  return *next == '\0';
}

// Return the resource data for the supplied location.
void ImageFileReader::get_resource(ImageLocation& location, u1* uncompressed_data) const {
  // Retrieve the byte offset and size of the resource.
  u8 offset = location.get_attribute(ImageLocation::ATTRIBUTE_OFFSET);
  u8 uncompressed_size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
  u8 compressed_size = location.get_attribute(ImageLocation::ATTRIBUTE_COMPRESSED);
  if (compressed_size != 0) {
    ResourceMark rm;
    u1* compressed_data;
    // If not memory mapped, read in bytes.
    if (!MemoryMapImage) {
      // Allocate buffer for the compressed data.
      compressed_data = NEW_RESOURCE_ARRAY(u1, compressed_size);
      // Read bytes from offset beyond the image index.
      bool is_read = read_at(compressed_data, compressed_size, _index_size + offset);
      guarantee(is_read, "error reading from image or short read");
    } else {
      compressed_data = get_data_address() + offset;
    }
    // Get image string table.
    const ImageStrings strings = get_strings();
    // Decompress resource.
    ImageDecompressor::decompress_resource(compressed_data, uncompressed_data, uncompressed_size,
                                           &strings, false);
  } else {
    // Read bytes from offset beyond the image index.
    bool is_read = read_at(uncompressed_data, uncompressed_size, _index_size + offset);
    guarantee(is_read, "error reading from image or short read");
  }
}
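
The pieces above compose into a simple read path. A minimal sketch of a hypothetical caller (the image path and resource name are illustrative; error handling elided):

  void read_one_resource(const char* image_name) {
    // Open (or share) a reader for the image file.
    ImageFileReader* reader = ImageFileReader::open(image_name);
    if (reader == NULL) return;
    // Look up the resource location via the perfect hash index.
    ImageLocation location;
    if (reader->find_location("/java.base/java/lang/Object.class", location)) {
      // Size the buffer from the uncompressed-size attribute.
      u8 size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
      u1* buffer = NEW_C_HEAP_ARRAY(u1, size, mtClass);
      reader->get_resource(location, buffer);  // decompresses if needed
      // ... use buffer ...
      FREE_C_HEAP_ARRAY(u1, buffer);
    }
    ImageFileReader::close(reader);
  }
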
@ -1,602 +0,0 @@
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CLASSFILE_IMAGEFILE_HPP
#define SHARE_VM_CLASSFILE_IMAGEFILE_HPP

#include "classfile/classLoader.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/endian.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"

// Image files are an alternate file format for storing classes and resources. The
// goal is to supply file access which is faster and smaller than the jar format.
// It should be noted that unlike jars, information stored in an image is in native
// endian format. This allows the image to be mapped into memory without endian
// translation. This also means that images are platform dependent.
//
// Image files are structured as three sections;
//
//         +-----------+
//         |  Header   |
//         +-----------+
//         |           |
//         |   Index   |
//         |           |
//         +-----------+
//         |           |
//         |           |
//         | Resources |
//         |           |
//         |           |
//         +-----------+
//
// The header contains information related to identification and description of
// contents.
//
//         +-------------------------+
//         |   Magic (0xCAFEDADA)    |
//         +------------+------------+
//         | Major Vers | Minor Vers |
//         +------------+------------+
//         |          Flags          |
//         +-------------------------+
//         |     Resource Count      |
//         +-------------------------+
//         |      Table Length       |
//         +-------------------------+
//         |     Attributes Size     |
//         +-------------------------+
//         |      Strings Size       |
//         +-------------------------+
//
// Magic - means of identifying validity of the file. This avoids requiring a
//         special file extension.
// Major vers, minor vers - differences in version numbers indicate structural
//                          changes in the image.
// Flags - various image wide flags (future).
// Resource count - number of resources in the file.
// Table length - the length of lookup tables used in the index.
// Attributes size - number of bytes in the region used to store location attribute
//                   streams.
// Strings size - the size of the region used to store strings used by the
//                index and meta data.
//
// The index contains information related to resource lookup. The algorithm
// used for lookup is "A Practical Minimal Perfect Hashing Method"
// (http://homepages.dcc.ufmg.br/~nivio/papers/wea05.pdf). Given a path string
// in the form /<module>/<package>/<base>.<extension>, return the resource location
// information;
//
//     redirectIndex = hash(path, DEFAULT_SEED) % table_length;
//     redirect = redirectTable[redirectIndex];
//     if (redirect == 0) return not found;
//     locationIndex = redirect < 0 ? -1 - redirect : hash(path, redirect) % table_length;
//     location = locationTable[locationIndex];
//     if (!verify(location, path)) return not found;
//     return location;
//
// Note: The hash function takes an initial seed value. A different seed value
// usually returns a different result for strings that would otherwise collide with
// other seeds. The verify function guarantees the found resource location is
// indeed the resource we are looking for.
//
// The following is the format of the index;
//
//         +-------------------+
//         |  Redirect Table   |
//         +-------------------+
//         | Attribute Offsets |
//         +-------------------+
//         |  Attribute Data   |
//         +-------------------+
//         |      Strings      |
//         +-------------------+
//
// Redirect Table - Array of 32-bit signed values representing actions that
//                  should take place for hashed strings that map to that
//                  value. Negative values indicate no hash collision and can be
//                  quickly converted to indices into attribute offsets. Positive
//                  values represent a new seed for hashing an index into attribute
//                  offsets. Zero indicates not found.
// Attribute Offsets - Array of 32-bit unsigned values representing offsets into
//                     attribute data. Attribute offsets can be iterated to do a
//                     full survey of resources in the image. Offset of zero
//                     indicates no attributes.
// Attribute Data - Bytes representing compact attribute data for locations. (See
//                  comments in ImageLocation.)
// Strings - Collection of zero terminated UTF-8 strings used by the index and
//           image meta data. Each string is accessed by offset. Each string is
//           unique. Offset zero is reserved for the empty string.
//
// Note that the memory mapped index assumes 32 bit alignment of each component
// in the index.
//
// Endianness of an image.
// An image booted by hotspot is always in native endian. However, it is possible
// to read (by the JDK) in alternate endian format. Primarily, this is during
// cross platform scenarios. Ex. where javac needs to read an embedded image
// to access classes for cross compilation.
//

class ImageFileReader; // forward declaration

// Manage image file string table.
class ImageStrings VALUE_OBJ_CLASS_SPEC {
private:
  u1* _data; // Data bytes for strings.
  u4 _size;  // Number of bytes in the string table.
public:
  enum {
    // Not found result from find routine.
    NOT_FOUND = -1,
    // Prime used to generate hash for Perfect Hashing.
    HASH_MULTIPLIER = 0x01000193
  };

  ImageStrings(u1* data, u4 size) : _data(data), _size(size) {}

  // Return the UTF-8 string beginning at offset.
  inline const char* get(u4 offset) const {
    guarantee(offset < _size, "offset exceeds string table size");
    return (const char*)(_data + offset);
  }

  // Compute the Perfect Hashing hash code for the supplied UTF-8 string.
  inline static u4 hash_code(const char* string) {
    return hash_code(string, HASH_MULTIPLIER);
  }

  // Compute the Perfect Hashing hash code for the supplied string, starting at seed.
  static s4 hash_code(const char* string, s4 seed);

  // Match up a string in a perfect hash table. Result still needs validation
  // for precise match.
  static s4 find(Endian* endian, const char* name, s4* redirect, u4 length);

  // Test to see if UTF-8 string begins with the start UTF-8 string. If so,
  // return non-NULL address of remaining portion of string. Otherwise, return
  // NULL. Used to test sections of a path without copying from image string
  // table.
  static const char* starts_with(const char* string, const char* start);

  // Test to see if UTF-8 string begins with start char. If so, return non-NULL
  // address of remaining portion of string. Otherwise, return NULL. Used
  // to test a character of a path without copying.
  inline static const char* starts_with(const char* string, const char ch) {
    return *string == ch ? string + 1 : NULL;
  }
};

// Manage image file location attribute data. Within an image, a location's
// attributes are compressed into a stream of bytes. An attribute stream is
// composed of individual attribute sequences. Each attribute sequence begins with
// a header byte containing the attribute 'kind' (upper 5 bits of header) and the
// 'length' less 1 (lower 3 bits of header) of bytes that follow containing the
// attribute value. Attribute values present as most significant byte first.
//
// Ex. Container offset (ATTRIBUTE_OFFSET) 0x33562 would be represented as 0x22
// (kind = 4, length = 3), 0x03, 0x35, 0x62.
//
// An attribute stream is terminated with a header kind of ATTRIBUTE_END (header
// byte of zero.)
//
// ImageLocation inflates the stream into individual values stored in the long
// array _attributes. This allows an attribute value to be quickly accessed by
// direct indexing. Unspecified values default to zero.
//
// Notes:
//  - Even though ATTRIBUTE_END is used to mark the end of the attribute stream,
//    streams will contain zero byte values to represent lesser significant bits.
//    Thus, detecting a zero byte is not sufficient to detect the end of an attribute
//    stream.
//  - ATTRIBUTE_OFFSET represents the number of bytes from the beginning of the region
//    storing the resources. Thus, in an image this represents the number of bytes
//    after the index.
//  - Currently, compressed resources are represented by having a non-zero
//    ATTRIBUTE_COMPRESSED value. This represents the number of bytes stored in the
//    image, and the value of ATTRIBUTE_UNCOMPRESSED represents the number of bytes of the
//    inflated resource in memory. If ATTRIBUTE_COMPRESSED is zero then the value
//    of ATTRIBUTE_UNCOMPRESSED represents both the number of bytes in the image and
//    in memory. In the future, additional compression techniques will be used and
//    represented differently.
//  - Package strings include trailing slash and extensions include prefix period.
//
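
As a worked check of the encoding described above, a small self-contained sketch (types narrowed to plain C++; the program itself is illustrative, not part of the original header) that decodes the example stream for ATTRIBUTE_OFFSET 0x33562:

  #include <cstdio>
  typedef unsigned char u1;
  typedef unsigned long long u8;

  int main() {
    u1 stream[] = { 0x22, 0x03, 0x35, 0x62, 0x00 };  // trailing 0x00 header = ATTRIBUTE_END
    u1 header = stream[0];
    u1 kind = header >> 3;        // 0x22 >> 3 == 4 (ATTRIBUTE_OFFSET)
    u1 n = (header & 0x7) + 1;    // (0x22 & 0x7) + 1 == 3 value bytes
    u8 value = 0;
    for (u1 i = 0; i < n; i++) {  // most significant byte first
      value = (value << 8) | stream[1 + i];
    }
    printf("kind=%u length=%u value=0x%llx\n", (unsigned)kind, (unsigned)n, value);  // 0x33562
    return 0;
  }
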
class ImageLocation VALUE_OBJ_CLASS_SPEC {
public:
  enum {
    ATTRIBUTE_END,          // End of attribute stream marker
    ATTRIBUTE_MODULE,       // String table offset of module name
    ATTRIBUTE_PARENT,       // String table offset of resource path parent
    ATTRIBUTE_BASE,         // String table offset of resource path base
    ATTRIBUTE_EXTENSION,    // String table offset of resource path extension
    ATTRIBUTE_OFFSET,       // Container byte offset of resource
    ATTRIBUTE_COMPRESSED,   // In image byte size of the compressed resource
    ATTRIBUTE_UNCOMPRESSED, // In memory byte size of the uncompressed resource
    ATTRIBUTE_COUNT         // Number of attribute kinds
  };

private:
  // Values of inflated attributes.
  u8 _attributes[ATTRIBUTE_COUNT];

  // Return the attribute value number of bytes.
  inline static u1 attribute_length(u1 data) {
    return (data & 0x7) + 1;
  }

  // Return the attribute kind.
  inline static u1 attribute_kind(u1 data) {
    u1 kind = data >> 3;
    guarantee(kind < ATTRIBUTE_COUNT, "invalid attribute kind");
    return kind;
  }

  // Return the attribute value.
  inline static u8 attribute_value(u1* data, u1 n) {
    guarantee(0 < n && n <= 8, "invalid attribute value length");
    u8 value = 0;
    // Most significant bytes first.
    for (u1 i = 0; i < n; i++) {
      value <<= 8;
      value |= data[i];
    }
    return value;
  }

public:
  ImageLocation() {
    clear_data();
  }

  ImageLocation(u1* data) {
    clear_data();
    set_data(data);
  }

  // Inflates the attribute stream into individual values stored in the long
  // array _attributes. This allows an attribute value to be quickly accessed by
  // direct indexing. Unspecified values default to zero.
  void set_data(u1* data);

  // Zero all attribute values.
  void clear_data();

  // Retrieve an attribute value from the inflated array.
  inline u8 get_attribute(u1 kind) const {
    guarantee(ATTRIBUTE_END < kind && kind < ATTRIBUTE_COUNT, "invalid attribute kind");
    return _attributes[kind];
  }

  // Retrieve an attribute string value from the inflated array.
  inline const char* get_attribute(u4 kind, const ImageStrings& strings) const {
    return strings.get((u4)get_attribute(kind));
  }
};

//
// NOTE: needs revision.
// Each loader requires a set of module meta data to identify which modules and
// packages are managed by that loader. Currently, there is one image file per
// builtin loader, so only one module meta data resource per file.
//
// Each element in the module meta data is a native endian 4 byte integer. Note
// that entries with zero string table offsets should be ignored (
// padding for hash table lookup.)
//
// Format:
//    Count of package to module entries
//    Count of module to package entries
//    Perfect Hash redirect table[Count of package to module entries]
//    Package to module entries[Count of package to module entries]
//        Offset to package name in string table
//        Offset to module name in string table
//    Perfect Hash redirect table[Count of module to package entries]
//    Module to package entries[Count of module to package entries]
//        Offset to module name in string table
//        Count of packages in module
//        Offset to first package in packages table
//    Packages[]
//        Offset to package name in string table
//
// Manage the image module meta data.
class ImageModuleData : public CHeapObj<mtClass> {
  class Header VALUE_OBJ_CLASS_SPEC {
  private:
    u4 _ptm_count; // Count of package to module entries
    u4 _mtp_count; // Count of module to package entries
  public:
    inline u4 ptm_count(Endian* endian) const { return endian->get(_ptm_count); }
    inline u4 mtp_count(Endian* endian) const { return endian->get(_mtp_count); }
  };

  // Hashtable entry
  class HashData VALUE_OBJ_CLASS_SPEC {
  private:
    u4 _name_offset; // Name offset in string table
  public:
    inline s4 name_offset(Endian* endian) const { return endian->get(_name_offset); }
  };

  // Package to module hashtable entry
  class PTMData : public HashData {
  private:
    u4 _module_name_offset; // Module name offset in string table
  public:
    inline s4 module_name_offset(Endian* endian) const { return endian->get(_module_name_offset); }
  };

  // Module to package hashtable entry
  class MTPData : public HashData {
  private:
    u4 _package_count;  // Number of packages in module
    u4 _package_offset; // Offset in package list
  public:
    inline u4 package_count(Endian* endian) const { return endian->get(_package_count); }
    inline u4 package_offset(Endian* endian) const { return endian->get(_package_offset); }
  };

  const ImageFileReader* _image_file; // Source image file
  Endian* _endian;                    // Endian handler
  ImageStrings _strings;              // Image file strings
  u1* _data;                          // Module data resource data
  u8 _data_size;                      // Size of resource data
  Header* _header;                    // Module data header
  s4* _ptm_redirect;                  // Package to module hashtable redirect
  PTMData* _ptm_data;                 // Package to module data
  s4* _mtp_redirect;                  // Module to packages hashtable redirect
  MTPData* _mtp_data;                 // Module to packages data
  s4* _mtp_packages;                  // Package data (name offsets)

  // Return a string from the string table.
  inline const char* get_string(u4 offset) {
    return _strings.get(offset);
  }

  inline u4 mtp_package(u4 index) {
    return _endian->get(_mtp_packages[index]);
  }

public:
  ImageModuleData(const ImageFileReader* image_file, const char* module_data_name);
  ~ImageModuleData();

  // Return the name of the module data resource.
  static void module_data_name(char* buffer, const char* image_file_name);

  // Return the module in which a package resides. Returns NULL if not found.
  const char* package_to_module(const char* package_name);

  // Returns all the package names in a module. Returns NULL if module not found.
  GrowableArray<const char*>* module_to_packages(const char* module_name);
};

// Image file header, starting at offset 0.
class ImageHeader VALUE_OBJ_CLASS_SPEC {
private:
  u4 _magic;          // Image file marker
  u4 _version;        // Image file major and minor version numbers
  u4 _flags;          // Image file flags
  u4 _resource_count; // Number of resources in file
  u4 _table_length;   // Number of slots in index tables
  u4 _locations_size; // Number of bytes in attribute table
  u4 _strings_size;   // Number of bytes in string table

public:
  u4 magic() const { return _magic; }
  u4 magic(Endian* endian) const { return endian->get(_magic); }
  void set_magic(Endian* endian, u4 magic) { return endian->set(_magic, magic); }

  u4 major_version(Endian* endian) const { return endian->get(_version) >> 16; }
  u4 minor_version(Endian* endian) const { return endian->get(_version) & 0xFFFF; }
  void set_version(Endian* endian, u4 major_version, u4 minor_version) {
    return endian->set(_version, major_version << 16 | minor_version);
  }

  u4 flags(Endian* endian) const { return endian->get(_flags); }
  void set_flags(Endian* endian, u4 value) { return endian->set(_flags, value); }

  u4 resource_count(Endian* endian) const { return endian->get(_resource_count); }
  void set_resource_count(Endian* endian, u4 count) { return endian->set(_resource_count, count); }

  u4 table_length(Endian* endian) const { return endian->get(_table_length); }
  void set_table_length(Endian* endian, u4 count) { return endian->set(_table_length, count); }

  u4 locations_size(Endian* endian) const { return endian->get(_locations_size); }
  void set_locations_size(Endian* endian, u4 size) { return endian->set(_locations_size, size); }

  u4 strings_size(Endian* endian) const { return endian->get(_strings_size); }
  void set_strings_size(Endian* endian, u4 size) { return endian->set(_strings_size, size); }
};

// Max path length limit independent of platform. Windows max path is 1024,
// other platforms use 4096. The JCK fails several tests when 1024 is used.
#define IMAGE_MAX_PATH 4096

// Manage the image file.
// ImageFileReader manages the content of an image file.
// Initially, the header of the image file is read for validation. If valid,
// values in the header are used to calculate the size of the image index. The
// index is then memory mapped to allow load on demand and sharing. The
// -XX:+MemoryMapImage flag determines if the entire file is loaded (server use.)
// An image can be used by Hotspot and multiple reference points in the JDK, thus
// it is desirable to share a reader. To accommodate sharing, a share table is
// defined (see ImageFileReaderTable in imageFile.cpp.) To track the number of
// uses, ImageFileReader keeps a use count (_use). Use is incremented when
// 'opened' by a reference point and decremented when 'closed'. A use count of zero
// leads the ImageFileReader to be actually closed and discarded.
class ImageFileReader : public CHeapObj<mtClass> {
private:
  // Manage a number of image files such that an image can be shared across
  // multiple uses (ex. loader.)
  static GrowableArray<ImageFileReader*>* _reader_table;

  char* _name;         // Name of image
  s4 _use;             // Use count
  int _fd;             // File descriptor
  Endian* _endian;     // Endian handler
  u8 _file_size;       // File size in bytes
  ImageHeader _header; // Image header
  size_t _index_size;  // Total size of index
  u1* _index_data;     // Raw index data
  s4* _redirect_table; // Perfect hash redirect table
  u4* _offsets_table;  // Location offset table
  u1* _location_bytes; // Location attributes
  u1* _string_bytes;   // String table

  ImageFileReader(const char* name, bool big_endian);
  ~ImageFileReader();

  // Compute number of bytes in image file index.
  inline u8 index_size() {
    return sizeof(ImageHeader) +
           table_length() * sizeof(u4) * 2 + locations_size() + strings_size();
  }

public:
  enum {
    // Image file marker.
    IMAGE_MAGIC = 0xCAFEDADA,
    // Endian inverted Image file marker.
    IMAGE_MAGIC_INVERT = 0xDADAFECA,
    // Image file major version number.
    MAJOR_VERSION = 1,
    // Image file minor version number.
    MINOR_VERSION = 0
  };

  // Open an image file, reuse structure if file already open.
  static ImageFileReader* open(const char* name, bool big_endian = Endian::is_big_endian());

  // Close an image file if the file is not in use elsewhere.
  static void close(ImageFileReader *reader);

  // Return an id for the specified ImageFileReader.
  static u8 readerToID(ImageFileReader *reader);

  // Validate the image id.
  static bool idCheck(u8 id);

  // Return the ImageFileReader for the specified id.
  static ImageFileReader* idToReader(u8 id);

  // Open image file for read access.
  bool open();

  // Close image file.
  void close();

  // Read directly from the file.
  bool read_at(u1* data, u8 size, u8 offset) const;

  inline Endian* endian() const { return _endian; }

  // Retrieve name of image file.
  inline const char* name() const {
    return _name;
  }

  // Retrieve size of image file.
  inline u8 file_size() const {
    return _file_size;
  }

  // Return first address of index data.
  inline u1* get_index_address() const {
    return _index_data;
  }

  // Return first address of resource data.
  inline u1* get_data_address() const {
    return _index_data + _index_size;
  }

  // Get the size of the index data.
  size_t get_index_size() const {
    return _index_size;
  }

  inline u4 table_length() const {
    return _header.table_length(_endian);
  }

  inline u4 locations_size() const {
    return _header.locations_size(_endian);
  }

  inline u4 strings_size() const {
    return _header.strings_size(_endian);
  }

  inline u4* offsets_table() const {
    return _offsets_table;
  }

  // Increment use count.
  inline void inc_use() {
    _use++;
  }

  // Decrement use count.
  inline bool dec_use() {
    return --_use == 0;
  }

  // Return a string table accessor.
  inline const ImageStrings get_strings() const {
    return ImageStrings(_string_bytes, _header.strings_size(_endian));
  }

  // Return location attribute stream at offset.
  inline u1* get_location_offset_data(u4 offset) const {
    guarantee((u4)offset < _header.locations_size(_endian),
              "offset exceeds location attributes size");
    return offset != 0 ? _location_bytes + offset : NULL;
  }

  // Return location attribute stream for location i.
  inline u1* get_location_data(u4 index) const {
    guarantee((u4)index < _header.table_length(_endian),
              "index exceeds location count");
    u4 offset = _endian->get(_offsets_table[index]);

    return get_location_offset_data(offset);
  }

  // Find the location attributes associated with the path. Returns true if
  // the location is found, false otherwise.
  bool find_location(const char* path, ImageLocation& location) const;

  // Assemble the location path.
  void location_path(ImageLocation& location, char* path, size_t max) const;

  // Verify that a found location matches the supplied path.
  bool verify_location(ImageLocation& location, const char* path) const;

  // Return the resource for the supplied path.
  void get_resource(ImageLocation& location, u1* uncompressed_data) const;
};
#endif // SHARE_VM_CLASSFILE_IMAGEFILE_HPP
hotspot/src/share/vm/classfile/jimage.hpp (new file, 176 lines)
@ -0,0 +1,176 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "jni.h"

// Opaque reference to a JImage file.
class JImageFile;
// Opaque reference to an image file resource location.
typedef jlong JImageLocationRef;

// Max path length limit independent of platform. Windows max path is 1024,
// other platforms use 4096. The JCK fails several tests when 1024 is used.
#define JIMAGE_MAX_PATH 4096

// JImage Error Codes

// The image file is not prefixed with 0xCAFEDADA
#define JIMAGE_BAD_MAGIC (-1)
// The image file does not have a compatible (translatable) version
#define JIMAGE_BAD_VERSION (-2)
// The image file content is malformed
#define JIMAGE_CORRUPTED (-3)

/*
 * JImageOpen - Given the supplied full path file name, open an image file. This
 * function will also initialize tables and retrieve meta-data necessary to
 * satisfy other functions in the API. If the image file has been previously
 * opened, a new open request will share memory and resources used by the previous
 * open. A call to JImageOpen should be balanced by a call to JImageClose, to
 * release memory and resources used. If the image file is not found or cannot
 * be opened, then NULL is returned and error will contain a reason for the
 * failure; a positive value for a system error number, negative for a jimage
 * specific error (see JImage Error Codes.)
 *
 * Ex.
 *  jint error;
 *  JImageFile* jimage = (*JImageOpen)(JAVA_HOME "lib/modules/bootmodules.jimage", &error);
 *  if (jimage == NULL) {
 *    tty->print_cr("JImage failed to open: %d", error);
 *    ...
 *  }
 *  ...
 */

extern "C" JImageFile* JIMAGE_Open(const char *name, jint* error);

typedef JImageFile* (*JImageOpen_t)(const char *name, jint* error);

/*
 * JImageClose - Given the supplied open image file (see JImageOpen), release
 * memory and resources used by the open file and close the file. If the image
 * file is shared by other uses, release and close is deferred until the last use
 * is also closed.
 *
 * Ex.
 *  (*JImageClose)(image);
 */

extern "C" void JIMAGE_Close(JImageFile* jimage);

typedef void (*JImageClose_t)(JImageFile* jimage);


/*
 * JImagePackageToModule - Given an open image file (see JImageOpen) and the name
 * of a package, return the name of the module where the package resides. If the
 * package does not exist in the image file, the function returns NULL.
 * The resulting string does not have to be released (and should not be). All
 * strings are utf-8, zero byte terminated.
 *
 * Ex.
 *  const char* package = (*JImagePackageToModule)(image, "java/lang");
 *  tty->print_cr(package);
 *  -> java.base
 */

extern "C" const char * JIMAGE_PackageToModule(JImageFile* jimage, const char* package_name);

typedef const char* (*JImagePackageToModule_t)(JImageFile* jimage, const char* package_name);


/*
 * JImageFindResource - Given an open image file (see JImageOpen), a module
 * name, a version string and the name of a class/resource, return location
 * information describing the resource and its size. If no resource is found, the
 * function returns JIMAGE_NOT_FOUND and the value of size is undefined.
 * The version number should be "9.0" and is not used in locating the resource.
 * The resulting location does not have to be released (and should not be).
 * All strings are utf-8, zero byte terminated.
 *
 * Ex.
 *  jlong size;
 *  JImageLocationRef location = (*JImageFindResource)(image, "java.base", "9.0", "java/lang/String.class", &size);
 */
extern "C" JImageLocationRef JIMAGE_FindResource(JImageFile* jimage,
    const char* module_name, const char* version, const char* name,
    jlong* size);

typedef JImageLocationRef(*JImageFindResource_t)(JImageFile* jimage,
    const char* module_name, const char* version, const char* name,
    jlong* size);


/*
 * JImageGetResource - Given an open image file (see JImageOpen), a resource's
 * location information (see JImageFindResource), a buffer of appropriate
 * size and the buffer size, retrieve the bytes associated with the
 * resource. If the size is less than the resource size then the read is truncated.
 * If the size is greater than the resource size then the remainder of the buffer
 * is zero filled. The function will return the actual size of the resource.
 *
 * Ex.
 *  jlong size;
 *  JImageLocationRef location = (*JImageFindResource)(image, "java.base", "9.0", "java/lang/String.class", &size);
 *  char* buffer = new char[size];
 *  (*JImageGetResource)(image, location, buffer, size);
 */
extern "C" jlong JIMAGE_GetResource(JImageFile* jimage, JImageLocationRef location,
    char* buffer, jlong size);

typedef jlong(*JImageGetResource_t)(JImageFile* jimage, JImageLocationRef location,
    char* buffer, jlong size);


/*
 * JImageResourceIterator - Given an open image file (see JImageOpen), a visitor
 * function and a visitor argument, iterate through each of the image's resources.
 * The visitor function is called with the image file, the module name, the
 * package name, the base name, the extension and the visitor argument. The return
 * value of the visitor function should be true, unless an early iteration exit is
 * required. All strings are utf-8, zero byte terminated.
 *
 * Ex.
 *   bool ctw_visitor(JImageFile* jimage, const char* module_name, const char* version, const char* package, const char* name, const char* extension, void* arg) {
 *     if (strcmp(extension, "class") == 0) {
 *       char path[JIMAGE_MAX_PATH];
 *       Thread* THREAD = Thread::current();
 *       jio_snprintf(path, JIMAGE_MAX_PATH - 1, "/%s/%s", package, name);
 *       ClassLoader::compile_the_world_in(path, (Handle)arg, THREAD);
 *       return !HAS_PENDING_EXCEPTION;
 *     }
 *     return true;
 *   }
 *   (*JImageResourceIterator)(image, ctw_visitor, loader);
 */

typedef bool (*JImageResourceVisitor_t)(JImageFile* jimage,
    const char* module_name, const char* version, const char* package,
    const char* name, const char* extension, void* arg);

extern "C" void JIMAGE_ResourceIterator(JImageFile* jimage,
    JImageResourceVisitor_t visitor, void *arg);

typedef void (*JImageResourceIterator_t)(JImageFile* jimage,
    JImageResourceVisitor_t visitor, void* arg);
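
The paired typedefs exist so that a client can bind to these entry points at runtime rather than link against them directly. A minimal sketch of such binding, assuming POSIX dlopen/dlsym and a hypothetical library and image path:

  #include <dlfcn.h>
  #include <cstdio>
  #include "jimage.hpp"

  int main() {
    void* lib = dlopen("libjimage.so", RTLD_NOW);  // library name hypothetical
    if (lib == NULL) return 1;
    JImageOpen_t  open_fn  = (JImageOpen_t)dlsym(lib, "JIMAGE_Open");
    JImageClose_t close_fn = (JImageClose_t)dlsym(lib, "JIMAGE_Close");
    if (open_fn == NULL || close_fn == NULL) return 1;
    jint error;
    JImageFile* jimage = (*open_fn)("lib/modules/bootmodules.jimage", &error);
    if (jimage == NULL) {
      fprintf(stderr, "JImage failed to open: %d\n", error);
      return 1;
    }
    (*close_fn)(jimage);
    return 0;
  }
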
@ -66,7 +66,8 @@ class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
  void do_cld_nv(ClassLoaderData* cld);
};

class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {

@ -50,11 +50,11 @@ inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) {

inline void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) {
  ClassLoaderData* cld = k->class_loader_data();
  do_class_loader_data(cld);
  do_cld_nv(cld);
}
inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); }

inline void MetadataAwareOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) {
inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) {
  assert(_klass_closure._oop_closure == this, "Must be");

  bool claim = true;  // Must claim the class loader data before processing.

@ -702,7 +702,7 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,
      !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
        oop(bottom)) && \
      !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
    size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
    size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr); \
    bottom += _cfls->adjustObjectSize(word_sz); \
  } else { \
    bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
@ -729,7 +729,7 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,
      !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
        oop(bottom)) && \
      !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
    size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
    size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr); \
    bottom += _cfls->adjustObjectSize(word_sz); \
  } else { \
    bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
@ -2989,7 +2989,7 @@ initialize_sequential_subtasks_for_marking(int n_threads,
  assert(task_size > CardTableModRefBS::card_size_in_words &&
         (task_size % CardTableModRefBS::card_size_in_words == 0),
         "Otherwise arithmetic below would be incorrect");
  MemRegion span = _gen->reserved();
  MemRegion span = _old_gen->reserved();
  if (low != NULL) {
    if (span.contains(low)) {
      // Align low down to a card boundary so that

@ -99,7 +99,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _gen;
  ConcurrentMarkSweepGeneration* _old_gen;

  // Data structures for free blocks (used during allocation/sweeping)


@ -212,7 +212,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
                             use_adaptive_freelists,
                             dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  _cmsSpace->_gen = this;
  _cmsSpace->_old_gen = this;

  _gc_stats = new CMSGCStats();

@ -359,13 +359,13 @@ double CMSStats::time_until_cms_gen_full() const {
            (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection. Use the padded average as
    // for the next young collection. Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrence of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
@ -531,7 +531,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
if (CMSConcurrentMTEnabled) {
if (FLAG_IS_DEFAULT(ConcGCThreads)) {
// just for now
FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
}
if (ConcGCThreads > 1) {
_conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
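The default being reformatted above is a ceiling division: one concurrent thread per four parallel threads, rounded up. A small standalone check of that identity (illustration only, not HotSpot code):

#include <cassert>

// ceil(n / 4) via integer arithmetic, the same shape as (ParallelGCThreads + 3) / 4.
static unsigned conc_threads_default(unsigned parallel_threads) {
  return (parallel_threads + 3) / 4;
}

int main() {
  assert(conc_threads_default(1) == 1);
  assert(conc_threads_default(4) == 1);
  assert(conc_threads_default(5) == 2);
  assert(conc_threads_default(8) == 2);
  assert(conc_threads_default(9) == 3);
  return 0;
}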
@ -592,7 +592,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

// Clip CMSBootstrapOccupancy between 0 and 100.
_bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
_bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;

// Now tell CMS generations the identity of their collector
ConcurrentMarkSweepGeneration::set_collector(this);
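The simplified form relies on the usual arithmetic conversions: dividing an integral flag by the double literal 100.0 promotes the operand to double, so the two forms are equivalent. A one-line standalone check (illustration only; the flag name here is a stand-in):

#include <cassert>

int main() {
  unsigned occupancy = 50;                       // stand-in for CMSBootstrapOccupancy
  double a = ((double)occupancy) / (double)100;  // old spelling
  double b = occupancy / 100.0;                  // new spelling
  assert(a == b && b == 0.5);
  return 0;
}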
@ -613,7 +613,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
|
||||
_end_addr = gch->end_addr();
|
||||
assert(_young_gen != NULL, "no _young_gen");
|
||||
_eden_chunk_index = 0;
|
||||
_eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
|
||||
_eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
|
||||
_eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
|
||||
}
|
||||
|
||||
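The capacity expression above reserves one array slot per full sampling grain of eden capacity, plus one spare, so the result is never zero. A small standalone check of that shape (illustration only, not HotSpot code):

#include <cassert>
#include <cstddef>

// Same shape as (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain.
static size_t chunk_capacity(size_t max_capacity, size_t grain) {
  return (max_capacity + grain) / grain;
}

int main() {
  assert(chunk_capacity(0, 1024) == 1);    // never zero, even for an empty gen
  assert(chunk_capacity(1024, 1024) == 2); // exact multiple still gets a spare slot
  assert(chunk_capacity(1500, 1024) == 2);
  return 0;
}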
@ -795,29 +795,22 @@ void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
gclog_or_tty->print_cr("\nFrom compute_new_size: ");
gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
gclog_or_tty->print_cr(" Desired free fraction %f",
desired_free_percentage);
gclog_or_tty->print_cr(" Maximum free fraction %f",
maximum_free_percentage);
gclog_or_tty->print_cr(" Capacity " SIZE_FORMAT, capacity()/1000);
gclog_or_tty->print_cr(" Desired capacity " SIZE_FORMAT,
desired_capacity/1000);
gclog_or_tty->print_cr(" Desired free fraction %f", desired_free_percentage);
gclog_or_tty->print_cr(" Maximum free fraction %f", maximum_free_percentage);
gclog_or_tty->print_cr(" Capacity " SIZE_FORMAT, capacity() / 1000);
gclog_or_tty->print_cr(" Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
size_t young_size = gch->young_gen()->capacity();
gclog_or_tty->print_cr(" Young gen size " SIZE_FORMAT, young_size / 1000);
gclog_or_tty->print_cr(" unsafe_max_alloc_nogc " SIZE_FORMAT,
unsafe_max_alloc_nogc()/1000);
gclog_or_tty->print_cr(" contiguous available " SIZE_FORMAT,
contiguous_available()/1000);
gclog_or_tty->print_cr(" Expand by " SIZE_FORMAT " (bytes)",
expand_bytes);
gclog_or_tty->print_cr(" unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
gclog_or_tty->print_cr(" contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
gclog_or_tty->print_cr(" Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
}
// safe if expansion fails
expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr(" Expanded free fraction %f",
((double) free()) / capacity());
gclog_or_tty->print_cr(" Expanded free fraction %f", ((double) free()) / capacity());
}
} else {
size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
@ -834,16 +827,14 @@ Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
bool tlab) {
HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
CMSSynchronousYieldRequest yr;
MutexLockerEx x(freelistLock(),
Mutex::_no_safepoint_check_flag);
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
bool tlab /* ignored */) {
bool tlab /* ignored */) {
assert_lock_strong(freelistLock());
size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
HeapWord* res = cmsSpace()->allocate(adjustedSize);
@ -2426,7 +2417,7 @@ void CMSCollector::verify_after_remark_work_1() {

gch->gen_process_roots(&srs,
GenCollectedHeap::OldGen,
true, // younger gens are roots
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
@ -2498,7 +2489,7 @@ void CMSCollector::verify_after_remark_work_2() {

gch->gen_process_roots(&srs,
GenCollectedHeap::OldGen,
true, // younger gens are roots
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
@ -2952,12 +2943,7 @@ void CMSCollector::checkpointRootsInitialWork() {
assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
assert(_collectorState == InitialMarking, "just checking");

// If there has not been a GC[n-1] since last GC[n] cycle completed,
// precede our marking with a collection of all
// younger generations to keep floating garbage to a minimum.
// XXX: we won't do this for now -- it's an optimization to be done later.

// already have locks
// Already have locks.
assert_lock_strong(bitMapLock());
assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");

@ -3027,7 +3013,7 @@ void CMSCollector::checkpointRootsInitialWork() {

gch->gen_process_roots(&srs,
GenCollectedHeap::OldGen,
true, // younger gens are roots
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
@ -3037,7 +3023,7 @@ void CMSCollector::checkpointRootsInitialWork() {
}

// Clear mod-union table; it will be dirtied in the prologue of
// CMS generation per each younger generation collection.
// CMS generation per each young generation collection.

assert(_modUnionTable.isAllClear(),
"Was cleared in most recent final checkpoint phase"
@ -3057,7 +3043,7 @@ bool CMSCollector::markFromRoots() {
// assert(!SafepointSynchronize::is_at_safepoint(),
// "inconsistent argument?");
// However that wouldn't be right, because it's possible that
// a safepoint is indeed in progress as a younger generation
// a safepoint is indeed in progress as a young generation
// stop-the-world GC happens even as we mark in this generation.
assert(_collectorState == Marking, "inconsistent state?");
check_correct_thread_executing();
@ -3065,7 +3051,7 @@ bool CMSCollector::markFromRoots() {

// Weak ref discovery note: We may be discovering weak
// refs in this generation concurrent (but interleaved) with
// weak ref discovery by a younger generation collector.
// weak ref discovery by the young generation collector.

CMSTokenSyncWithLocks ts(true, bitMapLock());
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
@ -3095,7 +3081,7 @@ bool CMSCollector::markFromRootsWork() {

// Note that when we do a marking step we need to hold the
// bit map lock -- recall that direct allocation (by mutators)
// and promotion (by younger generation collectors) is also
// and promotion (by the young generation collector) is also
// marking the bit map. [the so-called allocate live policy.]
// Because the implementation of bit map marking is not
// robust wrt simultaneous marking of bits in the same word,
@ -4049,7 +4035,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
// one of these methods, please check the other method too.

size_t CMSCollector::preclean_mod_union_table(
ConcurrentMarkSweepGeneration* gen,
ConcurrentMarkSweepGeneration* old_gen,
ScanMarkedObjectsAgainCarefullyClosure* cl) {
verify_work_stacks_empty();
verify_overflow_empty();
@ -4064,10 +4050,10 @@ size_t CMSCollector::preclean_mod_union_table(
// generation, but we might potentially miss cards when the
// generation is rapidly expanding while we are in the midst
// of precleaning.
HeapWord* startAddr = gen->reserved().start();
HeapWord* endAddr = gen->reserved().end();
HeapWord* startAddr = old_gen->reserved().start();
HeapWord* endAddr = old_gen->reserved().end();

cl->setFreelistLock(gen->freelistLock()); // needed for yielding
cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding

size_t numDirtyCards, cumNumDirtyCards;
HeapWord *nextAddr, *lastAddr;
@ -4109,7 +4095,7 @@ size_t CMSCollector::preclean_mod_union_table(
HeapWord* stop_point = NULL;
stopTimer();
// Potential yield point
CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
bitMapLock());
startTimer();
{
@ -4117,7 +4103,7 @@ size_t CMSCollector::preclean_mod_union_table(
verify_overflow_empty();
sample_eden();
stop_point =
gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
}
if (stop_point != NULL) {
// The careful iteration stopped early either because it found an
@ -4152,15 +4138,15 @@ size_t CMSCollector::preclean_mod_union_table(
// below are largely identical; if you need to modify
// one of these methods, please check the other method too.

size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
ScanMarkedObjectsAgainCarefullyClosure* cl) {
// strategy: it's similar to precleamModUnionTable above, in that
// we accumulate contiguous ranges of dirty cards, mark these cards
// precleaned, then scan the region covered by these cards.
HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
HeapWord* endAddr = (HeapWord*)(old_gen->_virtual_space.high());
HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());

cl->setFreelistLock(gen->freelistLock()); // needed for yielding
cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding

size_t numDirtyCards, cumNumDirtyCards;
HeapWord *lastAddr, *nextAddr;
@ -4197,13 +4183,13 @@ size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,

if (!dirtyRegion.is_empty()) {
stopTimer();
CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
startTimer();
sample_eden();
verify_work_stacks_empty();
verify_overflow_empty();
HeapWord* stop_point =
gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
if (stop_point != NULL) {
assert((_collectorState == AbortablePreclean && should_abort_preclean()),
"Should only be AbortablePreclean.");
@ -4623,7 +4609,7 @@ void CMSParRemarkTask::work(uint worker_id) {
ResourceMark rm;
GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
for (int i = 0; i < array->length(); i++) {
par_mrias_cl.do_class_loader_data(array->at(i));
par_mrias_cl.do_cld_nv(array->at(i));
}

// We don't need to keep track of new CLDs anymore.
@ -5086,7 +5072,7 @@ void CMSCollector::do_remark_parallel() {
// preclean phase did of eden, plus the [two] tasks of
// scanning the [two] survivor spaces. Further fine-grain
// parallelization of the scanning of the survivor spaces
// themselves, and of precleaning of the younger gen itself
// themselves, and of precleaning of the young gen itself
// is deferred to the future.
initialize_sequential_subtasks_for_young_gen_rescan(n_workers);

@ -5177,7 +5163,7 @@ void CMSCollector::do_remark_non_parallel() {

gch->gen_process_roots(&srs,
GenCollectedHeap::OldGen,
true, // younger gens as roots
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&mrias_cl,
@ -5199,7 +5185,7 @@ void CMSCollector::do_remark_non_parallel() {
ResourceMark rm;
GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
for (int i = 0; i < array->length(); i++) {
mrias_cl.do_class_loader_data(array->at(i));
mrias_cl.do_cld_nv(array->at(i));
}

// We don't need to keep track of new CLDs anymore.
@ -5661,7 +5647,7 @@ void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generati
}
}

void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
// We iterate over the space(s) underlying this generation,
// checking the mark bit map to see if the bits corresponding
// to specific blocks are marked or not. Blocks that are
@ -5690,26 +5676,26 @@ void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
// check that we hold the requisite locks
assert(have_cms_token(), "Should hold cms token");
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
assert_lock_strong(gen->freelistLock());
assert_lock_strong(old_gen->freelistLock());
assert_lock_strong(bitMapLock());

assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
_inter_sweep_estimate.padded_average(),
_intra_sweep_estimate.padded_average());
gen->setNearLargestChunk();
old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
_inter_sweep_estimate.padded_average(),
_intra_sweep_estimate.padded_average());
old_gen->setNearLargestChunk();

{
SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
// We need to free-up/coalesce garbage/blocks from a
// co-terminal free run. This is done in the SweepClosure
// destructor; so, do not remove this scope, else the
// end-of-sweep-census below will be off by a little bit.
}
gen->cmsSpace()->sweep_completed();
gen->cmsSpace()->endSweepFLCensus(sweep_count());
old_gen->cmsSpace()->sweep_completed();
old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
if (should_unload_classes()) { // unloaded classes this cycle,
_concurrent_cycles_since_last_unload = 0; // ... reset count
} else { // did not unload classes,
@ -6324,12 +6310,12 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
// objArrays are precisely marked; restrict scanning
// to dirty cards only.
size = CompactibleFreeListSpace::adjustObjectSize(
p->oop_iterate(_scanningClosure, mr));
p->oop_iterate_size(_scanningClosure, mr));
} else {
// A non-array may have been imprecisely marked; we need
// to scan object in its entirety.
size = CompactibleFreeListSpace::adjustObjectSize(
p->oop_iterate(_scanningClosure));
p->oop_iterate_size(_scanningClosure));
}
#ifdef ASSERT
size_t direct_size =
@ -6417,7 +6403,7 @@ size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
// Note that we do not yield while we iterate over
// the interior oops of p, pushing the relevant ones
// on our marking stack.
size_t size = p->oop_iterate(_scanning_closure);
size_t size = p->oop_iterate_size(_scanning_closure);
do_yield_check();
// Observe that below, we do not abandon the preclean
// phase as soon as we should; rather we empty the
@ -723,7 +723,7 @@ class CMSCollector: public CHeapObj<mtGC> {

private:
// Support for parallelizing young gen rescan in CMS remark phase
ParNewGeneration* _young_gen; // the younger gen
ParNewGeneration* _young_gen;

HeapWord** _top_addr; // ... Top of Eden
HeapWord** _end_addr; // ... End of Eden
@ -772,9 +772,9 @@ class CMSCollector: public CHeapObj<mtGC> {
private:

// Concurrent precleaning work
size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* old_gen,
ScanMarkedObjectsAgainCarefullyClosure* cl);
size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
size_t preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
ScanMarkedObjectsAgainCarefullyClosure* cl);
// Does precleaning work, returning a quantity indicative of
// the amount of "useful work" done.
@ -797,7 +797,7 @@ class CMSCollector: public CHeapObj<mtGC> {
void refProcessingWork();

// Concurrent sweeping work
void sweepWork(ConcurrentMarkSweepGeneration* gen);
void sweepWork(ConcurrentMarkSweepGeneration* old_gen);

// (Concurrent) resetting of support data structures
void reset(bool concurrent);
@ -1120,10 +1120,8 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
MemRegion used_region_at_save_marks() const;

// Does a "full" (forced) collection invoked on this generation collect
// all younger generations as well? Note that the second conjunct is a
// hack to allow the collection of the younger gen first if the flag is
// set.
virtual bool full_collects_younger_generations() const {
// the young generation as well?
virtual bool full_collects_young_generation() const {
return !ScavengeBeforeFullGC;
}

@ -1153,9 +1151,8 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {

virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;

// Inform this (non-young) generation that a promotion failure was
// encountered during a collection of a younger generation that
// promotes into this generation.
// Inform this (old) generation that a promotion failure was
// encountered during a collection of the young generation.
virtual void promotion_failure_occurred();

bool should_collect(bool full, size_t size, bool tlab);
@ -295,7 +295,7 @@ inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
}

// If the younger gen collections were skipped, then the
// If the young gen collection was skipped, then the
// number of promoted bytes will be 0 and adding it to the
// average will incorrectly lessen the average. It is, however,
// also possible that no promotion was needed.
@ -39,23 +39,17 @@

// ======= Concurrent Mark Sweep Thread ========

// The CMS thread is created when Concurrent Mark Sweep is used in the
// older of two generations in a generational memory system.
ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::_cmst = NULL;
CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
bool ConcurrentMarkSweepThread::_should_terminate = false;
int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;

ConcurrentMarkSweepThread*
ConcurrentMarkSweepThread::_cmst = NULL;
CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
bool ConcurrentMarkSweepThread::_should_terminate = false;
int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;

volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;

SurrogateLockerThread*
ConcurrentMarkSweepThread::_slt = NULL;
SurrogateLockerThread* ConcurrentMarkSweepThread::_slt = NULL;
SurrogateLockerThread::SLT_msg_type
ConcurrentMarkSweepThread::_sltBuffer = SurrogateLockerThread::empty;
Monitor*
ConcurrentMarkSweepThread::_sltMonitor = NULL;
Monitor* ConcurrentMarkSweepThread::_sltMonitor = NULL;

ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
: ConcurrentGCThread() {
@ -69,20 +69,28 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
Stack<oop, mtGC>* overflow_stacks_,
size_t desired_plab_sz_,
ParallelTaskTerminator& term_) :
_to_space(to_space_), _old_gen(old_gen_), _young_gen(young_gen_), _thread_num(thread_num_),
_work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
_to_space(to_space_),
_old_gen(old_gen_),
_young_gen(young_gen_),
_thread_num(thread_num_),
_work_queue(work_queue_set_->queue(thread_num_)),
_to_space_full(false),
_overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
_ageTable(false), // false ==> not the global age table, no perf data.
_to_space_alloc_buffer(desired_plab_sz_),
_to_space_closure(young_gen_, this), _old_gen_closure(young_gen_, this),
_to_space_root_closure(young_gen_, this), _old_gen_root_closure(young_gen_, this),
_to_space_closure(young_gen_, this),
_old_gen_closure(young_gen_, this),
_to_space_root_closure(young_gen_, this),
_old_gen_root_closure(young_gen_, this),
_older_gen_closure(young_gen_, this),
_evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
&_to_space_root_closure, young_gen_, &_old_gen_root_closure,
work_queue_set_, &term_),
_is_alive_closure(young_gen_), _scan_weak_ref_closure(young_gen_, this),
_is_alive_closure(young_gen_),
_scan_weak_ref_closure(young_gen_, this),
_keep_alive_closure(&_scan_weak_ref_closure),
_strong_roots_time(0.0), _term_time(0.0)
_strong_roots_time(0.0),
_term_time(0.0)
{
#if TASKQUEUE_STATS
_term_attempts = 0;
@ -90,8 +98,7 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
_overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

_survivor_chunk_array =
(ChunkArray*) old_gen()->get_data_recorder(thread_num());
_survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
_hash_seed = 17; // Might want to take time-based random value.
_start = os::elapsedTime();
_old_gen_closure.set_generation(old_gen_);
@ -154,7 +161,6 @@ void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
}
}


void ParScanThreadState::trim_queues(int max_size) {
ObjToScanQueue* queue = work_queue();
do {
@ -222,15 +228,12 @@ void ParScanThreadState::push_on_overflow_stack(oop p) {
}

HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

// Otherwise, if the object is small enough, try to reallocate the
// buffer.
// If the object is small enough, try to reallocate the buffer.
HeapWord* obj = NULL;
if (!_to_space_full) {
PLAB* const plab = to_space_alloc_buffer();
Space* const sp = to_space();
if (word_sz * 100 <
ParallelGCBufferWastePct * plab->word_sz()) {
Space* const sp = to_space();
if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
// Is small enough; abandon this buffer and start a new one.
plab->retire();
size_t buf_size = plab->word_sz();
@ -241,8 +244,7 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
size_t free_bytes = sp->free();
while(buf_space == NULL && free_bytes >= min_bytes) {
buf_size = free_bytes >> LogHeapWordSize;
assert(buf_size == (size_t)align_object_size(buf_size),
"Invariant");
assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
buf_space = sp->par_allocate(buf_size);
free_bytes = sp->free();
}
@ -262,7 +264,6 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
// We're used up.
_to_space_full = true;
}

} else {
// Too large; allocate the object individually.
obj = sp->par_allocate(word_sz);
@ -271,7 +272,6 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
return obj;
}


void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
to_space_alloc_buffer()->undo_allocation(obj, word_sz);
}
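The retire-and-refill test joined onto one line above compares the object against a percentage of the buffer size with integer cross-multiplication, avoiding floating point on the allocation path. A small standalone sketch of that shape (flag and buffer values here are stand-ins):

#include <cassert>
#include <cstddef>

// Same shape as word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz():
// the object is "small enough" if it is below waste_pct percent of the buffer.
static bool small_enough_to_refill(size_t word_sz, size_t waste_pct, size_t plab_words) {
  return word_sz * 100 < waste_pct * plab_words;
}

int main() {
  // With a 10% waste limit and a 1000-word buffer, objects under 100 words
  // justify retiring the current buffer and starting a new one.
  assert(small_enough_to_refill(99, 10, 1000));
  assert(!small_enough_to_refill(100, 10, 1000));
  return 0;
}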
@ -288,7 +288,7 @@ public:
// Initializes states for the specified number of threads;
ParScanThreadStateSet(int num_threads,
Space& to_space,
ParNewGeneration& gen,
ParNewGeneration& young_gen,
Generation& old_gen,
ObjToScanQueueSet& queue_set,
Stack<oop, mtGC>* overflow_stacks_,
@ -315,21 +315,25 @@ public:

private:
ParallelTaskTerminator& _term;
ParNewGeneration& _gen;
ParNewGeneration& _young_gen;
Generation& _old_gen;
public:
bool is_valid(int id) const { return id < length(); }
ParallelTaskTerminator* terminator() { return &_term; }
};


ParScanThreadStateSet::ParScanThreadStateSet(
int num_threads, Space& to_space, ParNewGeneration& gen,
Generation& old_gen, ObjToScanQueueSet& queue_set,
Stack<oop, mtGC>* overflow_stacks,
size_t desired_plab_sz, ParallelTaskTerminator& term)
ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
Space& to_space,
ParNewGeneration& young_gen,
Generation& old_gen,
ObjToScanQueueSet& queue_set,
Stack<oop, mtGC>* overflow_stacks,
size_t desired_plab_sz,
ParallelTaskTerminator& term)
: ResourceArray(sizeof(ParScanThreadState), num_threads),
_gen(gen), _old_gen(old_gen), _term(term)
_young_gen(young_gen),
_old_gen(old_gen),
_term(term)
{
assert(num_threads > 0, "sanity check!");
assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
@ -337,13 +341,12 @@ ParScanThreadStateSet::ParScanThreadStateSet(
// Initialize states.
for (int i = 0; i < num_threads; ++i) {
new ((ParScanThreadState*)_data + i)
ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
overflow_stacks, desired_plab_sz, term);
}
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
assert(i >= 0 && i < length(), "sanity check!");
return ((ParScanThreadState*)_data)[i];
}
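The initialization loop above constructs each per-thread state in place inside one pre-allocated raw buffer with placement new. A minimal standalone sketch of the same pattern (hypothetical types, not HotSpot code):

#include <new>

// Per-thread state objects constructed in place in a single raw buffer.
struct ThreadState {
  int id;
  explicit ThreadState(int i) : id(i) {}
};

int main() {
  const int num_threads = 4;
  alignas(ThreadState) unsigned char data[sizeof(ThreadState) * num_threads];
  ThreadState* states = reinterpret_cast<ThreadState*>(data);
  for (int i = 0; i < num_threads; ++i) {
    new (states + i) ThreadState(i);   // construct in place, no heap allocation
  }
  bool ok = states[3].id == 3;
  for (int i = 0; i < num_threads; ++i) {
    states[i].~ThreadState();          // placement new requires explicit destruction
  }
  return ok ? 0 : 1;
}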
@ -357,8 +360,7 @@ void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_trace
}
}

void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed)
{
void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
_term.reset_for_reuse(active_threads);
if (promotion_failed) {
for (int i = 0; i < length(); ++i) {
@ -368,36 +370,27 @@ void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed)
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
void ParScanThreadState::reset_stats() {
taskqueue_stats().reset();
_term_attempts = 0;
_overflow_refills = 0;
_overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats()
{
void ParScanThreadStateSet::reset_stats() {
for (int i = 0; i < length(); ++i) {
thread_state(i).reset_stats();
}
}

void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
st->print_raw_cr("GC Termination Stats");
st->print_raw_cr(" elapsed --strong roots-- "
"-------termination-------");
st->print_raw_cr("thr ms ms % "
" ms % attempts");
st->print_raw_cr("--- --------- --------- ------ "
"--------- ------ --------");
st->print_raw_cr(" elapsed --strong roots-- -------termination-------");
st->print_raw_cr("thr ms ms % ms % attempts");
st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
void ParScanThreadStateSet::print_termination_stats(outputStream* const st) {
print_termination_stats_hdr(st);

for (int i = 0; i < length(); ++i) {
@ -405,23 +398,20 @@ void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
const double elapsed_ms = pss.elapsed_time() * 1000.0;
const double s_roots_ms = pss.strong_roots_time() * 1000.0;
const double term_ms = pss.term_time() * 1000.0;
st->print_cr("%3d %9.2f %9.2f %6.2f "
"%9.2f %6.2f " SIZE_FORMAT_W(8),
st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
}
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
st->print_raw_cr("GC Task Stats");
st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) {
print_taskqueue_stats_hdr(st);

TaskQueueStats totals;
@ -443,8 +433,7 @@ void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush()
{
void ParScanThreadStateSet::flush() {
// Work in this loop should be kept as lightweight as
// possible since this might otherwise become a bottleneck
// to scaling. Should we add heavy-weight work into this
@ -454,12 +443,12 @@ void ParScanThreadStateSet::flush()

// Flush stats related to To-space PLAB activity and
// retire the last buffer.
par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_gen.plab_stats());
par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());

// Every thread has its own age table. We need to merge
// them all into one.
ageTable *local_table = par_scan_state.age_table();
_gen.age_table()->merge(local_table);
_young_gen.age_table()->merge(local_table);

// Inform old gen that we're done.
_old_gen.par_promote_alloc_done(i);
@ -478,8 +467,7 @@ void ParScanThreadStateSet::flush()

ParScanClosure::ParScanClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) :
OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
_boundary = _g->reserved().end();
}

@ -531,24 +519,23 @@ void ParEvacuateFollowersClosure::do_void() {
ObjToScanQueue* work_q = par_scan_state()->work_queue();

while (true) {

// Scan to-space and old-gen objs until we run out of both.
oop obj_to_scan;
par_scan_state()->trim_queues(0);

// We have no local work, attempt to steal from other threads.

// attempt to steal work from promoted.
// Attempt to steal work from promoted.
if (task_queues()->steal(par_scan_state()->thread_num(),
par_scan_state()->hash_seed(),
obj_to_scan)) {
bool res = work_q->push(obj_to_scan);
assert(res, "Empty queue should have room for a push.");

// if successful, goto Start.
// If successful, goto Start.
continue;

// try global overflow list.
// Try global overflow list.
} else if (par_gen()->take_from_overflow_list(par_scan_state())) {
continue;
}
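The loop whose comments were tidied above follows a drain/steal/overflow shape: drain the local queue, then try to steal from other workers, then consult a shared overflow list, and only then offer termination. A minimal single-threaded model of that control flow (hypothetical containers; the real code uses lock-free queues across many threads):

#include <deque>
#include <vector>

struct Worker {
  std::deque<int> local;
};

static bool steal_from_others(std::vector<Worker>& workers, size_t self, int& out) {
  for (size_t i = 0; i < workers.size(); i++) {
    if (i != self && !workers[i].local.empty()) {
      out = workers[i].local.back();   // steal from the far end
      workers[i].local.pop_back();
      return true;
    }
  }
  return false;
}

static void evacuate_followers(std::vector<Worker>& workers, size_t self,
                               std::deque<int>& overflow_list) {
  Worker& me = workers[self];
  while (true) {
    // Drain local work first (trim_queues in the real code).
    while (!me.local.empty()) {
      me.local.pop_front();            // "scan" one task
    }
    int task;
    if (steal_from_others(workers, self, task)) {
      me.local.push_back(task);        // if successful, goto Start
      continue;
    }
    if (!overflow_list.empty()) {      // try the global overflow list
      me.local.push_back(overflow_list.front());
      overflow_list.pop_front();
      continue;
    }
    break;                             // the real code offers termination here
  }
}

int main() {
  std::vector<Worker> ws(2);
  ws[1].local = {1, 2, 3};
  std::deque<int> overflow = {4};
  evacuate_followers(ws, 0, overflow);
  return (ws[0].local.empty() && ws[1].local.empty() && overflow.empty()) ? 0 : 1;
}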
@ -564,15 +551,17 @@ void ParEvacuateFollowersClosure::do_void() {
par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, Generation* old_gen,
HeapWord* young_old_boundary, ParScanThreadStateSet* state_set,
ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
Generation* old_gen,
HeapWord* young_old_boundary,
ParScanThreadStateSet* state_set,
StrongRootsScope* strong_roots_scope) :
AbstractGangTask("ParNewGeneration collection"),
_young_gen(young_gen), _old_gen(old_gen),
_young_old_boundary(young_old_boundary),
_state_set(state_set),
_strong_roots_scope(strong_roots_scope)
{}
{}

void ParNewGenTask::work(uint worker_id) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
@ -595,8 +584,7 @@ void ParNewGenTask::work(uint worker_id) {
par_scan_state.start_strong_roots();
gch->gen_process_roots(_strong_roots_scope,
GenCollectedHeap::YoungGen,
true, // Process younger gens, if any,
// as strong roots.
true, // Process younger gens, if any, as strong roots.
GenCollectedHeap::SO_ScavengeCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&par_scan_state.to_space_root_closure(),
@ -613,8 +601,7 @@ void ParNewGenTask::work(uint worker_id) {
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
: DefNewGeneration(rs, initial_byte_size, "PCopy"),
_overflow_list(NULL),
_is_alive_closure(this),
@ -625,20 +612,19 @@ ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
_task_queues = new ObjToScanQueueSet(ParallelGCThreads);
guarantee(_task_queues != NULL, "task_queues allocation failure.");

for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
for (uint i = 0; i < ParallelGCThreads; i++) {
ObjToScanQueue *q = new ObjToScanQueue();
guarantee(q != NULL, "work_queue Allocation failure.");
_task_queues->register_queue(i1, q);
_task_queues->register_queue(i, q);
}

for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
_task_queues->queue(i2)->initialize();
for (uint i = 0; i < ParallelGCThreads; i++) {
_task_queues->queue(i)->initialize();
}

_overflow_stacks = NULL;
if (ParGCUseLocalOverflow) {

// typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
// with ','
// typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal with ','
typedef Stack<oop, mtGC> GCOopStack;

_overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
@ -742,7 +728,7 @@ class ParNewRefProcTaskProxy: public AbstractGangTask {
typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
ParNewRefProcTaskProxy(ProcessTask& task,
ParNewGeneration& gen,
ParNewGeneration& young_gen,
Generation& old_gen,
HeapWord* young_old_boundary,
ParScanThreadStateSet& state_set);
@ -768,11 +754,9 @@ ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
_old_gen(old_gen),
_young_old_boundary(young_old_boundary),
_state_set(state_set)
{
}
{ }

void ParNewRefProcTaskProxy::work(uint worker_id)
{
void ParNewRefProcTaskProxy::work(uint worker_id) {
ResourceMark rm;
HandleMark hm;
ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
@ -792,15 +776,12 @@ public:
_task(task)
{ }

virtual void work(uint worker_id)
{
virtual void work(uint worker_id) {
_task.work(worker_id);
}
};


void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
WorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
@ -812,8 +793,7 @@ void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
_young_gen.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
WorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
@ -821,8 +801,7 @@ void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
_state_set.flush();
GenCollectedHeap* gch = GenCollectedHeap::heap();
gch->save_marks();
@ -830,7 +809,8 @@ void ParNewRefProcTaskExecutor::set_single_threaded_mode()

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
ScanClosure(g, gc_barrier) {}
ScanClosure(g, gc_barrier)
{ }

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
@ -838,7 +818,7 @@ EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
OopsInGenClosure* older) :
_gch(gch),
_scan_cur_or_nonheap(cur), _scan_older(older)
{}
{ }

void EvacuateFollowersClosureGeneral::do_void() {
do {
@ -850,7 +830,6 @@ void EvacuateFollowersClosureGeneral::do_void() {
} while (!_gch->no_allocs_since_save_marks());
}


// A Generation that does parallel young-gen collection.

void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
@ -996,9 +975,9 @@ void ParNewGeneration::collect(bool full,
if (ZapUnusedHeapArea) {
// This is now done here because of the piece-meal mangling which
// can check for valid mangling at intermediate points in the
// collection(s). When a minor collection fails to collect
// collection(s). When a young collection fails to collect
// sufficient space resizing of the young generation can occur
// an redistribute the spaces in the young generation. Mangle
// and redistribute the spaces in the young generation. Mangle
// here so that unzapped regions don't get distributed to
// other spaces.
to()->mangle_unused_area();
@ -1113,8 +1092,10 @@ void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_avoiding_with_undo.

oop ParNewGeneration::copy_to_survivor_space(
ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
oop old,
size_t sz,
markOop m) {
// In the sequential version, this assert also says that the object is
// not forwarded. That might not be the case here. It is the case that
// the caller observed it to be not forwarded at some time in the past.
@ -1141,8 +1122,7 @@ oop ParNewGeneration::copy_to_survivor_space(
}

if (new_obj == NULL) {
// Either to-space is full or we decided to promote
// try allocating obj tenured
// Either to-space is full or we decided to promote try allocating obj tenured

// Attempt to install a null forwarding pointer (atomically),
// to claim the right to install the real forwarding pointer.
@ -71,11 +71,7 @@ class ParScanThreadState {
ParScanWithoutBarrierClosure _to_space_closure; // scan_without_gc_barrier
ParScanWithBarrierClosure _old_gen_closure; // scan_with_gc_barrier
ParRootScanWithoutBarrierClosure _to_space_root_closure; // scan_root_without_gc_barrier
// One of these two will be passed to process_roots, which will
// set its generation. The first is for two-gen configs where the
// old gen collects the perm gen; the second is for arbitrary configs.
// The second isn't used right now (it used to be used for the train, an
// incremental collector) but the declaration has been left as a reminder.
// Will be passed to process_roots to set its generation.
ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
// This closure will always be bound to the old gen; it will be used
// in evacuate_followers.
@ -85,7 +81,6 @@ class ParScanThreadState {
ParScanWeakRefClosure _scan_weak_ref_closure;
ParKeepAliveClosure _keep_alive_closure;


Space* _to_space;
Space* to_space() { return _to_space; }
@ -1143,7 +1143,7 @@ void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
while (curr < end) {
Prefetch::read(curr, interval);
oop obj = oop(curr);
int size = obj->oop_iterate(&cl);
int size = obj->oop_iterate_size(&cl);
assert(size == obj->size(), "sanity");
curr += size;
}
@ -367,7 +367,7 @@ bool G1ArchiveAllocator::alloc_new_region() {
_max = _bottom + HeapRegion::min_region_size_in_words();

// Tell mark-sweep that objects in this region are not to be marked.
G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));
G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);

// Since we've modified the old set, call update_sizes.
_g1h->g1mm()->update_sizes();
@ -27,6 +27,7 @@

#include "gc/g1/g1BlockOffsetTable.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "gc/shared/space.hpp"

inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
@ -68,15 +69,7 @@ void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_cha
check_index(right, "right index out of range");
assert(left <= right, "indexes out of order");
size_t num_cards = right - left + 1;
if (UseMemSetInBOT) {
memset(&_offset_array[left], offset, num_cards);
} else {
size_t i = left;
const size_t end = i + num_cards;
for (; i < end; i++) {
_offset_array[i] = offset;
}
}
memset_with_concurrent_readers(&_offset_array[left], offset, num_cards);
}

// Variant of index_for that does not check the index for validity.
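The hunk above folds the UseMemSetInBOT branch into a single shared helper, so callers no longer choose between memset and a manual loop themselves. A rough sketch of the shape of such a helper (illustration of the refactoring only; the real helper's guarantees for concurrent readers are platform-specific, and the configuration switch here is hypothetical):

#include <cstddef>
#include <cstring>

static void fill_bytes(void* to, unsigned char value, size_t size) {
#ifdef USE_PLAIN_MEMSET          // hypothetical configuration switch
  memset(to, value, size);
#else
  unsigned char* p = static_cast<unsigned char*>(to);
  for (size_t i = 0; i < size; i++) {
    p[i] = value;                // byte-at-a-time store
  }
#endif
}

int main() {
  unsigned char cards[16];
  fill_bytes(cards, 0xFF, sizeof(cards));
  return (cards[0] == 0xFF && cards[15] == 0xFF) ? 0 : 1;
}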
63
hotspot/src/share/vm/gc/g1/g1CodeBlobClosure.cpp
Normal file
@ -0,0 +1,63 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "oops/oop.inline.hpp"

template <typename T>
void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
_work->do_oop(p);
T oop_or_narrowoop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(oop_or_narrowoop)) {
oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
HeapRegion* hr = _g1h->heap_region_containing_raw(o);
assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in collection set then evacuation failed and nm must already be in the remset");
hr->add_strong_code_root(_nm);
}
}

void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop(oop* o) {
do_oop_work(o);
}

void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop(narrowOop* o) {
do_oop_work(o);
}

void G1CodeBlobClosure::do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
if (!nm->test_set_oops_do_mark()) {
_oc.set_nm(nm);
nm->oops_do(&_oc);
nm->fix_oop_relocations();
}
}
}
55
hotspot/src/share/vm/gc/g1/g1CodeBlobClosure.hpp
Normal file
@ -0,0 +1,55 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "gc/g1/g1CollectedHeap.hpp"
#include "memory/iterator.hpp"

class nmethod;

class G1CodeBlobClosure : public CodeBlobClosure {
class HeapRegionGatheringOopClosure : public OopClosure {
G1CollectedHeap* _g1h;
OopClosure* _work;
nmethod* _nm;

template <typename T>
void do_oop_work(T* p);

public:
HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}

void do_oop(oop* o);
void do_oop(narrowOop* o);

void set_nm(nmethod* nm) {
_nm = nm;
}
};

HeapRegionGatheringOopClosure _oc;
public:
G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}

void do_code_blob(CodeBlob* cb);
};
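do_code_blob in the new closure above claims each nmethod at most once per cycle via test_set_oops_do_mark() before walking its oops. A minimal standalone model of that claim-then-visit pattern (hypothetical types, single-threaded; the real mark is set atomically):

struct FakeMethod {
  bool oops_do_mark = false;
  int visits = 0;

  bool test_set_oops_do_mark() {   // returns the previous value of the mark
    bool was_set = oops_do_mark;
    oops_do_mark = true;
    return was_set;
  }
};

static void visit(FakeMethod& m) {
  if (!m.test_set_oops_do_mark()) {
    m.visits++;                    // walk the method's oops exactly once
  }
}

int main() {
  FakeMethod m;
  visit(m);
  visit(m);                        // second visit is a no-op
  return m.visits == 1 ? 0 : 1;
}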
@ -65,6 +65,7 @@
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
@ -949,6 +950,7 @@ bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
}

bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MutexLockerEx x(Heap_lock);
@ -1037,12 +1039,13 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
}

// Notify mark-sweep of the archive range.
G1MarkSweep::mark_range_archive(curr_range);
G1MarkSweep::set_range_archive(curr_range, true);
}
return true;
}

void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
@ -1125,6 +1128,81 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
return result;
}

void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
HeapWord* prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL;
size_t size_used = 0;
size_t uncommitted_regions = 0;

// For each Memregion, free the G1 regions that constitute it, and
// notify mark-sweep that the range is no longer to be considered 'archive.'
MutexLockerEx x(Heap_lock);
for (size_t i = 0; i < count; i++) {
HeapWord* start_address = ranges[i].start();
HeapWord* last_address = ranges[i].last();

assert(reserved.contains(start_address) && reserved.contains(last_address),
err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address)));
assert(start_address > prev_last_addr,
err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr)));
size_used += ranges[i].byte_size();
prev_last_addr = last_address;

HeapRegion* start_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);

// Check for ranges that start in the same G1 region in which the previous
// range ended, and adjust the start address so we don't try to free
// the same region again. If the current range is entirely within that
// region, skip it.
if (start_region == prev_last_region) {
start_address = start_region->end();
if (start_address > last_address) {
continue;
}
start_region = _hrm.addr_to_region(start_address);
}
prev_last_region = last_region;

// After verifying that each region was marked as an archive region by
// alloc_archive_regions, set it free and empty and uncommit it.
HeapRegion* curr_region = start_region;
while (curr_region != NULL) {
guarantee(curr_region->is_archive(),
err_msg("Expected archive region at index %u", curr_region->hrm_index()));
uint curr_index = curr_region->hrm_index();
_old_set.remove(curr_region);
curr_region->set_free();
curr_region->set_top(curr_region->bottom());
if (curr_region != last_region) {
curr_region = _hrm.next_region_in_heap(curr_region);
} else {
curr_region = NULL;
}
_hrm.shrink_at(curr_index, 1);
uncommitted_regions++;
}

// Notify mark-sweep that this is no longer an archive range.
G1MarkSweep::set_range_archive(ranges[i], false);
}

if (uncommitted_regions != 0) {
ergo_verbose1(ErgoHeapSizing,
"attempt heap shrinking",
ergo_format_reason("uncommitted archive regions")
ergo_format_byte("total size"),
HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
}
decrease_used(size_used);
}

HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
uint* gc_count_before_ret,
uint* gclocker_retry_count_ret) {
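The boundary handling in the new dealloc_archive_regions above is the subtle part: ranges arrive sorted by address, and a range that starts in the region where the previous range ended must not free that region a second time. A simplified standalone model of that bookkeeping, with regions reduced to fixed-size slots (illustration only, not HotSpot code):

#include <cassert>
#include <cstddef>
#include <set>

static const size_t region_words = 1024;

static size_t region_of(size_t addr) { return addr / region_words; }

static void free_ranges(const size_t (*ranges)[2], size_t count,
                        std::set<size_t>& freed) {
  long prev_last_region = -1;
  for (size_t i = 0; i < count; i++) {
    size_t first = region_of(ranges[i][0]);
    size_t last = region_of(ranges[i][1]);
    if ((long)first == prev_last_region) {
      first++;                         // already freed by the previous range
      if (first > last) {
        prev_last_region = (long)last;
        continue;                      // entirely inside that region: skip
      }
    }
    for (size_t r = first; r <= last; r++) {
      bool inserted = freed.insert(r).second;
      assert(inserted && "each region freed exactly once");
      (void)inserted;
    }
    prev_last_region = (long)last;
  }
}

int main() {
  // Two ranges sharing region 1: the second must skip it.
  size_t ranges[2][2] = {{0, 1500}, {1600, 3000}};
  std::set<size_t> freed;
  free_ranges(ranges, 2, freed);
  return freed.size() == 3 ? 0 : 1;    // regions 0, 1, 2
}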
@ -2845,9 +2923,9 @@ size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
}

// For G1 TLABs should not contain humongous objects, so the maximum TLAB size
// must be smaller than the humongous object limit.
// must be equal to the humongous object limit.
size_t G1CollectedHeap::max_tlab_size() const {
return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
return align_size_down(_humongous_object_threshold_in_words, MinObjAlignment);
}

size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
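The new bound above is the humongous threshold aligned down to the minimum object alignment (rather than threshold minus one). A small standalone sketch of align-down in that shape, assuming the alignment is a power of two:

#include <cassert>
#include <cstddef>

// Same shape as align_size_down(size, MinObjAlignment) for power-of-two alignments.
static size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}

int main() {
  assert(align_down(1000, 8) == 1000);  // already aligned
  assert(align_down(1001, 8) == 1000);
  assert(align_down(7, 8) == 0);
  return 0;
}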
@ -4051,7 +4129,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE

g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
g1_policy()->finalize_cset(target_pause_time_ms);

evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());

register_humongous_regions_with_cset();

@ -4175,7 +4255,10 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// investigate this in CR 7178365.
double sample_end_time_sec = os::elapsedTime();
double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
g1_policy()->record_collection_pause_end(pause_time_ms);

evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before());
evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());

MemoryService::track_memory_usage();

@ -4501,8 +4584,7 @@ public:
|
||||
bool only_young, bool claim)
|
||||
: _oop_closure(oop_closure),
|
||||
_oop_in_klass_closure(oop_closure->g1(),
|
||||
oop_closure->pss(),
|
||||
oop_closure->rp()),
|
||||
oop_closure->pss()),
|
||||
_klass_in_cld_closure(&_oop_in_klass_closure, only_young),
|
||||
_claim(claim) {
|
||||
|
||||
@ -4531,18 +4613,18 @@ public:
|
||||
bool only_young = _g1h->collector_state()->gcs_are_young();
|
||||
|
||||
// Non-IM young GC.
|
||||
G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, pss, rp);
|
||||
G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, pss);
|
||||
G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
|
||||
only_young, // Only process dirty klasses.
|
||||
false); // No need to claim CLDs.
|
||||
// IM young GC.
|
||||
// Strong roots closures.
|
||||
G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, pss, rp);
|
||||
G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, pss);
|
||||
G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
|
||||
false, // Process all klasses.
|
||||
true); // Need to claim CLDs.
|
||||
// Weak roots closures.
|
||||
G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss, rp);
|
||||
G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss);
|
||||
G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
|
||||
false, // Process all klasses.
|
||||
true); // Need to claim CLDs.
|
||||
@ -4582,9 +4664,9 @@ public:
|
||||
worker_id);
|
||||
|
||||
G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss);
|
||||
_root_processor->scan_remembered_sets(&push_heap_rs_cl,
|
||||
weak_root_cl,
|
||||
worker_id);
|
||||
_g1h->g1_rem_set()->oops_into_collection_set_do(&push_heap_rs_cl,
|
||||
weak_root_cl,
|
||||
worker_id);
|
||||
double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
|
||||
|
||||
double term_sec = 0.0;
|
||||
@ -5241,9 +5323,9 @@ public:
|
||||
G1ParScanThreadState* pss = _pss[worker_id];
|
||||
pss->set_ref_processor(NULL);
|
||||
|
||||
G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss, NULL);
|
||||
G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss);
|
||||
|
||||
G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
|
||||
G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
|
||||
|
||||
OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
|
||||
|
||||
@ -5341,9 +5423,9 @@ public:
|
||||
pss->set_ref_processor(NULL);
|
||||
assert(pss->queue_is_empty(), "both queue and overflow should be empty");
|
||||
|
||||
G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss, NULL);
|
||||
G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss);
|
||||
|
||||
G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
|
||||
G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
|
||||
|
||||
OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
|
||||
|
||||
@ -5451,9 +5533,9 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadState** per_t
|
||||
// closures while we're actually processing the discovered
|
||||
// reference objects.
|
||||
|
||||
G1ParScanExtRootClosure only_copy_non_heap_cl(this, pss, NULL);
|
||||
G1ParScanExtRootClosure only_copy_non_heap_cl(this, pss);
|
||||
|
||||
G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss, NULL);
|
||||
G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss);
|
||||
|
||||
OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
|
||||
|
||||
|
@ -757,6 +757,12 @@ public:
|
||||
// alloc_archive_regions, and after class loading has occurred.
|
||||
void fill_archive_regions(MemRegion* range, size_t count);
|
||||
|
||||
// For each of the specified MemRegions, uncommit the containing G1 regions
|
||||
// which had been allocated by alloc_archive_regions. This should be called
|
||||
// rather than fill_archive_regions at JVM init time if the archive file
|
||||
// mapping failed, with the same non-overlapping and sorted MemRegion array.
|
||||
void dealloc_archive_regions(MemRegion* range, size_t count);
|
||||
|
||||
protected:
|
||||
|
||||
// Shrink the garbage-first heap by at most the given size (in bytes!).
|
||||
|
@ -181,15 +181,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
|
||||
G1ErgoVerbose::set_enabled(false);
|
||||
}
|
||||
|
||||
// Verify PLAB sizes
|
||||
const size_t region_size = HeapRegion::GrainWords;
|
||||
if (YoungPLABSize > region_size || OldPLABSize > region_size) {
|
||||
char buffer[128];
|
||||
jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most " SIZE_FORMAT,
|
||||
OldPLABSize > region_size ? "Old" : "Young", region_size);
|
||||
vm_exit_during_initialization(buffer);
|
||||
}
|
||||
|
||||
_recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
|
||||
_prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
|
||||
|
||||
@ -932,7 +923,7 @@ bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc
|
||||
// Anything below that is considered to be zero
|
||||
#define MIN_TIMER_GRANULARITY 0.0000001
|
||||
|
||||
void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
|
||||
void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
|
||||
double end_time_sec = os::elapsedTime();
|
||||
assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
|
||||
"otherwise, the subtraction below does not make sense");
|
||||
@ -964,9 +955,6 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
|
||||
_mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
|
||||
end_time_sec, _g1->gc_tracer_stw()->gc_id());
|
||||
|
||||
evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
|
||||
evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
|
||||
|
||||
if (update_stats) {
|
||||
_trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
|
||||
// this is where we update the allocation rate of the application
|
||||
@ -1883,7 +1871,7 @@ uint G1CollectorPolicy::calc_max_old_cset_length() {
|
||||
}
|
||||
|
||||
|
||||
void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
|
||||
void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
|
||||
double young_start_time_sec = os::elapsedTime();
|
||||
|
||||
YoungList* young_list = _g1->young_list();
|
||||
@ -2093,7 +2081,6 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInf
|
||||
|
||||
double non_young_end_time_sec = os::elapsedTime();
|
||||
phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
|
||||
evacuation_info.set_collectionset_regions(cset_region_length());
|
||||
}
|
||||
|
||||
void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
|
||||
|
@ -604,10 +604,6 @@ public:
|
||||
|
||||
virtual G1CollectorPolicy* as_g1_policy() { return this; }
|
||||
|
||||
virtual CollectorPolicy::Name kind() {
|
||||
return CollectorPolicy::G1CollectorPolicyKind;
|
||||
}
|
||||
|
||||
G1CollectorState* collector_state();
|
||||
|
||||
G1GCPhaseTimes* phase_times() const { return _phase_times; }
|
||||
@ -634,13 +630,11 @@ public:
|
||||
virtual HeapWord* satisfy_failed_allocation(size_t size,
|
||||
bool is_tlab);
|
||||
|
||||
BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
|
||||
|
||||
bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
|
||||
|
||||
// Record the start and end of an evacuation pause.
|
||||
void record_collection_pause_start(double start_time_sec);
|
||||
void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);
|
||||
void record_collection_pause_end(double pause_time_ms);
|
||||
|
||||
// Record the start and end of a full collection.
|
||||
void record_full_collection_start();
|
||||
@ -682,6 +676,10 @@ public:
|
||||
return _bytes_copied_during_gc;
|
||||
}
|
||||
|
||||
size_t collection_set_bytes_used_before() const {
|
||||
return _collection_set_bytes_used_before;
|
||||
}
|
||||
|
||||
// Determine whether there are candidate regions so that the
|
||||
// next GC should be mixed. The two action strings are used
|
||||
// in the ergo output when the method returns true or false.
|
||||
@ -691,7 +689,7 @@ public:
|
||||
// Choose a new collection set. Marks the chosen regions as being
|
||||
// "in_collection_set", and links them together. The head and number of
|
||||
// the collection set are available via access methods.
|
||||
void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);
|
||||
void finalize_cset(double target_pause_time_ms);
|
||||
|
||||
// The head of the list (via "next_in_collection_set()") representing the
|
||||
// current collection set.
|
||||
|
@ -54,17 +54,46 @@ void G1EvacStats::adjust_desired_plab_sz() {
|
||||
_allocated, _wasted, _region_end_waste, _unused, used()));
|
||||
_allocated = 1;
|
||||
}
|
||||
// We account region end waste fully to PLAB allocation. This is not completely fair,
|
||||
// but is a conservative assumption because PLABs may be sized flexibly while we
|
||||
// cannot adjust direct allocations.
|
||||
// In some cases, wasted_frac may become > 1 but that just reflects the problem
|
||||
// with region_end_waste.
|
||||
double wasted_frac = (double)(_unused + _wasted + _region_end_waste) / (double)_allocated;
|
||||
size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
|
||||
if (target_refills == 0) {
|
||||
target_refills = 1;
|
||||
}
|
||||
size_t cur_plab_sz = used() / target_refills;
|
||||
// The size of the PLAB caps the amount of space that can be wasted at the
|
||||
// end of the collection. In the worst case the last PLAB could be completely
|
||||
// empty.
|
||||
// This allows us to calculate the new PLAB size to achieve the
|
||||
// TargetPLABWastePct given the latest memory usage and that the last buffer
|
||||
// will be G1LastPLABAverageOccupancy full.
|
||||
//
|
||||
// E.g. assume that if in the current GC 100 words were allocated and a
|
||||
// TargetPLABWastePct of 10 had been set.
|
||||
//
|
||||
// So we could waste up to 10 words to meet that percentage. Given that we
|
||||
// also assume that that buffer is typically half-full, the new desired PLAB
|
||||
// size is set to 20 words.
|
||||
//
|
||||
// The amount of allocation performed should be independent of the number of
|
||||
// threads, so should the maximum waste we can spend in total. So if
|
||||
// we used n threads to allocate, each of them can spend maximum waste/n words in
|
||||
// a first rough approximation. The number of threads only comes into play later
|
||||
// when actually retrieving the actual desired PLAB size.
|
||||
//
|
||||
// After calculating this optimal PLAB size the algorithm applies the usual
|
||||
// exponential decaying average over this value to guess the next PLAB size.
|
||||
//
|
||||
// We account region end waste fully to PLAB allocation (in the calculation of
|
||||
// what we consider as "used_for_waste_calculation" below). This is not
|
||||
// completely fair, but is a conservative assumption because PLABs may be sized
|
||||
// flexibly while we cannot adjust inline allocations.
|
||||
// Allocation during GC will try to minimize region end waste so this impact
|
||||
// should be minimal.
|
||||
//
|
||||
// We need to cover overflow when calculating the amount of space actually used
|
||||
// by objects in PLABs when subtracting the region end waste.
|
||||
// Region end waste may be higher than actual allocation. This may occur if many
|
||||
// threads do not allocate anything but a few rather large objects. In this
|
||||
// degenerate case the PLAB size would simply quickly tend to minimum PLAB size,
|
||||
// which is an okay reaction.
|
||||
size_t const used_for_waste_calculation = used() > _region_end_waste ? used() - _region_end_waste : 0;
|
||||
|
||||
size_t const total_waste_allowed = used_for_waste_calculation * TargetPLABWastePct;
|
||||
size_t const cur_plab_sz = (double)total_waste_allowed / G1LastPLABAverageOccupancy;
|
||||
// Take historical weighted average
|
||||
_filter.sample(cur_plab_sz);
|
||||
// Clip from above and below, and align to object boundary
|
||||
|
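To make the worked example in the comment concrete (100 words allocated, TargetPLABWastePct of 10, last buffer assumed half full), here is a minimal standalone sketch of the new sizing rule; the variable names mirror the code above but the input values are illustrative:

#include <cstddef>
#include <cstdio>

int main() {
  std::size_t used = 100;               // words allocated in PLABs during this GC
  std::size_t region_end_waste = 0;     // assume no region-end waste for the example
  double target_plab_waste_pct = 10.0;  // TargetPLABWastePct
  double last_plab_occupancy = 50.0;    // G1LastPLABAverageOccupancy (percent, default 50)

  std::size_t used_for_waste_calculation =
      used > region_end_waste ? used - region_end_waste : 0;
  double total_waste_allowed = used_for_waste_calculation * target_plab_waste_pct;
  std::size_t cur_plab_sz =
      (std::size_t)(total_waste_allowed / last_plab_occupancy);
  std::printf("desired PLAB size: %zu words\n", cur_plab_sz);  // prints 20
  return 0;
}
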
@ -74,7 +74,7 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
assert(rp != NULL, "should be non-NULL");
assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");

GenMarkSweep::_ref_processor = rp;
GenMarkSweep::set_ref_processor(rp);
rp->setup_policy(clear_all_softrefs);

// When collecting the permanent generation Method*s may be moving,

@ -108,7 +108,7 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
JvmtiExport::gc_epilogue();

// refs processing: clean slate
GenMarkSweep::_ref_processor = NULL;
GenMarkSweep::set_ref_processor(NULL);
}


@ -310,9 +310,9 @@ void G1MarkSweep::enable_archive_object_check() {
HeapRegion::GrainBytes);
}

void G1MarkSweep::mark_range_archive(MemRegion range) {
void G1MarkSweep::set_range_archive(MemRegion range, bool is_archive) {
assert(_archive_check_enabled, "archive range check not enabled");
_archive_region_map.set_by_address(range, true);
_archive_region_map.set_by_address(range, is_archive);
}

bool G1MarkSweep::in_archive_range(oop object) {

@ -58,8 +58,8 @@ class G1MarkSweep : AllStatic {
// Create the _archive_region_map which is used to identify archive objects.
static void enable_archive_object_check();

// Mark the regions containing the specified address range as archive regions.
static void mark_range_archive(MemRegion range);
// Set the regions containing the specified address range as archive/non-archive.
static void set_range_archive(MemRegion range, bool is_archive);

// Check if an object is in an archive region using the _archive_region_map.
static bool in_archive_range(oop object);

@ -125,8 +125,7 @@ private:
template <class T> void do_oop_work(T* p);

public:
G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
ReferenceProcessor* rp) :
G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
G1ParCopyHelper(g1, par_scan_state) {
assert(_ref_processor == NULL, "sanity");
}

@ -141,7 +140,6 @@ public:

G1CollectedHeap* g1() { return _g1; };
G1ParScanThreadState* pss() { return _par_scan_state; }
ReferenceProcessor* rp() { return _ref_processor; };
};

typedef G1ParCopyClosure<G1BarrierNone, G1MarkNone> G1ParScanExtRootClosure;

@ -186,6 +186,21 @@ InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop co
return dest(state);
}

void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
oop const old, size_t word_sz, uint age,
HeapWord * const obj_ptr,
const AllocationContext_t context) const {
G1PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
if (alloc_buf->contains(obj_ptr)) {
_g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
dest_state.value() == InCSetState::Old,
alloc_buf->word_sz());
} else {
_g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz, age,
dest_state.value() == InCSetState::Old);
}
}

oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
oop const old,
markOop const old_mark) {

@ -219,6 +234,10 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
return handle_evacuation_failure_par(old, old_mark);
}
}
if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
// The events are checked individually as part of the actual commit
report_promotion_event(dest_state, old, word_sz, age, obj_ptr, context);
}
}

assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");

@ -173,6 +173,10 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
bool previous_plab_refill_failed);

inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);

void report_promotion_event(InCSetState const dest_state,
oop const old, size_t word_sz, uint age,
HeapWord * const obj_ptr, const AllocationContext_t context) const;
public:

oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);

@ -26,6 +26,7 @@
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentG1RefineThread.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"

@ -228,12 +229,15 @@ public:
};

void G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
OopClosure* non_heap_roots,
uint worker_i) {
double rs_time_start = os::elapsedTime();

G1CodeBlobClosure code_root_cl(non_heap_roots);

HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);

ScanRSClosure scanRScl(oc, code_root_cl, worker_i);
ScanRSClosure scanRScl(oc, &code_root_cl, worker_i);

_g1->collection_set_iterate_from(startRegion, &scanRScl);
scanRScl.set_try_claimed();

@ -295,7 +299,7 @@ void G1RemSet::cleanupHRRS() {
}

void G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
OopClosure* non_heap_roots,
uint worker_i) {
#if CARD_REPEAT_HISTO
ct_freq_update_histo_and_reset();

@ -318,7 +322,7 @@ void G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());

updateRS(&into_cset_dcq, worker_i);
scanRS(oc, code_root_cl, worker_i);
scanRS(oc, non_heap_roots, worker_i);

// We now clear the cached values of _cset_rs_update_cl for this worker
_cset_rs_update_cl[worker_i] = NULL;

@ -85,7 +85,7 @@ public:
// invoked "blk->set_region" to set the "from" region correctly
// beforehand.)
//
// Invoke code_root_cl->do_code_blob on the unmarked nmethods
// Apply non_heap_roots on the oops of the unmarked nmethods
// on the strong code roots list for each region in the
// collection set.
//

@ -95,7 +95,7 @@ public:
// the "i" passed to the calling thread's work(i) function.
// In the sequential case this param will be ignored.
void oops_into_collection_set_do(G1ParPushHeapRSClosure* blk,
CodeBlobClosure* code_root_cl,
OopClosure* non_heap_roots,
uint worker_i);

// Prepare for and cleanup after an oops_into_collection_set_do

@ -107,7 +107,7 @@ public:
void cleanup_after_oops_into_collection_set_do();

void scanRS(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
OopClosure* non_heap_roots,
uint worker_i);

void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);

@ -28,6 +28,7 @@
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/bufferingOopClosure.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"

@ -40,57 +41,6 @@
#include "runtime/mutex.hpp"
#include "services/management.hpp"

class G1CodeBlobClosure : public CodeBlobClosure {
class HeapRegionGatheringOopClosure : public OopClosure {
G1CollectedHeap* _g1h;
OopClosure* _work;
nmethod* _nm;

template <typename T>
void do_oop_work(T* p) {
_work->do_oop(p);
T oop_or_narrowoop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(oop_or_narrowoop)) {
oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
HeapRegion* hr = _g1h->heap_region_containing_raw(o);
assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
hr->add_strong_code_root(_nm);
}
}

public:
HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}

void do_oop(oop* o) {
do_oop_work(o);
}

void do_oop(narrowOop* o) {
do_oop_work(o);
}

void set_nm(nmethod* nm) {
_nm = nm;
}
};

HeapRegionGatheringOopClosure _oc;
public:
G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}

void do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
if (!nm->test_set_oops_do_mark()) {
_oc.set_nm(nm);
nm->oops_do(&_oc);
nm->fix_oop_relocations();
}
}
}
};


void G1RootProcessor::worker_has_discovered_all_strong_classes() {
assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

@ -321,14 +271,6 @@ void G1RootProcessor::process_vm_roots(OopClosure* strong_roots,
}
}

void G1RootProcessor::scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
OopClosure* scan_non_heap_weak_roots,
uint worker_i) {
G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);

_g1h->g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
}

uint G1RootProcessor::n_workers() const {
return _srs.n_threads();
}

@ -107,13 +107,6 @@ public:
CLDClosure* clds,
CodeBlobClosure* blobs);

// Apply scan_rs to all locations in the union of the remembered sets for all
// regions in the collection set
// (having done "set_region" to indicate the region in which the root resides),
void scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
OopClosure* scan_non_heap_weak_roots,
uint worker_i);

// Number of worker threads used by the root processor.
uint n_workers() const;
};

@ -27,6 +27,7 @@
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/satbQueue.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutexLocker.hpp"

@ -108,15 +109,7 @@ void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
jbyte *const first = byte_for(mr.start());
jbyte *const last = byte_after(mr.last());

// Below we may use an explicit loop instead of memset() because on
// certain platforms memset() can give concurrent readers phantom zeros.
if (UseMemSetInBOT) {
memset(first, g1_young_gen, last - first);
} else {
for (jbyte* i = first; i < last; i++) {
*i = g1_young_gen;
}
}
memset_with_concurrent_readers(first, g1_young_gen, last - first);
}
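The replacement collapses the UseMemSetInBOT branch into the shared helper included above. What such a helper has to guarantee is exactly what the removed comment describes: a concurrent reader must never observe the transient zeros that some memset() implementations produce with wide stores. A minimal sketch of one way to meet that contract (this is not the HotSpot implementation, which may be platform-specific):

#include <cstddef>

// Sketch only: byte-sized volatile stores, safe for concurrent readers.
inline void memset_with_concurrent_readers_sketch(void* to, int value, std::size_t size) {
  volatile char* dest = static_cast<volatile char*>(to);
  for (std::size_t i = 0; i < size; i++) {
    dest[i] = static_cast<char>(value);  // one byte at a time, no wide stores
  }
}
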
#ifndef PRODUCT

@ -207,7 +200,7 @@ G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
// Otherwise, log it.
G1SATBCardTableLoggingModRefBS* g1_bs =
barrier_set_cast<G1SATBCardTableLoggingModRefBS>(G1CollectedHeap::heap()->barrier_set());
g1_bs->write_ref_field_work(field, new_val);
g1_bs->write_ref_field_work(field, new_val, false);
}

void

@ -147,6 +147,10 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
private:
G1SATBCardTableLoggingModRefBSChangedListener _listener;
DirtyCardQueueSet& _dcqs;

protected:
virtual void write_ref_field_work(void* field, oop new_val, bool release);

public:
static size_t compute_size(size_t mem_region_size_in_words) {
size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);

@ -165,8 +169,6 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {

virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }

void write_ref_field_work(void* field, oop new_val, bool release = false);

// Can be called from static contexts.
static void write_ref_field_static(void* field, oop new_val);

@ -82,6 +82,11 @@
"If true, enable reference discovery during concurrent " \
"marking and reference processing at the end of remark.") \
\
experimental(double, G1LastPLABAverageOccupancy, 50.0, \
"The expected average occupancy of the last PLAB in " \
"percent.") \
range(0.001, 100.0) \
\
product(size_t, G1SATBBufferSize, 1*K, \
"Number of entries in an SATB log buffer.") \
\

@ -68,7 +68,7 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
// or it was allocated after marking finished, then we add it. Otherwise
// we can safely ignore the object.
if (!g1h->is_obj_dead(oop(cur), _hr)) {
oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
} else {
oop_size = _hr->block_size(cur);
}

@ -426,7 +426,7 @@ uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
(num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);

uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
shrink_at(idx_last_found + num_last_found - to_remove, to_remove);

cur = idx_last_found;
removed += to_remove;

@ -437,6 +437,17 @@ uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
return removed;
}

void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
#ifdef ASSERT
for (uint i = index; i < (index + num_regions); i++) {
assert(is_available(i), err_msg("Expected available region at index %u", i));
assert(at(i)->is_empty(), err_msg("Expected empty region at index %u", i));
assert(at(i)->is_free(), err_msg("Expected free region at index %u", i));
}
#endif
uncommit_regions(index, num_regions);
}

uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
guarantee(start_idx < _allocated_heapregions_length, "checking");
guarantee(res_idx != NULL, "checking");

@ -241,6 +241,10 @@ public:
// Return the actual number of uncommitted regions.
uint shrink_by(uint num_regions_to_remove);

// Uncommit a number of regions starting at the specified index, which must be available,
// empty, and free.
void shrink_at(uint index, size_t num_regions);

void verify();

// Do some sanity checking.

@ -35,7 +35,7 @@ private:
// We encode the value of the heap region type so the generation can be
// determined quickly. The tag is split into two parts:
//
// major type (young, humongous) : top N-1 bits
// major type (young, old, humongous, archive) : top N-1 bits
// minor type (eden / survivor, starts / cont hum, etc.) : bottom 1 bit
//
// If there's need to increase the number of minor types in the
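The tag scheme described in that comment can be illustrated with a small standalone sketch; the bit values here are hypothetical, and only the top-bits/bottom-bit split is taken from the comment:

// Minor type in the bottom bit, major type in the bits above it, so a
// major-type test is a single mask-and-compare that ignores the minor bit.
enum {
  YoungMajor  = 1 << 1,          // hypothetical major-type bits for "young"
  EdenTag     = YoungMajor | 0,  // minor bit 0
  SurvivorTag = YoungMajor | 1   // minor bit 1
};
inline bool is_young_tag(int tag) { return (tag & ~1) == YoungMajor; }
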
@ -89,7 +89,7 @@ class CheckForUnmarkedObjects : public ObjectClosure {
CheckForUnmarkedOops object_check(_young_gen, _card_table);
obj->oop_iterate_no_header(&object_check);
if (object_check.has_unmarked_oop()) {
assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
guarantee(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
}
}
};

@ -56,13 +56,7 @@ class CardTableExtension : public CardTableModRefBS {
CardTableExtension(MemRegion whole_heap) :
CardTableModRefBS(
whole_heap,
// Concrete tag should be BarrierSet::CardTableExtension.
// That will presently break things in a bunch of places though.
// The concrete tag is used as a dispatch key in many places, and
// CardTableExtension does not correctly dispatch in some of those
// uses. This will be addressed as part of a reorganization of the
// BarrierSet hierarchy.
BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableExtension))
BarrierSet::FakeRtti(BarrierSet::CardTableExtension))
{ }

// Scavenge support

@ -44,7 +44,7 @@ void ImmutableSpace::oop_iterate(ExtendedOopClosure* cl) {
HeapWord* t = end();
// Could call objects iterate, but this is easier.
while (obj_addr < t) {
obj_addr += oop(obj_addr)->oop_iterate(cl);
obj_addr += oop(obj_addr)->oop_iterate_size(cl);
}
}


@ -213,15 +213,6 @@ bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top;
}

void MutableSpace::oop_iterate(ExtendedOopClosure* cl) {
HeapWord* obj_addr = bottom();
HeapWord* t = top();
// Could call objects iterate, but this is easier.
while (obj_addr < t) {
obj_addr += oop(obj_addr)->oop_iterate(cl);
}
}

void MutableSpace::oop_iterate_no_header(OopClosure* cl) {
HeapWord* obj_addr = bottom();
HeapWord* t = top();

@ -134,7 +134,6 @@ class MutableSpace: public ImmutableSpace {
bool cas_deallocate(HeapWord *obj, size_t size);

// Iteration.
void oop_iterate(ExtendedOopClosure* cl);
void oop_iterate_no_header(OopClosure* cl);
void object_iterate(ObjectClosure* cl);


@ -30,26 +30,22 @@
#include "gc/parallel/psParallelCompact.hpp"
#include "gc/parallel/psScavenge.hpp"

inline size_t ParallelScavengeHeap::total_invocations()
{
inline size_t ParallelScavengeHeap::total_invocations() {
return UseParallelOldGC ? PSParallelCompact::total_invocations() :
PSMarkSweep::total_invocations();
}

inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const
{
inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const {
const size_t eden_size = young_gen()->eden_space()->capacity_in_words();
return size < eden_size / 2;
}

inline void ParallelScavengeHeap::invoke_scavenge()
{
inline void ParallelScavengeHeap::invoke_scavenge() {
PSScavenge::invoke();
}

inline bool ParallelScavengeHeap::is_in_young(oop p) {
// Assumes that the old gen address range is lower than that of the young gen.
const void* loc = (void*) p;
bool result = ((HeapWord*)p) >= young_gen()->reserved().start();
assert(result == young_gen()->is_in_reserved(p),
err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p)));

@ -299,7 +299,7 @@ void PSAdaptiveSizePolicy::compute_eden_space_size(
// subtracted out.
size_t eden_limit = max_eden_size;

const double gc_cost_limit = GCTimeLimit/100.0;
const double gc_cost_limit = GCTimeLimit / 100.0;

// Which way should we go?
// if pause requirement is not met

@ -60,7 +60,7 @@ CollectorCounters* PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
_ref_processor = new ReferenceProcessor(mr); // a vanilla ref proc
set_ref_processor(new ReferenceProcessor(mr)); // a vanilla ref proc
_counters = new CollectorCounters("PSMarkSweep", 1);
}


@ -486,12 +486,12 @@ void PSOldGen::verify() {
object_space()->verify();
}
class VerifyObjectStartArrayClosure : public ObjectClosure {
PSOldGen* _gen;
PSOldGen* _old_gen;
ObjectStartArray* _start_array;

public:
VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :
_gen(gen), _start_array(start_array) { }
VerifyObjectStartArrayClosure(PSOldGen* old_gen, ObjectStartArray* start_array) :
_old_gen(old_gen), _start_array(start_array) { }

virtual void do_object(oop obj) {
HeapWord* test_addr = (HeapWord*)obj + 1;

@ -958,7 +958,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
{
// Update the from & to space pointers in space_info, since they are swapped
// at each young gen gc. Do the update unconditionally (even though a
// promotion failure does not swap spaces) because an unknown number of minor
// promotion failure does not swap spaces) because an unknown number of young
// collections will have swapped the spaces an unknown number of times.
GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

@ -303,7 +303,7 @@ public:
// completed(), which is desirable since a region must be claimed before it
// can be completed.
bool available() const { return _dc_and_los < dc_one; }
bool claimed() const { return _dc_and_los >= dc_claimed; }
bool claimed() const { return _dc_and_los >= dc_claimed; }
bool completed() const { return _dc_and_los >= dc_completed; }

// These are not atomic.

@ -979,7 +979,6 @@ class PSParallelCompact : AllStatic {
static bool _dwl_initialized;
#endif // #ifdef ASSERT


public:
static ParallelOldTracer* gc_tracer() { return &_gc_tracer; }


@ -597,9 +597,9 @@ bool PSScavenge::invoke_no_policy() {
// to allow resizes that may have been inhibited by the
// relative location of the "to" and "from" spaces.

// Resizing the old gen at minor collects can cause increases
// Resizing the old gen at young collections can cause increases
// that don't feed back to the generation sizing policy until
// a major collection. Don't resize the old gen here.
// a full collection. Don't resize the old gen here.

heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
size_policy->calculated_survivor_size_in_bytes());

@ -172,10 +172,10 @@ void StealTask::do_it(GCTaskManager* manager, uint which) {

void OldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
// There are no old-to-young pointers if the old gen is empty.
assert(!_gen->object_space()->is_empty(),
assert(!_old_gen->object_space()->is_empty(),
"Should not be called if there is no work");
assert(_gen != NULL, "Sanity");
assert(_gen->object_space()->contains(_gen_top) || _gen_top == _gen->object_space()->top(), "Sanity");
assert(_old_gen != NULL, "Sanity");
assert(_old_gen->object_space()->contains(_gen_top) || _gen_top == _old_gen->object_space()->top(), "Sanity");
assert(_stripe_number < ParallelGCThreads, "Sanity");

{

@ -183,8 +183,8 @@ void OldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
CardTableExtension* card_table =
barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());

card_table->scavenge_contents_parallel(_gen->start_array(),
_gen->object_space(),
card_table->scavenge_contents_parallel(_old_gen->start_array(),
_old_gen->object_space(),
_gen_top,
pm,
_stripe_number,

@ -160,17 +160,17 @@ class StealTask : public GCTask {

class OldToYoungRootsTask : public GCTask {
private:
PSOldGen* _gen;
PSOldGen* _old_gen;
HeapWord* _gen_top;
uint _stripe_number;
uint _stripe_total;

public:
OldToYoungRootsTask(PSOldGen *gen,
OldToYoungRootsTask(PSOldGen *old_gen,
HeapWord* gen_top,
uint stripe_number,
uint stripe_total) :
_gen(gen),
_old_gen(old_gen),
_gen_top(gen_top),
_stripe_number(stripe_number),
_stripe_total(stripe_total) { }

@ -106,14 +106,14 @@ FastEvacuateFollowersClosure(GenCollectedHeap* gch,
_gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{
assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
_gen = (DefNewGeneration*)_gch->young_gen();
_young_gen = (DefNewGeneration*)_gch->young_gen();
}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
do {
_gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
} while (!_gch->no_allocs_since_save_marks());
guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :

@ -200,8 +200,9 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
_from_space = new ContiguousSpace();
_to_space = new ContiguousSpace();

if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
vm_exit_during_initialization("Could not allocate a new gen space");
}

// Compute the maximum eden and survivor space sizes. These sizes
// are computed assuming the entire reserved space is committed.

@ -655,7 +656,7 @@ void DefNewGeneration::collect(bool full,
if (ZapUnusedHeapArea) {
// This is now done here because of the piece-meal mangling which
// can check for valid mangling at intermediate points in the
// collection(s). When a minor collection fails to collect
// collection(s). When a young collection fails to collect
// sufficient space resizing of the young generation can occur
// and redistribute the spaces in the young generation. Mangle
// here so that unzapped regions don't get distributed to

@ -193,7 +193,7 @@ protected:

class FastEvacuateFollowersClosure: public VoidClosure {
GenCollectedHeap* _gch;
DefNewGeneration* _gen;
DefNewGeneration* _young_gen;
FastScanClosure* _scan_cur_or_nonheap;
FastScanClosure* _scan_older;
public:

@ -57,8 +57,8 @@ inline void DefNewGeneration::KeepAliveClosure::do_oop_work(T* p) {
// each generation, allowing them in turn to examine the modified
// field.
//
// We could check that p is also in an older generation, but
// dirty cards in the youngest gen are never scanned, so the
// We could check that p is also in the old generation, but
// dirty cards in the young gen are never scanned, so the
// extra check probably isn't worthwhile.
if (GenCollectedHeap::heap()->is_in_reserved(p)) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);

@ -67,7 +67,7 @@ void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_so
// hook up weak ref data so it can be used during Mark-Sweep
assert(ref_processor() == NULL, "no stomping");
assert(rp != NULL, "should be non-NULL");
_ref_processor = rp;
set_ref_processor(rp);
rp->setup_policy(clear_all_softrefs);

GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id());

@ -136,7 +136,7 @@ void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_so
}

// refs processing: clean slate
_ref_processor = NULL;
set_ref_processor(NULL);

// Update heap occupancy information which is used as
// input to soft ref clearing policy at the next gc.

@ -28,11 +28,20 @@
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/specialized_oop_closures.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/instanceClassLoaderKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/instanceRefKlass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1StringDedup.hpp"
#endif // INCLUDE_ALL_GCS

uint MarkSweep::_total_invocations = 0;

@ -50,176 +59,101 @@ SerialOldTracer* MarkSweep::_gc_tracer = NULL;

MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;

void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }

MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
MarkAndPushClosure MarkSweep::mark_and_push_closure;
CLDToOopClosure MarkSweep::follow_cld_closure(&mark_and_push_closure);
CLDToOopClosure MarkSweep::adjust_cld_closure(&adjust_pointer_closure);

template <typename T>
void MarkSweep::MarkAndPushClosure::do_oop_nv(T* p) { mark_and_push(p); }
void MarkSweep::MarkAndPushClosure::do_oop(oop* p) { do_oop_nv(p); }
void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
inline void MarkSweep::mark_object(oop obj) {
#if INCLUDE_ALL_GCS
if (G1StringDedup::is_enabled()) {
// We must enqueue the object before it is marked
// as we otherwise can't read the object's age.
G1StringDedup::enqueue_from_mark(obj);
}
#endif
// some marks may contain information we need to preserve so we store them away
// and overwrite the mark. We'll restore it at the end of markSweep.
markOop mark = obj->mark();
obj->set_mark(markOopDesc::prototype()->set_marked());

void MarkSweep::follow_class_loader(ClassLoaderData* cld) {
if (mark->must_be_preserved(obj)) {
preserve_mark(obj, mark);
}
}

template <class T> inline void MarkSweep::mark_and_push(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!obj->mark()->is_marked() &&
!is_archive_object(obj)) {
mark_object(obj);
_marking_stack.push(obj);
}
}
}

inline void MarkSweep::follow_klass(Klass* klass) {
oop op = klass->klass_holder();
MarkSweep::mark_and_push(&op);
}

inline void MarkSweep::follow_cld(ClassLoaderData* cld) {
MarkSweep::follow_cld_closure.do_cld(cld);
}

void InstanceKlass::oop_ms_follow_contents(oop obj) {
assert(obj != NULL, "can't follow the content of NULL object");
MarkSweep::follow_klass(this);
template <typename T>
inline void MarkAndPushClosure::do_oop_nv(T* p) { MarkSweep::mark_and_push(p); }
void MarkAndPushClosure::do_oop(oop* p) { do_oop_nv(p); }
void MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
inline bool MarkAndPushClosure::do_metadata_nv() { return true; }
bool MarkAndPushClosure::do_metadata() { return do_metadata_nv(); }
inline void MarkAndPushClosure::do_klass_nv(Klass* k) { MarkSweep::follow_klass(k); }
void MarkAndPushClosure::do_klass(Klass* k) { do_klass_nv(k); }
inline void MarkAndPushClosure::do_cld_nv(ClassLoaderData* cld) { MarkSweep::follow_cld(cld); }
void MarkAndPushClosure::do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }

oop_oop_iterate_oop_maps<true>(obj, &MarkSweep::mark_and_push_closure);
template <class T> inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
mark_and_push(p);
}

void InstanceMirrorKlass::oop_ms_follow_contents(oop obj) {
InstanceKlass::oop_ms_follow_contents(obj);
void MarkSweep::push_objarray(oop obj, size_t index) {
ObjArrayTask task(obj, index);
assert(task.is_valid(), "bad ObjArrayTask");
_objarray_stack.push(task);
}

// Follow the klass field in the mirror
Klass* klass = java_lang_Class::as_Klass(obj);
if (klass != NULL) {
// An anonymous class doesn't have its own class loader, so the call
// to follow_klass will mark and push its java mirror instead of the
// class loader. When handling the java mirror for an anonymous class
// we need to make sure its class loader data is claimed, this is done
// by calling follow_class_loader explicitly. For non-anonymous classes
// the call to follow_class_loader is made when the class loader itself
// is handled.
if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
MarkSweep::follow_class_loader(klass->class_loader_data());
} else {
MarkSweep::follow_klass(klass);
}
inline void MarkSweep::follow_array(objArrayOop array) {
MarkSweep::follow_klass(array->klass());
// Don't push empty arrays to avoid unnecessary work.
if (array->length() > 0) {
MarkSweep::push_objarray(array, 0);
}
}

inline void MarkSweep::follow_object(oop obj) {
assert(obj->is_gc_marked(), "should be marked");
if (obj->is_objArray()) {
// Handle object arrays explicitly to allow them to
// be split into chunks if needed.
MarkSweep::follow_array((objArrayOop)obj);
} else {
// If klass is NULL then this is a mirror for a primitive type.
// We don't have to follow them, since they are handled as strong
// roots in Universe::oops_do.
assert(java_lang_Class::is_primitive(obj), "Sanity check");
}

oop_oop_iterate_statics<true>(obj, &MarkSweep::mark_and_push_closure);
}

void InstanceClassLoaderKlass::oop_ms_follow_contents(oop obj) {
InstanceKlass::oop_ms_follow_contents(obj);

ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);

// We must NULL check here, since the class loader
// can be found before the loader data has been set up.
if(loader_data != NULL) {
MarkSweep::follow_class_loader(loader_data);
obj->oop_iterate(&mark_and_push_closure);
}
}

template <class T>
static void oop_ms_follow_contents_specialized(InstanceRefKlass* klass, oop obj) {
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
T heap_oop = oopDesc::load_heap_oop(referent_addr);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr("InstanceRefKlass::oop_ms_follow_contents_specialized " PTR_FORMAT, p2i(obj));
}
)
if (!oopDesc::is_null(heap_oop)) {
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!referent->is_gc_marked() &&
MarkSweep::ref_processor()->discover_reference(obj, klass->reference_type())) {
// reference was discovered, referent will be traversed later
klass->InstanceKlass::oop_ms_follow_contents(obj);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr(" Non NULL enqueued " PTR_FORMAT, p2i(obj));
}
)
return;
} else {
// treat referent as normal oop
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr(" Non NULL normal " PTR_FORMAT, p2i(obj));
}
)
MarkSweep::mark_and_push(referent_addr);
}
}
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
// Treat discovered as normal oop, if ref is not "active",
// i.e. if next is non-NULL.
T next_oop = oopDesc::load_heap_oop(next_addr);
if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr(" Process discovered as normal "
PTR_FORMAT, p2i(discovered_addr));
}
)
MarkSweep::mark_and_push(discovered_addr);
}
// treat next as normal oop. next is a link in the reference queue.
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr(" Process next as normal " PTR_FORMAT, p2i(next_addr));
}
)
MarkSweep::mark_and_push(next_addr);
klass->InstanceKlass::oop_ms_follow_contents(obj);
}

void InstanceRefKlass::oop_ms_follow_contents(oop obj) {
if (UseCompressedOops) {
oop_ms_follow_contents_specialized<narrowOop>(this, obj);
} else {
oop_ms_follow_contents_specialized<oop>(this, obj);
}
}

template <class T>
static void oop_ms_follow_contents_specialized(oop obj, int index) {
objArrayOop a = objArrayOop(obj);
const size_t len = size_t(a->length());
const size_t beg_index = size_t(index);
void MarkSweep::follow_array_chunk(objArrayOop array, int index) {
const int len = array->length();
const int beg_index = index;
assert(beg_index < len || len == 0, "index too large");

const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
const size_t end_index = beg_index + stride;
T* const base = (T*)a->base();
T* const beg = base + beg_index;
T* const end = base + end_index;
const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
const int end_index = beg_index + stride;

// Push the non-NULL elements of the next stride on the marking stack.
for (T* e = beg; e < end; e++) {
MarkSweep::mark_and_push<T>(e);
}
array->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);

if (end_index < len) {
MarkSweep::push_objarray(a, end_index); // Push the continuation.
}
}

void ObjArrayKlass::oop_ms_follow_contents(oop obj) {
assert (obj->is_array(), "obj must be array");
MarkSweep::follow_klass(this);
if (UseCompressedOops) {
oop_ms_follow_contents_specialized<narrowOop>(obj, 0);
} else {
oop_ms_follow_contents_specialized<oop>(obj, 0);
}
}

void TypeArrayKlass::oop_ms_follow_contents(oop obj) {
assert(obj->is_typeArray(),"must be a type array");
// Performance tweak: We skip iterating over the klass pointer since we
// know that Universe::TypeArrayKlass never moves.
}

void MarkSweep::follow_array(objArrayOop array, int index) {
if (UseCompressedOops) {
oop_ms_follow_contents_specialized<narrowOop>(array, index);
} else {
oop_ms_follow_contents_specialized<oop>(array, index);
MarkSweep::push_objarray(array, end_index); // Push the continuation.
}
}
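The rewritten follow_array/follow_array_chunk pair above bounds marking-stack growth: only one stride of a large object array is scanned per task, and the remainder is re-pushed as a continuation. A standalone analogue of that control flow (the stride and array length are illustrative; the real stride is ObjArrayMarkingStride):

#include <cstdio>
#include <utility>
#include <vector>

int main() {
  const int stride = 4;  // stand-in for ObjArrayMarkingStride
  const int len = 10;    // hypothetical array length
  std::vector<std::pair<int, int> > objarray_stack;  // (array id, start index)
  objarray_stack.push_back(std::make_pair(0, 0));
  while (!objarray_stack.empty()) {
    std::pair<int, int> task = objarray_stack.back();
    objarray_stack.pop_back();
    int beg = task.second;
    int end = beg + ((len - beg < stride) ? (len - beg) : stride);
    std::printf("scan elements [%d, %d)\n", beg, end);  // one stride per task
    if (end < len) {
      objarray_stack.push_back(std::make_pair(task.first, end));  // continuation
    }
  }
  return 0;
}
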
@ -233,7 +167,7 @@ void MarkSweep::follow_stack() {
// Process ObjArrays one at a time to avoid marking stack bloat.
if (!_objarray_stack.is_empty()) {
ObjArrayTask task = _objarray_stack.pop();
follow_array(objArrayOop(task.obj()), task.index());
follow_array_chunk(objArrayOop(task.obj()), task.index());
}
} while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
}

@ -242,6 +176,24 @@ MarkSweep::FollowStackClosure MarkSweep::follow_stack_closure;

void MarkSweep::FollowStackClosure::do_void() { follow_stack(); }

template <class T> inline void MarkSweep::follow_root(T* p) {
assert(!Universe::heap()->is_in_reserved(p),
"roots shouldn't be things within the heap");
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!obj->mark()->is_marked() &&
!is_archive_object(obj)) {
mark_object(obj);
follow_object(obj);
}
}
follow_stack();
}

void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }

void PreservedMark::adjust_pointer() {
MarkSweep::adjust_pointer(&_obj);
}

@ -266,6 +218,11 @@ void MarkSweep::preserve_mark(oop obj, markOop mark) {
}
}

void MarkSweep::set_ref_processor(ReferenceProcessor* rp) {
_ref_processor = rp;
mark_and_push_closure.set_ref_processor(_ref_processor);
}
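The new setter exists because the reference processor is now cached in two places: the MarkSweep static and the mark_and_push_closure instance. The call-site hunks earlier in this change (G1MarkSweep, GenMarkSweep, PSMarkSweep) all switch from assigning the field directly to calling the setter, so both copies stay in sync. A schematic of the before/after at a call site (comments only, mirroring those hunks):

// before: the closure's cached reference processor could go stale
//   _ref_processor = rp;
// after: one call updates both copies
//   set_ref_processor(rp);
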
MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure;

template <typename T>

@ -405,3 +362,6 @@ int TypeArrayKlass::oop_ms_adjust_pointers(oop obj) {
// know that Universe::TypeArrayKlass never moves.
return t->object_size();
}

// Generate MS specialized oop_oop_iterate functions.
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(ALL_KLASS_OOP_OOP_ITERATE_DEFN)

@ -49,6 +49,7 @@ class STWGCTimer;

// declared at end
class PreservedMark;
class MarkAndPushClosure;

class MarkSweep : AllStatic {
//

@ -60,13 +61,6 @@ class MarkSweep : AllStatic {
virtual void do_oop(narrowOop* p);
};

class MarkAndPushClosure: public ExtendedOopClosure {
public:
template <typename T> void do_oop_nv(T* p);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
};

class FollowStackClosure: public VoidClosure {
public:
virtual void do_void();

@ -146,6 +140,7 @@ class MarkSweep : AllStatic {

// Reference Processing
static ReferenceProcessor* const ref_processor() { return _ref_processor; }
static void set_ref_processor(ReferenceProcessor* rp);

// Archive Object handling
static inline bool is_archive_object(oop object);

@ -153,34 +148,55 @@ class MarkSweep : AllStatic {
static STWGCTimer* gc_timer() { return _gc_timer; }
static SerialOldTracer* gc_tracer() { return _gc_tracer; }

// Call backs for marking
static void mark_object(oop obj);
// Mark pointer and follow contents. Empty marking stack afterwards.
template <class T> static inline void follow_root(T* p);

// Check mark and maybe push on marking stack
template <class T> static void mark_and_push(T* p);

static inline void push_objarray(oop obj, size_t index);

static void follow_stack(); // Empty marking stack.

static void follow_object(oop obj);

static void follow_array(objArrayOop array, int index);

static void follow_klass(Klass* klass);

static void follow_class_loader(ClassLoaderData* cld);

static int adjust_pointers(oop obj);

static void preserve_mark(oop p, markOop mark);
// Save the mark word so it can be restored later
static void adjust_marks(); // Adjust the pointers in the preserved marks table
static void restore_marks(); // Restore the marks that we saved in preserve_mark

static int adjust_pointers(oop obj);

static void follow_stack(); // Empty marking stack.

static void follow_klass(Klass* klass);

static void follow_cld(ClassLoaderData* cld);

template <class T> static inline void adjust_pointer(T* p);

// Check mark and maybe push on marking stack
template <class T> static void mark_and_push(T* p);

private:
// Call backs for marking
static void mark_object(oop obj);
// Mark pointer and follow contents. Empty marking stack afterwards.
template <class T> static inline void follow_root(T* p);

static inline void push_objarray(oop obj, size_t index);

static void follow_object(oop obj);

static void follow_array(objArrayOop array);

static void follow_array_chunk(objArrayOop array, int index);
};

class MarkAndPushClosure: public ExtendedOopClosure {
public:
template <typename T> void do_oop_nv(T* p);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);

virtual bool do_metadata();
bool do_metadata_nv();

virtual void do_klass(Klass* k);
void do_klass_nv(Klass* k);

virtual void do_cld(ClassLoaderData* cld);
void do_cld_nv(ClassLoaderData* cld);

void set_ref_processor(ReferenceProcessor* rp) { _ref_processor = rp; }
};
class PreservedMark VALUE_OBJ_CLASS_SPEC {
|
||||
|
@ -26,38 +26,13 @@
#define SHARE_VM_GC_SERIAL_MARKSWEEP_INLINE_HPP

#include "gc/serial/markSweep.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "oops/instanceClassLoaderKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/instanceRefKlass.inline.hpp"
#include "memory/universe.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "oops/oop.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#endif // INCLUDE_ALL_GCS

inline void MarkSweep::mark_object(oop obj) {
#if INCLUDE_ALL_GCS
  if (G1StringDedup::is_enabled()) {
    // We must enqueue the object before it is marked
    // as we otherwise can't read the object's age.
    G1StringDedup::enqueue_from_mark(obj);
  }
#endif
  // some marks may contain information we need to preserve so we store them away
  // and overwrite the mark.  We'll restore it at the end of markSweep.
  markOop mark = obj->mark();
  obj->set_mark(markOopDesc::prototype()->set_marked());

  if (mark->must_be_preserved(obj)) {
    preserve_mark(obj, mark);
  }
}
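mark_object() above overwrites the header's mark word with the marked pattern, so any mark that still carries state (the must_be_preserved() case, e.g. lock or hash bits) is stashed in a side table first and written back after the collection. A self-contained sketch of that save/restore idea, with hypothetical mark encodings in place of HotSpot's markOop:

#include <cstdint>
#include <utility>
#include <vector>

struct Obj { uintptr_t mark; };               // hypothetical object header

static const uintptr_t kPrototypeMark = 0x1;  // assumed neutral mark
static const uintptr_t kMarkedPattern = 0x3;  // assumed "marked" encoding

std::vector<std::pair<Obj*, uintptr_t>> preserved_marks;

// Assumed test: any mark other than the neutral prototype carries state.
bool must_be_preserved(uintptr_t mark) { return mark != kPrototypeMark; }

void mark_object(Obj* obj) {
  uintptr_t mark = obj->mark;
  obj->mark = kMarkedPattern;                 // overwrite with the marked pattern
  if (must_be_preserved(mark)) {
    preserved_marks.push_back({obj, mark});   // preserve_mark equivalent
  }
}

void restore_marks() {                        // run at the end of the collection
  for (auto& pm : preserved_marks) {
    pm.first->mark = pm.second;
  }
  preserved_marks.clear();
}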
inline bool MarkSweep::is_archive_object(oop object) {
#if INCLUDE_ALL_GCS
  return (G1MarkSweep::archive_check_enabled() &&
@ -67,51 +42,6 @@ inline bool MarkSweep::is_archive_object(oop object) {
#endif
}

inline void MarkSweep::follow_klass(Klass* klass) {
  oop op = klass->klass_holder();
  MarkSweep::mark_and_push(&op);
}

inline void MarkSweep::follow_object(oop obj) {
  assert(obj->is_gc_marked(), "should be marked");

  obj->ms_follow_contents();
}

template <class T> inline void MarkSweep::follow_root(T* p) {
  assert(!Universe::heap()->is_in_reserved(p),
         "roots shouldn't be things within the heap");
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (!obj->mark()->is_marked() &&
        !is_archive_object(obj)) {
      mark_object(obj);
      follow_object(obj);
    }
  }
  follow_stack();
}

template <class T> inline void MarkSweep::mark_and_push(T* p) {
  // assert(Universe::heap()->is_in_reserved(p), "should be in object space");
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (!obj->mark()->is_marked() &&
        !is_archive_object(obj)) {
      mark_object(obj);
      _marking_stack.push(obj);
    }
  }
}

void MarkSweep::push_objarray(oop obj, size_t index) {
  ObjArrayTask task(obj, index);
  assert(task.is_valid(), "bad ObjArrayTask");
  _objarray_stack.push(task);
}

inline int MarkSweep::adjust_pointers(oop obj) {
  return obj->ms_adjust_pointers();
}
@ -139,8 +69,4 @@ template <class T> inline void MarkSweep::adjust_pointer(T* p) {
  }
}

template <class T> inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
  mark_and_push(p);
}

#endif // SHARE_VM_GC_SERIAL_MARKSWEEP_INLINE_HPP
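adjust_pointers() above drives the compaction fix-up phase: every reference slot is rewritten to the object's new address, which the compaction planning step recorded in the object header. A minimal sketch of the per-slot rewrite under that scheme, with a hypothetical forwardee field standing in for HotSpot's forwarding pointer:

#include <cstddef>

// Hypothetical header: compaction planning installs the forwardee.
struct Obj { Obj* forwardee; };

// Rewrite one reference slot to the object's post-compaction address.
void adjust_pointer(Obj** slot) {
  Obj* obj = *slot;
  if (obj != nullptr && obj->forwardee != nullptr) {
    *slot = obj->forwardee;
  }
}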
@ -108,7 +108,7 @@ bool TenuredGeneration::should_collect(bool full,
                  free());
    }
  }
  // If we had to expand to accommodate promotions from younger generations
  // If we had to expand to accommodate promotions from the young generation
  if (!result && _capacity_at_prologue < capacity()) {
    result = true;
    if (PrintGC && Verbose) {
@ -140,11 +140,11 @@ void TenuredGeneration::update_gc_stats(Generation* current_generation,
  // that are of interest at this point.
  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
  if (!full && current_is_young) {
    // Calculate size of data promoted from the younger generations
    // Calculate size of data promoted from the young generation
    // before doing the collection.
    size_t used_before_gc = used();

    // If the younger gen collections were skipped, then the
    // If the young gen collection was skipped, then the
    // number of promoted bytes will be 0 and adding it to the
    // average will incorrectly lessen the average.  It is, however,
    // also possible that no promotion was needed.
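The comment above is about sample hygiene: if the young collection was skipped, feeding the resulting 0 promoted bytes into the running average would wrongly drag it down, while a genuine zero (nothing needed promotion) is a valid sample. A hedged sketch of that guard, with made-up names for the statistics:

#include <cstddef>

// Assumed exponentially decaying average, similar in spirit to HotSpot's
// AdaptiveWeightedAverage.
struct DecayingAverage {
  double value = 0.0;
  double weight = 0.3;  // assumed decay factor
  void sample(std::size_t v) { value = (1.0 - weight) * value + weight * (double)v; }
};

DecayingAverage avg_promoted;

void record_promotion_stats(bool young_gc_ran, std::size_t promoted_bytes) {
  if (!young_gc_ran) {
    return;  // a skipped young GC would contribute a bogus 0-byte sample
  }
  avg_promoted.sample(promoted_bytes);  // genuine zero promotions still count
}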
@ -42,10 +42,10 @@ class TenuredGeneration: public CardGeneration {
  friend class VM_PopulateDumpSharedSpace;

 protected:
  ContiguousSpace* _the_space;       // Actual space holding objects
  ContiguousSpace* _the_space;       // Actual space holding objects

  GenerationCounters* _gen_counters;
  CSpaceCounters* _space_counters;
  GenerationCounters* _gen_counters;
  CSpaceCounters* _space_counters;

  // Allocation failure
  virtual bool expand(size_t bytes, size_t expand_bytes);
@ -54,6 +54,7 @@ class TenuredGeneration: public CardGeneration {
  ContiguousSpace* space() const { return _the_space; }

  void assert_correct_size_change_locking();

 public:
  TenuredGeneration(ReservedSpace rs,
                    size_t initial_byte_size,
@ -66,10 +67,9 @@ class TenuredGeneration: public CardGeneration {
  const char* short_name() const { return "Tenured"; }

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well?  Note that this is a
  // hack to allow the collection of the younger gen first if the flag is
  // set.
  virtual bool full_collects_younger_generations() const {
  // the young generation as well?  Note that this is a hack to allow the
  // collection of the young gen first if the flag is set.
  virtual bool full_collects_young_generation() const {
    return !ScavengeBeforeFullGC;
  }

@ -99,15 +99,16 @@ class TenuredGeneration: public CardGeneration {
                               bool clear_all_soft_refs,
                               size_t size,
                               bool is_tlab);

  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  virtual void prepare_for_verify();


  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

  bool should_collect(bool full,
                      size_t word_size,
                      bool is_tlab);
@ -266,22 +266,22 @@ void AdaptiveSizePolicy::minor_collection_end(GCCause::Cause gc_cause) {
  }

  // The policy does not have enough data until at least some
  // minor collections have been done.
  // young collections have been done.
  _young_gen_policy_is_ready =
    (_avg_minor_gc_cost->count() >= AdaptiveSizePolicyReadyThreshold);

  // Calculate variables used to estimate pause time vs. gen sizes
  double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
  double eden_size_in_mbytes = ((double)_eden_size) / ((double)M);
  update_minor_pause_young_estimator(minor_pause_in_ms);
  update_minor_pause_old_estimator(minor_pause_in_ms);

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print("AdaptiveSizePolicy::minor_collection_end: "
      "minor gc cost: %f  average: %f", collection_cost,
      _avg_minor_gc_cost->average());
                        "minor gc cost: %f  average: %f", collection_cost,
                        _avg_minor_gc_cost->average());
    gclog_or_tty->print_cr("  minor pause: %f minor period %f",
      minor_pause_in_ms,
      _latest_minor_mutator_interval_seconds * MILLIUNITS);
                           minor_pause_in_ms,
                           _latest_minor_mutator_interval_seconds * MILLIUNITS);
  }

  // Calculate variable used to estimate collection cost vs. gen sizes
@ -295,8 +295,7 @@ void AdaptiveSizePolicy::minor_collection_end(GCCause::Cause gc_cause) {
  _minor_timer.start();
}

size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden,
                                          uint percent_change) {
size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden, uint percent_change) {
  size_t eden_heap_delta;
  eden_heap_delta = cur_eden / 100 * percent_change;
  return eden_heap_delta;
@ -312,8 +311,7 @@ size_t AdaptiveSizePolicy::eden_decrement(size_t cur_eden) {
  return eden_heap_delta;
}

size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo,
                                           uint percent_change) {
size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo, uint percent_change) {
  size_t promo_heap_delta;
  promo_heap_delta = cur_promo / 100 * percent_change;
  return promo_heap_delta;
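eden_increment() and promo_increment() above compute a percentage of the current size as cur / 100 * percent_change. Dividing before multiplying gives up at most 99 bytes of precision but keeps the intermediate value small, so the multiplication cannot overflow size_t even for sizes where cur * percent_change would; a tiny illustration under that reading:

#include <cstddef>
#include <cstdio>

std::size_t increment(std::size_t cur, unsigned percent_change) {
  return cur / 100 * percent_change;  // divide first: overflow-safe ordering
}

int main() {
  std::size_t eden = 512UL * 1024 * 1024;  // assumed 512 MB eden
  std::printf("+20%% -> %zu bytes\n", increment(eden, 20));
  return 0;
}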
@ -132,6 +132,9 @@ public:
  // First the pre-write versions...
  template <class T> inline void write_ref_field_pre(T* field, oop new_val);
private:
  // Helper for write_ref_field_pre and friends, testing for specialized cases.
  bool devirtualize_reference_writes() const;

  // Keep this private so as to catch violations at build time.
  virtual void write_ref_field_pre_work( void* field, oop new_val) { guarantee(false, "Not needed"); };
protected:
@ -142,7 +145,7 @@ public:
  // ...then the post-write version.
  inline void write_ref_field(void* field, oop new_val, bool release = false);
protected:
  virtual void write_ref_field_work(void* field, oop new_val, bool release = false) = 0;
  virtual void write_ref_field_work(void* field, oop new_val, bool release) = 0;
public:

  // Invoke the barrier, if any, necessary when writing the "bytes"-byte

@ -32,8 +32,18 @@
// performance-critical calls when the barrier is the most common
// card-table kind.

inline bool BarrierSet::devirtualize_reference_writes() const {
  switch (kind()) {
  case CardTableForRS:
  case CardTableExtension:
    return true;
  default:
    return false;
  }
}

template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
  if (kind() == CardTableModRef) {
  if (devirtualize_reference_writes()) {
    barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field_pre(field, new_val);
  } else {
    write_ref_field_pre_work(field, new_val);
@ -41,7 +51,7 @@ template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
}

void BarrierSet::write_ref_field(void* field, oop new_val, bool release) {
  if (kind() == CardTableModRef) {
  if (devirtualize_reference_writes()) {
    barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field(field, new_val, release);
  } else {
    write_ref_field_work(field, new_val, release);
@ -77,7 +87,7 @@ void BarrierSet::write_ref_array(HeapWord* start, size_t count) {


inline void BarrierSet::write_region(MemRegion mr) {
  if (kind() == CardTableModRef) {
  if (devirtualize_reference_writes()) {
    barrier_set_cast<CardTableModRefBS>(this)->inline_write_region(mr);
  } else {
    write_region_work(mr);
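The hunks above replace the exact-kind test kind() == CardTableModRef with a devirtualize_reference_writes() helper, so every card-table-like kind takes the statically bound, inlinable fast path instead of the virtual slow path. A self-contained sketch of the dispatch shape, with hypothetical class and enum names rather than HotSpot's BarrierSet hierarchy:

class Barrier {
public:
  enum Kind { CardTable, CardTableForRS, Other };
  explicit Barrier(Kind k) : _kind(k) {}
  virtual ~Barrier() {}
  Kind kind() const { return _kind; }

  // Fast-path test: every card-table-like kind can be handled inline.
  bool devirtualize_reference_writes() const {
    switch (kind()) {
    case CardTable:
    case CardTableForRS:
      return true;
    default:
      return false;
    }
  }

  void write_ref_field(void** field, void* new_val) {
    if (devirtualize_reference_writes()) {
      inline_write(field, new_val);          // statically bound, inlinable
    } else {
      write_ref_field_work(field, new_val);  // virtual slow path
    }
  }

protected:
  void inline_write(void** field, void* new_val) {
    *field = new_val;  // a real barrier would also dirty the card here
  }
  virtual void write_ref_field_work(void** field, void* new_val) {
    *field = new_val;
  }

private:
  Kind _kind;
};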
@ -25,9 +25,12 @@
#ifndef SHARE_VM_GC_SHARED_BLOCKOFFSETTABLE_HPP
#define SHARE_VM_GC_SHARED_BLOCKOFFSETTABLE_HPP

#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start".  For some subtypes, notably generational
@ -126,6 +129,19 @@ class BlockOffsetSharedArray: public CHeapObj<mtGC> {
  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

  void fill_range(size_t start, size_t num_cards, u_char offset) {
    void* start_ptr = &_offset_array[start];
#if INCLUDE_ALL_GCS
    // If collector is concurrent, special handling may be needed.
    assert(!UseG1GC, "Shouldn't be here when using G1");
    if (UseConcMarkSweepGC) {
      memset_with_concurrent_readers(start_ptr, offset, num_cards);
      return;
    }
#endif // INCLUDE_ALL_GCS
    memset(start_ptr, offset, num_cards);
  }

 protected:
  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
@ -160,20 +176,7 @@ class BlockOffsetSharedArray: public CHeapObj<mtGC> {
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> LogN_words;

    // Below, we may use an explicit loop instead of memset()
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air," phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[index_for(left)], offset, num_cards);
    } else {
      size_t i = index_for(left);
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        // Elided until CR 6977974 is fixed properly.
        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
    fill_range(index_for(left), num_cards, offset);
  }

  void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
@ -182,20 +185,7 @@ class BlockOffsetSharedArray: public CHeapObj<mtGC> {
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;

    // Below, we may use an explicit loop instead of memset
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air," phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[left], offset, num_cards);
    } else {
      size_t i = left;
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        // Elided until CR 6977974 is fixed properly.
        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
    fill_range(left, num_cards, offset);
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
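Both hunks above collapse the UseMemSetInBOT choice into the fill_range() helper shown earlier, which calls memset_with_concurrent_readers() when a concurrent collector may be scanning the table; the deleted comments explain that plain memset() can expose transient "out-of-thin-air" values to concurrent readers (bug 6948537). A hedged sketch of the safe alternative, expressed with std::atomic rather than HotSpot's implementation:

#include <atomic>
#include <cstddef>

// Per-element relaxed atomic stores guarantee a concurrent reader sees either
// the old or the new byte, never a transient value; memset makes no such promise.
void fill_for_concurrent_readers(std::atomic<unsigned char>* array,
                                 std::size_t start, std::size_t num,
                                 unsigned char value) {
  for (std::size_t i = start; i < start + num; i++) {
    array[i].store(value, std::memory_order_relaxed);
  }
}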
@ -183,7 +183,7 @@ protected:
  // these functions here for performance.

  void write_ref_field_work(oop obj, size_t offset, oop newVal);
  virtual void write_ref_field_work(void* field, oop newVal, bool release = false);
  virtual void write_ref_field_work(void* field, oop newVal, bool release);
public:

  bool has_write_ref_array_opt() { return true; }

@ -31,13 +31,7 @@
CardTableModRefBSForCTRS::CardTableModRefBSForCTRS(MemRegion whole_heap) :
  CardTableModRefBS(
    whole_heap,
    // Concrete tag should be BarrierSet::CardTableForRS.
    // That will presently break things in a bunch of places though.
    // The concrete tag is used as a dispatch key in many places, and
    // CardTableForRS does not correctly dispatch in some of those
    // uses.  This will be addressed as part of a reorganization of the
    // BarrierSet hierarchy.
    BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableForRS)),
    BarrierSet::FakeRtti(BarrierSet::CardTableForRS)),
  // LNC functionality
  _lowest_non_clean(NULL),
  _lowest_non_clean_chunk_size(NULL),

@ -80,7 +80,9 @@ jbyte CardTableRS::find_unused_youngergenP_card_value() {
        break;
      }
    }
    if (!seen) return v;
    if (!seen) {
      return v;
    }
  }
  ShouldNotReachHere();
  return 0;
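find_unused_youngergenP_card_value() above scans candidate card values and returns the first one not currently present anywhere in the table (the hunk merely gives the early return its own braces). A self-contained sketch of that scan, with hypothetical parameters in place of the card-table internals:

#include <cstddef>

// Return the first candidate byte value that does not occur in the table,
// or -1 if every candidate is in use.
int find_unused_value(const unsigned char* table, std::size_t len,
                      const unsigned char* candidates, std::size_t ncand) {
  for (std::size_t c = 0; c < ncand; c++) {
    bool seen = false;
    for (std::size_t i = 0; i < len; i++) {
      if (table[i] == candidates[c]) {
        seen = true;
        break;
      }
    }
    if (!seen) {
      return candidates[c];
    }
  }
  return -1;
}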
@ -502,7 +504,7 @@ void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
  //
  // The main point below is that the parallel card scanning code
  // deals correctly with these stale card values.  There are two main
  // cases to consider where we have a stale "younger gen" value and a
  // cases to consider where we have a stale "young gen" value and a
  // "derivative" case to consider, where we have a stale
  // "cur_younger_gen_and_prev_non_clean" value, as will become
  // apparent in the case analysis below.