Merge
commit 9374360851
@@ -633,9 +633,9 @@ create_jdk: copy_jdk update_jdk
update_jdk: export_product_jdk export_fastdebug_jdk test_jdk
copy_jdk: $(JDK_IMAGE_DIR)/jre/lib/rt.jar
copy_jdk: $(JDK_IMAGE_DIR)/bin/java
$(JDK_IMAGE_DIR)/jre/lib/rt.jar:
$(JDK_IMAGE_DIR)/bin/java:
$(RM) -r $(JDK_IMAGE_DIR)
$(MKDIR) -p $(JDK_IMAGE_DIR)
($(CD) $(JDK_IMPORT_PATH) && \
@@ -141,18 +141,6 @@ SUNWprivate_1.1 {
JVM_Halt;
JVM_HoldsLock;
JVM_IHashCode;
JVM_ImageAttributeOffsets;
JVM_ImageAttributeOffsetsLength;
JVM_ImageClose;
JVM_ImageFindAttributes;
JVM_ImageGetAttributes;
JVM_ImageGetAttributesCount;
JVM_ImageGetDataAddress;
JVM_ImageGetIndexAddress;
JVM_ImageGetStringBytes;
JVM_ImageOpen;
JVM_ImageRead;
JVM_ImageReadCompressed;
JVM_InitAgentProperties;
JVM_InitProperties;
JVM_InternString;
@@ -139,18 +139,6 @@ SUNWprivate_1.1 {
JVM_Halt;
JVM_HoldsLock;
JVM_IHashCode;
JVM_ImageAttributeOffsets;
JVM_ImageAttributeOffsetsLength;
JVM_ImageClose;
JVM_ImageFindAttributes;
JVM_ImageGetAttributes;
JVM_ImageGetAttributesCount;
JVM_ImageGetDataAddress;
JVM_ImageGetIndexAddress;
JVM_ImageGetStringBytes;
JVM_ImageOpen;
JVM_ImageRead;
JVM_ImageReadCompressed;
JVM_InitAgentProperties;
JVM_InitProperties;
JVM_InternString;
@@ -139,18 +139,6 @@
_JVM_Halt
_JVM_HoldsLock
_JVM_IHashCode
_JVM_ImageAttributeOffsets
_JVM_ImageAttributeOffsetsLength
_JVM_ImageClose
_JVM_ImageFindAttributes
_JVM_ImageGetAttributes
_JVM_ImageGetAttributesCount
_JVM_ImageGetDataAddress
_JVM_ImageGetIndexAddress
_JVM_ImageGetStringBytes
_JVM_ImageOpen
_JVM_ImageRead
_JVM_ImageReadCompressed
_JVM_InitAgentProperties
_JVM_InitProperties
_JVM_InternString
@@ -139,18 +139,6 @@
_JVM_Halt
_JVM_HoldsLock
_JVM_IHashCode
_JVM_ImageAttributeOffsets
_JVM_ImageAttributeOffsetsLength
_JVM_ImageClose
_JVM_ImageFindAttributes
_JVM_ImageGetAttributes
_JVM_ImageGetAttributesCount
_JVM_ImageGetDataAddress
_JVM_ImageGetIndexAddress
_JVM_ImageGetStringBytes
_JVM_ImageOpen
_JVM_ImageRead
_JVM_ImageReadCompressed
_JVM_InitAgentProperties
_JVM_InitProperties
_JVM_InternString
@@ -141,18 +141,6 @@ SUNWprivate_1.1 {
JVM_Halt;
JVM_HoldsLock;
JVM_IHashCode;
JVM_ImageAttributeOffsets;
JVM_ImageAttributeOffsetsLength;
JVM_ImageClose;
JVM_ImageFindAttributes;
JVM_ImageGetAttributes;
JVM_ImageGetAttributesCount;
JVM_ImageGetDataAddress;
JVM_ImageGetIndexAddress;
JVM_ImageGetStringBytes;
JVM_ImageOpen;
JVM_ImageRead;
JVM_ImageReadCompressed;
JVM_InitAgentProperties;
JVM_InitProperties;
JVM_InternString;
@@ -141,18 +141,6 @@ SUNWprivate_1.1 {
JVM_Halt;
JVM_HoldsLock;
JVM_IHashCode;
JVM_ImageAttributeOffsets;
JVM_ImageAttributeOffsetsLength;
JVM_ImageClose;
JVM_ImageFindAttributes;
JVM_ImageGetAttributes;
JVM_ImageGetAttributesCount;
JVM_ImageGetDataAddress;
JVM_ImageGetIndexAddress;
JVM_ImageGetStringBytes;
JVM_ImageOpen;
JVM_ImageRead;
JVM_ImageReadCompressed;
JVM_InitAgentProperties;
JVM_InitProperties;
JVM_InternString;
@@ -131,7 +131,7 @@ endif
# By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM
JDK_LIBDIR = $(JAVA_HOME)/jre/lib/$(LIBARCH)
JDK_LIBDIR = $(JAVA_HOME)/lib/$(LIBARCH)
#----------------------------------------------------------------------
# jvm_db & dtrace
@@ -49,7 +49,7 @@ fi
# Just in case:
JAVA_HOME=`( cd $JAVA_HOME; pwd )`
if [ "${ALT_BOOTDIR-}" = "" -o ! -d "${ALT_BOOTDIR-}" -o ! -d ${ALT_BOOTDIR-}/jre/lib/ ]; then
if [ "${ALT_BOOTDIR-}" = "" -o ! -d "${ALT_BOOTDIR-}" -o ! -d ${ALT_BOOTDIR-}/lib/ ]; then
ALT_BOOTDIR=${JAVA_HOME}
fi
@@ -127,7 +127,7 @@ fi
# o $JRE/lib/$ARCH
# followed by the user's previous effective LD_LIBRARY_PATH, if
# any.
JRE=$JDK/jre
JRE=$JDK
JAVA_HOME=$JDK
export JAVA_HOME
@@ -141,18 +141,6 @@ SUNWprivate_1.1 {
JVM_Halt;
JVM_HoldsLock;
JVM_IHashCode;
JVM_ImageAttributeOffsets;
JVM_ImageAttributeOffsetsLength;
JVM_ImageClose;
JVM_ImageFindAttributes;
JVM_ImageGetAttributes;
JVM_ImageGetAttributesCount;
JVM_ImageGetDataAddress;
JVM_ImageGetIndexAddress;
JVM_ImageGetStringBytes;
JVM_ImageOpen;
JVM_ImageRead;
JVM_ImageReadCompressed;
JVM_InitAgentProperties;
JVM_InitProperties;
JVM_InternString;
@@ -141,18 +141,6 @@ SUNWprivate_1.1 {
JVM_Halt;
JVM_HoldsLock;
JVM_IHashCode;
JVM_ImageAttributeOffsets;
JVM_ImageAttributeOffsetsLength;
JVM_ImageClose;
JVM_ImageFindAttributes;
JVM_ImageGetAttributes;
JVM_ImageGetAttributesCount;
JVM_ImageGetDataAddress;
JVM_ImageGetIndexAddress;
JVM_ImageGetStringBytes;
JVM_ImageOpen;
JVM_ImageRead;
JVM_ImageReadCompressed;
JVM_InitAgentProperties;
JVM_InitProperties;
JVM_InternString;
@@ -76,6 +76,11 @@ endif
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
CFLAGS_WARN = +w -errwarn
endif
# When using compiler version 5.13 (Solaris Studio 12.4), calls to explicitly
# instantiated template functions trigger this warning when +w is active.
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 513), 1)
CFLAGS_WARN += -erroff=notemsource
endif
CFLAGS += $(CFLAGS_WARN)
ifeq ("${Platform_compiler}", "sparcWorks")
@@ -270,6 +270,7 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
echo "CP ?= cp"; \
echo "MV ?= mv"; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \
echo "include \$$(GAMMADIR)/make/excludeSrc.make"; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(COMPILER).make"; \
) > $@
@@ -141,18 +141,6 @@ SUNWprivate_1.1 {
JVM_Halt;
JVM_HoldsLock;
JVM_IHashCode;
JVM_ImageAttributeOffsets;
JVM_ImageAttributeOffsetsLength;
JVM_ImageClose;
JVM_ImageFindAttributes;
JVM_ImageGetAttributes;
JVM_ImageGetAttributesCount;
JVM_ImageGetDataAddress;
JVM_ImageGetIndexAddress;
JVM_ImageGetStringBytes;
JVM_ImageOpen;
JVM_ImageRead;
JVM_ImageReadCompressed;
JVM_InitAgentProperties;
JVM_InitProperties;
JVM_InternString;
@@ -197,7 +197,7 @@ Src_Dirs/COMPILER1 := $(CORE_PATHS) $(COMPILER1_PATHS)
Src_Dirs/COMPILER2 := $(CORE_PATHS) $(COMPILER2_PATHS)
Src_Dirs/TIERED := $(CORE_PATHS) $(COMPILER1_PATHS) $(COMPILER2_PATHS)
Src_Dirs/ZERO := $(CORE_PATHS)
Src_Dirs/SHARK := $(CORE_PATHS)
Src_Dirs/SHARK := $(CORE_PATHS) $(SHARK_PATHS)
Src_Dirs := $(Src_Dirs/$(TYPE))
COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\*
@@ -206,7 +206,7 @@ SHARK_SPECIFIC_FILES := shark
ZERO_SPECIFIC_FILES := zero
# Always exclude these.
Src_Files_EXCLUDE := dtrace jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
Src_Files_EXCLUDE += dtrace jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
# Exclude per type.
Src_Files_EXCLUDE/CORE := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
@@ -4373,12 +4373,12 @@ encode %{
return;
}
if (UseBiasedLocking) {
__ biased_locking_enter(disp_hdr, oop, box, tmp, true, cont);
if (UseBiasedLocking && !UseOptoBiasInlining) {
__ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
}
// Handle existing monitor
if (EmitSync & 0x02) {
if ((EmitSync & 0x02) == 0) {
// we can use AArch64's bit test and branch here but
// markoopDesc does not define a bit index just the bit value
// so assert in case the bit pos changes
@@ -4518,7 +4518,7 @@ encode %{
return;
}
if (UseBiasedLocking) {
if (UseBiasedLocking && !UseOptoBiasInlining) {
__ biased_locking_exit(oop, tmp, cont);
}
@@ -1210,7 +1210,7 @@ public:
INSN(ldrs, 0b00, 1);
INSN(ldrd, 0b01, 1);
INSN(ldrq, 0x10, 1);
INSN(ldrq, 0b10, 1);
#undef INSN
@@ -2285,13 +2285,13 @@ public:
#undef INSN
// Table vector lookup
#define INSN(NAME, op) \
void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, unsigned registers, FloatRegister Vm) { \
starti; \
assert(T == T8B || T == T16B, "invalid arrangement"); \
assert(0 < registers && registers <= 4, "invalid number of registers"); \
f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21), rf(Vm, 16), f(0, 15); \
f(registers - 1, 14, 13), f(op, 12),f(0b00, 11, 10), rf(Vn, 5), rf(Vd, 0); \
#define INSN(NAME, op) \
void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, unsigned registers, FloatRegister Vm) { \
starti; \
assert(T == T8B || T == T16B, "invalid arrangement"); \
assert(0 < registers && registers <= 4, "invalid number of registers"); \
f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21), rf(Vm, 16), f(0, 15); \
f(registers - 1, 14, 13), f(op, 12),f(0b00, 11, 10), rf(Vn, 5), rf(Vd, 0); \
}
INSN(tbl, 0);
@@ -2299,6 +2299,7 @@ public:
#undef INSN
// AdvSIMD two-reg misc
#define INSN(NAME, U, opcode) \
void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \
starti; \
@@ -2316,10 +2317,19 @@ public:
#define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H)
INSN(rev32, 1, 0b00000);
private:
INSN(_rbit, 1, 0b00101);
public:
#undef ASSERTION
#define ASSERTION (T == T8B || T == T16B)
INSN(rev16, 0, 0b00001);
// RBIT only allows T8B and T16B but encodes them oddly. Argh...
void rbit(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
assert((ASSERTION), MSG);
_rbit(Vd, SIMD_Arrangement(T & 1 | 0b010), Vn);
}
#undef ASSERTION
#undef MSG
|
@ -3043,7 +3043,9 @@ void MacroAssembler::store_check(Register obj) {
|
||||
// register obj is destroyed afterwards.
|
||||
|
||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||
assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
|
||||
assert(bs->kind() == BarrierSet::CardTableForRS ||
|
||||
bs->kind() == BarrierSet::CardTableExtension,
|
||||
"Wrong barrier set kind");
|
||||
|
||||
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
|
||||
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
|
||||
|
@@ -691,7 +691,7 @@ class StubGenerator: public StubCodeGenerator {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
__ pop(RegSet::range(r0, r29), sp); // integer registers except lr & sp }
break;
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::ModRef:
break;
@@ -731,7 +731,7 @@ class StubGenerator: public StubCodeGenerator {
__ pop(RegSet::range(r0, r29), sp); // integer registers except lr & sp }
}
break;
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
{
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
@@ -2364,7 +2364,7 @@ class StubGenerator: public StubCodeGenerator {
* c_rarg3 - int* table
*
* Ouput:
* rax - int crc result
* r0 - int crc result
*/
address generate_updateBytesCRC32C() {
assert(UseCRC32CIntrinsics, "what are we doing here?");
@@ -2435,6 +2435,69 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
void ghash_multiply(FloatRegister result_lo, FloatRegister result_hi,
FloatRegister a, FloatRegister b, FloatRegister a1_xor_a0,
FloatRegister tmp1, FloatRegister tmp2, FloatRegister tmp3, FloatRegister tmp4) {
// Karatsuba multiplication performs a 128*128 -> 256-bit
// multiplication in three 128-bit multiplications and a few
// additions.
//
// (C1:C0) = A1*B1, (D1:D0) = A0*B0, (E1:E0) = (A0+A1)(B0+B1)
// (A1:A0)(B1:B0) = C1:(C0+C1+D1+E1):(D1+C0+D0+E0):D0
//
// Inputs:
//
// A0 in a.d[0] (subkey)
// A1 in a.d[1]
// (A1+A0) in a1_xor_a0.d[0]
//
// B0 in b.d[0] (state)
// B1 in b.d[1]
__ ext(tmp1, __ T16B, b, b, 0x08);
__ pmull2(result_hi, __ T1Q, b, a, __ T2D); // A1*B1
__ eor(tmp1, __ T16B, tmp1, b); // (B1+B0)
__ pmull(result_lo, __ T1Q, b, a, __ T1D); // A0*B0
__ pmull(tmp2, __ T1Q, tmp1, a1_xor_a0, __ T1D); // (A1+A0)(B1+B0)
__ ext(tmp4, __ T16B, result_lo, result_hi, 0x08);
__ eor(tmp3, __ T16B, result_hi, result_lo); // A1*B1+A0*B0
__ eor(tmp2, __ T16B, tmp2, tmp4);
__ eor(tmp2, __ T16B, tmp2, tmp3);
// Register pair <result_hi:result_lo> holds the result of carry-less multiplication
__ ins(result_hi, __ D, tmp2, 0, 1);
__ ins(result_lo, __ D, tmp2, 1, 0);
}
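The Karatsuba decomposition named in the comments of ghash_multiply above can be checked in plain C++. This is a minimal reference sketch (not part of the commit); clmul64 is a hypothetical bit-by-bit stand-in for what AArch64's PMULL/PMULL2 compute.

#include <cstdint>

// Reference 64x64 -> 128-bit carry-less multiply over GF(2).
static void clmul64(uint64_t a, uint64_t b, uint64_t* hi, uint64_t* lo) {
  *hi = *lo = 0;
  for (int i = 0; i < 64; i++) {
    if ((b >> i) & 1) {
      *lo ^= a << i;
      if (i) *hi ^= a >> (64 - i);
    }
  }
}

// Karatsuba 128x128 -> 256-bit carry-less multiply: three 64x64 multiplies,
// matching the identity quoted in the stub.  A = a1:a0, B = b1:b0; the
// product is returned as four 64-bit words p[3]:p[2]:p[1]:p[0].
static void clmul128_karatsuba(uint64_t a1, uint64_t a0,
                               uint64_t b1, uint64_t b0, uint64_t p[4]) {
  uint64_t c1, c0, d1, d0, e1, e0;
  clmul64(a1, b1, &c1, &c0);            // (C1:C0) = A1*B1
  clmul64(a0, b0, &d1, &d0);            // (D1:D0) = A0*B0
  clmul64(a1 ^ a0, b1 ^ b0, &e1, &e0);  // (E1:E0) = (A0+A1)(B0+B1)
  p[0] = d0;                            // D0
  p[1] = d1 ^ c0 ^ d0 ^ e0;             // D1+C0+D0+E0
  p[2] = c0 ^ c1 ^ d1 ^ e1;             // C0+C1+D1+E1
  p[3] = c1;                            // C1
}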
void ghash_reduce(FloatRegister result, FloatRegister lo, FloatRegister hi,
FloatRegister p, FloatRegister z, FloatRegister t1) {
const FloatRegister t0 = result;
// The GCM field polynomial f is z^128 + p(z), where p =
// z^7+z^2+z+1.
//
// z^128 === -p(z) (mod (z^128 + p(z)))
//
// so, given that the product we're reducing is
// a == lo + hi * z^128
// substituting,
// === lo - hi * p(z) (mod (z^128 + p(z)))
//
// we reduce by multiplying hi by p(z) and subtracting the result
// from (i.e. XORing it with) lo. Because p has no nonzero high
// bits we can do this with two 64-bit multiplications, lo*p and
// hi*p.
__ pmull2(t0, __ T1Q, hi, p, __ T2D);
__ ext(t1, __ T16B, t0, z, 8);
__ eor(hi, __ T16B, hi, t1);
__ ext(t1, __ T16B, z, t0, 8);
__ eor(lo, __ T16B, lo, t1);
__ pmull(t0, __ T1Q, hi, p, __ T1D);
__ eor(result, __ T16B, lo, t0);
}
/**
* Arguments:
*
@@ -2448,10 +2511,27 @@ class StubGenerator: public StubCodeGenerator {
* Updated state at c_rarg0
*/
address generate_ghash_processBlocks() {
__ align(CodeEntryAlignment);
Label L_ghash_loop, L_exit;
// Bafflingly, GCM uses little-endian for the byte order, but
// big-endian for the bit order. For example, the polynomial 1 is
// represented as the 16-byte string 80 00 00 00 | 12 bytes of 00.
//
// So, we must either reverse the bytes in each word and do
// everything big-endian or reverse the bits in each byte and do
// it little-endian. On AArch64 it's more idiomatic to reverse
// the bits in each byte (we have an instruction, RBIT, to do
// that) and keep the data in little-endian bit order throught the
// calculation, bit-reversing the inputs and outputs.
StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
__ align(wordSize * 2);
address p = __ pc();
__ emit_int64(0x87); // The low-order bits of the field
// polynomial (i.e. p = z^7+z^2+z+1)
// repeated in the low and high parts of a
// 128-bit vector
__ emit_int64(0x87);
__ align(CodeEntryAlignment);
address start = __ pc();
Register state = c_rarg0;
@@ -2462,104 +2542,43 @@ class StubGenerator: public StubCodeGenerator {
FloatRegister vzr = v30;
__ eor(vzr, __ T16B, vzr, vzr); // zero register
__ mov(v26, __ T16B, 1);
__ mov(v27, __ T16B, 63);
__ mov(v28, __ T16B, 62);
__ mov(v29, __ T16B, 57);
__ ldrq(v0, Address(state));
__ ldrq(v1, Address(subkeyH));
__ ldrq(v6, Address(state));
__ ldrq(v16, Address(subkeyH));
__ rev64(v0, __ T16B, v0); // Bit-reverse words in state and subkeyH
__ rbit(v0, __ T16B, v0);
__ rev64(v1, __ T16B, v1);
__ rbit(v1, __ T16B, v1);
__ ext(v0, __ T16B, v6, v6, 0x08);
__ ext(v1, __ T16B, v16, v16, 0x08);
__ eor(v16, __ T16B, v16, v1);
__ ldrq(v26, p);
__ bind(L_ghash_loop);
__ ext(v16, __ T16B, v1, v1, 0x08); // long-swap subkeyH into v1
__ eor(v16, __ T16B, v16, v1); // xor subkeyH into subkeyL (Karatsuba: (A1+A0))
__ ldrq(v2, Address(__ post(data, 0x10)));
__ rev64(v2, __ T16B, v2); // swap data
{
Label L_ghash_loop;
__ bind(L_ghash_loop);
__ ext(v6, __ T16B, v0, v0, 0x08);
__ eor(v6, __ T16B, v6, v2);
__ ext(v2, __ T16B, v6, v6, 0x08);
__ ldrq(v2, Address(__ post(data, 0x10))); // Load the data, bit
// reversing each byte
__ rbit(v2, __ T16B, v2);
__ eor(v2, __ T16B, v0, v2); // bit-swapped data ^ bit-swapped state
__ pmull2(v7, __ T1Q, v2, v1, __ T2D); // A1*B1
__ eor(v6, __ T16B, v6, v2);
__ pmull(v5, __ T1Q, v2, v1, __ T1D); // A0*B0
__ pmull(v20, __ T1Q, v6, v16, __ T1D); // (A1 + A0)(B1 + B0)
// Multiply state in v2 by subkey in v1
ghash_multiply(/*result_lo*/v5, /*result_hi*/v7,
/*a*/v1, /*b*/v2, /*a1_xor_a0*/v16,
/*temps*/v6, v20, v18, v21);
// Reduce v7:v5 by the field polynomial
ghash_reduce(v0, v5, v7, v26, vzr, v20);
__ ext(v21, __ T16B, v5, v7, 0x08);
__ eor(v18, __ T16B, v7, v5); // A1*B1 xor A0*B0
__ eor(v20, __ T16B, v20, v21);
__ eor(v20, __ T16B, v20, v18);
__ sub(blocks, blocks, 1);
__ cbnz(blocks, L_ghash_loop);
}
// Registers pair <v7:v5> holds the result of carry-less multiplication
__ ins(v7, __ D, v20, 0, 1);
__ ins(v5, __ D, v20, 1, 0);
// The bit-reversed result is at this point in v0
__ rev64(v1, __ T16B, v0);
__ rbit(v1, __ T16B, v1);
// Result of the multiplication is shifted by one bit position
// [X3:X2:X1:X0] = [X3:X2:X1:X0] << 1
__ ushr(v18, __ T2D, v5, -63 & 63);
__ ins(v25, __ D, v18, 1, 0);
__ ins(v25, __ D, vzr, 0, 0);
__ ushl(v5, __ T2D, v5, v26);
__ orr(v5, __ T16B, v5, v25);
__ ushr(v19, __ T2D, v7, -63 & 63);
__ ins(v19, __ D, v19, 1, 0);
__ ins(v19, __ D, v18, 0, 1);
__ ushl(v7, __ T2D, v7, v26);
__ orr(v6, __ T16B, v7, v19);
__ ins(v24, __ D, v5, 0, 1);
// A = X0 << 63
__ ushl(v21, __ T2D, v5, v27);
// A = X0 << 62
__ ushl(v22, __ T2D, v5, v28);
// A = X0 << 57
__ ushl(v23, __ T2D, v5, v29);
// D = X1^A^B^C
__ eor(v21, __ T16B, v21, v22);
__ eor(v21, __ T16B, v21, v23);
__ eor(v21, __ T16B, v21, v24);
__ ins(v5, __ D, v21, 1, 0);
// [E1:E0] = [D:X0] >> 1
__ ushr(v20, __ T2D, v5, -1 & 63);
__ ushl(v18, __ T2D, v5, v27);
__ ext(v25, __ T16B, v18, vzr, 0x08);
__ orr(v19, __ T16B, v20, v25);
__ eor(v7, __ T16B, v5, v19);
// [F1:F0] = [D:X0] >> 2
__ ushr(v20, __ T2D, v5, -2 & 63);
__ ushl(v18, __ T2D, v5, v28);
__ ins(v25, __ D, v18, 0, 1);
__ orr(v19, __ T16B, v20, v25);
__ eor(v7, __ T16B, v7, v19);
// [G1:G0] = [D:X0] >> 7
__ ushr(v20, __ T2D, v5, -7 & 63);
__ ushl(v18, __ T2D, v5, v29);
__ ins(v25, __ D, v18, 0, 1);
__ orr(v19, __ T16B, v20, v25);
// [H1:H0] = [D^E1^F1^G1:X0^E0^F0^G0]
__ eor(v7, __ T16B, v7, v19);
// Result = [H1:H0]^[X3:X2]
__ eor(v0, __ T16B, v7, v6);
__ subs(blocks, blocks, 1);
__ cbnz(blocks, L_ghash_loop);
__ ext(v1, __ T16B, v0, v0, 0x08);
__ st1(v1, __ T16B, state);
__ ret(lr);
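The two-step folding performed by ghash_reduce in the hunk above can be written down directly. A minimal sketch (assumes the GCC/Clang unsigned __int128 extension; not part of the commit):

#include <cstdint>

typedef unsigned __int128 u128;

// Multiply a 128-bit polynomial by p(z) = z^7 + z^2 + z + 1; the low 128 bits
// go to *out, the (at most 7) bits that overflow bit 127 go to *carry.
static void mulp(u128 x, u128* out, u128* carry) {
  *out   = x ^ (x << 1) ^ (x << 2) ^ (x << 7);
  *carry = (x >> 127) ^ (x >> 126) ^ (x >> 121);
}

// Reduce the 256-bit carry-less product hi:lo modulo z^128 + z^7 + z^2 + z + 1.
static u128 ghash_reduce_ref(u128 hi, u128 lo) {
  u128 t, c;
  mulp(hi, &t, &c);  // z^128 === p, so hi*z^128 folds into hi*p
  lo ^= t;
  mulp(c, &t, &c);   // fold the small overflow once more; no further carry
  return lo ^ t;
}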
@@ -186,7 +186,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
}
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
{
if (val == noreg) {
@@ -177,6 +177,12 @@ void VM_Version::get_processor_features() {
if (UseCRC32 && (auxv & HWCAP_CRC32) == 0) {
warning("UseCRC32 specified, but not supported on this CPU");
}
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
if (auxv & HWCAP_AES) {
UseAES = UseAES || FLAG_IS_DEFAULT(UseAES);
UseAESIntrinsics =
@@ -2614,7 +2614,7 @@ void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register t
void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
CardTableModRefBS* bs =
barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
assert(bs->kind() == BarrierSet::CardTableModRef ||
assert(bs->kind() == BarrierSet::CardTableForRS ||
bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
#ifdef ASSERT
cmpdi(CCR0, Rnew_val, 0);
@@ -656,7 +656,7 @@ class StubGenerator: public StubCodeGenerator {
__ bind(filtered);
}
break;
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::ModRef:
break;
@@ -697,7 +697,7 @@ class StubGenerator: public StubCodeGenerator {
}
}
break;
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
{
Label Lskip_loop, Lstore_loop;
@@ -105,7 +105,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
}
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
{
Label Lnull, Ldone;
@@ -200,6 +200,11 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
UseMultiplyToLenIntrinsic = true;
}
@@ -3958,7 +3958,7 @@ void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_v
if (new_val == G0) return;
CardTableModRefBS* bs =
barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
assert(bs->kind() == BarrierSet::CardTableModRef ||
assert(bs->kind() == BarrierSet::CardTableForRS ||
bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
card_table_write(bs->byte_map_base, tmp, store_addr);
}
@@ -0,0 +1,159 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
// An implementation of memset, for use when there may be concurrent
// readers of the region being stored into.
//
// We can't use the standard library memset if it is implemented using
// block initializing stores. Doing so can result in concurrent readers
// seeing spurious zeros.
//
// We can't use the obvious C/C++ for-loop, because the compiler may
// recognize the idiomatic loop and optimize it into a call to the
// standard library memset; we've seen exactly this happen with, for
// example, Solaris Studio 12.3. Hence the use of inline assembly
// code, hiding loops from the compiler's optimizer.
//
// We don't attempt to use the standard library memset when it is safe
// to do so. We could conservatively do so by detecting the presence
// of block initializing stores (VM_Version::has_blk_init()), but the
// implementation provided here should be sufficient.
inline void fill_subword(void* start, void* end, int value) {
STATIC_ASSERT(BytesPerWord == 8);
assert(pointer_delta(end, start, 1) < BytesPerWord, "precondition");
// Dispatch on (end - start).
void* pc;
__asm__ volatile(
// offset := (7 - (end - start)) + 3
// 3 instructions from rdpc to DISPATCH
" sub %[offset], %[end], %[offset]\n\t" // offset := start - end
" sllx %[offset], 2, %[offset]\n\t" // scale offset for instruction size of 4
" add %[offset], 40, %[offset]\n\t" // offset += 10 * instruction size
" rd %pc, %[pc]\n\t" // dispatch on scaled offset
" jmpl %[pc]+%[offset], %g0\n\t"
" nop\n\t"
// DISPATCH: no direct reference, but without it the store block may be elided.
"1:\n\t"
" stb %[value], [%[end]-7]\n\t" // end[-7] = value
" stb %[value], [%[end]-6]\n\t"
" stb %[value], [%[end]-5]\n\t"
" stb %[value], [%[end]-4]\n\t"
" stb %[value], [%[end]-3]\n\t"
" stb %[value], [%[end]-2]\n\t"
" stb %[value], [%[end]-1]\n\t" // end[-1] = value
: /* no outputs */
[pc] "&=r" (pc) // temp
: [offset] "&+r" (start),
[end] "r" (end),
[value] "r" (value)
: "memory");
}
void memset_with_concurrent_readers(void* to, int value, size_t size) {
Prefetch::write(to, 0);
void* end = static_cast<char*>(to) + size;
if (size >= BytesPerWord) {
// Fill any partial word prefix.
uintx* aligned_to = static_cast<uintx*>(align_ptr_up(to, BytesPerWord));
fill_subword(to, aligned_to, value);
// Compute fill word.
STATIC_ASSERT(BitsPerByte == 8);
STATIC_ASSERT(BitsPerWord == 64);
uintx xvalue = value & 0xff;
xvalue |= (xvalue << 8);
xvalue |= (xvalue << 16);
xvalue |= (xvalue << 32);
uintx* aligned_end = static_cast<uintx*>(align_ptr_down(end, BytesPerWord));
assert(aligned_to <= aligned_end, "invariant");
// for ( ; aligned_to < aligned_end; ++aligned_to) {
// *aligned_to = xvalue;
// }
uintptr_t temp;
__asm__ volatile(
// Unroll loop x8.
" sub %[aend], %[ato], %[temp]\n\t"
" cmp %[temp], 56\n\t" // cc := (aligned_end - aligned_to) > 7 words
" ba %xcc, 2f\n\t" // goto TEST always
" sub %[aend], 56, %[temp]\n\t" // limit := aligned_end - 7 words
// LOOP:
"1:\n\t" // unrolled x8 store loop top
" cmp %[temp], %[ato]\n\t" // cc := limit > (next) aligned_to
" stx %[xvalue], [%[ato]-64]\n\t" // store 8 words, aligned_to pre-incremented
" stx %[xvalue], [%[ato]-56]\n\t"
" stx %[xvalue], [%[ato]-48]\n\t"
" stx %[xvalue], [%[ato]-40]\n\t"
" stx %[xvalue], [%[ato]-32]\n\t"
" stx %[xvalue], [%[ato]-24]\n\t"
" stx %[xvalue], [%[ato]-16]\n\t"
" stx %[xvalue], [%[ato]-8]\n\t"
// TEST:
"2:\n\t"
" bgu,a %xcc, 1b\n\t" // goto LOOP if more than 7 words remaining
" add %[ato], 64, %[ato]\n\t" // aligned_to += 8, for next iteration
// Fill remaining < 8 full words.
// Dispatch on (aligned_end - aligned_to).
// offset := (7 - (aligned_end - aligned_to)) + 3
// 3 instructions from rdpc to DISPATCH
" sub %[ato], %[aend], %[ato]\n\t" // offset := aligned_to - aligned_end
" srax %[ato], 1, %[ato]\n\t" // scale offset for instruction size of 4
" add %[ato], 40, %[ato]\n\t" // offset += 10 * instruction size
" rd %pc, %[temp]\n\t" // dispatch on scaled offset
" jmpl %[temp]+%[ato], %g0\n\t"
" nop\n\t"
// DISPATCH: no direct reference, but without it the store block may be elided.
"3:\n\t"
" stx %[xvalue], [%[aend]-56]\n\t" // aligned_end[-7] = xvalue
" stx %[xvalue], [%[aend]-48]\n\t"
" stx %[xvalue], [%[aend]-40]\n\t"
" stx %[xvalue], [%[aend]-32]\n\t"
" stx %[xvalue], [%[aend]-24]\n\t"
" stx %[xvalue], [%[aend]-16]\n\t"
" stx %[xvalue], [%[aend]-8]\n\t" // aligned_end[-1] = xvalue
: /* no outputs */
[temp] "&=r" (temp)
: [ato] "&+r" (aligned_to),
[aend] "r" (aligned_end),
[xvalue] "r" (xvalue)
: "cc", "memory");
to = aligned_end; // setup for suffix
}
// Fill any partial word suffix. Also the prefix if size < BytesPerWord.
fill_subword(to, end, value);
}
#endif // INCLUDE_ALL_GCS
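For orientation, the structure of the new file above (byte prefix up to alignment, broadcast fill word, 64-bit word loop, byte suffix) corresponds to the portable sketch below. The commit deliberately does not build anything like this on SPARC, because the compiler could turn the plain loops back into a block-initializing memset; this is only a hypothetical illustration of the control flow the inline assembly implements.

#include <cstddef>
#include <cstdint>

void memset_with_concurrent_readers_ref(void* to, int value, size_t size) {
  char* p = static_cast<char*>(to);
  char* end = p + size;
  // Byte prefix up to an 8-byte boundary (or the whole range, if short).
  while (p < end && (reinterpret_cast<uintptr_t>(p) & 7) != 0) *p++ = (char)value;
  // Broadcast the byte into a 64-bit fill word, as the commit does with xvalue.
  uint64_t x = (uint8_t)value;
  x |= x << 8; x |= x << 16; x |= x << 32;
  // Whole 64-bit words.
  while (end - p >= 8) { *reinterpret_cast<uint64_t*>(p) = x; p += 8; }
  // Byte suffix.
  while (p < end) *p++ = (char)value;
}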
@@ -981,7 +981,7 @@ class StubGenerator: public StubCodeGenerator {
__ restore();
}
break;
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::ModRef:
break;
@@ -1014,7 +1014,7 @@ class StubGenerator: public StubCodeGenerator {
__ restore();
}
break;
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
{
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
@@ -5110,6 +5110,188 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
#define ADLER32_NUM_TEMPS 16
/**
* Arguments:
*
* Inputs:
* O0 - int adler
* O1 - byte* buff
* O2 - int len
*
* Output:
* O0 - int adler result
*/
address generate_updateBytesAdler32() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "updateBytesAdler32");
address start = __ pc();
Label L_cleanup_loop, L_cleanup_loop_check;
Label L_main_loop_check, L_main_loop, L_inner_loop, L_inner_loop_check;
Label L_nmax_check_done;
// Aliases
Register s1 = O0;
Register s2 = O3;
Register buff = O1;
Register len = O2;
Register temp[ADLER32_NUM_TEMPS] = {L0, L1, L2, L3, L4, L5, L6, L7, I0, I1, I2, I3, I4, I5, G3, I7};
// Max number of bytes we can process before having to take the mod
// 0x15B0 is 5552 in decimal, the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
unsigned long NMAX = 0x15B0;
// Zero-out the upper bits of len
__ clruwu(len);
// Create the mask 0xFFFF
__ set64(0x00FFFF, O4, O5); // O5 is the temp register
// s1 is initialized to the lower 16 bits of adler
// s2 is initialized to the upper 16 bits of adler
__ srlx(O0, 16, O5); // adler >> 16
__ and3(O0, O4, s1); // s1 = (adler & 0xFFFF)
__ and3(O5, O4, s2); // s2 = ((adler >> 16) & 0xFFFF)
// The pipelined loop needs at least 16 elements for 1 iteration
// It does check this, but it is more effective to skip to the cleanup loop
// Setup the constant for cutoff checking
__ mov(15, O4);
// Check if we are above the cutoff, if not go to the cleanup loop immediately
__ cmp_and_br_short(len, O4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_loop_check);
// Free up some registers for our use
for (int i = 0; i < ADLER32_NUM_TEMPS; i++) {
__ movxtod(temp[i], as_FloatRegister(2*i));
}
// Loop maintenance stuff is done at the end of the loop, so skip to there
__ ba_short(L_main_loop_check);
__ BIND(L_main_loop);
// Prologue for inner loop
__ ldub(buff, 0, L0);
__ dec(O5);
for (int i = 1; i < 8; i++) {
__ ldub(buff, i, temp[i]);
}
__ inc(buff, 8);
// Inner loop processes 16 elements at a time, might never execute if only 16 elements
// to be processed by the outter loop
__ ba_short(L_inner_loop_check);
__ BIND(L_inner_loop);
for (int i = 0; i < 8; i++) {
__ ldub(buff, (2*i), temp[(8+(2*i)) % ADLER32_NUM_TEMPS]);
__ add(s1, temp[i], s1);
__ ldub(buff, (2*i)+1, temp[(8+(2*i)+1) % ADLER32_NUM_TEMPS]);
__ add(s2, s1, s2);
}
// Original temp 0-7 used and new loads to temp 0-7 issued
// temp 8-15 ready to be consumed
__ add(s1, I0, s1);
__ dec(O5);
__ add(s2, s1, s2);
__ add(s1, I1, s1);
__ inc(buff, 16);
__ add(s2, s1, s2);
for (int i = 0; i < 6; i++) {
__ add(s1, temp[10+i], s1);
__ add(s2, s1, s2);
}
__ BIND(L_inner_loop_check);
__ nop();
__ cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_inner_loop);
// Epilogue
for (int i = 0; i < 4; i++) {
__ ldub(buff, (2*i), temp[8+(2*i)]);
__ add(s1, temp[i], s1);
__ ldub(buff, (2*i)+1, temp[8+(2*i)+1]);
__ add(s2, s1, s2);
}
__ add(s1, temp[4], s1);
__ inc(buff, 8);
for (int i = 0; i < 11; i++) {
__ add(s2, s1, s2);
__ add(s1, temp[5+i], s1);
}
__ add(s2, s1, s2);
// Take the mod for s1 and s2
__ set64(0xFFF1, L0, L1);
__ udivx(s1, L0, L1);
__ udivx(s2, L0, L2);
__ mulx(L0, L1, L1);
__ mulx(L0, L2, L2);
__ sub(s1, L1, s1);
__ sub(s2, L2, s2);
// Make sure there is something left to process
__ BIND(L_main_loop_check);
__ set64(NMAX, L0, L1);
// k = len < NMAX ? len : NMAX
__ cmp_and_br_short(len, L0, Assembler::greaterEqualUnsigned, Assembler::pt, L_nmax_check_done);
__ andn(len, 0x0F, L0); // only loop a multiple of 16 times
__ BIND(L_nmax_check_done);
__ mov(L0, O5);
__ sub(len, L0, len); // len -= k
__ srlx(O5, 4, O5); // multiplies of 16
__ cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_main_loop);
// Restore anything we used, take the mod one last time, combine and return
// Restore any registers we saved
for (int i = 0; i < ADLER32_NUM_TEMPS; i++) {
__ movdtox(as_FloatRegister(2*i), temp[i]);
}
// There might be nothing left to process
__ ba_short(L_cleanup_loop_check);
__ BIND(L_cleanup_loop);
__ ldub(buff, 0, O4); // load single byte form buffer
__ inc(buff); // buff++
__ add(s1, O4, s1); // s1 += *buff++;
__ dec(len); // len--
__ add(s1, s2, s2); // s2 += s1;
__ BIND(L_cleanup_loop_check);
__ nop();
__ cmp_and_br_short(len, 0, Assembler::notEqual, Assembler::pt, L_cleanup_loop);
// Take the mod one last time
__ set64(0xFFF1, O1, O2);
__ udivx(s1, O1, O2);
__ udivx(s2, O1, O5);
__ mulx(O1, O2, O2);
__ mulx(O1, O5, O5);
__ sub(s1, O2, s1);
__ sub(s2, O5, s2);
// Combine lower bits and higher bits
__ sllx(s2, 16, s2); // s2 = s2 << 16
__ or3(s1, s2, s1); // adler = s2 | s1
// Final return value is in O0
__ retl();
__ delayed()->nop();
return start;
}
void generate_initial() {
// Generates all stubs and initializes the entry points
@@ -5206,6 +5388,11 @@ class StubGenerator: public StubCodeGenerator {
if (UseCRC32CIntrinsics) {
StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
}
// generate Adler32 intrinsics code
if (UseAdler32Intrinsics) {
StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32();
}
}
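The stub above pipelines the standard Adler-32 recurrence. The scalar form it implements, including the NMAX = 0x15B0 = 5552 bound that lets the modulo be deferred (5552 is the largest n with 255*n*(n+1)/2 + (n+1)*65520 below 2^32), is sketched here for reference; it is not part of the commit.

#include <cstddef>
#include <cstdint>

uint32_t adler32_ref(uint32_t adler, const uint8_t* buf, size_t len) {
  const uint32_t BASE = 65521;   // 0xFFF1, largest prime below 2^16
  const size_t   NMAX = 5552;    // 0x15B0, see the bound above
  uint32_t s1 = adler & 0xFFFF;
  uint32_t s2 = (adler >> 16) & 0xFFFF;
  while (len > 0) {
    size_t k = len < NMAX ? len : NMAX;
    len -= k;
    for (size_t i = 0; i < k; i++) {  // sums cannot overflow 32 bits within a chunk
      s1 += *buf++;
      s2 += s1;
    }
    s1 %= BASE;
    s2 %= BASE;
  }
  return (s2 << 16) | s1;
}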
@@ -41,7 +41,7 @@ static bool returns_to_call_stub(address return_pc) {
enum /* platform_dependent_constants */ {
// %%%%%%%% May be able to shrink this a lot
code_size1 = 20000, // simply increase if too small (assembler will crash if too small)
code_size2 = 24000 // simply increase if too small (assembler will crash if too small)
code_size2 = 27000 // simply increase if too small (assembler will crash if too small)
};
class Sparc {
@@ -91,7 +91,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
}
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
{
if (index == noreg ) {
@@ -85,27 +85,6 @@ void VM_Version::initialize() {
_supports_cx8 = has_v9();
_supports_atomic_getset4 = true; // swap instruction
// There are Fujitsu Sparc64 CPUs which support blk_init as well so
// we have to take this check out of the 'is_niagara()' block below.
if (has_blk_init()) {
// When using CMS or G1, we cannot use memset() in BOT updates
// because the sun4v/CMT version in libc_psr uses BIS which
// exposes "phantom zeros" to concurrent readers. See 6948537.
if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) {
FLAG_SET_DEFAULT(UseMemSetInBOT, false);
}
// Issue a stern warning if the user has explicitly set
// UseMemSetInBOT (it is known to cause issues), but allow
// use for experimentation and debugging.
if (UseConcMarkSweepGC || UseG1GC) {
if (UseMemSetInBOT) {
assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
" on sun4v; please understand that you are using at your own risk!");
}
}
}
if (is_niagara()) {
// Indirect branch is the same cost as direct
if (FLAG_IS_DEFAULT(UseInlineCaches)) {
@@ -377,6 +356,15 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
}
if (UseVIS > 2) {
if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
}
} else if (UseAdler32Intrinsics) {
warning("SPARC Adler32 intrinsics require VIS3 instruction support. Intrinsics will be disabled.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
(cache_line_size > ContendedPaddingWidth))
ContendedPaddingWidth = cache_line_size;
@@ -4320,7 +4320,9 @@ void MacroAssembler::store_check(Register obj) {
// register obj is destroyed afterwards.
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
assert(bs->kind() == BarrierSet::CardTableForRS ||
bs->kind() == BarrierSet::CardTableExtension,
"Wrong barrier set kind");
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
@@ -722,7 +722,7 @@ class StubGenerator: public StubCodeGenerator {
__ popa();
}
break;
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::ModRef:
break;
@@ -754,7 +754,7 @@ class StubGenerator: public StubCodeGenerator {
}
break;
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
{
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
@@ -367,16 +367,20 @@ class StubGenerator: public StubCodeGenerator {
#ifdef ASSERT
// verify that threads correspond
{
Label L, S;
Label L1, L2, L3;
__ cmpptr(r15_thread, thread);
__ jcc(Assembler::notEqual, S);
__ jcc(Assembler::equal, L1);
__ stop("StubRoutines::call_stub: r15_thread is corrupted");
__ bind(L1);
__ get_thread(rbx);
__ cmpptr(r15_thread, thread);
__ jcc(Assembler::equal, L2);
__ stop("StubRoutines::call_stub: r15_thread is modified by call");
__ bind(L2);
__ cmpptr(r15_thread, rbx);
__ jcc(Assembler::equal, L);
__ bind(S);
__ jcc(Assembler::equal, L);
__ jcc(Assembler::equal, L3);
__ stop("StubRoutines::call_stub: threads must correspond");
__ bind(L);
__ bind(L3);
}
#endif
@@ -450,15 +454,20 @@ class StubGenerator: public StubCodeGenerator {
#ifdef ASSERT
// verify that threads correspond
{
Label L, S;
Label L1, L2, L3;
__ cmpptr(r15_thread, thread);
__ jcc(Assembler::notEqual, S);
__ jcc(Assembler::equal, L1);
__ stop("StubRoutines::catch_exception: r15_thread is corrupted");
__ bind(L1);
__ get_thread(rbx);
__ cmpptr(r15_thread, thread);
__ jcc(Assembler::equal, L2);
__ stop("StubRoutines::catch_exception: r15_thread is modified by call");
__ bind(L2);
__ cmpptr(r15_thread, rbx);
__ jcc(Assembler::equal, L);
__ bind(S);
__ jcc(Assembler::equal, L3);
__ stop("StubRoutines::catch_exception: threads must correspond");
__ bind(L);
__ bind(L3);
}
#endif
@@ -1244,7 +1253,7 @@ class StubGenerator: public StubCodeGenerator {
__ popa();
}
break;
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::ModRef:
break;
@@ -1284,7 +1293,7 @@ class StubGenerator: public StubCodeGenerator {
__ popa();
}
break;
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
{
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
@@ -200,7 +200,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
}
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
{
if (val == noreg) {
@@ -714,6 +714,11 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
}
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
// Adjust RTM (Restricted Transactional Memory) flags
if (!supports_rtm() && UseRTMLocking) {
// Can't continue because UseRTMLocking affects UseBiasedLocking flag
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,10 @@ static bool detect_niagara() {
return cpuinfo_field_contains("cpu", "Niagara");
}
static bool detect_M_family() {
return cpuinfo_field_contains("cpu", "SPARC-M");
}
static bool detect_blkinit() {
return cpuinfo_field_contains("cpucaps", "blkinit");
}
@@ -66,6 +70,11 @@ int VM_Version::platform_features(int features) {
features = niagara1_m | T_family_m;
}
if (detect_M_family()) {
NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on M family");)
features = sun4v_m | generic_v9_m | M_family_m | T_family_m;
}
if (detect_blkinit()) {
features |= blk_init_instructions_m;
}
@@ -1,5 +1,5 @@
#
# Copyright (c) 1997, 1998, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@ HotSpot Architecture Description Language. This language is used to describe
the architecture of a processor, and is the input to the ADL Compiler. The
ADL Compiler compiles an ADL file into code which is incorporated into the
Optimizing Just In Time Compiler (OJIT) to generate efficient and correct code
for the target architecture. The ADL describes three bassic different types
for the target architecture. The ADL describes three basic different types
of architectural features. It describes the instruction set (and associated
operands) of the target architecture. It describes the register set of the
target architecture along with relevant information for the register allocator.
@@ -32,7 +32,6 @@
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueType.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
@@ -4212,7 +4212,7 @@ void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool succes
if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
return;
}
CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
CompileTask::print_inlining_tty(callee, scope()->level(), bci(), msg);
if (success && CIPrintMethodCodes) {
callee->print_codes();
}
@@ -1425,7 +1425,7 @@ void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
// No pre barriers
break;
@@ -1445,7 +1445,7 @@ void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
G1SATBCardTableModRef_post_barrier(addr, new_val);
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
CardTableModRef_post_barrier(addr, new_val);
break;
@@ -1447,7 +1447,6 @@ BCEscapeAnalyzer::BCEscapeAnalyzer(ciMethod* method, BCEscapeAnalyzer* parent)
if (methodData() == NULL)
return;
bool printit = _method->should_print_assembly();
if (methodData()->has_escape_info()) {
TRACE_BCEA(2, tty->print_cr("[EA] Reading previous results for %s.%s",
method->holder()->name()->as_utf8(),
@ -28,8 +28,8 @@
|
||||
#include "classfile/classLoader.hpp"
|
||||
#include "classfile/classLoaderData.inline.hpp"
|
||||
#include "classfile/classLoaderExt.hpp"
|
||||
#include "classfile/imageFile.hpp"
|
||||
#include "classfile/javaClasses.hpp"
|
||||
#include "classfile/jimage.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "classfile/vmSymbols.hpp"
|
||||
#include "compiler/compileBroker.hpp"
|
||||
@ -58,6 +58,7 @@
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/threadCritical.hpp"
|
||||
#include "runtime/timer.hpp"
|
||||
#include "runtime/vm_version.hpp"
|
||||
#include "services/management.hpp"
|
||||
#include "services/threadService.hpp"
|
||||
#include "utilities/events.hpp"
|
||||
@ -68,7 +69,7 @@
|
||||
#include "classfile/sharedPathsMiscInfo.hpp"
|
||||
#endif
|
||||
|
||||
// Entry points in zip.dll for loading zip/jar file entries and image file entries
|
||||
// Entry points in zip.dll for loading zip/jar file entries
|
||||
|
||||
typedef void * * (JNICALL *ZipOpen_t)(const char *name, char **pmsg);
|
||||
typedef void (JNICALL *ZipClose_t)(jzfile *zip);
|
||||
@ -89,6 +90,15 @@ static canonicalize_fn_t CanonicalizeEntry = NULL;
|
||||
static ZipInflateFully_t ZipInflateFully = NULL;
|
||||
static Crc32_t Crc32 = NULL;
|
||||
|
||||
// Entry points for jimage.dll for loading jimage file entries
|
||||
|
||||
static JImageOpen_t JImageOpen = NULL;
|
||||
static JImageClose_t JImageClose = NULL;
|
||||
static JImagePackageToModule_t JImagePackageToModule = NULL;
|
||||
static JImageFindResource_t JImageFindResource = NULL;
|
||||
static JImageGetResource_t JImageGetResource = NULL;
|
||||
static JImageResourceIterator_t JImageResourceIterator = NULL;
|
||||
|
||||
// Globals
|
||||
|
||||
PerfCounter* ClassLoader::_perf_accumulated_time = NULL;
|
||||
@ -141,6 +151,15 @@ bool string_starts_with(const char* str, const char* str_to_find) {
|
||||
return (strncmp(str, str_to_find, str_to_find_len) == 0);
|
||||
}
|
||||
|
||||
static const char* get_jimage_version_string() {
|
||||
static char version_string[10] = "";
|
||||
if (version_string[0] == '\0') {
|
||||
jio_snprintf(version_string, sizeof(version_string), "%d.%d",
|
||||
Abstract_VM_Version::vm_minor_version(), Abstract_VM_Version::vm_micro_version());
|
||||
}
|
||||
return (const char*)version_string;
|
||||
}
|
||||
|
||||
bool string_ends_with(const char* str, const char* str_to_find) {
|
||||
size_t str_len = strlen(str);
|
||||
size_t str_to_find_len = strlen(str_to_find);
|
||||
@ -272,97 +291,113 @@ void ClassPathZipEntry::contents_do(void f(const char* name, void* context), voi
|
||||
}
|
||||
}
|
||||
|
||||
ClassPathImageEntry::ClassPathImageEntry(ImageFileReader* image) :
|
||||
ClassPathImageEntry::ClassPathImageEntry(JImageFile* jimage, const char* name) :
|
||||
ClassPathEntry(),
|
||||
_image(image),
|
||||
_module_data(NULL) {
|
||||
guarantee(image != NULL, "image file is null");
|
||||
|
||||
char module_data_name[JVM_MAXPATHLEN];
|
||||
ImageModuleData::module_data_name(module_data_name, _image->name());
|
||||
_module_data = new ImageModuleData(_image, module_data_name);
|
||||
_jimage(jimage) {
|
||||
guarantee(jimage != NULL, "jimage file is null");
|
||||
guarantee(name != NULL, "jimage file name is null");
|
||||
size_t len = strlen(name) + 1;
|
||||
_name = NEW_C_HEAP_ARRAY(const char, len, mtClass);
|
||||
strncpy((char *)_name, name, len);
|
||||
}
|
||||
|
||||
ClassPathImageEntry::~ClassPathImageEntry() {
|
||||
if (_module_data != NULL) {
|
||||
delete _module_data;
|
||||
_module_data = NULL;
|
||||
if (_name != NULL) {
|
||||
FREE_C_HEAP_ARRAY(const char, _name);
|
||||
_name = NULL;
|
||||
}
|
||||
|
||||
if (_image != NULL) {
|
||||
ImageFileReader::close(_image);
|
||||
_image = NULL;
|
||||
if (_jimage != NULL) {
|
||||
(*JImageClose)(_jimage);
|
||||
_jimage = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
const char* ClassPathImageEntry::name() {
|
||||
return _image ? _image->name() : "";
|
||||
void ClassPathImageEntry::name_to_package(const char* name, char* buffer, int length) {
|
||||
const char *pslash = strrchr(name, '/');
|
||||
if (pslash == NULL) {
|
||||
buffer[0] = '\0';
|
||||
return;
|
||||
}
|
||||
int len = pslash - name;
|
||||
#if INCLUDE_CDS
|
||||
if (len <= 0 && DumpSharedSpaces) {
|
||||
buffer[0] = '\0';
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
assert(len > 0, "Bad length for package name");
|
||||
if (len >= length) {
|
||||
buffer[0] = '\0';
|
||||
return;
|
||||
}
|
||||
// drop name after last slash (including slash)
|
||||
// Ex., "java/lang/String.class" => "java/lang"
|
||||
strncpy(buffer, name, len);
|
||||
// ensure string termination (strncpy does not guarantee)
|
||||
buffer[len] = '\0';
|
||||
}
|
||||
|
||||
// For a class in a named module, look it up in the jimage file using this syntax:
|
||||
// /<module-name>/<package-name>/<base-class>
|
||||
//
|
||||
// Assumptions:
|
||||
// 1. There are no unnamed modules in the jimage file.
|
||||
// 2. A package is in at most one module in the jimage file.
|
||||
//
|
||||
ClassFileStream* ClassPathImageEntry::open_stream(const char* name, TRAPS) {
|
||||
ImageLocation location;
|
||||
bool found = _image->find_location(name, location);
|
||||
jlong size;
|
||||
JImageLocationRef location = (*JImageFindResource)(_jimage, "", get_jimage_version_string(), name, &size);
|
||||
|
||||
if (!found) {
|
||||
const char *pslash = strrchr(name, '/');
|
||||
int len = pslash - name;
|
||||
|
||||
// NOTE: IMAGE_MAX_PATH is used here since this path is internal to the jimage
|
||||
// (effectively unlimited.) There are several JCK tests that use paths over
|
||||
// 1024 characters long, the limit on Windows systems.
|
||||
if (pslash && 0 < len && len < IMAGE_MAX_PATH) {
|
||||
|
||||
char path[IMAGE_MAX_PATH];
|
||||
strncpy(path, name, len);
|
||||
path[len] = '\0';
|
||||
const char* moduleName = _module_data->package_to_module(path);
|
||||
|
||||
if (moduleName != NULL && (len + strlen(moduleName) + 2) < IMAGE_MAX_PATH) {
|
||||
jio_snprintf(path, IMAGE_MAX_PATH - 1, "/%s/%s", moduleName, name);
|
||||
location.clear_data();
|
||||
found = _image->find_location(path, location);
|
||||
}
|
||||
if (location == 0) {
|
||||
char package[JIMAGE_MAX_PATH];
|
||||
name_to_package(name, package, JIMAGE_MAX_PATH);
|
||||
if (package[0] != '\0') {
|
||||
const char* module = (*JImagePackageToModule)(_jimage, package);
|
||||
if (module == NULL) {
|
||||
module = "java.base";
|
||||
}
|
||||
location = (*JImageFindResource)(_jimage, module, get_jimage_version_string(), name, &size);
|
||||
}
|
||||
}
|
||||
|
||||
if (found) {
|
||||
u8 size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
|
||||
if (location != 0) {
|
||||
if (UsePerfData) {
|
||||
ClassLoader::perf_sys_classfile_bytes_read()->inc(size);
|
||||
}
|
||||
u1* data = NEW_RESOURCE_ARRAY(u1, size);
|
||||
_image->get_resource(location, data);
|
||||
return new ClassFileStream(data, (int)size, _image->name()); // Resource allocated
|
||||
char* data = NEW_RESOURCE_ARRAY(char, size);
|
||||
(*JImageGetResource)(_jimage, location, data, size);
|
||||
return new ClassFileStream((u1*)data, (int)size, _name); // Resource allocated
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
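For a class in a named module, the lookup above ends up asking the jimage for a path of the form /<module-name>/<package-name>/<base-class>. A hedged standalone sketch of how such a path is composed; the module name "java.base" is only an example value here, as if returned by a package-to-module query:

#include <cstdio>

int main() {
  const char* module = "java.base";               // e.g. result of a package-to-module lookup
  const char* name   = "java/lang/String.class";  // class file name as requested by the loader
  char path[256];
  std::snprintf(path, sizeof(path), "/%s/%s", module, name);
  std::printf("%s\n", path);  // prints: /java.base/java/lang/String.class
  return 0;
}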
||||
|
||||
#ifndef PRODUCT
|
||||
bool ctw_visitor(JImageFile* jimage,
|
||||
const char* module_name, const char* version, const char* package,
|
||||
const char* name, const char* extension, void* arg) {
|
||||
if (strcmp(extension, "class") == 0) {
|
||||
Thread* THREAD = Thread::current();
|
||||
char path[JIMAGE_MAX_PATH];
|
||||
jio_snprintf(path, JIMAGE_MAX_PATH - 1, "%s/%s.class", package, name);
|
||||
ClassLoader::compile_the_world_in(path, *(Handle*)arg, THREAD);
|
||||
return !HAS_PENDING_EXCEPTION;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void ClassPathImageEntry::compile_the_world(Handle loader, TRAPS) {
|
||||
tty->print_cr("CompileTheWorld : Compiling all classes in %s", name());
|
||||
tty->cr();
|
||||
const ImageStrings strings = _image->get_strings();
|
||||
// Retrieve each path component string.
|
||||
u4 length = _image->table_length();
|
||||
for (u4 i = 0; i < length; i++) {
|
||||
u1* location_data = _image->get_location_data(i);
|
||||
|
||||
if (location_data != NULL) {
|
||||
ImageLocation location(location_data);
|
||||
char path[IMAGE_MAX_PATH];
|
||||
_image->location_path(location, path, IMAGE_MAX_PATH);
|
||||
ClassLoader::compile_the_world_in(path, loader, CHECK);
|
||||
}
|
||||
}
|
||||
(*JImageResourceIterator)(_jimage, (JImageResourceVisitor_t)ctw_visitor, (void *)&loader);
|
||||
if (HAS_PENDING_EXCEPTION) {
|
||||
if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
|
||||
CLEAR_PENDING_EXCEPTION;
|
||||
tty->print_cr("\nCompileTheWorld : Ran out of memory\n");
|
||||
tty->print_cr("Increase class metadata storage if a limit was set");
|
||||
} else {
|
||||
tty->print_cr("\nCompileTheWorld : Unexpected exception occurred\n");
|
||||
}
|
||||
if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
|
||||
CLEAR_PENDING_EXCEPTION;
|
||||
tty->print_cr("\nCompileTheWorld : Ran out of memory\n");
|
||||
tty->print_cr("Increase class metadata storage if a limit was set");
|
||||
} else {
|
||||
tty->print_cr("\nCompileTheWorld : Unexpected exception occurred\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -490,7 +525,7 @@ ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const str
|
||||
JavaThread* thread = JavaThread::current();
|
||||
ClassPathEntry* new_entry = NULL;
|
||||
if ((st->st_mode & S_IFREG) == S_IFREG) {
|
||||
// Regular file, should be a zip or image file
|
||||
// Regular file, should be a zip or jimage file
|
||||
// Canonicalized filename
|
||||
char canonical_path[JVM_MAXPATHLEN];
|
||||
if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
|
||||
@ -501,9 +536,10 @@ ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const str
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
ImageFileReader* image = ImageFileReader::open(canonical_path);
|
||||
if (image != NULL) {
|
||||
new_entry = new ClassPathImageEntry(image);
|
||||
jint error;
|
||||
JImageFile* jimage = (*JImageOpen)(canonical_path, &error);
|
||||
if (jimage != NULL) {
|
||||
new_entry = new ClassPathImageEntry(jimage, canonical_path);
|
||||
} else {
|
||||
char* error_msg = NULL;
|
||||
jzfile* zip;
|
||||
@ -682,6 +718,35 @@ void ClassLoader::load_zip_library() {
|
||||
// This lookup only works on 1.3. Do not check for non-null here
|
||||
}
|
||||
|
||||
void ClassLoader::load_jimage_library() {
|
||||
// First make sure native library is loaded
|
||||
os::native_java_library();
|
||||
// Load jimage library
|
||||
char path[JVM_MAXPATHLEN];
|
||||
char ebuf[1024];
|
||||
void* handle = NULL;
|
||||
if (os::dll_build_name(path, sizeof(path), Arguments::get_dll_dir(), "jimage")) {
|
||||
handle = os::dll_load(path, ebuf, sizeof ebuf);
|
||||
}
|
||||
if (handle == NULL) {
|
||||
vm_exit_during_initialization("Unable to load jimage library", path);
|
||||
}
|
||||
|
||||
// Lookup jimage entry points
|
||||
JImageOpen = CAST_TO_FN_PTR(JImageOpen_t, os::dll_lookup(handle, "JIMAGE_Open"));
|
||||
guarantee(JImageOpen != NULL, "function JIMAGE_Open not found");
|
||||
JImageClose = CAST_TO_FN_PTR(JImageClose_t, os::dll_lookup(handle, "JIMAGE_Close"));
|
||||
guarantee(JImageClose != NULL, "function JIMAGE_Close not found");
|
||||
JImagePackageToModule = CAST_TO_FN_PTR(JImagePackageToModule_t, os::dll_lookup(handle, "JIMAGE_PackageToModule"));
|
||||
guarantee(JImagePackageToModule != NULL, "function JIMAGE_PackageToModule not found");
|
||||
JImageFindResource = CAST_TO_FN_PTR(JImageFindResource_t, os::dll_lookup(handle, "JIMAGE_FindResource"));
|
||||
guarantee(JImageFindResource != NULL, "function JIMAGE_FindResource not found");
|
||||
JImageGetResource = CAST_TO_FN_PTR(JImageGetResource_t, os::dll_lookup(handle, "JIMAGE_GetResource"));
|
||||
guarantee(JImageGetResource != NULL, "function JIMAGE_GetResource not found");
|
||||
JImageResourceIterator = CAST_TO_FN_PTR(JImageResourceIterator_t, os::dll_lookup(handle, "JIMAGE_ResourceIterator"));
|
||||
guarantee(JImageResourceIterator != NULL, "function JIMAGE_ResourceIterator not found");
|
||||
}
|
||||
|
||||
jboolean ClassLoader::decompress(void *in, u8 inSize, void *out, u8 outSize, char **pmsg) {
|
||||
return (*ZipInflateFully)(in, inSize, out, outSize, pmsg);
|
||||
}
|
||||
@ -1086,6 +1151,8 @@ void ClassLoader::initialize() {
|
||||
|
||||
// lookup zip library entry points
|
||||
load_zip_library();
|
||||
// lookup jimage library entry points
|
||||
load_jimage_library();
|
||||
#if INCLUDE_CDS
|
||||
// initialize search path
|
||||
if (DumpSharedSpaces) {
|
||||
|
@ -37,8 +37,7 @@
|
||||
|
||||
// Class path entry (directory or zip file)
|
||||
|
||||
class ImageFileReader;
|
||||
class ImageModuleData;
|
||||
class JImageFile;
|
||||
|
||||
class ClassPathEntry: public CHeapObj<mtClass> {
|
||||
private:
|
||||
@ -52,7 +51,7 @@ class ClassPathEntry: public CHeapObj<mtClass> {
|
||||
}
|
||||
virtual bool is_jar_file() = 0;
|
||||
virtual const char* name() = 0;
|
||||
virtual ImageFileReader* image() = 0;
|
||||
virtual JImageFile* jimage() = 0;
|
||||
// Constructor
|
||||
ClassPathEntry();
|
||||
// Attempt to locate file_name through this class path entry.
|
||||
@ -70,7 +69,7 @@ class ClassPathDirEntry: public ClassPathEntry {
|
||||
public:
|
||||
bool is_jar_file() { return false; }
|
||||
const char* name() { return _dir; }
|
||||
ImageFileReader* image() { return NULL; }
|
||||
JImageFile* jimage() { return NULL; }
|
||||
ClassPathDirEntry(const char* dir);
|
||||
ClassFileStream* open_stream(const char* name, TRAPS);
|
||||
// Debugging
|
||||
@ -100,7 +99,7 @@ class ClassPathZipEntry: public ClassPathEntry {
|
||||
public:
|
||||
bool is_jar_file() { return true; }
|
||||
const char* name() { return _zip_name; }
|
||||
ImageFileReader* image() { return NULL; }
|
||||
JImageFile* jimage() { return NULL; }
|
||||
ClassPathZipEntry(jzfile* zip, const char* zip_name);
|
||||
~ClassPathZipEntry();
|
||||
u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
|
||||
@ -115,16 +114,16 @@ class ClassPathZipEntry: public ClassPathEntry {
|
||||
// For java image files
|
||||
class ClassPathImageEntry: public ClassPathEntry {
|
||||
private:
|
||||
ImageFileReader* _image;
|
||||
ImageModuleData* _module_data;
|
||||
JImageFile* _jimage;
|
||||
const char* _name;
|
||||
public:
|
||||
bool is_jar_file() { return false; }
|
||||
bool is_open() { return _image != NULL; }
|
||||
const char* name();
|
||||
ImageFileReader* image() { return _image; }
|
||||
ImageModuleData* module_data() { return _module_data; }
|
||||
ClassPathImageEntry(ImageFileReader* image);
|
||||
bool is_open() { return _jimage != NULL; }
|
||||
const char* name() { return _name == NULL ? "" : _name; }
|
||||
JImageFile* jimage() { return _jimage; }
|
||||
ClassPathImageEntry(JImageFile* jimage, const char* name);
|
||||
~ClassPathImageEntry();
|
||||
static void name_to_package(const char* name, char* buffer, int length);
|
||||
ClassFileStream* open_stream(const char* name, TRAPS);
|
||||
|
||||
// Debugging
|
||||
@ -206,6 +205,7 @@ class ClassLoader: AllStatic {
|
||||
static void setup_search_path(const char *class_path);
|
||||
|
||||
static void load_zip_library();
|
||||
static void load_jimage_library();
|
||||
static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st,
|
||||
bool throw_exception, TRAPS);
|
||||
|
||||
|
@ -1,121 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "runtime/thread.inline.hpp"
|
||||
#include "classfile/imageDecompressor.hpp"
|
||||
#include "runtime/thread.hpp"
|
||||
#include "utilities/bytes.hpp"
|
||||
|
||||
/*
|
||||
* Allocate in C Heap not in resource area, otherwise JVM crashes.
|
||||
 * This array's lifetime is the VM's lifetime. The array is never freed and
|
||||
 * is not expected to contain more than a few references.
|
||||
*/
|
||||
GrowableArray<ImageDecompressor*>* ImageDecompressor::_decompressors =
|
||||
new(ResourceObj::C_HEAP, mtInternal) GrowableArray<ImageDecompressor*>(2, true);
|
||||
|
||||
static Symbol* createSymbol(const char* str) {
|
||||
Thread* THREAD = Thread::current();
|
||||
Symbol* sym = SymbolTable::lookup(str, (int) strlen(str), THREAD);
|
||||
if (HAS_PENDING_EXCEPTION) {
|
||||
warning("can't create symbol\n");
|
||||
CLEAR_PENDING_EXCEPTION;
|
||||
return NULL;
|
||||
}
|
||||
return sym;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize the array of decompressors.
|
||||
*/
|
||||
bool image_decompressor_init() {
|
||||
Symbol* zipSymbol = createSymbol("zip");
|
||||
if (zipSymbol == NULL) {
|
||||
return false;
|
||||
}
|
||||
ImageDecompressor::add_decompressor(new ZipDecompressor(zipSymbol));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Decompression entry point. Called from ImageFileReader::get_resource.
|
||||
*/
|
||||
void ImageDecompressor::decompress_resource(u1* compressed, u1* uncompressed,
|
||||
u4 uncompressed_size, const ImageStrings* strings, bool is_C_heap) {
|
||||
bool has_header = false;
|
||||
u1* decompressed_resource = compressed;
|
||||
u1* compressed_resource = compressed;
|
||||
|
||||
// Resource could have been transformed by a stack of decompressors.
|
||||
// Iterate and decompress resources until there is no more header.
|
||||
do {
|
||||
ResourceHeader _header;
|
||||
memcpy(&_header, compressed_resource, sizeof (ResourceHeader));
|
||||
has_header = _header._magic == ResourceHeader::resource_header_magic;
|
||||
if (has_header) {
|
||||
// decompressed_resource array contains the result of decompression
|
||||
// when a resource's content is terminal, it means that it is the actual resource,
|
||||
// not an intermediate, still-compressed form. In this case
|
||||
// the resource is allocated as an mtClass, otherwise as an mtOther
|
||||
decompressed_resource = is_C_heap && _header._is_terminal ?
|
||||
NEW_C_HEAP_ARRAY(u1, _header._uncompressed_size, mtClass) :
|
||||
NEW_C_HEAP_ARRAY(u1, _header._uncompressed_size, mtOther);
|
||||
// Retrieve the decompressor name
|
||||
const char* decompressor_name = strings->get(_header._decompressor_name_offset);
|
||||
if (decompressor_name == NULL) warning("image decompressor not found\n");
|
||||
guarantee(decompressor_name, "image decompressor not found");
|
||||
// Retrieve the decompressor instance
|
||||
ImageDecompressor* decompressor = get_decompressor(decompressor_name);
|
||||
if (decompressor == NULL) {
|
||||
warning("image decompressor %s not found\n", decompressor_name);
|
||||
}
|
||||
guarantee(decompressor, "image decompressor not found");
|
||||
u1* compressed_resource_base = compressed_resource;
|
||||
compressed_resource += ResourceHeader::resource_header_length;
|
||||
// Ask the decompressor to decompress the compressed content
|
||||
decompressor->decompress_resource(compressed_resource, decompressed_resource,
|
||||
&_header, strings);
|
||||
if (compressed_resource_base != compressed) {
|
||||
FREE_C_HEAP_ARRAY(char, compressed_resource_base);
|
||||
}
|
||||
compressed_resource = decompressed_resource;
|
||||
}
|
||||
} while (has_header);
|
||||
memcpy(uncompressed, decompressed_resource, uncompressed_size);
|
||||
}
|
||||
|
||||
// Zip decompressor
|
||||
|
||||
void ZipDecompressor::decompress_resource(u1* data, u1* uncompressed,
|
||||
ResourceHeader* header, const ImageStrings* strings) {
|
||||
char* msg = NULL;
|
||||
jboolean res = ClassLoader::decompress(data, header->_size, uncompressed,
|
||||
header->_uncompressed_size, &msg);
|
||||
if (!res) warning("decompression failed due to %s\n", msg);
|
||||
guarantee(res, "decompression failed");
|
||||
}
|
||||
|
||||
// END Zip Decompressor
|
@ -1,136 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_CLASSFILE_IMAGEDECOMPRESSOR_HPP
|
||||
#define SHARE_VM_CLASSFILE_IMAGEDECOMPRESSOR_HPP
|
||||
|
||||
#include "runtime/thread.inline.hpp"
|
||||
#include "classfile/classLoader.hpp"
|
||||
#include "classfile/imageFile.hpp"
|
||||
#include "classfile/symbolTable.hpp"
|
||||
#include "oops/symbol.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
|
||||
/*
|
||||
 * Compressed resources located in the image have a header.
|
||||
* This header contains:
|
||||
 * - _magic: A magic u4, required to retrieve the header in the compressed content
|
||||
* - _size: The size of the compressed resource.
|
||||
* - _uncompressed_size: The uncompressed size of the compressed resource.
|
||||
* - _decompressor_name_offset: The ImageDecompressor instance name StringsTable offset.
|
||||
* - _decompressor_config_offset: StringsTable offset of configuration that could be needed by
|
||||
* the decompressor in order to decompress.
|
||||
* - _is_terminal: 1: the compressed content is terminal. Uncompressing it would
|
||||
* create the actual resource. 0: the compressed content is not terminal. Uncompressing it
|
||||
 * will yield compressed content that must be decompressed again (this occurs when a stack of compressors
|
||||
 * has been used to compress the resource).
|
||||
*/
|
||||
struct ResourceHeader {
|
||||
/* Length of header, needed to retrieve content offset */
|
||||
static const u1 resource_header_length = 21;
|
||||
/* magic bytes that identifies a compressed resource header*/
|
||||
static const u4 resource_header_magic = 0xCAFEFAFA;
|
||||
u4 _magic; // Resource header
|
||||
u4 _size; // Resource size
|
||||
u4 _uncompressed_size; // Expected uncompressed size
|
||||
u4 _decompressor_name_offset; // Strings table decompressor offset
|
||||
u4 _decompressor_config_offset; // Strings table config offset
|
||||
u1 _is_terminal; // Last decompressor 1, otherwise 0.
|
||||
};
|
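The value 21 in resource_header_length is simply the packed size of the fields above: five u4 values plus one u1 flag. A tiny sketch of the arithmetic (illustrative only):

#include <cstdint>
#include <cstdio>

int main() {
  // Five 4-byte fields (_magic, _size, _uncompressed_size, and the two offsets) plus _is_terminal.
  const std::size_t packed = 5 * sizeof(uint32_t) + sizeof(uint8_t);
  std::printf("%zu\n", packed);  // prints: 21
  return 0;
}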
||||
|
||||
/*
|
||||
* Resources located in jimage file can be compressed. Compression occurs at
|
||||
 * jimage file creation time. When a resource is compressed, a header is added that
|
||||
* contains the name of the compressor that compressed it.
|
||||
* Various compression strategies can be applied to compress a resource.
|
||||
 * The same resource can even be compressed multiple times by a stack of compressors.
|
||||
* At runtime, a resource is decompressed in a loop until there is no more header
|
||||
 * meaning that the resource is equivalent to the uncompressed resource.
|
||||
* In each iteration, the name of the compressor located in the current header
|
||||
* is used to retrieve the associated instance of ImageDecompressor.
|
||||
* For example “zip” is the name of the compressor that compresses resources
|
||||
* using the zip algorithm. The ZipDecompressor class name is also “zip”.
|
||||
* ImageDecompressor instances are retrieved from a static array in which
|
||||
* they are registered.
|
||||
*/
|
||||
class ImageDecompressor: public CHeapObj<mtClass> {
|
||||
|
||||
private:
|
||||
const Symbol* _name;
|
||||
|
||||
/*
|
||||
* Array of concrete decompressors. This array is used to retrieve the decompressor
|
||||
* that can handle resource decompression.
|
||||
*/
|
||||
static GrowableArray<ImageDecompressor*>* _decompressors;
|
||||
|
||||
/*
|
||||
* Identifier of a decompressor. This name is the identification key to retrieve
|
||||
 * the decompressor from a resource header.
|
||||
*/
|
||||
inline const Symbol* get_name() const { return _name; }
|
||||
|
||||
protected:
|
||||
ImageDecompressor(const Symbol* name) : _name(name) {
|
||||
}
|
||||
virtual void decompress_resource(u1* data, u1* uncompressed,
|
||||
ResourceHeader* header, const ImageStrings* strings) = 0;
|
||||
|
||||
public:
|
||||
inline static void add_decompressor(ImageDecompressor* decompressor) {
|
||||
_decompressors->append(decompressor);
|
||||
}
|
||||
inline static ImageDecompressor* get_decompressor(const char * decompressor_name) {
|
||||
Thread* THREAD = Thread::current();
|
||||
TempNewSymbol sym = SymbolTable::new_symbol(decompressor_name,
|
||||
(int) strlen(decompressor_name), CHECK_NULL);
|
||||
if (HAS_PENDING_EXCEPTION) {
|
||||
warning("can't create symbol\n");
|
||||
CLEAR_PENDING_EXCEPTION;
|
||||
return NULL;
|
||||
}
|
||||
for (int i = 0; i < _decompressors->length(); i++) {
|
||||
ImageDecompressor* decompressor = _decompressors->at(i);
|
||||
if (decompressor->get_name()->fast_compare(sym) == 0) {
|
||||
return decompressor;
|
||||
}
|
||||
}
|
||||
guarantee(false, "No decompressor found.");
|
||||
return NULL;
|
||||
}
|
||||
static void decompress_resource(u1* compressed, u1* uncompressed,
|
||||
u4 uncompressed_size, const ImageStrings* strings, bool is_C_heap);
|
||||
};
|
||||
|
||||
/**
|
||||
* Zip decompressor.
|
||||
*/
|
||||
class ZipDecompressor : public ImageDecompressor {
|
||||
public:
|
||||
ZipDecompressor(const Symbol* sym) : ImageDecompressor(sym) { }
|
||||
void decompress_resource(u1* data, u1* uncompressed, ResourceHeader* header,
|
||||
const ImageStrings* strings);
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_CLASSFILE_IMAGEDECOMPRESSOR_HPP
|
@ -1,546 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "classfile/imageDecompressor.hpp"
|
||||
#include "classfile/imageFile.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "runtime/mutex.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/os.inline.hpp"
|
||||
#include "utilities/endian.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
|
||||
// Image files are an alternate file format for storing classes and resources. The
|
||||
// goal is to supply file access which is faster and smaller than the jar format.
|
||||
//
|
||||
// (More detailed notes in the header file.)
|
||||
//
|
||||
|
||||
// Compute the Perfect Hashing hash code for the supplied UTF-8 string.
|
||||
s4 ImageStrings::hash_code(const char* string, s4 seed) {
|
||||
// Access bytes as unsigned.
|
||||
u1* bytes = (u1*)string;
|
||||
// Compute hash code.
|
||||
for (u1 byte = *bytes++; byte; byte = *bytes++) {
|
||||
seed = (seed * HASH_MULTIPLIER) ^ byte;
|
||||
}
|
||||
// Ensure the result is not signed.
|
||||
return seed & 0x7FFFFFFF;
|
||||
}
|
||||
|
||||
// Match up a string in a perfect hash table. Result still needs validation
|
||||
// for precise match (false positive.)
|
||||
s4 ImageStrings::find(Endian* endian, const char* name, s4* redirect, u4 length) {
|
||||
// If the table is empty, then short cut.
|
||||
if (redirect == NULL || length == 0) {
|
||||
return NOT_FOUND;
|
||||
}
|
||||
// Compute the basic perfect hash for name.
|
||||
s4 hash_code = ImageStrings::hash_code(name);
|
||||
// Modulo table size.
|
||||
s4 index = hash_code % length;
|
||||
// Get redirect entry.
|
||||
// value == 0 then not found
|
||||
// value < 0 then -1 - value is true index
|
||||
// value > 0 then value is seed for recomputing hash.
|
||||
s4 value = endian->get(redirect[index]);
|
||||
// if recompute is required.
|
||||
if (value > 0) {
|
||||
// Entry collision value, need to recompute hash.
|
||||
hash_code = ImageStrings::hash_code(name, value);
|
||||
// Modulo table size.
|
||||
return hash_code % length;
|
||||
} else if (value < 0) {
|
||||
// Compute direct index.
|
||||
return -1 - value;
|
||||
}
|
||||
// No entry found.
|
||||
return NOT_FOUND;
|
||||
}
|
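A redirect-table entry therefore encodes three cases: zero means not found, a negative value is a direct index (-1 - value), and a positive value is a new seed for rehashing the name. A small standalone sketch of just that decoding step (illustrative only; the real hash is the FNV-style loop in hash_code above):

#include <cstdio>

// Decode one redirect entry: writes either a final index or a new seed.
// Returns 0 for "not found", 1 for "index ready", 2 for "rehash with seed".
static int decode_redirect(int value, int* index, int* seed) {
  if (value == 0) return 0;                           // not found
  if (value < 0)  { *index = -1 - value; return 1; }  // no collision: direct index
  *seed = value;  return 2;                           // collision: rehash with this seed
}

int main() {
  int index = 0, seed = 0;
  decode_redirect(-6, &index, &seed);
  std::printf("direct index: %d\n", index);  // prints 5
  decode_redirect(17, &index, &seed);
  std::printf("rehash seed: %d\n", seed);    // prints 17
  return 0;
}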
||||
|
||||
// Test to see if UTF-8 string begins with the start UTF-8 string. If so,
|
||||
// return non-NULL address of remaining portion of string. Otherwise, return
|
||||
// NULL. Used to test sections of a path without copying from image string
|
||||
// table.
|
||||
const char* ImageStrings::starts_with(const char* string, const char* start) {
|
||||
char ch1, ch2;
|
||||
// Match up the strings the best we can.
|
||||
while ((ch1 = *string) && (ch2 = *start)) {
|
||||
if (ch1 != ch2) {
|
||||
// Mismatch, return NULL.
|
||||
return NULL;
|
||||
}
|
||||
// Next characters.
|
||||
string++, start++;
|
||||
}
|
||||
// Return remainder of string.
|
||||
return string;
|
||||
}
|
||||
|
||||
// Inflates the attribute stream into individual values stored in the long
|
||||
// array _attributes. This allows an attribute value to be quickly accessed by
|
||||
// direct indexing. Unspecified values default to zero (from constructor.)
|
||||
void ImageLocation::set_data(u1* data) {
|
||||
// Inflate the attribute stream into an array of attributes.
|
||||
u1 byte;
|
||||
// Repeat until end header is found.
|
||||
while ((byte = *data)) {
|
||||
// Extract kind from header byte.
|
||||
u1 kind = attribute_kind(byte);
|
||||
guarantee(kind < ATTRIBUTE_COUNT, "invalid image location attribute");
|
||||
// Extract length of data (in bytes).
|
||||
u1 n = attribute_length(byte);
|
||||
// Read value (most significant first.)
|
||||
_attributes[kind] = attribute_value(data + 1, n);
|
||||
// Position to next attribute by skipping attribute header and data bytes.
|
||||
data += n + 1;
|
||||
}
|
||||
}
|
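A worked decode of a single attribute sequence, using the sample bytes documented later in imageFile.hpp (0x22 0x03 0x35 0x62): the header byte carries the kind in its upper five bits and length-1 in its lower three, and the value bytes follow most significant first (illustrative only):

#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t data[] = { 0x22, 0x03, 0x35, 0x62 };
  uint8_t kind = data[0] >> 3;         // upper 5 bits of the header byte
  uint8_t n    = (data[0] & 0x7) + 1;  // lower 3 bits store length - 1
  uint64_t value = 0;
  for (uint8_t i = 0; i < n; i++) {    // value bytes, most significant first
    value = (value << 8) | data[1 + i];
  }
  std::printf("kind=%u length=%u value=0x%llx\n",
              (unsigned)kind, (unsigned)n, (unsigned long long)value);
  // prints: kind=4 length=3 value=0x33562
  return 0;
}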
||||
|
||||
// Zero all attribute values.
|
||||
void ImageLocation::clear_data() {
|
||||
// Set defaults to zero.
|
||||
memset(_attributes, 0, sizeof(_attributes));
|
||||
}
|
||||
|
||||
// ImageModuleData constructor maps out sub-tables for faster access.
|
||||
ImageModuleData::ImageModuleData(const ImageFileReader* image_file,
|
||||
const char* module_data_name) :
|
||||
_image_file(image_file),
|
||||
_endian(image_file->endian()),
|
||||
_strings(image_file->get_strings()) {
|
||||
// Retrieve the resource containing the module data for the image file.
|
||||
ImageLocation location;
|
||||
bool found = image_file->find_location(module_data_name, location);
|
||||
guarantee(found, "missing module data");
|
||||
u8 data_size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
|
||||
_data = (u1*)NEW_C_HEAP_ARRAY(char, data_size, mtClass);
|
||||
_image_file->get_resource(location, _data);
|
||||
// Map out the header.
|
||||
_header = (Header*)_data;
|
||||
// Get the package to module entry count.
|
||||
u4 ptm_count = _header->ptm_count(_endian);
|
||||
// Get the module to package entry count.
|
||||
u4 mtp_count = _header->mtp_count(_endian);
|
||||
// Compute the offset of the package to module perfect hash redirect.
|
||||
u4 ptm_redirect_offset = sizeof(Header);
|
||||
// Compute the offset of the package to module data.
|
||||
u4 ptm_data_offset = ptm_redirect_offset + ptm_count * sizeof(s4);
|
||||
// Compute the offset of the module to package perfect hash redirect.
|
||||
u4 mtp_redirect_offset = ptm_data_offset + ptm_count * sizeof(PTMData);
|
||||
// Compute the offset of the module to package data.
|
||||
u4 mtp_data_offset = mtp_redirect_offset + mtp_count * sizeof(s4);
|
||||
// Compute the offset of the module to package tables.
|
||||
u4 mtp_packages_offset = mtp_data_offset + mtp_count * sizeof(MTPData);
|
||||
// Compute the address of the package to module perfect hash redirect.
|
||||
_ptm_redirect = (s4*)(_data + ptm_redirect_offset);
|
||||
// Compute the address of the package to module data.
|
||||
_ptm_data = (PTMData*)(_data + ptm_data_offset);
|
||||
// Compute the address of the module to package perfect hash redirect.
|
||||
_mtp_redirect = (s4*)(_data + mtp_redirect_offset);
|
||||
// Compute the address of the module to package data.
|
||||
_mtp_data = (MTPData*)(_data + mtp_data_offset);
|
||||
// Compute the address of the module to package tables.
|
||||
_mtp_packages = (s4*)(_data + mtp_packages_offset);
|
||||
}
|
||||
|
||||
// Release module data resource.
|
||||
ImageModuleData::~ImageModuleData() {
|
||||
if (_data != NULL) {
|
||||
FREE_C_HEAP_ARRAY(u1, _data);
|
||||
}
|
||||
}
|
||||
|
||||
// Return the name of the module data resource. Ex. "./lib/modules/file.jimage"
|
||||
// yields "file.jdata"
|
||||
void ImageModuleData::module_data_name(char* buffer, const char* image_file_name) {
|
||||
// Locate the last slash in the file name path.
|
||||
const char* slash = strrchr(image_file_name, os::file_separator()[0]);
|
||||
// Trim the path to name and extension.
|
||||
const char* name = slash != NULL ? slash + 1 : (char *)image_file_name;
|
||||
// Locate the extension period.
|
||||
const char* dot = strrchr(name, '.');
|
||||
guarantee(dot, "missing extension on jimage name");
|
||||
// Trim to only base name.
|
||||
int length = dot - name;
|
||||
strncpy(buffer, name, length);
|
||||
buffer[length] = '\0';
|
||||
// Append extension.
|
||||
strcat(buffer, ".jdata");
|
||||
}
|
||||
|
||||
// Return the module in which a package resides. Returns NULL if not found.
|
||||
const char* ImageModuleData::package_to_module(const char* package_name) {
|
||||
// Search the package to module table.
|
||||
s4 index = ImageStrings::find(_endian, package_name, _ptm_redirect,
|
||||
_header->ptm_count(_endian));
|
||||
// If entry is found.
|
||||
if (index != ImageStrings::NOT_FOUND) {
|
||||
// Retrieve the package to module entry.
|
||||
PTMData* data = _ptm_data + index;
|
||||
// Verify that it is the correct data.
|
||||
if (strcmp(package_name, get_string(data->name_offset(_endian))) != 0) {
|
||||
return NULL;
|
||||
}
|
||||
// Return the module name.
|
||||
return get_string(data->module_name_offset(_endian));
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Returns all the package names in a module. Returns NULL if module not found.
|
||||
GrowableArray<const char*>* ImageModuleData::module_to_packages(const char* module_name) {
|
||||
// Search the module to package table.
|
||||
s4 index = ImageStrings::find(_endian, module_name, _mtp_redirect,
|
||||
_header->mtp_count(_endian));
|
||||
// If entry is found.
|
||||
if (index != ImageStrings::NOT_FOUND) {
|
||||
// Retrieve the module to package entry.
|
||||
MTPData* data = _mtp_data + index;
|
||||
// Verify that it is the correct data.
|
||||
if (strcmp(module_name, get_string(data->name_offset(_endian))) != 0) {
|
||||
return NULL;
|
||||
}
|
||||
// Construct an array of all the package entries.
|
||||
GrowableArray<const char*>* packages = new GrowableArray<const char*>();
|
||||
s4 package_offset = data->package_offset(_endian);
|
||||
for (u4 i = 0; i < data->package_count(_endian); i++) {
|
||||
u4 package_name_offset = mtp_package(package_offset + i);
|
||||
const char* package_name = get_string(package_name_offset);
|
||||
packages->append(package_name);
|
||||
}
|
||||
return packages;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Table to manage multiple opens of an image file.
|
||||
GrowableArray<ImageFileReader*>* ImageFileReader::_reader_table =
|
||||
new(ResourceObj::C_HEAP, mtInternal) GrowableArray<ImageFileReader*>(2, true);
|
||||
|
||||
// Open an image file, reuse structure if file already open.
|
||||
ImageFileReader* ImageFileReader::open(const char* name, bool big_endian) {
|
||||
// Lock out _reader_table.
|
||||
MutexLocker ml(ImageFileReaderTable_lock);
|
||||
ImageFileReader* reader;
|
||||
// Search for an existing open image file.
|
||||
for (int i = 0; i < _reader_table->length(); i++) {
|
||||
// Retrieve table entry.
|
||||
reader = _reader_table->at(i);
|
||||
// If name matches, then reuse (bump up use count.)
|
||||
if (strcmp(reader->name(), name) == 0) {
|
||||
reader->inc_use();
|
||||
return reader;
|
||||
}
|
||||
}
|
||||
// Need a new image reader.
|
||||
reader = new ImageFileReader(name, big_endian);
|
||||
bool opened = reader->open();
|
||||
// If failed to open.
|
||||
if (!opened) {
|
||||
delete reader;
|
||||
return NULL;
|
||||
}
|
||||
// Bump use count and add to table.
|
||||
reader->inc_use();
|
||||
_reader_table->append(reader);
|
||||
return reader;
|
||||
}
|
||||
|
||||
// Close an image file if the file is not in use elsewhere.
|
||||
void ImageFileReader::close(ImageFileReader *reader) {
|
||||
// Lock out _reader_table.
|
||||
MutexLocker ml(ImageFileReaderTable_lock);
|
||||
// If last use then remove from table and then close.
|
||||
if (reader->dec_use()) {
|
||||
_reader_table->remove(reader);
|
||||
delete reader;
|
||||
}
|
||||
}
|
||||
|
||||
// Return an id for the specified ImageFileReader.
|
||||
u8 ImageFileReader::readerToID(ImageFileReader *reader) {
|
||||
// ID is just the cloaked reader address.
|
||||
return (u8)reader;
|
||||
}
|
||||
|
||||
// Validate the image id.
|
||||
bool ImageFileReader::idCheck(u8 id) {
|
||||
// Make sure the ID is a managed (_reader_table) reader.
|
||||
MutexLocker ml(ImageFileReaderTable_lock);
|
||||
return _reader_table->contains((ImageFileReader*)id);
|
||||
}
|
||||
|
||||
// Return the ImageFileReader for the specified id.
|
||||
ImageFileReader* ImageFileReader::idToReader(u8 id) {
|
||||
#ifdef PRODUCT
|
||||
// Fast convert.
|
||||
return (ImageFileReader*)id;
|
||||
#else
|
||||
// Do a slow check before fast convert.
|
||||
return idCheck(id) ? (ImageFileReader*)id : NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
// Constructor initializes to a closed state.
|
||||
ImageFileReader::ImageFileReader(const char* name, bool big_endian) {
|
||||
// Copy the image file name.
|
||||
_name = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtClass);
|
||||
strcpy(_name, name);
|
||||
// Initialize for a closed file.
|
||||
_fd = -1;
|
||||
_endian = Endian::get_handler(big_endian);
|
||||
_index_data = NULL;
|
||||
}
|
||||
|
||||
// Close image and free up data structures.
|
||||
ImageFileReader::~ImageFileReader() {
|
||||
// Ensure file is closed.
|
||||
close();
|
||||
// Free up name.
|
||||
if (_name != NULL) {
|
||||
FREE_C_HEAP_ARRAY(char, _name);
|
||||
_name = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
// Open image file for read access.
|
||||
bool ImageFileReader::open() {
|
||||
// If file exists open for reading.
|
||||
struct stat st;
|
||||
if (os::stat(_name, &st) != 0 ||
|
||||
(st.st_mode & S_IFREG) != S_IFREG ||
|
||||
(_fd = os::open(_name, 0, O_RDONLY)) == -1) {
|
||||
return false;
|
||||
}
|
||||
// Retrieve the file size.
|
||||
_file_size = (u8)st.st_size;
|
||||
// Read image file header and verify it has a valid header.
|
||||
size_t header_size = sizeof(ImageHeader);
|
||||
if (_file_size < header_size ||
|
||||
!read_at((u1*)&_header, header_size, 0) ||
|
||||
_header.magic(_endian) != IMAGE_MAGIC ||
|
||||
_header.major_version(_endian) != MAJOR_VERSION ||
|
||||
_header.minor_version(_endian) != MINOR_VERSION) {
|
||||
close();
|
||||
return false;
|
||||
}
|
||||
// Size of image index.
|
||||
_index_size = index_size();
|
||||
// Make sure file is large enough to contain the index.
|
||||
if (_file_size < _index_size) {
|
||||
return false;
|
||||
}
|
||||
// Determine how much of the image is memory mapped.
|
||||
off_t map_size = (off_t)(MemoryMapImage ? _file_size : _index_size);
|
||||
// Memory map image (minimally the index.)
|
||||
_index_data = (u1*)os::map_memory(_fd, _name, 0, NULL, map_size, true, false);
|
||||
guarantee(_index_data, "image file not memory mapped");
|
||||
// Retrieve length of index perfect hash table.
|
||||
u4 length = table_length();
|
||||
// Compute offset of the perfect hash table redirect table.
|
||||
u4 redirect_table_offset = (u4)header_size;
|
||||
// Compute offset of index attribute offsets.
|
||||
u4 offsets_table_offset = redirect_table_offset + length * sizeof(s4);
|
||||
// Compute offset of index location attribute data.
|
||||
u4 location_bytes_offset = offsets_table_offset + length * sizeof(u4);
|
||||
// Compute offset of index string table.
|
||||
u4 string_bytes_offset = location_bytes_offset + locations_size();
|
||||
// Compute address of the perfect hash table redirect table.
|
||||
_redirect_table = (s4*)(_index_data + redirect_table_offset);
|
||||
// Compute address of index attribute offsets.
|
||||
_offsets_table = (u4*)(_index_data + offsets_table_offset);
|
||||
// Compute address of index location attribute data.
|
||||
_location_bytes = _index_data + location_bytes_offset;
|
||||
// Compute address of index string table.
|
||||
_string_bytes = _index_data + string_bytes_offset;
|
||||
// Successful open.
|
||||
return true;
|
||||
}
|
||||
|
||||
// Close image file.
|
||||
void ImageFileReader::close() {
|
||||
// Deallocate the index.
|
||||
if (_index_data != NULL) {
|
||||
os::unmap_memory((char*)_index_data, _index_size);
|
||||
_index_data = NULL;
|
||||
}
|
||||
// Close file.
|
||||
if (_fd != -1) {
|
||||
os::close(_fd);
|
||||
_fd = -1;
|
||||
}
|
||||
}
|
||||
|
||||
// Read directly from the file.
|
||||
bool ImageFileReader::read_at(u1* data, u8 size, u8 offset) const {
|
||||
return os::read_at(_fd, data, size, offset) == size;
|
||||
}
|
||||
|
||||
// Find the location attributes associated with the path. Returns true if
|
||||
// the location is found, false otherwise.
|
||||
bool ImageFileReader::find_location(const char* path, ImageLocation& location) const {
|
||||
// Locate the entry in the index perfect hash table.
|
||||
s4 index = ImageStrings::find(_endian, path, _redirect_table, table_length());
|
||||
// If it is found.
|
||||
if (index != ImageStrings::NOT_FOUND) {
|
||||
// Get address of first byte of location attribute stream.
|
||||
u1* data = get_location_data(index);
|
||||
// Expand location attributes.
|
||||
location.set_data(data);
|
||||
// Make sure result is not a false positive.
|
||||
return verify_location(location, path);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Assemble the location path from the string fragments indicated in the location attributes.
|
||||
void ImageFileReader::location_path(ImageLocation& location, char* path, size_t max) const {
|
||||
// Manage the image string table.
|
||||
ImageStrings strings(_string_bytes, _header.strings_size(_endian));
|
||||
// Position to first character of the path buffer.
|
||||
char* next = path;
|
||||
// Temp for string length.
|
||||
size_t length;
|
||||
// Get module string.
|
||||
const char* module = location.get_attribute(ImageLocation::ATTRIBUTE_MODULE, strings);
|
||||
// If module string is not empty string.
|
||||
if (*module != '\0') {
|
||||
// Get length of module name.
|
||||
length = strlen(module);
|
||||
// Make sure there is no buffer overflow.
|
||||
guarantee(next - path + length + 2 < max, "buffer overflow");
|
||||
// Append '/module/'.
|
||||
*next++ = '/';
|
||||
strcpy(next, module); next += length;
|
||||
*next++ = '/';
|
||||
}
|
||||
// Get parent (package) string.
|
||||
const char* parent = location.get_attribute(ImageLocation::ATTRIBUTE_PARENT, strings);
|
||||
// If parent string is not empty string.
|
||||
if (*parent != '\0') {
|
||||
// Get length of parent string.
|
||||
length = strlen(parent);
|
||||
// Make sure there is no buffer overflow.
|
||||
guarantee(next - path + length + 1 < max, "buffer overflow");
|
||||
// Append 'parent/'.
|
||||
strcpy(next, parent); next += length;
|
||||
*next++ = '/';
|
||||
}
|
||||
// Get base name string.
|
||||
const char* base = location.get_attribute(ImageLocation::ATTRIBUTE_BASE, strings);
|
||||
// Get length of base name.
|
||||
length = strlen(base);
|
||||
// Make sure there is no buffer overflow.
|
||||
guarantee(next - path + length < max, "buffer overflow");
|
||||
// Append base name.
|
||||
strcpy(next, base); next += length;
|
||||
// Get extension string.
|
||||
const char* extension = location.get_attribute(ImageLocation::ATTRIBUTE_EXTENSION, strings);
|
||||
// If extension string is not empty string.
|
||||
if (*extension != '\0') {
|
||||
// Get length of extension string.
|
||||
length = strlen(extension);
|
||||
// Make sure there is no buffer overflow.
|
||||
guarantee(next - path + length + 1 < max, "buffer overflow");
|
||||
// Append '.extension' .
|
||||
*next++ = '.';
|
||||
strcpy(next, extension); next += length;
|
||||
}
|
||||
// Make sure there is no buffer overflow.
|
||||
guarantee((size_t)(next - path) < max, "buffer overflow");
|
||||
// Terminate string.
|
||||
*next = '\0';
|
||||
}
|
||||
|
||||
// Verify that a found location matches the supplied path (without copying.)
|
||||
bool ImageFileReader::verify_location(ImageLocation& location, const char* path) const {
|
||||
// Manage the image string table.
|
||||
ImageStrings strings(_string_bytes, _header.strings_size(_endian));
|
||||
// Position to first character of the path string.
|
||||
const char* next = path;
|
||||
// Get module name string.
|
||||
const char* module = location.get_attribute(ImageLocation::ATTRIBUTE_MODULE, strings);
|
||||
// If module string is not empty.
|
||||
if (*module != '\0') {
|
||||
// Compare '/module/' .
|
||||
if (*next++ != '/') return false;
|
||||
if (!(next = ImageStrings::starts_with(next, module))) return false;
|
||||
if (*next++ != '/') return false;
|
||||
}
|
||||
// Get parent (package) string
|
||||
const char* parent = location.get_attribute(ImageLocation::ATTRIBUTE_PARENT, strings);
|
||||
// If parent string is not empty string.
|
||||
if (*parent != '\0') {
|
||||
// Compare 'parent/' .
|
||||
if (!(next = ImageStrings::starts_with(next, parent))) return false;
|
||||
if (*next++ != '/') return false;
|
||||
}
|
||||
// Get base name string.
|
||||
const char* base = location.get_attribute(ImageLocation::ATTRIBUTE_BASE, strings);
|
||||
// Compare with base name.
|
||||
if (!(next = ImageStrings::starts_with(next, base))) return false;
|
||||
// Get extension string.
|
||||
const char* extension = location.get_attribute(ImageLocation::ATTRIBUTE_EXTENSION, strings);
|
||||
// If extension is not empty.
|
||||
if (*extension != '\0') {
|
||||
// Compare '.extension' .
|
||||
if (*next++ != '.') return false;
|
||||
if (!(next = ImageStrings::starts_with(next, extension))) return false;
|
||||
}
|
||||
// True only if complete match and no more characters.
|
||||
return *next == '\0';
|
||||
}
|
||||
|
||||
// Return the resource data for the supplied location.
|
||||
void ImageFileReader::get_resource(ImageLocation& location, u1* uncompressed_data) const {
|
||||
// Retrieve the byte offset and size of the resource.
|
||||
u8 offset = location.get_attribute(ImageLocation::ATTRIBUTE_OFFSET);
|
||||
u8 uncompressed_size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
|
||||
u8 compressed_size = location.get_attribute(ImageLocation::ATTRIBUTE_COMPRESSED);
|
||||
if (compressed_size != 0) {
|
||||
ResourceMark rm;
|
||||
u1* compressed_data;
|
||||
// If not memory mapped read in bytes.
|
||||
if (!MemoryMapImage) {
|
||||
// Allocate buffer for the compressed data.
|
||||
compressed_data = NEW_RESOURCE_ARRAY(u1, compressed_size);
|
||||
// Read bytes from offset beyond the image index.
|
||||
bool is_read = read_at(compressed_data, compressed_size, _index_size + offset);
|
||||
guarantee(is_read, "error reading from image or short read");
|
||||
} else {
|
||||
compressed_data = get_data_address() + offset;
|
||||
}
|
||||
// Get image string table.
|
||||
const ImageStrings strings = get_strings();
|
||||
// Decompress resource.
|
||||
ImageDecompressor::decompress_resource(compressed_data, uncompressed_data, uncompressed_size,
|
||||
&strings, false);
|
||||
} else {
|
||||
// Read bytes from offset beyond the image index.
|
||||
bool is_read = read_at(uncompressed_data, uncompressed_size, _index_size + offset);
|
||||
guarantee(is_read, "error reading from image or short read");
|
||||
}
|
||||
}
|
@ -1,602 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_CLASSFILE_IMAGEFILE_HPP
|
||||
#define SHARE_VM_CLASSFILE_IMAGEFILE_HPP
|
||||
|
||||
#include "classfile/classLoader.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "utilities/endian.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
|
||||
// Image files are an alternate file format for storing classes and resources. The
|
||||
// goal is to supply file access which is faster and smaller than the jar format.
|
||||
// It should be noted that unlike jars, information stored in an image is in native
|
||||
// endian format. This allows the image to be mapped into memory without endian
|
||||
// translation. This also means that images are platform dependent.
|
||||
//
|
||||
// Image files are structured as three sections;
|
||||
//
|
||||
// +-----------+
|
||||
// | Header |
|
||||
// +-----------+
|
||||
// | |
|
||||
// | Index |
|
||||
// | |
|
||||
// +-----------+
|
||||
// | |
|
||||
// | |
|
||||
// | Resources |
|
||||
// | |
|
||||
// | |
|
||||
// +-----------+
|
||||
//
|
||||
// The header contains information related to identification and description of
|
||||
// contents.
|
||||
//
|
||||
// +-------------------------+
|
||||
// | Magic (0xCAFEDADA) |
|
||||
// +------------+------------+
|
||||
// | Major Vers | Minor Vers |
|
||||
// +------------+------------+
|
||||
// | Flags |
|
||||
// +-------------------------+
|
||||
// | Resource Count |
|
||||
// +-------------------------+
|
||||
// | Table Length |
|
||||
// +-------------------------+
|
||||
// | Attributes Size |
|
||||
// +-------------------------+
|
||||
// | Strings Size |
|
||||
// +-------------------------+
|
||||
//
|
||||
// Magic - means of identifying validity of the file. This avoids requiring a
|
||||
// special file extension.
|
||||
// Major vers, minor vers - differences in version numbers indicate structural
|
||||
// changes in the image.
|
||||
// Flags - various image wide flags (future).
|
||||
// Resource count - number of resources in the file.
|
||||
// Table length - the length of lookup tables used in the index.
|
||||
// Attributes size - number of bytes in the region used to store location attribute
|
||||
// streams.
|
||||
// Strings size - the size of the region used to store strings used by the
|
||||
// index and meta data.
|
||||
//
|
||||
// The index contains information related to resource lookup. The algorithm
|
||||
// used for lookup is "A Practical Minimal Perfect Hashing Method"
|
||||
// (http://homepages.dcc.ufmg.br/~nivio/papers/wea05.pdf). Given a path string
|
||||
// in the form /<module>/<package>/<base>.<extension> return the resource location
|
||||
// information;
|
||||
//
|
||||
// redirectIndex = hash(path, DEFAULT_SEED) % table_length;
|
||||
// redirect = redirectTable[redirectIndex];
|
||||
// if (redirect == 0) return not found;
|
||||
// locationIndex = redirect < 0 ? -1 - redirect : hash(path, redirect) % table_length;
|
||||
// location = locationTable[locationIndex];
|
||||
// if (!verify(location, path)) return not found;
|
||||
// return location;
|
||||
//
|
||||
// Note: The hash function takes an initial seed value. A different seed value
|
||||
// usually returns a different result for strings that would otherwise collide with
|
||||
// other seeds. The verify function guarantees the found resource location is
|
||||
// indeed the resource we are looking for.
|
||||
//
|
||||
// The following is the format of the index;
|
||||
//
|
||||
// +-------------------+
|
||||
// | Redirect Table |
|
||||
// +-------------------+
|
||||
// | Attribute Offsets |
|
||||
// +-------------------+
|
||||
// | Attribute Data |
|
||||
// +-------------------+
|
||||
// | Strings |
|
||||
// +-------------------+
|
||||
//
|
||||
// Redirect Table - Array of 32-bit signed values representing actions that
|
||||
// should take place for hashed strings that map to that
|
||||
// value. Negative values indicate no hash collision and can be
|
||||
// quickly converted to indices into attribute offsets. Positive
|
||||
// values represent a new seed for hashing an index into attribute
|
||||
// offsets. Zero indicates not found.
|
||||
// Attribute Offsets - Array of 32-bit unsigned values representing offsets into
|
||||
// attribute data. Attribute offsets can be iterated to do a
|
||||
// full survey of resources in the image. Offset of zero
|
||||
// indicates no attributes.
|
||||
// Attribute Data - Bytes representing compact attribute data for locations. (See
|
||||
// comments in ImageLocation.)
|
||||
// Strings - Collection of zero terminated UTF-8 strings used by the index and
|
||||
// image meta data. Each string is accessed by offset. Each string is
|
||||
// unique. Offset zero is reserved for the empty string.
|
||||
//
|
||||
// Note that the memory mapped index assumes 32 bit alignment of each component
|
||||
// in the index.
|
||||
//
|
||||
// Endianness of an image.
|
||||
// An image booted by hotspot is always in native endian. However, it is possible
|
||||
// to read (by the JDK) in alternate endian format. Primarily, this is during
|
||||
// cross platform scenarios. Ex, where javac needs to read an embedded image
|
||||
// to access classes for cross compilation.
|
||||
//
|
||||
|
||||
class ImageFileReader; // forward declaration
|
||||
|
||||
// Manage image file string table.
|
||||
class ImageStrings VALUE_OBJ_CLASS_SPEC {
|
||||
private:
|
||||
u1* _data; // Data bytes for strings.
|
||||
u4 _size; // Number of bytes in the string table.
|
||||
public:
|
||||
enum {
|
||||
// Not found result from find routine.
|
||||
NOT_FOUND = -1,
|
||||
// Prime used to generate hash for Perfect Hashing.
|
||||
HASH_MULTIPLIER = 0x01000193
|
||||
};
|
||||
|
||||
ImageStrings(u1* data, u4 size) : _data(data), _size(size) {}
|
||||
|
||||
// Return the UTF-8 string beginning at offset.
|
||||
inline const char* get(u4 offset) const {
|
||||
guarantee(offset < _size, "offset exceeds string table size");
|
||||
return (const char*)(_data + offset);
|
||||
}
|
||||
|
||||
// Compute the Perfect Hashing hash code for the supplied UTF-8 string.
|
||||
inline static u4 hash_code(const char* string) {
|
||||
return hash_code(string, HASH_MULTIPLIER);
|
||||
}
|
||||
|
||||
// Compute the Perfect Hashing hash code for the supplied string, starting at seed.
|
||||
static s4 hash_code(const char* string, s4 seed);
|
||||
|
||||
// Match up a string in a perfect hash table. Result still needs validation
|
||||
// for precise match.
|
||||
static s4 find(Endian* endian, const char* name, s4* redirect, u4 length);
|
||||
|
||||
// Test to see if UTF-8 string begins with the start UTF-8 string. If so,
|
||||
// return non-NULL address of remaining portion of string. Otherwise, return
|
||||
// NULL. Used to test sections of a path without copying from image string
|
||||
// table.
|
||||
static const char* starts_with(const char* string, const char* start);
|
||||
|
||||
// Test to see if UTF-8 string begins with start char. If so, return non-NULL
|
||||
// address of remaining portion of string. Otherwise, return NULL. Used
|
||||
// to test a character of a path without copying.
|
||||
inline static const char* starts_with(const char* string, const char ch) {
|
||||
return *string == ch ? string + 1 : NULL;
|
||||
}
|
||||
};
|
||||
|
||||
// Manage image file location attribute data. Within an image, a location's
|
||||
// attributes are compressed into a stream of bytes. An attribute stream is
|
||||
// composed of individual attribute sequences. Each attribute sequence begins with
|
||||
// a header byte containing the attribute 'kind' (upper 5 bits of header) and the
|
||||
// 'length' less 1 (lower 3 bits of header) of bytes that follow containing the
|
||||
// attribute value. Attribute values present as most significant byte first.
|
||||
//
|
||||
// Ex. Container offset (ATTRIBUTE_OFFSET) 0x33562 would be represented as 0x22
|
||||
// (kind = 4, length = 3), 0x03, 0x35, 0x62.
|
||||
//
|
||||
// An attribute stream is terminated with a header kind of ATTRIBUTE_END (header
|
||||
// byte of zero.)
|
||||
//
|
||||
// ImageLocation inflates the stream into individual values stored in the long
|
||||
// array _attributes. This allows an attribute value to be quickly accessed by
|
||||
// direct indexing. Unspecified values default to zero.
|
||||
//
|
||||
// Notes:
|
||||
// - Even though ATTRIBUTE_END is used to mark the end of the attribute stream,
|
||||
// streams will contain zero byte values to represent lesser significant bits.
|
||||
// Thus, detecting a zero byte is not sufficient to detect the end of an attribute
|
||||
// stream.
|
||||
// - ATTRIBUTE_OFFSET represents the number of bytes from the beginning of the region
|
||||
// storing the resources. Thus, in an image this represents the number of bytes
|
||||
// after the index.
|
||||
// - Currently, compressed resources are represented by having a non-zero
|
||||
// ATTRIBUTE_COMPRESSED value. This represents the number of bytes stored in the
|
||||
// image, and the value of ATTRIBUTE_UNCOMPRESSED represents number of bytes of the
|
||||
// inflated resource in memory. If the ATTRIBUTE_COMPRESSED is zero then the value
|
||||
// of ATTRIBUTE_UNCOMPRESSED represents both the number of bytes in the image and
|
||||
// in memory. In the future, additional compression techniques will be used and
|
||||
// represented differently.
|
||||
// - Package strings include trailing slash and extensions include prefix period.
|
||||
//
|
||||
class ImageLocation VALUE_OBJ_CLASS_SPEC {
|
||||
public:
|
||||
enum {
|
||||
ATTRIBUTE_END, // End of attribute stream marker
|
||||
ATTRIBUTE_MODULE, // String table offset of module name
|
||||
ATTRIBUTE_PARENT, // String table offset of resource path parent
|
||||
ATTRIBUTE_BASE, // String table offset of resource path base
|
||||
ATTRIBUTE_EXTENSION, // String table offset of resource path extension
|
||||
ATTRIBUTE_OFFSET, // Container byte offset of resource
|
||||
ATTRIBUTE_COMPRESSED, // In image byte size of the compressed resource
|
||||
ATTRIBUTE_UNCOMPRESSED, // In memory byte size of the uncompressed resource
|
||||
ATTRIBUTE_COUNT // Number of attribute kinds
|
||||
};
|
||||
|
||||
private:
|
||||
// Values of inflated attributes.
|
||||
u8 _attributes[ATTRIBUTE_COUNT];
|
||||
|
||||
// Return the number of bytes in the attribute value.
|
||||
inline static u1 attribute_length(u1 data) {
|
||||
return (data & 0x7) + 1;
|
||||
}
|
||||
|
||||
// Return the attribute kind.
|
||||
inline static u1 attribute_kind(u1 data) {
|
||||
u1 kind = data >> 3;
|
||||
guarantee(kind < ATTRIBUTE_COUNT, "invalid attribute kind");
|
||||
return kind;
|
||||
}
|
||||
|
||||
// Return the attribute value, assembled most significant byte first.
|
||||
inline static u8 attribute_value(u1* data, u1 n) {
|
||||
guarantee(0 < n && n <= 8, "invalid attribute value length");
|
||||
u8 value = 0;
|
||||
// Most significant bytes first.
|
||||
for (u1 i = 0; i < n; i++) {
|
||||
value <<= 8;
|
||||
value |= data[i];
|
||||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
public:
|
||||
ImageLocation() {
|
||||
clear_data();
|
||||
}
|
||||
|
||||
ImageLocation(u1* data) {
|
||||
clear_data();
|
||||
set_data(data);
|
||||
}
|
||||
|
||||
// Inflates the attribute stream into individual values stored in the long
|
||||
// array _attributes. This allows an attribute value to be quickly accessed by
|
||||
// direct indexing. Unspecified values default to zero.
|
||||
void set_data(u1* data);
|
||||
|
||||
// Zero all attribute values.
|
||||
void clear_data();
|
||||
|
||||
// Retrieve an attribute value from the inflated array.
|
||||
inline u8 get_attribute(u1 kind) const {
|
||||
guarantee(ATTRIBUTE_END < kind && kind < ATTRIBUTE_COUNT, "invalid attribute kind");
|
||||
return _attributes[kind];
|
||||
}
|
||||
|
||||
// Retrieve an attribute string value from the inflated array.
|
||||
inline const char* get_attribute(u4 kind, const ImageStrings& strings) const {
|
||||
return strings.get((u4)get_attribute(kind));
|
||||
}
|
||||
};
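As a minimal sketch, the attribute stream described above can be decoded like this; it mirrors what ImageLocation::set_data does with attribute_kind, attribute_length and attribute_value, but uses plain stdint types in place of HotSpot's u1/u8 so it stands alone. The name decode_attributes is illustrative only and is not part of this patch.

#include <cstdint>

enum { ATTRIBUTE_END, ATTRIBUTE_MODULE, ATTRIBUTE_PARENT, ATTRIBUTE_BASE,
       ATTRIBUTE_EXTENSION, ATTRIBUTE_OFFSET, ATTRIBUTE_COMPRESSED,
       ATTRIBUTE_UNCOMPRESSED, ATTRIBUTE_COUNT };

// Decode one attribute stream: each entry is a header byte (kind in the upper
// five bits, value length - 1 in the lower three bits) followed by the value
// bytes, most significant first. A header byte of zero (ATTRIBUTE_END) stops
// the scan; kinds that never appear keep their default of zero.
static void decode_attributes(const uint8_t* data, uint64_t attributes[ATTRIBUTE_COUNT]) {
  for (int i = 0; i < ATTRIBUTE_COUNT; i++) {
    attributes[i] = 0;
  }
  uint8_t header;
  while ((header = *data++) != ATTRIBUTE_END) {
    uint8_t kind = header >> 3;           // attribute_kind()
    uint8_t length = (header & 0x7) + 1;  // attribute_length()
    uint64_t value = 0;
    for (uint8_t j = 0; j < length; j++) {
      value = (value << 8) | *data++;     // attribute_value(): big-endian bytes
    }
    attributes[kind] = value;
  }
}

// For the example above, the bytes 0x2A 0x03 0x35 0x62 0x00 leave
// attributes[ATTRIBUTE_OFFSET] == 0x33562 and every other slot at zero.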
|
||||
|
||||
//
|
||||
// NOTE: needs revision.
|
||||
// Each loader requires a set of module meta data to identify which modules and
|
||||
// packages are managed by that loader. Currently, there is one image file per
|
||||
// builtin loader, so only one module meta data resource per file.
|
||||
//
|
||||
// Each element in the module meta data is a native endian 4 byte integer. Note
|
||||
// that entries with zero offsets for string table entries should be ignored (
|
||||
// padding for hash table lookup.)
|
||||
//
|
||||
// Format:
|
||||
// Count of package to module entries
|
||||
// Count of module to package entries
|
||||
// Perfect Hash redirect table[Count of package to module entries]
|
||||
// Package to module entries[Count of package to module entries]
|
||||
// Offset to package name in string table
|
||||
// Offset to module name in string table
|
||||
// Perfect Hash redirect table[Count of module to package entries]
|
||||
// Module to package entries[Count of module to package entries]
|
||||
// Offset to module name in string table
|
||||
// Count of packages in module
|
||||
// Offset to first package in packages table
|
||||
// Packages[]
|
||||
// Offset to package name in string table
|
||||
//
|
||||
// Manage the image module meta data.
|
||||
class ImageModuleData : public CHeapObj<mtClass> {
|
||||
class Header VALUE_OBJ_CLASS_SPEC {
|
||||
private:
|
||||
u4 _ptm_count; // Count of package to module entries
|
||||
u4 _mtp_count; // Count of module to package entries
|
||||
public:
|
||||
inline u4 ptm_count(Endian* endian) const { return endian->get(_ptm_count); }
|
||||
inline u4 mtp_count(Endian* endian) const { return endian->get(_mtp_count); }
|
||||
};
|
||||
|
||||
// Hashtable entry
|
||||
class HashData VALUE_OBJ_CLASS_SPEC {
|
||||
private:
|
||||
u4 _name_offset; // Name offset in string table
|
||||
public:
|
||||
inline s4 name_offset(Endian* endian) const { return endian->get(_name_offset); }
|
||||
};
|
||||
|
||||
// Package to module hashtable entry
|
||||
class PTMData : public HashData {
|
||||
private:
|
||||
u4 _module_name_offset; // Module name offset in string table
|
||||
public:
|
||||
inline s4 module_name_offset(Endian* endian) const { return endian->get(_module_name_offset); }
|
||||
};
|
||||
|
||||
// Module to package hashtable entry
|
||||
class MTPData : public HashData {
|
||||
private:
|
||||
u4 _package_count; // Number of packages in module
|
||||
u4 _package_offset; // Offset in package list
|
||||
public:
|
||||
inline u4 package_count(Endian* endian) const { return endian->get(_package_count); }
|
||||
inline u4 package_offset(Endian* endian) const { return endian->get(_package_offset); }
|
||||
};
|
||||
|
||||
const ImageFileReader* _image_file; // Source image file
|
||||
Endian* _endian; // Endian handler
|
||||
ImageStrings _strings; // Image file strings
|
||||
u1* _data; // Module data resource data
|
||||
u8 _data_size; // Size of resource data
|
||||
Header* _header; // Module data header
|
||||
s4* _ptm_redirect; // Package to module hashtable redirect
|
||||
PTMData* _ptm_data; // Package to module data
|
||||
s4* _mtp_redirect; // Module to packages hashtable redirect
|
||||
MTPData* _mtp_data; // Module to packages data
|
||||
s4* _mtp_packages; // Package data (name offsets)
|
||||
|
||||
// Return a string from the string table.
|
||||
inline const char* get_string(u4 offset) {
|
||||
return _strings.get(offset);
|
||||
}
|
||||
|
||||
inline u4 mtp_package(u4 index) {
|
||||
return _endian->get(_mtp_packages[index]);
|
||||
}
|
||||
|
||||
public:
|
||||
ImageModuleData(const ImageFileReader* image_file, const char* module_data_name);
|
||||
~ImageModuleData();
|
||||
|
||||
// Return the name of the module data resource.
|
||||
static void module_data_name(char* buffer, const char* image_file_name);
|
||||
|
||||
// Return the module in which a package resides. Returns NULL if not found.
|
||||
const char* package_to_module(const char* package_name);
|
||||
|
||||
// Returns all the package names in a module. Returns NULL if module not found.
|
||||
GrowableArray<const char*>* module_to_packages(const char* module_name);
|
||||
};
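A hedged usage sketch of the module meta data interface declared above, assuming an already opened ImageFileReader* named reader; the package string "java/lang" and the module name "java.base" are illustrative and error handling is elided.

char module_data_name_buf[IMAGE_MAX_PATH];  // IMAGE_MAX_PATH is defined further below
// Derive the name of the module meta data resource from the image file name.
ImageModuleData::module_data_name(module_data_name_buf, reader->name());
ImageModuleData module_data(reader, module_data_name_buf);

// Package to module: package strings use '/' separators; NULL means not found.
const char* module_name = module_data.package_to_module("java/lang");

// Module to packages: NULL means the module is not described by this image.
GrowableArray<const char*>* packages = module_data.module_to_packages("java.base");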
|
||||
|
||||
// Image file header, starting at offset 0.
|
||||
class ImageHeader VALUE_OBJ_CLASS_SPEC {
|
||||
private:
|
||||
u4 _magic; // Image file marker
|
||||
u4 _version; // Image file major version number
|
||||
u4 _flags; // Image file flags
|
||||
u4 _resource_count; // Number of resources in file
|
||||
u4 _table_length; // Number of slots in index tables
|
||||
u4 _locations_size; // Number of bytes in attribute table
|
||||
u4 _strings_size; // Number of bytes in string table
|
||||
|
||||
public:
|
||||
u4 magic() const { return _magic; }
|
||||
u4 magic(Endian* endian) const { return endian->get(_magic); }
|
||||
void set_magic(Endian* endian, u4 magic) { return endian->set(_magic, magic); }
|
||||
|
||||
u4 major_version(Endian* endian) const { return endian->get(_version) >> 16; }
|
||||
u4 minor_version(Endian* endian) const { return endian->get(_version) & 0xFFFF; }
|
||||
void set_version(Endian* endian, u4 major_version, u4 minor_version) {
|
||||
return endian->set(_version, major_version << 16 | minor_version);
|
||||
}
|
||||
|
||||
u4 flags(Endian* endian) const { return endian->get(_flags); }
|
||||
void set_flags(Endian* endian, u4 value) { return endian->set(_flags, value); }
|
||||
|
||||
u4 resource_count(Endian* endian) const { return endian->get(_resource_count); }
|
||||
void set_resource_count(Endian* endian, u4 count) { return endian->set(_resource_count, count); }
|
||||
|
||||
u4 table_length(Endian* endian) const { return endian->get(_table_length); }
|
||||
void set_table_length(Endian* endian, u4 count) { return endian->set(_table_length, count); }
|
||||
|
||||
u4 locations_size(Endian* endian) const { return endian->get(_locations_size); }
|
||||
void set_locations_size(Endian* endian, u4 size) { return endian->set(_locations_size, size); }
|
||||
|
||||
u4 strings_size(Endian* endian) const { return endian->get(_strings_size); }
|
||||
void set_strings_size(Endian* endian, u4 size) { return endian->set(_strings_size, size); }
|
||||
};
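A small worked example of the version packing used by ImageHeader above: set_version() stores major << 16 | minor in the single _version word, and the two getters unpack it.

uint32_t packed = (1u << 16) | 0u;  // set_version(endian, 1, 0) stores 0x00010000
uint32_t major  = packed >> 16;     // major_version() yields 1
uint32_t minor  = packed & 0xFFFF;  // minor_version() yields 0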
|
||||
|
||||
// Max path length limit independent of platform. Windows max path is 1024,
|
||||
// other platforms use 4096. The JCK fails several tests when 1024 is used.
|
||||
#define IMAGE_MAX_PATH 4096
|
||||
|
||||
// Manage the image file.
|
||||
// ImageFileReader manages the content of an image file.
|
||||
// Initially, the header of the image file is read for validation. If valid,
|
||||
// values in the header are used to calculate the size of the image index. The
|
||||
// index is then memory mapped to allow load on demand and sharing. The
|
||||
// -XX:+MemoryMapImage flag determines if the entire file is loaded (server use.)
|
||||
// An image can be used by Hotspot and multiple reference points in the JDK, thus
|
||||
// it is desirable to share a reader. To accommodate sharing, a share table is
|
||||
// defined (see ImageFileReaderTable in imageFile.cpp.) To track the number of
|
||||
// uses, ImageFileReader keeps a use count (_use). Use is incremented when
|
||||
// 'opened' by a reference point and decremented when 'closed'. When the use
|
||||
// count drops to zero the ImageFileReader is actually closed and discarded.
|
||||
class ImageFileReader : public CHeapObj<mtClass> {
|
||||
private:
|
||||
// Manage a number of image files such that an image can be shared across
|
||||
// multiple uses (ex. loader.)
|
||||
static GrowableArray<ImageFileReader*>* _reader_table;
|
||||
|
||||
char* _name; // Name of image
|
||||
s4 _use; // Use count
|
||||
int _fd; // File descriptor
|
||||
Endian* _endian; // Endian handler
|
||||
u8 _file_size; // File size in bytes
|
||||
ImageHeader _header; // Image header
|
||||
size_t _index_size; // Total size of index
|
||||
u1* _index_data; // Raw index data
|
||||
s4* _redirect_table; // Perfect hash redirect table
|
||||
u4* _offsets_table; // Location offset table
|
||||
u1* _location_bytes; // Location attributes
|
||||
u1* _string_bytes; // String table
|
||||
|
||||
ImageFileReader(const char* name, bool big_endian);
|
||||
~ImageFileReader();
|
||||
|
||||
// Compute number of bytes in image file index.
|
||||
inline u8 index_size() {
|
||||
return sizeof(ImageHeader) +
|
||||
table_length() * sizeof(u4) * 2 + locations_size() + strings_size();
|
||||
}
|
||||
|
||||
public:
|
||||
enum {
|
||||
// Image file marker.
|
||||
IMAGE_MAGIC = 0xCAFEDADA,
|
||||
// Endian inverted Image file marker.
|
||||
IMAGE_MAGIC_INVERT = 0xDADAFECA,
|
||||
// Image file major version number.
|
||||
MAJOR_VERSION = 1,
|
||||
// Image file minor version number.
|
||||
MINOR_VERSION = 0
|
||||
};
|
||||
|
||||
// Open an image file, reusing the existing structure if the file is already open.
|
||||
static ImageFileReader* open(const char* name, bool big_endian = Endian::is_big_endian());
|
||||
|
||||
// Close an image file if the file is not in use elsewhere.
|
||||
static void close(ImageFileReader *reader);
|
||||
|
||||
// Return an id for the specified ImageFileReader.
|
||||
static u8 readerToID(ImageFileReader *reader);
|
||||
|
||||
// Validate the image id.
|
||||
static bool idCheck(u8 id);
|
||||
|
||||
// Return the ImageFileReader associated with the specified id.
|
||||
static ImageFileReader* idToReader(u8 id);
|
||||
|
||||
// Open image file for read access.
|
||||
bool open();
|
||||
|
||||
// Close image file.
|
||||
void close();
|
||||
|
||||
// Read directly from the file.
|
||||
bool read_at(u1* data, u8 size, u8 offset) const;
|
||||
|
||||
inline Endian* endian() const { return _endian; }
|
||||
|
||||
// Retrieve name of image file.
|
||||
inline const char* name() const {
|
||||
return _name;
|
||||
}
|
||||
|
||||
// Retrieve size of image file.
|
||||
inline u8 file_size() const {
|
||||
return _file_size;
|
||||
}
|
||||
|
||||
// Return first address of index data.
|
||||
inline u1* get_index_address() const {
|
||||
return _index_data;
|
||||
}
|
||||
|
||||
// Return first address of resource data.
|
||||
inline u1* get_data_address() const {
|
||||
return _index_data + _index_size;
|
||||
}
|
||||
|
||||
// Get the size of the index data.
|
||||
size_t get_index_size() const {
|
||||
return _index_size;
|
||||
}
|
||||
|
||||
inline u4 table_length() const {
|
||||
return _header.table_length(_endian);
|
||||
}
|
||||
|
||||
inline u4 locations_size() const {
|
||||
return _header.locations_size(_endian);
|
||||
}
|
||||
|
||||
inline u4 strings_size() const {
|
||||
return _header.strings_size(_endian);
|
||||
}
|
||||
|
||||
inline u4* offsets_table() const {
|
||||
return _offsets_table;
|
||||
}
|
||||
|
||||
// Increment use count.
|
||||
inline void inc_use() {
|
||||
_use++;
|
||||
}
|
||||
|
||||
// Decrement use count.
|
||||
inline bool dec_use() {
|
||||
return --_use == 0;
|
||||
}
|
||||
|
||||
// Return a string table accessor.
|
||||
inline const ImageStrings get_strings() const {
|
||||
return ImageStrings(_string_bytes, _header.strings_size(_endian));
|
||||
}
|
||||
|
||||
// Return location attribute stream at offset.
|
||||
inline u1* get_location_offset_data(u4 offset) const {
|
||||
guarantee((u4)offset < _header.locations_size(_endian),
|
||||
"offset exceeds location attributes size");
|
||||
return offset != 0 ? _location_bytes + offset : NULL;
|
||||
}
|
||||
|
||||
// Return location attribute stream for location i.
|
||||
inline u1* get_location_data(u4 index) const {
|
||||
guarantee((u4)index < _header.table_length(_endian),
|
||||
"index exceeds location count");
|
||||
u4 offset = _endian->get(_offsets_table[index]);
|
||||
|
||||
return get_location_offset_data(offset);
|
||||
}
|
||||
|
||||
// Find the location attributes associated with the path. Returns true if
|
||||
// the location is found, false otherwise.
|
||||
bool find_location(const char* path, ImageLocation& location) const;
|
||||
|
||||
// Assemble the location path.
|
||||
void location_path(ImageLocation& location, char* path, size_t max) const;
|
||||
|
||||
// Verify that a found location matches the supplied path.
|
||||
bool verify_location(ImageLocation& location, const char* path) const;
|
||||
|
||||
// Return the resource for the supplied path.
|
||||
void get_resource(ImageLocation& location, u1* uncompressed_data) const;
|
||||
};
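A hedged usage sketch of the shared-reader protocol described above: open() either creates a reader or bumps the use count of an existing one, and close() only tears the reader down when the last use goes away. The image name, the resource path layout and the helper name read_class_bytes are illustrative assumptions, not APIs added by this patch.

// Caller supplies a buffer of at least the uncompressed size of the resource.
static bool read_class_bytes(ImageFileReader* reader, const char* path,
                             u1* buffer, u8 buffer_size) {
  ImageLocation location;
  if (!reader->find_location(path, location)) {
    return false;                               // no such resource
  }
  u8 size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
  if (size > buffer_size) {
    return false;                               // caller's buffer is too small
  }
  reader->get_resource(location, buffer);       // inflates if compressed
  return true;
}

// Typical reference-point usage:
//   ImageFileReader* reader = ImageFileReader::open("bootmodules.jimage");
//   ... read_class_bytes(reader, "/java.base/java/lang/String.class", buf, buf_size) ...
//   ImageFileReader::close(reader);            // actual close happens at use count zero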
|
||||
#endif // SHARE_VM_CLASSFILE_IMAGEFILE_HPP
|
@ -29,7 +29,6 @@
|
||||
#include "classfile/vmSymbols.hpp"
|
||||
#include "code/debugInfo.hpp"
|
||||
#include "code/pcDesc.hpp"
|
||||
#include "compiler/compilerOracle.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "memory/oopFactory.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
|
176
hotspot/src/share/vm/classfile/jimage.hpp
Normal file
@ -0,0 +1,176 @@
|
||||
/*
|
||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "jni.h"
|
||||
|
||||
// Opaque reference to a JImage file.
|
||||
class JImageFile;
|
||||
// Opaque reference to an image file resource location.
|
||||
typedef jlong JImageLocationRef;
|
||||
|
||||
// Max path length limit independent of platform. Windows max path is 1024,
|
||||
// other platforms use 4096. The JCK fails several tests when 1024 is used.
|
||||
#define JIMAGE_MAX_PATH 4096
|
||||
|
||||
// JImage Error Codes
|
||||
|
||||
// The image file is not prefixed with 0xCAFEDADA
|
||||
#define JIMAGE_BAD_MAGIC (-1)
|
||||
// The image file does not have a compatible (translatable) version
|
||||
#define JIMAGE_BAD_VERSION (-2)
|
||||
// The image file content is malformed
|
||||
#define JIMAGE_CORRUPTED (-3)
|
||||
|
||||
/*
|
||||
* JImageOpen - Given the supplied full path file name, open an image file. This
|
||||
* function will also initialize tables and retrieve meta-data necessary to
|
||||
* satisfy other functions in the API. If the image file has been previously
|
||||
* opened, a new open request will share memory and resources used by the previous
|
||||
* open. A call to JImageOpen should be balanced by a call to JImageClose, to
|
||||
* release memory and resources used. If the image file is not found or cannot
|
||||
* be opened, then NULL is returned and error will contain a reason for the
|
||||
* failure; a positive value for a system error number, negative for a jimage
|
||||
* specific error (see JImage Error Codes.)
|
||||
*
|
||||
* Ex.
|
||||
* jint error;
|
||||
* JImageFile* jimage = (*JImageOpen)(JAVA_HOME "lib/modules/bootmodules.jimage", &error);
|
||||
* if (jimage == NULL) {
|
||||
* tty->print_cr("JImage failed to open: %d", error);
|
||||
* ...
|
||||
* }
|
||||
* ...
|
||||
*/
|
||||
|
||||
extern "C" JImageFile* JIMAGE_Open(const char *name, jint* error);
|
||||
|
||||
typedef JImageFile* (*JImageOpen_t)(const char *name, jint* error);
|
||||
|
||||
/*
|
||||
* JImageClose - Given the supplied open image file (see JImageOpen), release
|
||||
* memory and resources used by the open file and close the file. If the image
|
||||
* file is shared by other uses, release and close is deferred until the last use
|
||||
* is also closed.
|
||||
*
|
||||
* Ex.
|
||||
* (*JImageClose)(image);
|
||||
*/
|
||||
|
||||
extern "C" void JIMAGE_Close(JImageFile* jimage);
|
||||
|
||||
typedef void (*JImageClose_t)(JImageFile* jimage);
|
||||
|
||||
|
||||
/*
|
||||
* JImagePackageToModule - Given an open image file (see JImageOpen) and the name
|
||||
* of a package, return the name of the module where the package resides. If the
|
||||
* package does not exist in the image file, the function returns NULL.
|
||||
* The resulting string does not have to be released, and should not be. All strings are
|
||||
* utf-8, zero byte terminated.
|
||||
*
|
||||
* Ex.
|
||||
* const char* package = (*JImagePackageToModule)(image, "java/lang");
|
||||
* tty->print_cr("%s", package);
|
||||
* -> java.base
|
||||
*/
|
||||
|
||||
extern "C" const char * JIMAGE_PackageToModule(JImageFile* jimage, const char* package_name);
|
||||
|
||||
typedef const char* (*JImagePackageToModule_t)(JImageFile* jimage, const char* package_name);
|
||||
|
||||
|
||||
/*
|
||||
* JImageFindResource - Given an open image file (see JImageOpen), a module
|
||||
* name, a version string and the name of a class/resource, return location
|
||||
* information describing the resource and its size. If no resource is found, the
|
||||
* function returns JIMAGE_NOT_FOUND and the value of size is undefined.
|
||||
* The version number should be "9.0" and is not used in locating the resource.
|
||||
* The resulting location does not have to be released, and should not be.
|
||||
* All strings are utf-8, zero byte terminated.
|
||||
*
|
||||
* Ex.
|
||||
* jlong size;
|
||||
* JImageLocationRef location = (*JImageFindResource)(image, "java.base", "9.0", "java/lang/String.class", &size);
|
||||
*/
|
||||
extern "C" JImageLocationRef JIMAGE_FindResource(JImageFile* jimage,
|
||||
const char* module_name, const char* version, const char* name,
|
||||
jlong* size);
|
||||
|
||||
typedef JImageLocationRef(*JImageFindResource_t)(JImageFile* jimage,
|
||||
const char* module_name, const char* version, const char* name,
|
||||
jlong* size);
|
||||
|
||||
|
||||
/*
|
||||
* JImageGetResource - Given an open image file (see JImageOpen), a resource's
|
||||
* location information (see JImageFindResource), a buffer of appropriate
|
||||
* size and the size, retrieve the bytes associated with the
|
||||
* resource. If the size is less than the resource size then the read is truncated.
|
||||
* If the size is greater than the resource size then the remainder of the buffer
|
||||
* is zero filled. The function will return the actual size of the resource.
|
||||
*
|
||||
* Ex.
|
||||
* jlong size;
|
||||
* JImageLocationRef location = (*JImageFindResource)(image, "java.base", "9.0", "java/lang/String.class", &size);
|
||||
* char* buffer = new char[size];
|
||||
* (*JImageGetResource)(image, location, buffer, size);
|
||||
*/
|
||||
extern "C" jlong JIMAGE_GetResource(JImageFile* jimage, JImageLocationRef location,
|
||||
char* buffer, jlong size);
|
||||
|
||||
typedef jlong(*JImageGetResource_t)(JImageFile* jimage, JImageLocationRef location,
|
||||
char* buffer, jlong size);
|
||||
|
||||
|
||||
/*
|
||||
* JImageResourceIterator - Given an open image file (see JImageOpen), a visitor
|
||||
* function and a visitor argument, iterate through each of the image's resources.
|
||||
* The visitor function is called with the image file, the module name, the
|
||||
* package name, the base name, the extension and the visitor argument. The return
|
||||
* value of the visitor function should be true, unless an early iteration exit is
|
||||
* required. All strings are utf-8, zero byte terminated.
|
||||
*
|
||||
* Ex.
|
||||
* bool ctw_visitor(JImageFile* jimage, const char* module_name, const char* version, const char* package, const char* name, const char* extension, void* arg) {
|
||||
* if (strcmp(extension, "class") == 0) {
|
||||
* char path[JIMAGE_MAX_PATH];
|
||||
* Thread* THREAD = Thread::current();
|
||||
* jio_snprintf(path, JIMAGE_MAX_PATH - 1, "/%s/%s", package, name);
|
||||
* ClassLoader::compile_the_world_in(path, (Handle)arg, THREAD);
|
||||
* return !HAS_PENDING_EXCEPTION;
|
||||
* }
|
||||
* return true;
|
||||
* }
|
||||
* (*JImageResourceIterator)(image, ctw_visitor, loader);
|
||||
*/
|
||||
|
||||
typedef bool (*JImageResourceVisitor_t)(JImageFile* jimage,
|
||||
const char* module_name, const char* version, const char* package,
|
||||
const char* name, const char* extension, void* arg);
|
||||
|
||||
extern "C" void JIMAGE_ResourceIterator(JImageFile* jimage,
|
||||
JImageResourceVisitor_t visitor, void *arg);
|
||||
|
||||
typedef void (*JImageResourceIterator_t)(JImageFile* jimage,
|
||||
JImageResourceVisitor_t visitor, void* arg);
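A hedged sketch of why the *_t function-pointer typedefs above exist: a client outside the library exporting the JIMAGE_* functions can resolve the entry points at runtime. POSIX dlopen/dlsym and the library name libjvm.so are illustrative assumptions; the actual lookup mechanism is up to the caller.

#include <dlfcn.h>

void* lib = dlopen("libjvm.so", RTLD_LAZY);                            // assumed library name
if (lib != NULL) {
  JImageOpen_t  jimage_open  = (JImageOpen_t)  dlsym(lib, "JIMAGE_Open");
  JImageClose_t jimage_close = (JImageClose_t) dlsym(lib, "JIMAGE_Close");
  if (jimage_open != NULL && jimage_close != NULL) {
    jint error;
    JImageFile* image = (*jimage_open)("lib/modules/bootmodules.jimage", &error);
    if (image != NULL) {
      (*jimage_close)(image);
    }
  }
}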
|
@ -625,6 +625,10 @@ bool vmIntrinsics::is_disabled_by_flags(methodHandle method, methodHandle compil
|
||||
case vmIntrinsics::_updateDirectByteBufferCRC32C:
|
||||
if (!UseCRC32CIntrinsics) return true;
|
||||
break;
|
||||
case vmIntrinsics::_updateBytesAdler32:
|
||||
case vmIntrinsics::_updateByteBufferAdler32:
|
||||
if (!UseAdler32Intrinsics) return true;
|
||||
break;
|
||||
case vmIntrinsics::_copyMemory:
|
||||
if (!InlineArrayCopy || !InlineUnsafeOps) return true;
|
||||
break;
|
||||
|
@ -927,6 +927,12 @@
|
||||
do_intrinsic(_updateDirectByteBufferCRC32C, java_util_zip_CRC32C, updateDirectByteBuffer_C_name, updateByteBuffer_signature, F_S) \
|
||||
do_name( updateDirectByteBuffer_C_name, "updateDirectByteBuffer") \
|
||||
\
|
||||
/* support for java.util.zip.Adler32 */ \
|
||||
do_class(java_util_zip_Adler32, "java/util/zip/Adler32") \
|
||||
do_intrinsic(_updateBytesAdler32, java_util_zip_Adler32, updateBytes_C_name, updateBytes_signature, F_SN) \
|
||||
do_intrinsic(_updateByteBufferAdler32, java_util_zip_Adler32, updateByteBuffer_A_name, updateByteBuffer_signature, F_SN) \
|
||||
do_name( updateByteBuffer_A_name, "updateByteBuffer") \
|
||||
\
|
||||
/* support for sun.misc.Unsafe */ \
|
||||
do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \
|
||||
\
|
||||
|
@ -848,10 +848,10 @@ void nmethod::print_on(outputStream* st, const char* msg) const {
|
||||
if (st != NULL) {
|
||||
ttyLocker ttyl;
|
||||
if (WizardMode) {
|
||||
CompileTask::print_compilation(st, this, msg, /*short_form:*/ true);
|
||||
CompileTask::print(st, this, msg, /*short_form:*/ true);
|
||||
st->print_cr(" (" INTPTR_FORMAT ")", this);
|
||||
} else {
|
||||
CompileTask::print_compilation(st, this, msg, /*short_form:*/ false);
|
||||
CompileTask::print(st, this, msg, /*short_form:*/ false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -157,7 +157,6 @@ long CompileBroker::_peak_compilation_time = 0;
|
||||
CompileQueue* CompileBroker::_c2_compile_queue = NULL;
|
||||
CompileQueue* CompileBroker::_c1_compile_queue = NULL;
|
||||
|
||||
|
||||
class CompilationLog : public StringEventLog {
|
||||
public:
|
||||
CompilationLog() : StringEventLog("Compilation events") {
|
||||
@ -167,7 +166,7 @@ class CompilationLog : public StringEventLog {
|
||||
StringLogMessage lm;
|
||||
stringStream sstr = lm.stream();
|
||||
// msg.time_stamp().update_to(tty->time_stamp().ticks());
|
||||
task->print_compilation(&sstr, NULL, true, false);
|
||||
task->print(&sstr, NULL, true, false);
|
||||
log(thread, "%s", (const char*)lm);
|
||||
}
|
||||
|
||||
@ -233,371 +232,6 @@ CompileTaskWrapper::~CompileTaskWrapper() {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
CompileTask* CompileTask::_task_free_list = NULL;
|
||||
#ifdef ASSERT
|
||||
int CompileTask::_num_allocated_tasks = 0;
|
||||
#endif
|
||||
/**
|
||||
* Allocate a CompileTask, from the free list if possible.
|
||||
*/
|
||||
CompileTask* CompileTask::allocate() {
|
||||
MutexLocker locker(CompileTaskAlloc_lock);
|
||||
CompileTask* task = NULL;
|
||||
|
||||
if (_task_free_list != NULL) {
|
||||
task = _task_free_list;
|
||||
_task_free_list = task->next();
|
||||
task->set_next(NULL);
|
||||
} else {
|
||||
task = new CompileTask();
|
||||
DEBUG_ONLY(_num_allocated_tasks++;)
|
||||
assert (WhiteBoxAPI || _num_allocated_tasks < 10000, "Leaking compilation tasks?");
|
||||
task->set_next(NULL);
|
||||
task->set_is_free(true);
|
||||
}
|
||||
assert(task->is_free(), "Task must be free.");
|
||||
task->set_is_free(false);
|
||||
return task;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Add a task to the free list.
|
||||
*/
|
||||
void CompileTask::free(CompileTask* task) {
|
||||
MutexLocker locker(CompileTaskAlloc_lock);
|
||||
if (!task->is_free()) {
|
||||
task->set_code(NULL);
|
||||
assert(!task->lock()->is_locked(), "Should not be locked when freed");
|
||||
JNIHandles::destroy_global(task->_method_holder);
|
||||
JNIHandles::destroy_global(task->_hot_method_holder);
|
||||
|
||||
task->set_is_free(true);
|
||||
task->set_next(_task_free_list);
|
||||
_task_free_list = task;
|
||||
}
|
||||
}
|
||||
|
||||
void CompileTask::initialize(int compile_id,
|
||||
methodHandle method,
|
||||
int osr_bci,
|
||||
int comp_level,
|
||||
methodHandle hot_method,
|
||||
int hot_count,
|
||||
const char* comment,
|
||||
bool is_blocking) {
|
||||
assert(!_lock->is_locked(), "bad locking");
|
||||
|
||||
_compile_id = compile_id;
|
||||
_method = method();
|
||||
_method_holder = JNIHandles::make_global(method->method_holder()->klass_holder());
|
||||
_osr_bci = osr_bci;
|
||||
_is_blocking = is_blocking;
|
||||
_comp_level = comp_level;
|
||||
_num_inlined_bytecodes = 0;
|
||||
|
||||
_is_complete = false;
|
||||
_is_success = false;
|
||||
_code_handle = NULL;
|
||||
|
||||
_hot_method = NULL;
|
||||
_hot_method_holder = NULL;
|
||||
_hot_count = hot_count;
|
||||
_time_queued = 0; // tidy
|
||||
_comment = comment;
|
||||
_failure_reason = NULL;
|
||||
|
||||
if (LogCompilation) {
|
||||
_time_queued = os::elapsed_counter();
|
||||
if (hot_method.not_null()) {
|
||||
if (hot_method == method) {
|
||||
_hot_method = _method;
|
||||
} else {
|
||||
_hot_method = hot_method();
|
||||
// only add loader or mirror if different from _method_holder
|
||||
_hot_method_holder = JNIHandles::make_global(hot_method->method_holder()->klass_holder());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_next = NULL;
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::code/set_code
|
||||
nmethod* CompileTask::code() const {
|
||||
if (_code_handle == NULL) return NULL;
|
||||
return _code_handle->code();
|
||||
}
|
||||
void CompileTask::set_code(nmethod* nm) {
|
||||
if (_code_handle == NULL && nm == NULL) return;
|
||||
guarantee(_code_handle != NULL, "");
|
||||
_code_handle->set_code(nm);
|
||||
if (nm == NULL) _code_handle = NULL; // drop the handle also
|
||||
}
|
||||
|
||||
void CompileTask::mark_on_stack() {
|
||||
// Mark these methods as something redefine classes cannot remove.
|
||||
_method->set_on_stack(true);
|
||||
if (_hot_method != NULL) {
|
||||
_hot_method->set_on_stack(true);
|
||||
}
|
||||
}
|
||||
|
||||
// RedefineClasses support
|
||||
void CompileTask::metadata_do(void f(Metadata*)) {
|
||||
f(method());
|
||||
if (hot_method() != NULL && hot_method() != method()) {
|
||||
f(hot_method());
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::print_line_on_error
|
||||
//
|
||||
// This function is called by fatal error handler when the thread
|
||||
// causing troubles is a compiler thread.
|
||||
//
|
||||
// Do not grab any lock, do not allocate memory.
|
||||
//
|
||||
// Otherwise it's the same as CompileTask::print_line()
|
||||
//
|
||||
void CompileTask::print_line_on_error(outputStream* st, char* buf, int buflen) {
|
||||
// print compiler name
|
||||
st->print("%s:", CompileBroker::compiler_name(comp_level()));
|
||||
print_compilation(st);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::print_line
|
||||
void CompileTask::print_tty() {
|
||||
ttyLocker ttyl; // keep the following output all in one block
|
||||
// print compiler name if requested
|
||||
if (CIPrintCompilerName) tty->print("%s:", CompileBroker::compiler_name(comp_level()));
|
||||
print_compilation(tty);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::print_compilation_impl
|
||||
void CompileTask::print_compilation_impl(outputStream* st, Method* method, int compile_id, int comp_level,
|
||||
bool is_osr_method, int osr_bci, bool is_blocking,
|
||||
const char* msg, bool short_form, bool cr) {
|
||||
if (!short_form) {
|
||||
st->print("%7d ", (int) st->time_stamp().milliseconds()); // print timestamp
|
||||
}
|
||||
st->print("%4d ", compile_id); // print compilation number
|
||||
|
||||
// For unloaded methods the transition to zombie occurs after the
|
||||
// method is cleared so it's impossible to report accurate
|
||||
// information for that case.
|
||||
bool is_synchronized = false;
|
||||
bool has_exception_handler = false;
|
||||
bool is_native = false;
|
||||
if (method != NULL) {
|
||||
is_synchronized = method->is_synchronized();
|
||||
has_exception_handler = method->has_exception_handler();
|
||||
is_native = method->is_native();
|
||||
}
|
||||
// method attributes
|
||||
const char compile_type = is_osr_method ? '%' : ' ';
|
||||
const char sync_char = is_synchronized ? 's' : ' ';
|
||||
const char exception_char = has_exception_handler ? '!' : ' ';
|
||||
const char blocking_char = is_blocking ? 'b' : ' ';
|
||||
const char native_char = is_native ? 'n' : ' ';
|
||||
|
||||
// print method attributes
|
||||
st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
|
||||
|
||||
if (TieredCompilation) {
|
||||
if (comp_level != -1) st->print("%d ", comp_level);
|
||||
else st->print("- ");
|
||||
}
|
||||
st->print(" "); // more indent
|
||||
|
||||
if (method == NULL) {
|
||||
st->print("(method)");
|
||||
} else {
|
||||
method->print_short_name(st);
|
||||
if (is_osr_method) {
|
||||
st->print(" @ %d", osr_bci);
|
||||
}
|
||||
if (method->is_native())
|
||||
st->print(" (native)");
|
||||
else
|
||||
st->print(" (%d bytes)", method->code_size());
|
||||
}
|
||||
|
||||
if (msg != NULL) {
|
||||
st->print(" %s", msg);
|
||||
}
|
||||
if (cr) {
|
||||
st->cr();
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::print_inlining
|
||||
void CompileTask::print_inlining(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg) {
|
||||
// 1234567
|
||||
st->print(" "); // print timestamp
|
||||
// 1234
|
||||
st->print(" "); // print compilation number
|
||||
|
||||
// method attributes
|
||||
if (method->is_loaded()) {
|
||||
const char sync_char = method->is_synchronized() ? 's' : ' ';
|
||||
const char exception_char = method->has_exception_handlers() ? '!' : ' ';
|
||||
const char monitors_char = method->has_monitor_bytecodes() ? 'm' : ' ';
|
||||
|
||||
// print method attributes
|
||||
st->print(" %c%c%c ", sync_char, exception_char, monitors_char);
|
||||
} else {
|
||||
// %s!bn
|
||||
st->print(" "); // print method attributes
|
||||
}
|
||||
|
||||
if (TieredCompilation) {
|
||||
st->print(" ");
|
||||
}
|
||||
st->print(" "); // more indent
|
||||
st->print(" "); // initial inlining indent
|
||||
|
||||
for (int i = 0; i < inline_level; i++) st->print(" ");
|
||||
|
||||
st->print("@ %d ", bci); // print bci
|
||||
method->print_short_name(st);
|
||||
if (method->is_loaded())
|
||||
st->print(" (%d bytes)", method->code_size());
|
||||
else
|
||||
st->print(" (not loaded)");
|
||||
|
||||
if (msg != NULL) {
|
||||
st->print(" %s", msg);
|
||||
}
|
||||
st->cr();
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::print_inline_indent
|
||||
void CompileTask::print_inline_indent(int inline_level, outputStream* st) {
|
||||
// 1234567
|
||||
st->print(" "); // print timestamp
|
||||
// 1234
|
||||
st->print(" "); // print compilation number
|
||||
// %s!bn
|
||||
st->print(" "); // print method attributes
|
||||
if (TieredCompilation) {
|
||||
st->print(" ");
|
||||
}
|
||||
st->print(" "); // more indent
|
||||
st->print(" "); // initial inlining indent
|
||||
for (int i = 0; i < inline_level; i++) st->print(" ");
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::print_compilation
|
||||
void CompileTask::print_compilation(outputStream* st, const char* msg, bool short_form, bool cr) {
|
||||
bool is_osr_method = osr_bci() != InvocationEntryBci;
|
||||
print_compilation_impl(st, method(), compile_id(), comp_level(), is_osr_method, osr_bci(), is_blocking(), msg, short_form, cr);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::log_task
|
||||
void CompileTask::log_task(xmlStream* log) {
|
||||
Thread* thread = Thread::current();
|
||||
methodHandle method(thread, this->method());
|
||||
ResourceMark rm(thread);
|
||||
|
||||
// <task compiler='Cx' id='9' method='M' osr_bci='X' level='1' blocking='1' stamp='1.234'>
|
||||
log->print(" compiler='%s' compile_id='%d'", _comp_level <= CompLevel_full_profile ? "C1" : "C2", _compile_id);
|
||||
if (_osr_bci != CompileBroker::standard_entry_bci) {
|
||||
log->print(" compile_kind='osr'"); // same as nmethod::compile_kind
|
||||
} // else compile_kind='c2c'
|
||||
if (!method.is_null()) log->method(method);
|
||||
if (_osr_bci != CompileBroker::standard_entry_bci) {
|
||||
log->print(" osr_bci='%d'", _osr_bci);
|
||||
}
|
||||
if (_comp_level != CompLevel_highest_tier) {
|
||||
log->print(" level='%d'", _comp_level);
|
||||
}
|
||||
if (_is_blocking) {
|
||||
log->print(" blocking='1'");
|
||||
}
|
||||
log->stamp();
|
||||
}
|
||||
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::log_task_queued
|
||||
void CompileTask::log_task_queued() {
|
||||
Thread* thread = Thread::current();
|
||||
ttyLocker ttyl;
|
||||
ResourceMark rm(thread);
|
||||
|
||||
xtty->begin_elem("task_queued");
|
||||
log_task(xtty);
|
||||
if (_comment != NULL) {
|
||||
xtty->print(" comment='%s'", _comment);
|
||||
}
|
||||
if (_hot_method != NULL) {
|
||||
methodHandle hot(thread, _hot_method);
|
||||
methodHandle method(thread, _method);
|
||||
if (hot() != method()) {
|
||||
xtty->method(hot);
|
||||
}
|
||||
}
|
||||
if (_hot_count != 0) {
|
||||
xtty->print(" hot_count='%d'", _hot_count);
|
||||
}
|
||||
xtty->end_elem();
|
||||
}
|
||||
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::log_task_start
|
||||
void CompileTask::log_task_start(CompileLog* log) {
|
||||
log->begin_head("task");
|
||||
log_task(log);
|
||||
log->end_head();
|
||||
}
|
||||
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::log_task_done
|
||||
void CompileTask::log_task_done(CompileLog* log) {
|
||||
Thread* thread = Thread::current();
|
||||
methodHandle method(thread, this->method());
|
||||
ResourceMark rm(thread);
|
||||
|
||||
if (!_is_success) {
|
||||
const char* reason = _failure_reason != NULL ? _failure_reason : "unknown";
|
||||
log->elem("failure reason='%s'", reason);
|
||||
}
|
||||
|
||||
// <task_done ... stamp='1.234'> </task>
|
||||
nmethod* nm = code();
|
||||
log->begin_elem("task_done success='%d' nmsize='%d' count='%d'",
|
||||
_is_success, nm == NULL ? 0 : nm->content_size(),
|
||||
method->invocation_count());
|
||||
int bec = method->backedge_count();
|
||||
if (bec != 0) log->print(" backedge_count='%d'", bec);
|
||||
// Note: "_is_complete" is about to be set, but is not.
|
||||
if (_num_inlined_bytecodes != 0) {
|
||||
log->print(" inlined_bytes='%d'", _num_inlined_bytecodes);
|
||||
}
|
||||
log->stamp();
|
||||
log->end_elem();
|
||||
log->tail("task");
|
||||
log->clear_identities(); // next task will have different CI
|
||||
if (log->unflushed_count() > 2000) {
|
||||
log->flush();
|
||||
}
|
||||
log->mark_file_end();
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Add a CompileTask to a CompileQueue.
|
||||
*/
|
||||
@ -807,7 +441,7 @@ void CompileQueue::print(outputStream* st) {
|
||||
st->print_cr("Empty");
|
||||
} else {
|
||||
while (task != NULL) {
|
||||
task->print_compilation(st, NULL, true, true);
|
||||
task->print(st, NULL, true, true);
|
||||
task = task->next();
|
||||
}
|
||||
}
|
||||
@ -1349,7 +983,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
|
||||
#ifndef TIERED
|
||||
// seems like an assert of dubious value
|
||||
assert(comp_level == CompLevel_highest_tier,
|
||||
"all OSR compiles are assumed to be at a single compilation lavel");
|
||||
"all OSR compiles are assumed to be at a single compilation level");
|
||||
#endif // TIERED
|
||||
// We accept a higher level osr method
|
||||
nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
|
||||
@ -2037,7 +1671,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
|
||||
FormatBufferResource msg = retry_message != NULL ?
|
||||
err_msg_res("COMPILE SKIPPED: %s (%s)", ci_env.failure_reason(), retry_message) :
|
||||
err_msg_res("COMPILE SKIPPED: %s", ci_env.failure_reason());
|
||||
task->print_compilation(tty, msg);
|
||||
task->print(tty, msg);
|
||||
}
|
||||
} else {
|
||||
task->mark_success();
|
||||
|
@ -27,127 +27,12 @@
|
||||
|
||||
#include "ci/compilerInterface.hpp"
|
||||
#include "compiler/abstractCompiler.hpp"
|
||||
#include "compiler/compileTask.hpp"
|
||||
#include "runtime/perfData.hpp"
|
||||
|
||||
class nmethod;
|
||||
class nmethodLocker;
|
||||
|
||||
// CompileTask
|
||||
//
|
||||
// An entry in the compile queue. It represents a pending or current
|
||||
// compilation.
|
||||
class CompileTask : public CHeapObj<mtCompiler> {
|
||||
friend class VMStructs;
|
||||
|
||||
private:
|
||||
static CompileTask* _task_free_list;
|
||||
#ifdef ASSERT
|
||||
static int _num_allocated_tasks;
|
||||
#endif
|
||||
|
||||
Monitor* _lock;
|
||||
uint _compile_id;
|
||||
Method* _method;
|
||||
jobject _method_holder;
|
||||
int _osr_bci;
|
||||
bool _is_complete;
|
||||
bool _is_success;
|
||||
bool _is_blocking;
|
||||
int _comp_level;
|
||||
int _num_inlined_bytecodes;
|
||||
nmethodLocker* _code_handle; // holder of eventual result
|
||||
CompileTask* _next, *_prev;
|
||||
bool _is_free;
|
||||
// Fields used for logging why the compilation was initiated:
|
||||
jlong _time_queued; // in units of os::elapsed_counter()
|
||||
Method* _hot_method; // which method actually triggered this task
|
||||
jobject _hot_method_holder;
|
||||
int _hot_count; // information about its invocation counter
|
||||
const char* _comment; // more info about the task
|
||||
const char* _failure_reason;
|
||||
|
||||
public:
|
||||
CompileTask() {
|
||||
_lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock");
|
||||
}
|
||||
|
||||
void initialize(int compile_id, methodHandle method, int osr_bci, int comp_level,
|
||||
methodHandle hot_method, int hot_count, const char* comment,
|
||||
bool is_blocking);
|
||||
|
||||
static CompileTask* allocate();
|
||||
static void free(CompileTask* task);
|
||||
|
||||
int compile_id() const { return _compile_id; }
|
||||
Method* method() const { return _method; }
|
||||
Method* hot_method() const { return _hot_method; }
|
||||
int osr_bci() const { return _osr_bci; }
|
||||
bool is_complete() const { return _is_complete; }
|
||||
bool is_blocking() const { return _is_blocking; }
|
||||
bool is_success() const { return _is_success; }
|
||||
|
||||
nmethodLocker* code_handle() const { return _code_handle; }
|
||||
void set_code_handle(nmethodLocker* l) { _code_handle = l; }
|
||||
nmethod* code() const; // _code_handle->code()
|
||||
void set_code(nmethod* nm); // _code_handle->set_code(nm)
|
||||
|
||||
Monitor* lock() const { return _lock; }
|
||||
|
||||
void mark_complete() { _is_complete = true; }
|
||||
void mark_success() { _is_success = true; }
|
||||
|
||||
int comp_level() { return _comp_level;}
|
||||
void set_comp_level(int comp_level) { _comp_level = comp_level;}
|
||||
|
||||
int num_inlined_bytecodes() const { return _num_inlined_bytecodes; }
|
||||
void set_num_inlined_bytecodes(int n) { _num_inlined_bytecodes = n; }
|
||||
|
||||
CompileTask* next() const { return _next; }
|
||||
void set_next(CompileTask* next) { _next = next; }
|
||||
CompileTask* prev() const { return _prev; }
|
||||
void set_prev(CompileTask* prev) { _prev = prev; }
|
||||
bool is_free() const { return _is_free; }
|
||||
void set_is_free(bool val) { _is_free = val; }
|
||||
|
||||
// RedefineClasses support
|
||||
void metadata_do(void f(Metadata*));
|
||||
|
||||
private:
|
||||
static void print_compilation_impl(outputStream* st, Method* method, int compile_id, int comp_level,
|
||||
bool is_osr_method = false, int osr_bci = -1, bool is_blocking = false,
|
||||
const char* msg = NULL, bool short_form = false, bool cr = true);
|
||||
|
||||
public:
|
||||
void print_compilation(outputStream* st = tty, const char* msg = NULL, bool short_form = false, bool cr = true);
|
||||
static void print_compilation(outputStream* st, const nmethod* nm, const char* msg = NULL, bool short_form = false, bool cr = true) {
|
||||
print_compilation_impl(st, nm->method(), nm->compile_id(), nm->comp_level(),
|
||||
nm->is_osr_method(), nm->is_osr_method() ? nm->osr_entry_bci() : -1, /*is_blocking*/ false,
|
||||
msg, short_form, cr);
|
||||
}
|
||||
|
||||
static void print_inlining(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg = NULL);
|
||||
static void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
|
||||
print_inlining(tty, method, inline_level, bci, msg);
|
||||
}
|
||||
|
||||
// Redefine Classes support
|
||||
void mark_on_stack();
|
||||
|
||||
static void print_inline_indent(int inline_level, outputStream* st = tty);
|
||||
|
||||
void print_tty();
|
||||
void print_line_on_error(outputStream* st, char* buf, int buflen);
|
||||
|
||||
void log_task(xmlStream* log);
|
||||
void log_task_queued();
|
||||
void log_task_start(CompileLog* log);
|
||||
void log_task_done(CompileLog* log);
|
||||
|
||||
void set_failure_reason(const char* reason) {
|
||||
_failure_reason = reason;
|
||||
}
|
||||
};
|
||||
|
||||
// CompilerCounters
|
||||
//
|
||||
// Per Compiler Performance Counters.
|
||||
|
391
hotspot/src/share/vm/compiler/compileTask.cpp
Normal file
@ -0,0 +1,391 @@
|
||||
/*
|
||||
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "compiler/compileTask.hpp"
|
||||
#include "compiler/compileLog.hpp"
|
||||
#include "compiler/compileBroker.hpp"
|
||||
|
||||
CompileTask* CompileTask::_task_free_list = NULL;
|
||||
#ifdef ASSERT
|
||||
int CompileTask::_num_allocated_tasks = 0;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Allocate a CompileTask, from the free list if possible.
|
||||
*/
|
||||
CompileTask* CompileTask::allocate() {
|
||||
MutexLocker locker(CompileTaskAlloc_lock);
|
||||
CompileTask* task = NULL;
|
||||
|
||||
if (_task_free_list != NULL) {
|
||||
task = _task_free_list;
|
||||
_task_free_list = task->next();
|
||||
task->set_next(NULL);
|
||||
} else {
|
||||
task = new CompileTask();
|
||||
DEBUG_ONLY(_num_allocated_tasks++;)
|
||||
assert (WhiteBoxAPI || _num_allocated_tasks < 10000, "Leaking compilation tasks?");
|
||||
task->set_next(NULL);
|
||||
task->set_is_free(true);
|
||||
}
|
||||
assert(task->is_free(), "Task must be free.");
|
||||
task->set_is_free(false);
|
||||
return task;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a task to the free list.
|
||||
*/
|
||||
|
||||
void CompileTask::free(CompileTask* task) {
|
||||
MutexLocker locker(CompileTaskAlloc_lock);
|
||||
if (!task->is_free()) {
|
||||
task->set_code(NULL);
|
||||
assert(!task->lock()->is_locked(), "Should not be locked when freed");
|
||||
JNIHandles::destroy_global(task->_method_holder);
|
||||
JNIHandles::destroy_global(task->_hot_method_holder);
|
||||
|
||||
task->set_is_free(true);
|
||||
task->set_next(_task_free_list);
|
||||
_task_free_list = task;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void CompileTask::initialize(int compile_id,
|
||||
methodHandle method,
|
||||
int osr_bci,
|
||||
int comp_level,
|
||||
methodHandle hot_method,
|
||||
int hot_count,
|
||||
const char* comment,
|
||||
bool is_blocking) {
|
||||
assert(!_lock->is_locked(), "bad locking");
|
||||
|
||||
_compile_id = compile_id;
|
||||
_method = method();
|
||||
_method_holder = JNIHandles::make_global(method->method_holder()->klass_holder());
|
||||
_osr_bci = osr_bci;
|
||||
_is_blocking = is_blocking;
|
||||
_comp_level = comp_level;
|
||||
_num_inlined_bytecodes = 0;
|
||||
|
||||
_is_complete = false;
|
||||
_is_success = false;
|
||||
_code_handle = NULL;
|
||||
|
||||
_hot_method = NULL;
|
||||
_hot_method_holder = NULL;
|
||||
_hot_count = hot_count;
|
||||
_time_queued = 0; // tidy
|
||||
_comment = comment;
|
||||
_failure_reason = NULL;
|
||||
|
||||
if (LogCompilation) {
|
||||
_time_queued = os::elapsed_counter();
|
||||
if (hot_method.not_null()) {
|
||||
if (hot_method == method) {
|
||||
_hot_method = _method;
|
||||
} else {
|
||||
_hot_method = hot_method();
|
||||
// only add loader or mirror if different from _method_holder
|
||||
_hot_method_holder = JNIHandles::make_global(hot_method->method_holder()->klass_holder());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_next = NULL;
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::code/set_code
|
||||
//
|
||||
nmethod* CompileTask::code() const {
|
||||
if (_code_handle == NULL) return NULL;
|
||||
return _code_handle->code();
|
||||
}
|
||||
|
||||
void CompileTask::set_code(nmethod* nm) {
|
||||
if (_code_handle == NULL && nm == NULL) return;
|
||||
guarantee(_code_handle != NULL, "");
|
||||
_code_handle->set_code(nm);
|
||||
if (nm == NULL) _code_handle = NULL; // drop the handle also
|
||||
}
|
||||
|
||||
void CompileTask::mark_on_stack() {
|
||||
// Mark these methods as something redefine classes cannot remove.
|
||||
_method->set_on_stack(true);
|
||||
if (_hot_method != NULL) {
|
||||
_hot_method->set_on_stack(true);
|
||||
}
|
||||
}
|
||||
|
||||
// RedefineClasses support
|
||||
void CompileTask::metadata_do(void f(Metadata*)) {
|
||||
f(method());
|
||||
if (hot_method() != NULL && hot_method() != method()) {
|
||||
f(hot_method());
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::print_line_on_error
|
||||
//
|
||||
// This function is called by fatal error handler when the thread
|
||||
// causing troubles is a compiler thread.
|
||||
//
|
||||
// Do not grab any lock, do not allocate memory.
|
||||
//
|
||||
// Otherwise it's the same as CompileTask::print_line()
|
||||
//
|
||||
void CompileTask::print_line_on_error(outputStream* st, char* buf, int buflen) {
|
||||
// print compiler name
|
||||
st->print("%s:", CompileBroker::compiler_name(comp_level()));
|
||||
print(st);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::print_tty
|
||||
void CompileTask::print_tty() {
|
||||
ttyLocker ttyl; // keep the following output all in one block
|
||||
// print compiler name if requested
|
||||
if (CIPrintCompilerName) tty->print("%s:", CompileBroker::compiler_name(comp_level()));
|
||||
print(tty);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::print_impl
|
||||
void CompileTask::print_impl(outputStream* st, Method* method, int compile_id, int comp_level,
|
||||
bool is_osr_method, int osr_bci, bool is_blocking,
|
||||
const char* msg, bool short_form, bool cr) {
|
||||
if (!short_form) {
|
||||
st->print("%7d ", (int) st->time_stamp().milliseconds()); // print timestamp
|
||||
}
|
||||
st->print("%4d ", compile_id); // print compilation number
|
||||
|
||||
// For unloaded methods the transition to zombie occurs after the
|
||||
// method is cleared so it's impossible to report accurate
|
||||
// information for that case.
|
||||
bool is_synchronized = false;
|
||||
bool has_exception_handler = false;
|
||||
bool is_native = false;
|
||||
if (method != NULL) {
|
||||
is_synchronized = method->is_synchronized();
|
||||
has_exception_handler = method->has_exception_handler();
|
||||
is_native = method->is_native();
|
||||
}
|
||||
// method attributes
|
||||
const char compile_type = is_osr_method ? '%' : ' ';
|
||||
const char sync_char = is_synchronized ? 's' : ' ';
|
||||
const char exception_char = has_exception_handler ? '!' : ' ';
|
||||
const char blocking_char = is_blocking ? 'b' : ' ';
|
||||
const char native_char = is_native ? 'n' : ' ';
|
||||
|
||||
// print method attributes
|
||||
st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
|
||||
|
||||
if (TieredCompilation) {
|
||||
if (comp_level != -1) st->print("%d ", comp_level);
|
||||
else st->print("- ");
|
||||
}
|
||||
st->print(" "); // more indent
|
||||
|
||||
if (method == NULL) {
|
||||
st->print("(method)");
|
||||
} else {
|
||||
method->print_short_name(st);
|
||||
if (is_osr_method) {
|
||||
st->print(" @ %d", osr_bci);
|
||||
}
|
||||
if (method->is_native())
|
||||
st->print(" (native)");
|
||||
else
|
||||
st->print(" (%d bytes)", method->code_size());
|
||||
}
|
||||
|
||||
if (msg != NULL) {
|
||||
st->print(" %s", msg);
|
||||
}
|
||||
if (cr) {
|
||||
st->cr();
|
||||
}
|
||||
}
|
||||
|
||||
void CompileTask::print_inline_indent(int inline_level, outputStream* st) {
|
||||
// 1234567
|
||||
st->print(" "); // print timestamp
|
||||
// 1234
|
||||
st->print(" "); // print compilation number
|
||||
// %s!bn
|
||||
st->print(" "); // print method attributes
|
||||
if (TieredCompilation) {
|
||||
st->print(" ");
|
||||
}
|
||||
st->print(" "); // more indent
|
||||
st->print(" "); // initial inlining indent
|
||||
for (int i = 0; i < inline_level; i++) st->print(" ");
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::print_compilation
|
||||
void CompileTask::print(outputStream* st, const char* msg, bool short_form, bool cr) {
|
||||
bool is_osr_method = osr_bci() != InvocationEntryBci;
|
||||
print_impl(st, method(), compile_id(), comp_level(), is_osr_method, osr_bci(), is_blocking(), msg, short_form, cr);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::log_task
|
||||
void CompileTask::log_task(xmlStream* log) {
|
||||
Thread* thread = Thread::current();
|
||||
methodHandle method(thread, this->method());
|
||||
ResourceMark rm(thread);
|
||||
|
||||
// <task id='9' method='M' osr_bci='X' level='1' blocking='1' stamp='1.234'>
|
||||
log->print(" compile_id='%d'", _compile_id);
|
||||
if (_osr_bci != CompileBroker::standard_entry_bci) {
|
||||
log->print(" compile_kind='osr'"); // same as nmethod::compile_kind
|
||||
} // else compile_kind='c2c'
|
||||
if (!method.is_null()) log->method(method);
|
||||
if (_osr_bci != CompileBroker::standard_entry_bci) {
|
||||
log->print(" osr_bci='%d'", _osr_bci);
|
||||
}
|
||||
if (_comp_level != CompLevel_highest_tier) {
|
||||
log->print(" level='%d'", _comp_level);
|
||||
}
|
||||
if (_is_blocking) {
|
||||
log->print(" blocking='1'");
|
||||
}
|
||||
log->stamp();
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::log_task_queued
|
||||
void CompileTask::log_task_queued() {
|
||||
Thread* thread = Thread::current();
|
||||
ttyLocker ttyl;
|
||||
ResourceMark rm(thread);
|
||||
|
||||
xtty->begin_elem("task_queued");
|
||||
log_task(xtty);
|
||||
if (_comment != NULL) {
|
||||
xtty->print(" comment='%s'", _comment);
|
||||
}
|
||||
if (_hot_method != NULL) {
|
||||
methodHandle hot(thread, _hot_method);
|
||||
methodHandle method(thread, _method);
|
||||
if (hot() != method()) {
|
||||
xtty->method(hot);
|
||||
}
|
||||
}
|
||||
if (_hot_count != 0) {
|
||||
xtty->print(" hot_count='%d'", _hot_count);
|
||||
}
|
||||
xtty->end_elem();
|
||||
}
|
||||
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::log_task_start
|
||||
void CompileTask::log_task_start(CompileLog* log) {
|
||||
log->begin_head("task");
|
||||
log_task(log);
|
||||
log->end_head();
|
||||
}
|
||||
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::log_task_done
|
||||
void CompileTask::log_task_done(CompileLog* log) {
|
||||
Thread* thread = Thread::current();
|
||||
methodHandle method(thread, this->method());
|
||||
ResourceMark rm(thread);
|
||||
|
||||
if (!_is_success) {
|
||||
const char* reason = _failure_reason != NULL ? _failure_reason : "unknown";
|
||||
log->elem("failure reason='%s'", reason);
|
||||
}
|
||||
|
||||
// <task_done ... stamp='1.234'> </task>
|
||||
nmethod* nm = code();
|
||||
log->begin_elem("task_done success='%d' nmsize='%d' count='%d'",
|
||||
_is_success, nm == NULL ? 0 : nm->content_size(),
|
||||
method->invocation_count());
|
||||
int bec = method->backedge_count();
|
||||
if (bec != 0) log->print(" backedge_count='%d'", bec);
|
||||
// Note: "_is_complete" is about to be set, but is not.
|
||||
if (_num_inlined_bytecodes != 0) {
|
||||
log->print(" inlined_bytes='%d'", _num_inlined_bytecodes);
|
||||
}
|
||||
log->stamp();
|
||||
log->end_elem();
|
||||
log->clear_identities(); // next task will have different CI
|
||||
log->tail("task");
|
||||
if (log->unflushed_count() > 2000) {
|
||||
log->flush();
|
||||
}
|
||||
log->mark_file_end();
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileTask::print_inlining
|
||||
void CompileTask::print_inlining_inner(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg) {
|
||||
// 1234567
|
||||
st->print(" "); // print timestamp
|
||||
// 1234
|
||||
st->print(" "); // print compilation number
|
||||
|
||||
// method attributes
|
||||
if (method->is_loaded()) {
|
||||
const char sync_char = method->is_synchronized() ? 's' : ' ';
|
||||
const char exception_char = method->has_exception_handlers() ? '!' : ' ';
|
||||
const char monitors_char = method->has_monitor_bytecodes() ? 'm' : ' ';
|
||||
|
||||
// print method attributes
|
||||
st->print(" %c%c%c ", sync_char, exception_char, monitors_char);
|
||||
} else {
|
||||
// %s!bn
|
||||
st->print(" "); // print method attributes
|
||||
}
|
||||
|
||||
if (TieredCompilation) {
|
||||
st->print(" ");
|
||||
}
|
||||
st->print(" "); // more indent
|
||||
st->print(" "); // initial inlining indent
|
||||
|
||||
for (int i = 0; i < inline_level; i++) st->print(" ");
|
||||
|
||||
st->print("@ %d ", bci); // print bci
|
||||
method->print_short_name(st);
|
||||
if (method->is_loaded())
|
||||
st->print(" (%d bytes)", method->code_size());
|
||||
else
|
||||
st->print(" (not loaded)");
|
||||
|
||||
if (msg != NULL) {
|
||||
st->print(" %s", msg);
|
||||
}
|
||||
st->cr();
|
||||
}
|
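print_inlining_inner() lays out fixed-width columns (timestamp, compile id, the s/!/m attribute flags), two spaces of indent per inline level, the bci, the method name and size, and an optional message. A rough standalone sketch of that layout; the method name, bci, size and message below are hypothetical:

#include <cstdio>

// Rough sketch of one inlining trace line, mirroring the column layout in
// CompileTask::print_inlining_inner(). All concrete values are invented.
static void print_inlining_line(int inline_level, int bci,
                                const char* method, int code_size,
                                const char* msg) {
  std::printf("%7s %4s %c%c%c  ", "", "", ' ', '!', ' ');    // timestamp, id, attrs
  for (int i = 0; i < inline_level; i++) std::printf("  ");  // per-level indent
  std::printf("@ %d  %s (%d bytes)", bci, method, code_size);
  if (msg != nullptr) std::printf("   %s", msg);
  std::printf("\n");
}

int main() {
  print_inlining_line(2, 11, "java.lang.String::hashCode", 55, "inline (hot)");
  return 0;
}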
151
hotspot/src/share/vm/compiler/compileTask.hpp
Normal file
@ -0,0 +1,151 @@
|
||||
/*
|
||||
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_COMPILER_COMPILETASK_HPP
|
||||
#define SHARE_VM_COMPILER_COMPILETASK_HPP
|
||||
|
||||
#include "code/nmethod.hpp"
|
||||
#include "ci/ciMethod.hpp"
|
||||
#include "compiler/compileLog.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "utilities/xmlstream.hpp"
|
||||
|
||||
// CompileTask
|
||||
//
|
||||
// An entry in the compile queue. It represents a pending or current
|
||||
// compilation.
|
||||
|
||||
class CompileTask : public CHeapObj<mtCompiler> {
|
||||
friend class VMStructs;
|
||||
|
||||
private:
|
||||
static CompileTask* _task_free_list;
|
||||
#ifdef ASSERT
|
||||
static int _num_allocated_tasks;
|
||||
#endif
|
||||
|
||||
Monitor* _lock;
|
||||
uint _compile_id;
|
||||
Method* _method;
|
||||
jobject _method_holder;
|
||||
int _osr_bci;
|
||||
bool _is_complete;
|
||||
bool _is_success;
|
||||
bool _is_blocking;
|
||||
int _comp_level;
|
||||
int _num_inlined_bytecodes;
|
||||
nmethodLocker* _code_handle; // holder of eventual result
|
||||
CompileTask* _next, *_prev;
|
||||
bool _is_free;
|
||||
// Fields used for logging why the compilation was initiated:
|
||||
jlong _time_queued; // in units of os::elapsed_counter()
|
||||
Method* _hot_method; // which method actually triggered this task
|
||||
jobject _hot_method_holder;
|
||||
int _hot_count; // information about its invocation counter
|
||||
const char* _comment; // more info about the task
|
||||
const char* _failure_reason;
|
||||
|
||||
public:
|
||||
CompileTask() {
|
||||
_lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock");
|
||||
}
|
||||
|
||||
void initialize(int compile_id, methodHandle method, int osr_bci, int comp_level,
|
||||
methodHandle hot_method, int hot_count, const char* comment,
|
||||
bool is_blocking);
|
||||
|
||||
static CompileTask* allocate();
|
||||
static void free(CompileTask* task);
|
||||
|
||||
int compile_id() const { return _compile_id; }
|
||||
Method* method() const { return _method; }
|
||||
Method* hot_method() const { return _hot_method; }
|
||||
int osr_bci() const { return _osr_bci; }
|
||||
bool is_complete() const { return _is_complete; }
|
||||
bool is_blocking() const { return _is_blocking; }
|
||||
bool is_success() const { return _is_success; }
|
||||
|
||||
nmethodLocker* code_handle() const { return _code_handle; }
|
||||
void set_code_handle(nmethodLocker* l) { _code_handle = l; }
|
||||
nmethod* code() const; // _code_handle->code()
|
||||
void set_code(nmethod* nm); // _code_handle->set_code(nm)
|
||||
|
||||
Monitor* lock() const { return _lock; }
|
||||
|
||||
void mark_complete() { _is_complete = true; }
|
||||
void mark_success() { _is_success = true; }
|
||||
|
||||
int comp_level() { return _comp_level;}
|
||||
void set_comp_level(int comp_level) { _comp_level = comp_level;}
|
||||
|
||||
int num_inlined_bytecodes() const { return _num_inlined_bytecodes; }
|
||||
void set_num_inlined_bytecodes(int n) { _num_inlined_bytecodes = n; }
|
||||
|
||||
CompileTask* next() const { return _next; }
|
||||
void set_next(CompileTask* next) { _next = next; }
|
||||
CompileTask* prev() const { return _prev; }
|
||||
void set_prev(CompileTask* prev) { _prev = prev; }
|
||||
bool is_free() const { return _is_free; }
|
||||
void set_is_free(bool val) { _is_free = val; }
|
||||
|
||||
// RedefineClasses support
|
||||
void metadata_do(void f(Metadata*));
|
||||
void mark_on_stack();
|
||||
|
||||
private:
|
||||
static void print_impl(outputStream* st, Method* method, int compile_id, int comp_level,
|
||||
bool is_osr_method = false, int osr_bci = -1, bool is_blocking = false,
|
||||
const char* msg = NULL, bool short_form = false, bool cr = true);
|
||||
|
||||
public:
|
||||
void print(outputStream* st = tty, const char* msg = NULL, bool short_form = false, bool cr = true);
|
||||
static void print(outputStream* st, const nmethod* nm, const char* msg = NULL, bool short_form = false, bool cr = true) {
|
||||
print_impl(st, nm->method(), nm->compile_id(), nm->comp_level(),
|
||||
nm->is_osr_method(), nm->is_osr_method() ? nm->osr_entry_bci() : -1, /*is_blocking*/ false,
|
||||
msg, short_form, cr);
|
||||
}
|
||||
|
||||
static void print_inline_indent(int inline_level, outputStream* st = tty);
|
||||
|
||||
void print_tty();
|
||||
void print_line_on_error(outputStream* st, char* buf, int buflen);
|
||||
|
||||
void log_task(xmlStream* log);
|
||||
void log_task_queued();
|
||||
void log_task_start(CompileLog* log);
|
||||
void log_task_done(CompileLog* log);
|
||||
|
||||
void set_failure_reason(const char* reason) {
|
||||
_failure_reason = reason;
|
||||
}
|
||||
|
||||
bool check_break_at_flags();
|
||||
|
||||
static void print_inlining_inner(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg = NULL);
|
||||
static void print_inlining_tty(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
|
||||
print_inlining_inner(tty, method, inline_level, bci, msg);
|
||||
}
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_COMPILER_COMPILETASK_HPP
|
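The header above keeps recycled tasks on a static _task_free_list and hands them out through allocate()/free(). A minimal standalone sketch of that free-list pattern; the class name is a stand-in and the locking and ASSERT-only bookkeeping of the real implementation are omitted:

#include <cassert>
#include <cstddef>

// Minimal sketch of the allocate()/free() free-list pattern declared above.
class Task {
 public:
  Task* next = nullptr;
  bool  is_free = false;

  static Task* _free_list;

  static Task* allocate() {
    Task* t;
    if (_free_list != nullptr) {
      t = _free_list;            // reuse a recycled task
      _free_list = t->next;
    } else {
      t = new Task();            // grow the pool
    }
    t->is_free = false;
    return t;
  }

  static void deallocate(Task* t) {
    assert(!t->is_free);
    t->is_free = true;           // mark and push back on the free list
    t->next = _free_list;
    _free_list = t;
  }
};

Task* Task::_free_list = nullptr;

int main() {
  Task* t = Task::allocate();
  Task::deallocate(t);
  Task* again = Task::allocate();   // reuses the recycled task
  return (t == again) ? 0 : 1;
}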
@ -66,7 +66,8 @@ class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
|
||||
virtual void do_klass(Klass* k);
|
||||
void do_klass_nv(Klass* k);
|
||||
|
||||
virtual void do_class_loader_data(ClassLoaderData* cld);
|
||||
virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
|
||||
void do_cld_nv(ClassLoaderData* cld);
|
||||
};
|
||||
|
||||
class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
|
||||
|
@ -50,11 +50,11 @@ inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) {
|
||||
|
||||
inline void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) {
|
||||
ClassLoaderData* cld = k->class_loader_data();
|
||||
do_class_loader_data(cld);
|
||||
do_cld_nv(cld);
|
||||
}
|
||||
inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); }
|
||||
|
||||
inline void MetadataAwareOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) {
|
||||
inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) {
|
||||
assert(_klass_closure._oop_closure == this, "Must be");
|
||||
|
||||
bool claim = true; // Must claim the class loader data before processing.
|
||||
|
@ -702,7 +702,7 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,
|
||||
!_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
|
||||
oop(bottom)) && \
|
||||
!_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
|
||||
size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
|
||||
size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr); \
|
||||
bottom += _cfls->adjustObjectSize(word_sz); \
|
||||
} else { \
|
||||
bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
|
||||
@ -729,7 +729,7 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,
|
||||
!_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
|
||||
oop(bottom)) && \
|
||||
!_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
|
||||
size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
|
||||
size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr); \
|
||||
bottom += _cfls->adjustObjectSize(word_sz); \
|
||||
} else { \
|
||||
bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
|
||||
@ -2989,7 +2989,7 @@ initialize_sequential_subtasks_for_marking(int n_threads,
|
||||
assert(task_size > CardTableModRefBS::card_size_in_words &&
|
||||
(task_size % CardTableModRefBS::card_size_in_words == 0),
|
||||
"Otherwise arithmetic below would be incorrect");
|
||||
MemRegion span = _gen->reserved();
|
||||
MemRegion span = _old_gen->reserved();
|
||||
if (low != NULL) {
|
||||
if (span.contains(low)) {
|
||||
// Align low down to a card boundary so that
|
||||
|
@ -99,7 +99,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
|
||||
BlockOffsetArrayNonContigSpace _bt;
|
||||
|
||||
CMSCollector* _collector;
|
||||
ConcurrentMarkSweepGeneration* _gen;
|
||||
ConcurrentMarkSweepGeneration* _old_gen;
|
||||
|
||||
// Data structures for free blocks (used during allocation/sweeping)
|
||||
|
||||
|
@ -212,7 +212,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
|
||||
use_adaptive_freelists,
|
||||
dictionaryChoice);
|
||||
NOT_PRODUCT(debug_cms_space = _cmsSpace;)
|
||||
_cmsSpace->_gen = this;
|
||||
_cmsSpace->_old_gen = this;
|
||||
|
||||
_gc_stats = new CMSGCStats();
|
||||
|
||||
@ -359,13 +359,13 @@ double CMSStats::time_until_cms_gen_full() const {
|
||||
(size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
|
||||
if (cms_free > expected_promotion) {
|
||||
// Start a cms collection if there isn't enough space to promote
|
||||
// for the next minor collection. Use the padded average as
|
||||
// for the next young collection. Use the padded average as
|
||||
// a safety factor.
|
||||
cms_free -= expected_promotion;
|
||||
|
||||
// Adjust by the safety factor.
|
||||
double cms_free_dbl = (double)cms_free;
|
||||
double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
|
||||
double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
|
||||
// Apply a further correction factor which tries to adjust
|
||||
// for recent occurance of concurrent mode failures.
|
||||
cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
|
||||
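The hunk above reduces the free space in the CMS generation by the expected promotion volume and then by a safety factor before estimating when the generation fills up. A standalone sketch of that adjustment with invented numbers; the real code additionally applies cms_free_adjustment_factor(), which is not reproduced here:

#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical inputs, in bytes.
  size_t cms_free           = 200 * 1024 * 1024; // free space in the old gen
  size_t expected_promotion = 40 * 1024 * 1024;  // padded average promotion
  double safety_factor_pct  = 10.0;              // stand-in for CMSIncrementalSafetyFactor

  if (cms_free > expected_promotion) {
    // Keep only what should remain after the next young collection,
    // then shrink it by the safety factor, as in the hunk above.
    cms_free -= expected_promotion;
    double adjustment    = (100.0 - safety_factor_pct) / 100.0;
    double adjusted_free = (double)cms_free * adjustment;
    std::printf("adjusted free: %.1f MB\n", adjusted_free / (1024.0 * 1024.0));
  }
  return 0;
}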
@ -531,7 +531,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
|
||||
if (CMSConcurrentMTEnabled) {
|
||||
if (FLAG_IS_DEFAULT(ConcGCThreads)) {
|
||||
// just for now
|
||||
FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
|
||||
FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
|
||||
}
|
||||
if (ConcGCThreads > 1) {
|
||||
_conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
|
||||
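(ParallelGCThreads + 3) / 4 is the usual integer idiom for rounding a division up, so the default concurrent thread count comes out as roughly a quarter of the parallel thread count, rounded up. A small check of that behaviour with arbitrary values:

#include <cassert>

// (n + 3) / 4 == ceil(n / 4.0) for non-negative integers.
static unsigned conc_threads_default(unsigned parallel_gc_threads) {
  return (parallel_gc_threads + 3) / 4;
}

int main() {
  assert(conc_threads_default(1)  == 1);
  assert(conc_threads_default(4)  == 1);
  assert(conc_threads_default(5)  == 2);
  assert(conc_threads_default(8)  == 2);
  assert(conc_threads_default(13) == 4);
  return 0;
}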
@ -592,7 +592,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
|
||||
_cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
|
||||
|
||||
// Clip CMSBootstrapOccupancy between 0 and 100.
|
||||
_bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
|
||||
_bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;
|
||||
|
||||
// Now tell CMS generations the identity of their collector
|
||||
ConcurrentMarkSweepGeneration::set_collector(this);
|
||||
@ -613,7 +613,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
|
||||
_end_addr = gch->end_addr();
|
||||
assert(_young_gen != NULL, "no _young_gen");
|
||||
_eden_chunk_index = 0;
|
||||
_eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
|
||||
_eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
|
||||
_eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
|
||||
}
|
||||
|
||||
@ -795,29 +795,22 @@ void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
|
||||
size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
|
||||
gclog_or_tty->print_cr("\nFrom compute_new_size: ");
|
||||
gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
|
||||
gclog_or_tty->print_cr(" Desired free fraction %f",
|
||||
desired_free_percentage);
|
||||
gclog_or_tty->print_cr(" Maximum free fraction %f",
|
||||
maximum_free_percentage);
|
||||
gclog_or_tty->print_cr(" Capacity " SIZE_FORMAT, capacity()/1000);
|
||||
gclog_or_tty->print_cr(" Desired capacity " SIZE_FORMAT,
|
||||
desired_capacity/1000);
|
||||
gclog_or_tty->print_cr(" Desired free fraction %f", desired_free_percentage);
|
||||
gclog_or_tty->print_cr(" Maximum free fraction %f", maximum_free_percentage);
|
||||
gclog_or_tty->print_cr(" Capacity " SIZE_FORMAT, capacity() / 1000);
|
||||
gclog_or_tty->print_cr(" Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
|
||||
size_t young_size = gch->young_gen()->capacity();
|
||||
gclog_or_tty->print_cr(" Young gen size " SIZE_FORMAT, young_size / 1000);
|
||||
gclog_or_tty->print_cr(" unsafe_max_alloc_nogc " SIZE_FORMAT,
|
||||
unsafe_max_alloc_nogc()/1000);
|
||||
gclog_or_tty->print_cr(" contiguous available " SIZE_FORMAT,
|
||||
contiguous_available()/1000);
|
||||
gclog_or_tty->print_cr(" Expand by " SIZE_FORMAT " (bytes)",
|
||||
expand_bytes);
|
||||
gclog_or_tty->print_cr(" unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
|
||||
gclog_or_tty->print_cr(" contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
|
||||
gclog_or_tty->print_cr(" Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
|
||||
}
|
||||
// safe if expansion fails
|
||||
expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
|
||||
if (PrintGCDetails && Verbose) {
|
||||
gclog_or_tty->print_cr(" Expanded free fraction %f",
|
||||
((double) free()) / capacity());
|
||||
gclog_or_tty->print_cr(" Expanded free fraction %f", ((double) free()) / capacity());
|
||||
}
|
||||
} else {
|
||||
size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
|
||||
@ -834,16 +827,14 @@ Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
|
||||
return cmsSpace()->freelistLock();
|
||||
}
|
||||
|
||||
HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
|
||||
bool tlab) {
|
||||
HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
|
||||
CMSSynchronousYieldRequest yr;
|
||||
MutexLockerEx x(freelistLock(),
|
||||
Mutex::_no_safepoint_check_flag);
|
||||
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
|
||||
return have_lock_and_allocate(size, tlab);
|
||||
}
|
||||
|
||||
HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
|
||||
bool tlab /* ignored */) {
|
||||
bool tlab /* ignored */) {
|
||||
assert_lock_strong(freelistLock());
|
||||
size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
|
||||
HeapWord* res = cmsSpace()->allocate(adjustedSize);
|
||||
@ -2426,7 +2417,7 @@ void CMSCollector::verify_after_remark_work_1() {
|
||||
|
||||
gch->gen_process_roots(&srs,
|
||||
GenCollectedHeap::OldGen,
|
||||
true, // younger gens are roots
|
||||
true, // young gen as roots
|
||||
GenCollectedHeap::ScanningOption(roots_scanning_options()),
|
||||
should_unload_classes(),
|
||||
¬Older,
|
||||
@ -2498,7 +2489,7 @@ void CMSCollector::verify_after_remark_work_2() {
|
||||
|
||||
gch->gen_process_roots(&srs,
|
||||
GenCollectedHeap::OldGen,
|
||||
true, // younger gens are roots
|
||||
true, // young gen as roots
|
||||
GenCollectedHeap::ScanningOption(roots_scanning_options()),
|
||||
should_unload_classes(),
|
||||
¬Older,
|
||||
@ -2952,12 +2943,7 @@ void CMSCollector::checkpointRootsInitialWork() {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
|
||||
assert(_collectorState == InitialMarking, "just checking");
|
||||
|
||||
// If there has not been a GC[n-1] since last GC[n] cycle completed,
|
||||
// precede our marking with a collection of all
|
||||
// younger generations to keep floating garbage to a minimum.
|
||||
// XXX: we won't do this for now -- it's an optimization to be done later.
|
||||
|
||||
// already have locks
|
||||
// Already have locks.
|
||||
assert_lock_strong(bitMapLock());
|
||||
assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
|
||||
|
||||
@ -3027,7 +3013,7 @@ void CMSCollector::checkpointRootsInitialWork() {
|
||||
|
||||
gch->gen_process_roots(&srs,
|
||||
GenCollectedHeap::OldGen,
|
||||
true, // younger gens are roots
|
||||
true, // young gen as roots
|
||||
GenCollectedHeap::ScanningOption(roots_scanning_options()),
|
||||
should_unload_classes(),
|
||||
¬Older,
|
||||
@ -3037,7 +3023,7 @@ void CMSCollector::checkpointRootsInitialWork() {
|
||||
}
|
||||
|
||||
// Clear mod-union table; it will be dirtied in the prologue of
|
||||
// CMS generation per each younger generation collection.
|
||||
// CMS generation per each young generation collection.
|
||||
|
||||
assert(_modUnionTable.isAllClear(),
|
||||
"Was cleared in most recent final checkpoint phase"
|
||||
@ -3057,7 +3043,7 @@ bool CMSCollector::markFromRoots() {
|
||||
// assert(!SafepointSynchronize::is_at_safepoint(),
|
||||
// "inconsistent argument?");
|
||||
// However that wouldn't be right, because it's possible that
|
||||
// a safepoint is indeed in progress as a younger generation
|
||||
// a safepoint is indeed in progress as a young generation
|
||||
// stop-the-world GC happens even as we mark in this generation.
|
||||
assert(_collectorState == Marking, "inconsistent state?");
|
||||
check_correct_thread_executing();
|
||||
@ -3065,7 +3051,7 @@ bool CMSCollector::markFromRoots() {
|
||||
|
||||
// Weak ref discovery note: We may be discovering weak
|
||||
// refs in this generation concurrent (but interleaved) with
|
||||
// weak ref discovery by a younger generation collector.
|
||||
// weak ref discovery by the young generation collector.
|
||||
|
||||
CMSTokenSyncWithLocks ts(true, bitMapLock());
|
||||
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
|
||||
@ -3095,7 +3081,7 @@ bool CMSCollector::markFromRootsWork() {
|
||||
|
||||
// Note that when we do a marking step we need to hold the
|
||||
// bit map lock -- recall that direct allocation (by mutators)
|
||||
// and promotion (by younger generation collectors) is also
|
||||
// and promotion (by the young generation collector) is also
|
||||
// marking the bit map. [the so-called allocate live policy.]
|
||||
// Because the implementation of bit map marking is not
|
||||
// robust wrt simultaneous marking of bits in the same word,
|
||||
@ -4049,7 +4035,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
|
||||
// one of these methods, please check the other method too.
|
||||
|
||||
size_t CMSCollector::preclean_mod_union_table(
|
||||
ConcurrentMarkSweepGeneration* gen,
|
||||
ConcurrentMarkSweepGeneration* old_gen,
|
||||
ScanMarkedObjectsAgainCarefullyClosure* cl) {
|
||||
verify_work_stacks_empty();
|
||||
verify_overflow_empty();
|
||||
@ -4064,10 +4050,10 @@ size_t CMSCollector::preclean_mod_union_table(
|
||||
// generation, but we might potentially miss cards when the
|
||||
// generation is rapidly expanding while we are in the midst
|
||||
// of precleaning.
|
||||
HeapWord* startAddr = gen->reserved().start();
|
||||
HeapWord* endAddr = gen->reserved().end();
|
||||
HeapWord* startAddr = old_gen->reserved().start();
|
||||
HeapWord* endAddr = old_gen->reserved().end();
|
||||
|
||||
cl->setFreelistLock(gen->freelistLock()); // needed for yielding
|
||||
cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding
|
||||
|
||||
size_t numDirtyCards, cumNumDirtyCards;
|
||||
HeapWord *nextAddr, *lastAddr;
|
||||
@ -4109,7 +4095,7 @@ size_t CMSCollector::preclean_mod_union_table(
|
||||
HeapWord* stop_point = NULL;
|
||||
stopTimer();
|
||||
// Potential yield point
|
||||
CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
|
||||
CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
|
||||
bitMapLock());
|
||||
startTimer();
|
||||
{
|
||||
@ -4117,7 +4103,7 @@ size_t CMSCollector::preclean_mod_union_table(
|
||||
verify_overflow_empty();
|
||||
sample_eden();
|
||||
stop_point =
|
||||
gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
|
||||
old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
|
||||
}
|
||||
if (stop_point != NULL) {
|
||||
// The careful iteration stopped early either because it found an
|
||||
@ -4152,15 +4138,15 @@ size_t CMSCollector::preclean_mod_union_table(
|
||||
// below are largely identical; if you need to modify
|
||||
// one of these methods, please check the other method too.
|
||||
|
||||
size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
|
||||
size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
|
||||
ScanMarkedObjectsAgainCarefullyClosure* cl) {
|
||||
// strategy: it's similar to precleamModUnionTable above, in that
|
||||
// we accumulate contiguous ranges of dirty cards, mark these cards
|
||||
// precleaned, then scan the region covered by these cards.
|
||||
HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
|
||||
HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
|
||||
HeapWord* endAddr = (HeapWord*)(old_gen->_virtual_space.high());
|
||||
HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
|
||||
|
||||
cl->setFreelistLock(gen->freelistLock()); // needed for yielding
|
||||
cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding
|
||||
|
||||
size_t numDirtyCards, cumNumDirtyCards;
|
||||
HeapWord *lastAddr, *nextAddr;
|
||||
@ -4197,13 +4183,13 @@ size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
|
||||
|
||||
if (!dirtyRegion.is_empty()) {
|
||||
stopTimer();
|
||||
CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
|
||||
CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
|
||||
startTimer();
|
||||
sample_eden();
|
||||
verify_work_stacks_empty();
|
||||
verify_overflow_empty();
|
||||
HeapWord* stop_point =
|
||||
gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
|
||||
old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
|
||||
if (stop_point != NULL) {
|
||||
assert((_collectorState == AbortablePreclean && should_abort_preclean()),
|
||||
"Should only be AbortablePreclean.");
|
||||
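Both preclean methods follow the pattern spelled out in the strategy comment above: walk the card table, accumulate a contiguous run of dirty cards, mark the run precleaned, then scan the objects the run covers. A simplified standalone sketch of the range-accumulation step; the card size and card states are stand-ins, and the real object scanning and locking are omitted:

#include <cstdio>
#include <vector>

enum CardState { clean_card, dirty_card, precleaned_card };

// Sketch: find maximal runs of dirty cards, mark them precleaned, and report
// the word range each run covers. The real code then scans the covered
// objects under the freelist and bitmap locks.
int main() {
  const size_t card_size_in_words = 64;            // stand-in value
  std::vector<CardState> cards = {
    clean_card, dirty_card, dirty_card, clean_card, dirty_card, clean_card
  };

  for (size_t i = 0; i < cards.size(); ) {
    if (cards[i] != dirty_card) { ++i; continue; }
    size_t start = i;
    while (i < cards.size() && cards[i] == dirty_card) {
      cards[i] = precleaned_card;                  // mark before scanning
      ++i;
    }
    std::printf("scan words [%zu, %zu)\n",
                start * card_size_in_words, i * card_size_in_words);
  }
  return 0;
}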
@ -4623,7 +4609,7 @@ void CMSParRemarkTask::work(uint worker_id) {
|
||||
ResourceMark rm;
|
||||
GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
|
||||
for (int i = 0; i < array->length(); i++) {
|
||||
par_mrias_cl.do_class_loader_data(array->at(i));
|
||||
par_mrias_cl.do_cld_nv(array->at(i));
|
||||
}
|
||||
|
||||
// We don't need to keep track of new CLDs anymore.
|
||||
@ -5086,7 +5072,7 @@ void CMSCollector::do_remark_parallel() {
|
||||
// preclean phase did of eden, plus the [two] tasks of
|
||||
// scanning the [two] survivor spaces. Further fine-grain
|
||||
// parallelization of the scanning of the survivor spaces
|
||||
// themselves, and of precleaning of the younger gen itself
|
||||
// themselves, and of precleaning of the young gen itself
|
||||
// is deferred to the future.
|
||||
initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
|
||||
|
||||
@ -5177,7 +5163,7 @@ void CMSCollector::do_remark_non_parallel() {
|
||||
|
||||
gch->gen_process_roots(&srs,
|
||||
GenCollectedHeap::OldGen,
|
||||
true, // younger gens as roots
|
||||
true, // young gen as roots
|
||||
GenCollectedHeap::ScanningOption(roots_scanning_options()),
|
||||
should_unload_classes(),
|
||||
&mrias_cl,
|
||||
@ -5199,7 +5185,7 @@ void CMSCollector::do_remark_non_parallel() {
|
||||
ResourceMark rm;
|
||||
GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
|
||||
for (int i = 0; i < array->length(); i++) {
|
||||
mrias_cl.do_class_loader_data(array->at(i));
|
||||
mrias_cl.do_cld_nv(array->at(i));
|
||||
}
|
||||
|
||||
// We don't need to keep track of new CLDs anymore.
|
||||
@ -5661,7 +5647,7 @@ void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generati
|
||||
}
|
||||
}
|
||||
|
||||
void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
|
||||
void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
|
||||
// We iterate over the space(s) underlying this generation,
|
||||
// checking the mark bit map to see if the bits corresponding
|
||||
// to specific blocks are marked or not. Blocks that are
|
||||
@ -5690,26 +5676,26 @@ void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
|
||||
// check that we hold the requisite locks
|
||||
assert(have_cms_token(), "Should hold cms token");
|
||||
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
|
||||
assert_lock_strong(gen->freelistLock());
|
||||
assert_lock_strong(old_gen->freelistLock());
|
||||
assert_lock_strong(bitMapLock());
|
||||
|
||||
assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
|
||||
assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
|
||||
gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
|
||||
_inter_sweep_estimate.padded_average(),
|
||||
_intra_sweep_estimate.padded_average());
|
||||
gen->setNearLargestChunk();
|
||||
old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
|
||||
_inter_sweep_estimate.padded_average(),
|
||||
_intra_sweep_estimate.padded_average());
|
||||
old_gen->setNearLargestChunk();
|
||||
|
||||
{
|
||||
SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
|
||||
gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
|
||||
SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
|
||||
old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
|
||||
// We need to free-up/coalesce garbage/blocks from a
|
||||
// co-terminal free run. This is done in the SweepClosure
|
||||
// destructor; so, do not remove this scope, else the
|
||||
// end-of-sweep-census below will be off by a little bit.
|
||||
}
|
||||
gen->cmsSpace()->sweep_completed();
|
||||
gen->cmsSpace()->endSweepFLCensus(sweep_count());
|
||||
old_gen->cmsSpace()->sweep_completed();
|
||||
old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
|
||||
if (should_unload_classes()) { // unloaded classes this cycle,
|
||||
_concurrent_cycles_since_last_unload = 0; // ... reset count
|
||||
} else { // did not unload classes,
|
||||
@ -6324,12 +6310,12 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
|
||||
// objArrays are precisely marked; restrict scanning
|
||||
// to dirty cards only.
|
||||
size = CompactibleFreeListSpace::adjustObjectSize(
|
||||
p->oop_iterate(_scanningClosure, mr));
|
||||
p->oop_iterate_size(_scanningClosure, mr));
|
||||
} else {
|
||||
// A non-array may have been imprecisely marked; we need
|
||||
// to scan object in its entirety.
|
||||
size = CompactibleFreeListSpace::adjustObjectSize(
|
||||
p->oop_iterate(_scanningClosure));
|
||||
p->oop_iterate_size(_scanningClosure));
|
||||
}
|
||||
#ifdef ASSERT
|
||||
size_t direct_size =
|
||||
@ -6417,7 +6403,7 @@ size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
|
||||
// Note that we do not yield while we iterate over
|
||||
// the interior oops of p, pushing the relevant ones
|
||||
// on our marking stack.
|
||||
size_t size = p->oop_iterate(_scanning_closure);
|
||||
size_t size = p->oop_iterate_size(_scanning_closure);
|
||||
do_yield_check();
|
||||
// Observe that below, we do not abandon the preclean
|
||||
// phase as soon as we should; rather we empty the
|
||||
|
@ -723,7 +723,7 @@ class CMSCollector: public CHeapObj<mtGC> {
|
||||
|
||||
private:
|
||||
// Support for parallelizing young gen rescan in CMS remark phase
|
||||
ParNewGeneration* _young_gen; // the younger gen
|
||||
ParNewGeneration* _young_gen;
|
||||
|
||||
HeapWord** _top_addr; // ... Top of Eden
|
||||
HeapWord** _end_addr; // ... End of Eden
|
||||
@ -772,9 +772,9 @@ class CMSCollector: public CHeapObj<mtGC> {
|
||||
private:
|
||||
|
||||
// Concurrent precleaning work
|
||||
size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
|
||||
size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* old_gen,
|
||||
ScanMarkedObjectsAgainCarefullyClosure* cl);
|
||||
size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
|
||||
size_t preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
|
||||
ScanMarkedObjectsAgainCarefullyClosure* cl);
|
||||
// Does precleaning work, returning a quantity indicative of
|
||||
// the amount of "useful work" done.
|
||||
@ -797,7 +797,7 @@ class CMSCollector: public CHeapObj<mtGC> {
|
||||
void refProcessingWork();
|
||||
|
||||
// Concurrent sweeping work
|
||||
void sweepWork(ConcurrentMarkSweepGeneration* gen);
|
||||
void sweepWork(ConcurrentMarkSweepGeneration* old_gen);
|
||||
|
||||
// (Concurrent) resetting of support data structures
|
||||
void reset(bool concurrent);
|
||||
@ -1120,10 +1120,8 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
|
||||
MemRegion used_region_at_save_marks() const;
|
||||
|
||||
// Does a "full" (forced) collection invoked on this generation collect
|
||||
// all younger generations as well? Note that the second conjunct is a
|
||||
// hack to allow the collection of the younger gen first if the flag is
|
||||
// set.
|
||||
virtual bool full_collects_younger_generations() const {
|
||||
// the young generation as well?
|
||||
virtual bool full_collects_young_generation() const {
|
||||
return !ScavengeBeforeFullGC;
|
||||
}
|
||||
|
||||
@ -1153,9 +1151,8 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
|
||||
|
||||
virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
|
||||
|
||||
// Inform this (non-young) generation that a promotion failure was
|
||||
// encountered during a collection of a younger generation that
|
||||
// promotes into this generation.
|
||||
// Inform this (old) generation that a promotion failure was
|
||||
// encountered during a collection of the young generation.
|
||||
virtual void promotion_failure_occurred();
|
||||
|
||||
bool should_collect(bool full, size_t size, bool tlab);
|
||||
|
@ -295,7 +295,7 @@ inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
|
||||
promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
|
||||
}
|
||||
|
||||
// If the younger gen collections were skipped, then the
|
||||
// If the young gen collection was skipped, then the
|
||||
// number of promoted bytes will be 0 and adding it to the
|
||||
// average will incorrectly lessen the average. It is, however,
|
||||
// also possible that no promotion was needed.
|
||||
|
@ -39,23 +39,17 @@
|
||||
|
||||
// ======= Concurrent Mark Sweep Thread ========
|
||||
|
||||
// The CMS thread is created when Concurrent Mark Sweep is used in the
|
||||
// older of two generations in a generational memory system.
|
||||
ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::_cmst = NULL;
|
||||
CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
|
||||
bool ConcurrentMarkSweepThread::_should_terminate = false;
|
||||
int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
|
||||
|
||||
ConcurrentMarkSweepThread*
|
||||
ConcurrentMarkSweepThread::_cmst = NULL;
|
||||
CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
|
||||
bool ConcurrentMarkSweepThread::_should_terminate = false;
|
||||
int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
|
||||
volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
|
||||
|
||||
volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
|
||||
|
||||
SurrogateLockerThread*
|
||||
ConcurrentMarkSweepThread::_slt = NULL;
|
||||
SurrogateLockerThread* ConcurrentMarkSweepThread::_slt = NULL;
|
||||
SurrogateLockerThread::SLT_msg_type
|
||||
ConcurrentMarkSweepThread::_sltBuffer = SurrogateLockerThread::empty;
|
||||
Monitor*
|
||||
ConcurrentMarkSweepThread::_sltMonitor = NULL;
|
||||
Monitor* ConcurrentMarkSweepThread::_sltMonitor = NULL;
|
||||
|
||||
ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
|
||||
: ConcurrentGCThread() {
|
||||
|
@ -69,20 +69,28 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
|
||||
Stack<oop, mtGC>* overflow_stacks_,
|
||||
size_t desired_plab_sz_,
|
||||
ParallelTaskTerminator& term_) :
|
||||
_to_space(to_space_), _old_gen(old_gen_), _young_gen(young_gen_), _thread_num(thread_num_),
|
||||
_work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
|
||||
_to_space(to_space_),
|
||||
_old_gen(old_gen_),
|
||||
_young_gen(young_gen_),
|
||||
_thread_num(thread_num_),
|
||||
_work_queue(work_queue_set_->queue(thread_num_)),
|
||||
_to_space_full(false),
|
||||
_overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
|
||||
_ageTable(false), // false ==> not the global age table, no perf data.
|
||||
_to_space_alloc_buffer(desired_plab_sz_),
|
||||
_to_space_closure(young_gen_, this), _old_gen_closure(young_gen_, this),
|
||||
_to_space_root_closure(young_gen_, this), _old_gen_root_closure(young_gen_, this),
|
||||
_to_space_closure(young_gen_, this),
|
||||
_old_gen_closure(young_gen_, this),
|
||||
_to_space_root_closure(young_gen_, this),
|
||||
_old_gen_root_closure(young_gen_, this),
|
||||
_older_gen_closure(young_gen_, this),
|
||||
_evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
|
||||
&_to_space_root_closure, young_gen_, &_old_gen_root_closure,
|
||||
work_queue_set_, &term_),
|
||||
_is_alive_closure(young_gen_), _scan_weak_ref_closure(young_gen_, this),
|
||||
_is_alive_closure(young_gen_),
|
||||
_scan_weak_ref_closure(young_gen_, this),
|
||||
_keep_alive_closure(&_scan_weak_ref_closure),
|
||||
_strong_roots_time(0.0), _term_time(0.0)
|
||||
_strong_roots_time(0.0),
|
||||
_term_time(0.0)
|
||||
{
|
||||
#if TASKQUEUE_STATS
|
||||
_term_attempts = 0;
|
||||
@ -90,8 +98,7 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
|
||||
_overflow_refill_objs = 0;
|
||||
#endif // TASKQUEUE_STATS
|
||||
|
||||
_survivor_chunk_array =
|
||||
(ChunkArray*) old_gen()->get_data_recorder(thread_num());
|
||||
_survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
|
||||
_hash_seed = 17; // Might want to take time-based random value.
|
||||
_start = os::elapsedTime();
|
||||
_old_gen_closure.set_generation(old_gen_);
|
||||
@ -154,7 +161,6 @@ void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void ParScanThreadState::trim_queues(int max_size) {
|
||||
ObjToScanQueue* queue = work_queue();
|
||||
do {
|
||||
@ -222,15 +228,12 @@ void ParScanThreadState::push_on_overflow_stack(oop p) {
|
||||
}
|
||||
|
||||
HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
|
||||
|
||||
// Otherwise, if the object is small enough, try to reallocate the
|
||||
// buffer.
|
||||
// If the object is small enough, try to reallocate the buffer.
|
||||
HeapWord* obj = NULL;
|
||||
if (!_to_space_full) {
|
||||
PLAB* const plab = to_space_alloc_buffer();
|
||||
Space* const sp = to_space();
|
||||
if (word_sz * 100 <
|
||||
ParallelGCBufferWastePct * plab->word_sz()) {
|
||||
Space* const sp = to_space();
|
||||
if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
|
||||
// Is small enough; abandon this buffer and start a new one.
|
||||
plab->retire();
|
||||
size_t buf_size = plab->word_sz();
|
||||
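The condition above decides whether an object that did not fit in the current PLAB counts as "small": if word_sz is below ParallelGCBufferWastePct percent of the buffer size, the mostly-used buffer is retired and refilled; otherwise the object is allocated directly in to-space, as the following hunks show. A standalone sketch of that decision with invented sizes:

#include <cstddef>
#include <cstdio>

// Sketch of the PLAB overflow decision: retire-and-refill for small objects,
// direct allocation for large ones. Sizes are in words and purely illustrative.
int main() {
  const size_t waste_pct    = 10;    // stand-in for ParallelGCBufferWastePct
  const size_t plab_word_sz = 4096;  // current buffer size
  const size_t word_sz      = 300;   // object that did not fit

  if (word_sz * 100 < waste_pct * plab_word_sz) {
    std::printf("small enough: retire the PLAB and start a new buffer\n");
  } else {
    std::printf("too large: allocate the object individually in to-space\n");
  }
  return 0;
}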
@ -241,8 +244,7 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
|
||||
size_t free_bytes = sp->free();
|
||||
while(buf_space == NULL && free_bytes >= min_bytes) {
|
||||
buf_size = free_bytes >> LogHeapWordSize;
|
||||
assert(buf_size == (size_t)align_object_size(buf_size),
|
||||
"Invariant");
|
||||
assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
|
||||
buf_space = sp->par_allocate(buf_size);
|
||||
free_bytes = sp->free();
|
||||
}
|
||||
@ -262,7 +264,6 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
|
||||
// We're used up.
|
||||
_to_space_full = true;
|
||||
}
|
||||
|
||||
} else {
|
||||
// Too large; allocate the object individually.
|
||||
obj = sp->par_allocate(word_sz);
|
||||
@ -271,7 +272,6 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
|
||||
return obj;
|
||||
}
|
||||
|
||||
|
||||
void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
|
||||
to_space_alloc_buffer()->undo_allocation(obj, word_sz);
|
||||
}
|
||||
@ -288,7 +288,7 @@ public:
|
||||
// Initializes states for the specified number of threads;
|
||||
ParScanThreadStateSet(int num_threads,
|
||||
Space& to_space,
|
||||
ParNewGeneration& gen,
|
||||
ParNewGeneration& young_gen,
|
||||
Generation& old_gen,
|
||||
ObjToScanQueueSet& queue_set,
|
||||
Stack<oop, mtGC>* overflow_stacks_,
|
||||
@ -315,21 +315,25 @@ public:
|
||||
|
||||
private:
|
||||
ParallelTaskTerminator& _term;
|
||||
ParNewGeneration& _gen;
|
||||
ParNewGeneration& _young_gen;
|
||||
Generation& _old_gen;
|
||||
public:
|
||||
bool is_valid(int id) const { return id < length(); }
|
||||
ParallelTaskTerminator* terminator() { return &_term; }
|
||||
};
|
||||
|
||||
|
||||
ParScanThreadStateSet::ParScanThreadStateSet(
|
||||
int num_threads, Space& to_space, ParNewGeneration& gen,
|
||||
Generation& old_gen, ObjToScanQueueSet& queue_set,
|
||||
Stack<oop, mtGC>* overflow_stacks,
|
||||
size_t desired_plab_sz, ParallelTaskTerminator& term)
|
||||
ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
|
||||
Space& to_space,
|
||||
ParNewGeneration& young_gen,
|
||||
Generation& old_gen,
|
||||
ObjToScanQueueSet& queue_set,
|
||||
Stack<oop, mtGC>* overflow_stacks,
|
||||
size_t desired_plab_sz,
|
||||
ParallelTaskTerminator& term)
|
||||
: ResourceArray(sizeof(ParScanThreadState), num_threads),
|
||||
_gen(gen), _old_gen(old_gen), _term(term)
|
||||
_young_gen(young_gen),
|
||||
_old_gen(old_gen),
|
||||
_term(term)
|
||||
{
|
||||
assert(num_threads > 0, "sanity check!");
|
||||
assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
|
||||
@ -337,13 +341,12 @@ ParScanThreadStateSet::ParScanThreadStateSet(
|
||||
// Initialize states.
|
||||
for (int i = 0; i < num_threads; ++i) {
|
||||
new ((ParScanThreadState*)_data + i)
|
||||
ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
|
||||
ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
|
||||
overflow_stacks, desired_plab_sz, term);
|
||||
}
|
||||
}
|
||||
|
||||
inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
|
||||
{
|
||||
inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
|
||||
assert(i >= 0 && i < length(), "sanity check!");
|
||||
return ((ParScanThreadState*)_data)[i];
|
||||
}
|
||||
@ -357,8 +360,7 @@ void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_trace
|
||||
}
|
||||
}
|
||||
|
||||
void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed)
|
||||
{
|
||||
void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
|
||||
_term.reset_for_reuse(active_threads);
|
||||
if (promotion_failed) {
|
||||
for (int i = 0; i < length(); ++i) {
|
||||
@ -368,36 +370,27 @@ void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed)
|
||||
}
|
||||
|
||||
#if TASKQUEUE_STATS
|
||||
void
|
||||
ParScanThreadState::reset_stats()
|
||||
{
|
||||
void ParScanThreadState::reset_stats() {
|
||||
taskqueue_stats().reset();
|
||||
_term_attempts = 0;
|
||||
_overflow_refills = 0;
|
||||
_overflow_refill_objs = 0;
|
||||
}
|
||||
|
||||
void ParScanThreadStateSet::reset_stats()
|
||||
{
|
||||
void ParScanThreadStateSet::reset_stats() {
|
||||
for (int i = 0; i < length(); ++i) {
|
||||
thread_state(i).reset_stats();
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
|
||||
{
|
||||
void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
|
||||
st->print_raw_cr("GC Termination Stats");
|
||||
st->print_raw_cr(" elapsed --strong roots-- "
|
||||
"-------termination-------");
|
||||
st->print_raw_cr("thr ms ms % "
|
||||
" ms % attempts");
|
||||
st->print_raw_cr("--- --------- --------- ------ "
|
||||
"--------- ------ --------");
|
||||
st->print_raw_cr(" elapsed --strong roots-- -------termination-------");
|
||||
st->print_raw_cr("thr ms ms % ms % attempts");
|
||||
st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
|
||||
}
|
||||
|
||||
void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
|
||||
{
|
||||
void ParScanThreadStateSet::print_termination_stats(outputStream* const st) {
|
||||
print_termination_stats_hdr(st);
|
||||
|
||||
for (int i = 0; i < length(); ++i) {
|
||||
@ -405,23 +398,20 @@ void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
|
||||
const double elapsed_ms = pss.elapsed_time() * 1000.0;
|
||||
const double s_roots_ms = pss.strong_roots_time() * 1000.0;
|
||||
const double term_ms = pss.term_time() * 1000.0;
|
||||
st->print_cr("%3d %9.2f %9.2f %6.2f "
|
||||
"%9.2f %6.2f " SIZE_FORMAT_W(8),
|
||||
st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
|
||||
i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
|
||||
term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
|
||||
}
|
||||
}
|
||||
|
||||
// Print stats related to work queue activity.
|
||||
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
|
||||
{
|
||||
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
|
||||
st->print_raw_cr("GC Task Stats");
|
||||
st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
|
||||
st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
|
||||
}
|
||||
|
||||
void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
|
||||
{
|
||||
void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) {
|
||||
print_taskqueue_stats_hdr(st);
|
||||
|
||||
TaskQueueStats totals;
|
||||
@ -443,8 +433,7 @@ void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
|
||||
}
|
||||
#endif // TASKQUEUE_STATS
|
||||
|
||||
void ParScanThreadStateSet::flush()
|
||||
{
|
||||
void ParScanThreadStateSet::flush() {
|
||||
// Work in this loop should be kept as lightweight as
|
||||
// possible since this might otherwise become a bottleneck
|
||||
// to scaling. Should we add heavy-weight work into this
|
||||
@ -454,12 +443,12 @@ void ParScanThreadStateSet::flush()
|
||||
|
||||
// Flush stats related to To-space PLAB activity and
|
||||
// retire the last buffer.
|
||||
par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_gen.plab_stats());
|
||||
par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());
|
||||
|
||||
// Every thread has its own age table. We need to merge
|
||||
// them all into one.
|
||||
ageTable *local_table = par_scan_state.age_table();
|
||||
_gen.age_table()->merge(local_table);
|
||||
_young_gen.age_table()->merge(local_table);
|
||||
|
||||
// Inform old gen that we're done.
|
||||
_old_gen.par_promote_alloc_done(i);
|
||||
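flush() above merges each worker's private age table into the generation's global one. A minimal sketch of that merge, treating an age table as a plain array of per-age counts; the real ageTable also drives tenuring-threshold decisions, which is not modelled here:

#include <cstddef>
#include <cstdio>

const int table_size = 16;           // stand-in for the number of tracked ages

struct AgeTable {
  size_t sizes[table_size] = {0};

  void merge(const AgeTable* local) {
    for (int i = 0; i < table_size; i++) {
      sizes[i] += local->sizes[i];   // accumulate the worker's counts
    }
  }
};

int main() {
  AgeTable global, worker0, worker1;
  worker0.sizes[1] = 100;            // hypothetical survivor counts
  worker1.sizes[1] = 40;
  global.merge(&worker0);
  global.merge(&worker1);
  std::printf("objects of age 1: %zu\n", global.sizes[1]);
  return 0;
}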
@ -478,8 +467,7 @@ void ParScanThreadStateSet::flush()
|
||||
|
||||
ParScanClosure::ParScanClosure(ParNewGeneration* g,
|
||||
ParScanThreadState* par_scan_state) :
|
||||
OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
|
||||
{
|
||||
OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
|
||||
_boundary = _g->reserved().end();
|
||||
}
|
||||
|
||||
@ -531,24 +519,23 @@ void ParEvacuateFollowersClosure::do_void() {
|
||||
ObjToScanQueue* work_q = par_scan_state()->work_queue();
|
||||
|
||||
while (true) {
|
||||
|
||||
// Scan to-space and old-gen objs until we run out of both.
|
||||
oop obj_to_scan;
|
||||
par_scan_state()->trim_queues(0);
|
||||
|
||||
// We have no local work, attempt to steal from other threads.
|
||||
|
||||
// attempt to steal work from promoted.
|
||||
// Attempt to steal work from promoted.
|
||||
if (task_queues()->steal(par_scan_state()->thread_num(),
|
||||
par_scan_state()->hash_seed(),
|
||||
obj_to_scan)) {
|
||||
bool res = work_q->push(obj_to_scan);
|
||||
assert(res, "Empty queue should have room for a push.");
|
||||
|
||||
// if successful, goto Start.
|
||||
// If successful, goto Start.
|
||||
continue;
|
||||
|
||||
// try global overflow list.
|
||||
// Try global overflow list.
|
||||
} else if (par_gen()->take_from_overflow_list(par_scan_state())) {
|
||||
continue;
|
||||
}
|
||||
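The loop above drains the local queue, then tries to steal from other workers, then falls back to the global overflow list, and only offers termination when all three come up empty. A simplified single-threaded sketch of that control flow; steal_from_others() and take_from_overflow_list() are hypothetical stand-ins for the real lock-free task queue operations:

#include <cstdio>
#include <deque>

static bool steal_from_others(int&)                   { return false; }
static bool take_from_overflow_list(std::deque<int>&) { return false; }

int main() {
  std::deque<int> work_q = {1, 2, 3};              // local tasks
  while (true) {
    while (!work_q.empty()) {                      // trim the local queue
      int task = work_q.front(); work_q.pop_front();
      std::printf("processing task %d\n", task);
    }
    int stolen;
    if (steal_from_others(stolen)) {               // try to steal
      work_q.push_back(stolen);
      continue;
    }
    if (take_from_overflow_list(work_q)) {         // try the overflow list
      continue;
    }
    break;                                         // offer termination
  }
  return 0;
}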
@ -564,15 +551,17 @@ void ParEvacuateFollowersClosure::do_void() {
|
||||
par_scan_state()->end_term_time();
|
||||
}
|
||||
|
||||
ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, Generation* old_gen,
|
||||
HeapWord* young_old_boundary, ParScanThreadStateSet* state_set,
|
||||
ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
|
||||
Generation* old_gen,
|
||||
HeapWord* young_old_boundary,
|
||||
ParScanThreadStateSet* state_set,
|
||||
StrongRootsScope* strong_roots_scope) :
|
||||
AbstractGangTask("ParNewGeneration collection"),
|
||||
_young_gen(young_gen), _old_gen(old_gen),
|
||||
_young_old_boundary(young_old_boundary),
|
||||
_state_set(state_set),
|
||||
_strong_roots_scope(strong_roots_scope)
|
||||
{}
|
||||
{}
|
||||
|
||||
void ParNewGenTask::work(uint worker_id) {
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
@ -595,8 +584,7 @@ void ParNewGenTask::work(uint worker_id) {
|
||||
par_scan_state.start_strong_roots();
|
||||
gch->gen_process_roots(_strong_roots_scope,
|
||||
GenCollectedHeap::YoungGen,
|
||||
true, // Process younger gens, if any,
|
||||
// as strong roots.
|
||||
true, // Process younger gens, if any, as strong roots.
|
||||
GenCollectedHeap::SO_ScavengeCodeCache,
|
||||
GenCollectedHeap::StrongAndWeakRoots,
|
||||
&par_scan_state.to_space_root_closure(),
|
||||
@ -613,8 +601,7 @@ void ParNewGenTask::work(uint worker_id) {
|
||||
#pragma warning( push )
|
||||
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
|
||||
#endif
|
||||
ParNewGeneration::
|
||||
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
|
||||
ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
|
||||
: DefNewGeneration(rs, initial_byte_size, "PCopy"),
|
||||
_overflow_list(NULL),
|
||||
_is_alive_closure(this),
|
||||
@ -625,20 +612,19 @@ ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
|
||||
_task_queues = new ObjToScanQueueSet(ParallelGCThreads);
|
||||
guarantee(_task_queues != NULL, "task_queues allocation failure.");
|
||||
|
||||
for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
|
||||
for (uint i = 0; i < ParallelGCThreads; i++) {
|
||||
ObjToScanQueue *q = new ObjToScanQueue();
|
||||
guarantee(q != NULL, "work_queue Allocation failure.");
|
||||
_task_queues->register_queue(i1, q);
|
||||
_task_queues->register_queue(i, q);
|
||||
}
|
||||
|
||||
for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
|
||||
_task_queues->queue(i2)->initialize();
|
||||
for (uint i = 0; i < ParallelGCThreads; i++) {
|
||||
_task_queues->queue(i)->initialize();
|
||||
}
|
||||
|
||||
_overflow_stacks = NULL;
|
||||
if (ParGCUseLocalOverflow) {
|
||||
|
||||
// typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
|
||||
// with ','
|
||||
// typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal with ','
|
||||
typedef Stack<oop, mtGC> GCOopStack;
|
||||
|
||||
_overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
|
||||
@ -742,7 +728,7 @@ class ParNewRefProcTaskProxy: public AbstractGangTask {
|
||||
typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
|
||||
public:
|
||||
ParNewRefProcTaskProxy(ProcessTask& task,
|
||||
ParNewGeneration& gen,
|
||||
ParNewGeneration& young_gen,
|
||||
Generation& old_gen,
|
||||
HeapWord* young_old_boundary,
|
||||
ParScanThreadStateSet& state_set);
|
||||
@ -768,11 +754,9 @@ ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
|
||||
_old_gen(old_gen),
|
||||
_young_old_boundary(young_old_boundary),
|
||||
_state_set(state_set)
|
||||
{
|
||||
}
|
||||
{ }
|
||||
|
||||
void ParNewRefProcTaskProxy::work(uint worker_id)
|
||||
{
|
||||
void ParNewRefProcTaskProxy::work(uint worker_id) {
|
||||
ResourceMark rm;
|
||||
HandleMark hm;
|
||||
ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
|
||||
@ -792,15 +776,12 @@ public:
|
||||
_task(task)
|
||||
{ }
|
||||
|
||||
virtual void work(uint worker_id)
|
||||
{
|
||||
virtual void work(uint worker_id) {
|
||||
_task.work(worker_id);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
|
||||
{
|
||||
void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
WorkGang* workers = gch->workers();
|
||||
assert(workers != NULL, "Need parallel worker threads.");
|
||||
@ -812,8 +793,7 @@ void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
|
||||
_young_gen.promotion_failed());
|
||||
}
|
||||
|
||||
void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
|
||||
{
|
||||
void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
WorkGang* workers = gch->workers();
|
||||
assert(workers != NULL, "Need parallel worker threads.");
|
||||
@ -821,8 +801,7 @@ void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
|
||||
workers->run_task(&enq_task);
|
||||
}
|
||||
|
||||
void ParNewRefProcTaskExecutor::set_single_threaded_mode()
|
||||
{
|
||||
void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
|
||||
_state_set.flush();
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
gch->save_marks();
|
||||
@ -830,7 +809,8 @@ void ParNewRefProcTaskExecutor::set_single_threaded_mode()
|
||||
|
||||
ScanClosureWithParBarrier::
|
||||
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
|
||||
ScanClosure(g, gc_barrier) {}
|
||||
ScanClosure(g, gc_barrier)
|
||||
{ }
|
||||
|
||||
EvacuateFollowersClosureGeneral::
|
||||
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
|
||||
@ -838,7 +818,7 @@ EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
|
||||
OopsInGenClosure* older) :
|
||||
_gch(gch),
|
||||
_scan_cur_or_nonheap(cur), _scan_older(older)
|
||||
{}
|
||||
{ }
|
||||
|
||||
void EvacuateFollowersClosureGeneral::do_void() {
|
||||
do {
|
||||
@ -850,7 +830,6 @@ void EvacuateFollowersClosureGeneral::do_void() {
|
||||
} while (!_gch->no_allocs_since_save_marks());
|
||||
}
|
||||
|
||||
|
||||
// A Generation that does parallel young-gen collection.
|
||||
|
||||
void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
|
||||
@ -996,9 +975,9 @@ void ParNewGeneration::collect(bool full,
|
||||
if (ZapUnusedHeapArea) {
|
||||
// This is now done here because of the piece-meal mangling which
|
||||
// can check for valid mangling at intermediate points in the
|
||||
// collection(s). When a minor collection fails to collect
|
||||
// collection(s). When a young collection fails to collect
|
||||
// sufficient space resizing of the young generation can occur
|
||||
// an redistribute the spaces in the young generation. Mangle
|
||||
// and redistribute the spaces in the young generation. Mangle
|
||||
// here so that unzapped regions don't get distributed to
|
||||
// other spaces.
|
||||
to()->mangle_unused_area();
|
||||
@ -1113,8 +1092,10 @@ void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
|
||||
// thus avoiding the need to undo the copy as in
|
||||
// copy_to_survivor_space_avoiding_with_undo.
|
||||
|
||||
oop ParNewGeneration::copy_to_survivor_space(
|
||||
ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
|
||||
oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
|
||||
oop old,
|
||||
size_t sz,
|
||||
markOop m) {
|
||||
// In the sequential version, this assert also says that the object is
|
||||
// not forwarded. That might not be the case here. It is the case that
|
||||
// the caller observed it to be not forwarded at some time in the past.
|
||||
@ -1141,8 +1122,7 @@ oop ParNewGeneration::copy_to_survivor_space(
|
||||
}
|
||||
|
||||
if (new_obj == NULL) {
|
||||
// Either to-space is full or we decided to promote
|
||||
// try allocating obj tenured
|
||||
// Either to-space is full or we decided to promote try allocating obj tenured
|
||||
|
||||
// Attempt to install a null forwarding pointer (atomically),
|
||||
// to claim the right to install the real forwarding pointer.
|
||||
|
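The comment above describes claiming the forwarding slot atomically before publishing the real value, so that only one worker copies the object. A standalone sketch of that claim-then-publish pattern with std::atomic; it uses a non-null sentinel where the VM uses a null forwarding pointer (the slot here starts out null), and the mark-word encoding and copy logic are not modelled:

#include <atomic>
#include <cstdio>

struct Obj {
  std::atomic<Obj*> forwardee{nullptr};
};

static Obj* const CLAIMED = reinterpret_cast<Obj*>(1);  // sentinel claim value

// CAS a sentinel into the forwarding slot to win the right to install the
// real forwardee; losers wait for the winner's pointer to appear.
Obj* forward(Obj* old_obj, Obj* copy) {
  Obj* expected = nullptr;
  if (old_obj->forwardee.compare_exchange_strong(expected, CLAIMED)) {
    old_obj->forwardee.store(copy, std::memory_order_release);
    return copy;
  }
  Obj* f;
  while ((f = old_obj->forwardee.load(std::memory_order_acquire)) == CLAIMED) { }
  return f;
}

int main() {
  Obj old_obj, copy;
  std::printf("forwarded to copy: %d\n", forward(&old_obj, &copy) == &copy);
  return 0;
}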
@ -71,11 +71,7 @@ class ParScanThreadState {
|
||||
ParScanWithoutBarrierClosure _to_space_closure; // scan_without_gc_barrier
|
||||
ParScanWithBarrierClosure _old_gen_closure; // scan_with_gc_barrier
|
||||
ParRootScanWithoutBarrierClosure _to_space_root_closure; // scan_root_without_gc_barrier
|
||||
// One of these two will be passed to process_roots, which will
|
||||
// set its generation. The first is for two-gen configs where the
|
||||
// old gen collects the perm gen; the second is for arbitrary configs.
|
||||
// The second isn't used right now (it used to be used for the train, an
|
||||
// incremental collector) but the declaration has been left as a reminder.
|
||||
// Will be passed to process_roots to set its generation.
|
||||
ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
|
||||
// This closure will always be bound to the old gen; it will be used
|
||||
// in evacuate_followers.
|
||||
@ -85,7 +81,6 @@ class ParScanThreadState {
|
||||
ParScanWeakRefClosure _scan_weak_ref_closure;
|
||||
ParKeepAliveClosure _keep_alive_closure;
|
||||
|
||||
|
||||
Space* _to_space;
|
||||
Space* to_space() { return _to_space; }
|
||||
|
||||
|
@ -1143,7 +1143,7 @@ void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
|
||||
while (curr < end) {
|
||||
Prefetch::read(curr, interval);
|
||||
oop obj = oop(curr);
|
||||
int size = obj->oop_iterate(&cl);
|
||||
int size = obj->oop_iterate_size(&cl);
|
||||
assert(size == obj->size(), "sanity");
|
||||
curr += size;
|
||||
}
|
||||
|
@ -367,7 +367,7 @@ bool G1ArchiveAllocator::alloc_new_region() {
|
||||
_max = _bottom + HeapRegion::min_region_size_in_words();
|
||||
|
||||
// Tell mark-sweep that objects in this region are not to be marked.
|
||||
G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));
|
||||
G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);
|
||||
|
||||
// Since we've modified the old set, call update_sizes.
|
||||
_g1h->g1mm()->update_sizes();
|
||||
|
@ -27,6 +27,7 @@
|
||||
|
||||
#include "gc/g1/g1BlockOffsetTable.hpp"
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
#include "gc/shared/memset_with_concurrent_readers.hpp"
|
||||
#include "gc/shared/space.hpp"
|
||||
|
||||
inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
|
||||
@ -68,15 +69,7 @@ void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_cha
|
||||
check_index(right, "right index out of range");
|
||||
assert(left <= right, "indexes out of order");
|
||||
size_t num_cards = right - left + 1;
|
||||
if (UseMemSetInBOT) {
|
||||
memset(&_offset_array[left], offset, num_cards);
|
||||
} else {
|
||||
size_t i = left;
|
||||
const size_t end = i + num_cards;
|
||||
for (; i < end; i++) {
|
||||
_offset_array[i] = offset;
|
||||
}
|
||||
}
|
||||
memset_with_concurrent_readers(&_offset_array[left], offset, num_cards);
|
||||
}
|
||||
|
||||
// Variant of index_for that does not check the index for validity.
|
||||
|
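The new memset_with_concurrent_readers() call collapses the old UseMemSetInBOT branch: the offset array can be read by concurrent threads, so (as the old comment in the SATB barrier-set hunk further down puts it) memset() on some platforms can expose "phantom zeros" to a racing reader. A minimal sketch of what such a helper could look like is below; the platform guard and the fallback loop are assumptions for illustration, not the actual HotSpot implementation.

    // Sketch only (assumed behaviour): plain memset where the C library writes each
    // byte exactly once, an explicit per-byte store loop where that is not guaranteed.
    #include <cstddef>
    #include <cstring>

    inline void memset_with_concurrent_readers_sketch(void* to, int value, size_t size) {
    #ifdef PLATFORM_MEMSET_UNSAFE_FOR_READERS   // hypothetical guard, for illustration
      volatile char* dest = static_cast<volatile char*>(to);
      for (size_t i = 0; i < size; i++) {
        dest[i] = static_cast<char>(value);     // one visible store per byte, no transient values
      }
    #else
      ::memset(to, value, size);                // safe where memset never writes intermediate junk
    #endif
    }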
63 hotspot/src/share/vm/gc/g1/g1CodeBlobClosure.cpp Normal file
@ -0,0 +1,63 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "oops/oop.inline.hpp"

template <typename T>
void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
  _work->do_oop(p);
  T oop_or_narrowoop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(oop_or_narrowoop)) {
    oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
    HeapRegion* hr = _g1h->heap_region_containing_raw(o);
    assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in collection set then evacuation failed and nm must already be in the remset");
    hr->add_strong_code_root(_nm);
  }
}

void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop(oop* o) {
  do_oop_work(o);
}

void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop(narrowOop* o) {
  do_oop_work(o);
}

void G1CodeBlobClosure::do_code_blob(CodeBlob* cb) {
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm != NULL) {
    if (!nm->test_set_oops_do_mark()) {
      _oc.set_nm(nm);
      nm->oops_do(&_oc);
      nm->fix_oop_relocations();
    }
  }
}

55 hotspot/src/share/vm/gc/g1/g1CodeBlobClosure.hpp Normal file
@ -0,0 +1,55 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/g1/g1CollectedHeap.hpp"
#include "memory/iterator.hpp"

class nmethod;

class G1CodeBlobClosure : public CodeBlobClosure {
  class HeapRegionGatheringOopClosure : public OopClosure {
    G1CollectedHeap* _g1h;
    OopClosure* _work;
    nmethod* _nm;

    template <typename T>
    void do_oop_work(T* p);

  public:
    HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}

    void do_oop(oop* o);
    void do_oop(narrowOop* o);

    void set_nm(nmethod* nm) {
      _nm = nm;
    }
  };

  HeapRegionGatheringOopClosure _oc;
public:
  G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}

  void do_code_blob(CodeBlob* cb);
};

@ -65,6 +65,7 @@
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
@ -949,6 +950,7 @@ bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
}

bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
  assert(!is_init_completed(), "Expect to be called at JVM init time");
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MutexLockerEx x(Heap_lock);
@ -1037,12 +1039,13 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
    }

    // Notify mark-sweep of the archive range.
    G1MarkSweep::mark_range_archive(curr_range);
    G1MarkSweep::set_range_archive(curr_range, true);
  }
  return true;
}

void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
  assert(!is_init_completed(), "Expect to be called at JVM init time");
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MemRegion reserved = _hrm.reserved();
@ -1125,6 +1128,81 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
  return result;
}

void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
  assert(!is_init_completed(), "Expect to be called at JVM init time");
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MemRegion reserved = _hrm.reserved();
  HeapWord* prev_last_addr = NULL;
  HeapRegion* prev_last_region = NULL;
  size_t size_used = 0;
  size_t uncommitted_regions = 0;

  // For each Memregion, free the G1 regions that constitute it, and
  // notify mark-sweep that the range is no longer to be considered 'archive.'
  MutexLockerEx x(Heap_lock);
  for (size_t i = 0; i < count; i++) {
    HeapWord* start_address = ranges[i].start();
    HeapWord* last_address = ranges[i].last();

    assert(reserved.contains(start_address) && reserved.contains(last_address),
           err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
                   p2i(start_address), p2i(last_address)));
    assert(start_address > prev_last_addr,
           err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
                   p2i(start_address), p2i(prev_last_addr)));
    size_used += ranges[i].byte_size();
    prev_last_addr = last_address;

    HeapRegion* start_region = _hrm.addr_to_region(start_address);
    HeapRegion* last_region = _hrm.addr_to_region(last_address);

    // Check for ranges that start in the same G1 region in which the previous
    // range ended, and adjust the start address so we don't try to free
    // the same region again. If the current range is entirely within that
    // region, skip it.
    if (start_region == prev_last_region) {
      start_address = start_region->end();
      if (start_address > last_address) {
        continue;
      }
      start_region = _hrm.addr_to_region(start_address);
    }
    prev_last_region = last_region;

    // After verifying that each region was marked as an archive region by
    // alloc_archive_regions, set it free and empty and uncommit it.
    HeapRegion* curr_region = start_region;
    while (curr_region != NULL) {
      guarantee(curr_region->is_archive(),
                err_msg("Expected archive region at index %u", curr_region->hrm_index()));
      uint curr_index = curr_region->hrm_index();
      _old_set.remove(curr_region);
      curr_region->set_free();
      curr_region->set_top(curr_region->bottom());
      if (curr_region != last_region) {
        curr_region = _hrm.next_region_in_heap(curr_region);
      } else {
        curr_region = NULL;
      }
      _hrm.shrink_at(curr_index, 1);
      uncommitted_regions++;
    }

    // Notify mark-sweep that this is no longer an archive range.
    G1MarkSweep::set_range_archive(ranges[i], false);
  }

  if (uncommitted_regions != 0) {
    ergo_verbose1(ErgoHeapSizing,
                  "attempt heap shrinking",
                  ergo_format_reason("uncommitted archive regions")
                  ergo_format_byte("total size"),
                  HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
  }
  decrease_used(size_used);
}

HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                        uint* gc_count_before_ret,
                                                        uint* gclocker_retry_count_ret) {
@ -2845,9 +2923,9 @@ size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
}

// For G1 TLABs should not contain humongous objects, so the maximum TLAB size
// must be smaller than the humongous object limit.
// must be equal to the humongous object limit.
size_t G1CollectedHeap::max_tlab_size() const {
  return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
  return align_size_down(_humongous_object_threshold_in_words, MinObjAlignment);
}

size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
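The relaxed bound is easiest to see with concrete numbers. The region size, the 8-byte word size and the "at least half a region counts as humongous" threshold below are assumptions for illustration only, not values read from this build:

    // Standalone arithmetic sketch of the old vs. new TLAB cap (assumed configuration).
    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t grain_words = (1024 * 1024) / 8;        // words per assumed 1 MB region
      const size_t humongous_threshold = grain_words / 2;  // assumed humongous limit in words
      const size_t old_max_tlab = humongous_threshold - 1; // previous bound: strictly smaller
      const size_t new_max_tlab = humongous_threshold;     // bound after this change: equal
      std::printf("old max TLAB: %zu words, new max TLAB: %zu words\n",
                  old_max_tlab, new_max_tlab);
      return 0;
    }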
@ -4051,7 +4129,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE

        g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
        g1_policy()->finalize_cset(target_pause_time_ms);

        evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());

        register_humongous_regions_with_cset();

@ -4175,7 +4255,10 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
        // investigate this in CR 7178365.
        double sample_end_time_sec = os::elapsedTime();
        double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
        g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
        g1_policy()->record_collection_pause_end(pause_time_ms);

        evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before());
        evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());

        MemoryService::track_memory_usage();

@ -4501,8 +4584,7 @@ public:
                bool only_young, bool claim)
      : _oop_closure(oop_closure),
        _oop_in_klass_closure(oop_closure->g1(),
                              oop_closure->pss(),
                              oop_closure->rp()),
                              oop_closure->pss()),
        _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
        _claim(claim) {

@ -4531,18 +4613,18 @@ public:
    bool only_young = _g1h->collector_state()->gcs_are_young();

    // Non-IM young GC.
    G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, pss, rp);
    G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, pss);
    G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
                                              only_young, // Only process dirty klasses.
                                              false);     // No need to claim CLDs.
    // IM young GC.
    // Strong roots closures.
    G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, pss, rp);
    G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, pss);
    G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
                                                  false, // Process all klasses.
                                                  true); // Need to claim CLDs.
    // Weak roots closures.
    G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss, rp);
    G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss);
    G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
                                                               false, // Process all klasses.
                                                               true); // Need to claim CLDs.
@ -4582,9 +4664,9 @@ public:
                                      worker_id);

    G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss);
    _root_processor->scan_remembered_sets(&push_heap_rs_cl,
                                          weak_root_cl,
                                          worker_id);
    _g1h->g1_rem_set()->oops_into_collection_set_do(&push_heap_rs_cl,
                                                    weak_root_cl,
                                                    worker_id);
    double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;

    double term_sec = 0.0;
@ -5241,9 +5323,9 @@ public:
    G1ParScanThreadState* pss = _pss[worker_id];
    pss->set_ref_processor(NULL);

    G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss, NULL);
    G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss);

    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);

    OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;

@ -5341,9 +5423,9 @@ public:
    pss->set_ref_processor(NULL);
    assert(pss->queue_is_empty(), "both queue and overflow should be empty");

    G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss, NULL);
    G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss);

    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);

    OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;

@ -5451,9 +5533,9 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadState** per_t
  // closures while we're actually processing the discovered
  // reference objects.

  G1ParScanExtRootClosure only_copy_non_heap_cl(this, pss, NULL);
  G1ParScanExtRootClosure only_copy_non_heap_cl(this, pss);

  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss, NULL);
  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss);

  OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;


@ -757,6 +757,12 @@ public:
  // alloc_archive_regions, and after class loading has occurred.
  void fill_archive_regions(MemRegion* range, size_t count);

  // For each of the specified MemRegions, uncommit the containing G1 regions
  // which had been allocated by alloc_archive_regions. This should be called
  // rather than fill_archive_regions at JVM init time if the archive file
  // mapping failed, with the same non-overlapping and sorted MemRegion array.
  void dealloc_archive_regions(MemRegion* range, size_t count);

protected:

  // Shrink the garbage-first heap by at most the given size (in bytes!).
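Taken together with check_archive_addresses, alloc_archive_regions and fill_archive_regions above, the comment describes the init-time protocol for placing an archive in the heap, with dealloc_archive_regions as the failure path. A rough caller-side sketch follows; map_archive_file() and the boolean error handling are assumptions for illustration, not HotSpot code:

    // Hypothetical caller, sketched from the comments above.
    bool setup_archive_heap(G1CollectedHeap* g1h, MemRegion* ranges, size_t count) {
      if (!g1h->check_archive_addresses(ranges, count)) {
        return false;                              // requested addresses fall outside the reserved heap
      }
      if (!g1h->alloc_archive_regions(ranges, count)) {
        return false;                              // the regions could not be claimed as archive regions
      }
      if (!map_archive_file(ranges, count)) {      // hypothetical mapping of the archive contents
        // Mapping failed: give the regions back instead of filling them.
        g1h->dealloc_archive_regions(ranges, count);
        return false;
      }
      g1h->fill_archive_regions(ranges, count);    // complete the regions once contents and classes are in place
      return true;
    }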
@ -181,15 +181,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most " SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

@ -932,7 +923,7 @@ bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc
// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
  double end_time_sec = os::elapsedTime();
  assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
         "otherwise, the subtraction below does not make sense");
@ -964,9 +955,6 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
                          end_time_sec, _g1->gc_tracer_stw()->gc_id());

  evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
  evacuation_info.set_bytes_copied(_bytes_copied_during_gc);

  if (update_stats) {
    _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
    // this is where we update the allocation rate of the application
@ -1883,7 +1871,7 @@ uint G1CollectorPolicy::calc_max_old_cset_length() {
}


void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
  double young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();
@ -2093,7 +2081,6 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInf

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
  evacuation_info.set_collectionset_regions(cset_region_length());
}

void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {

@ -604,10 +604,6 @@ public:

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  G1CollectorState* collector_state();

  G1GCPhaseTimes* phase_times() const { return _phase_times; }
@ -634,13 +630,11 @@ public:
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

  // Record the start and end of an evacuation pause.
  void record_collection_pause_start(double start_time_sec);
  void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);
  void record_collection_pause_end(double pause_time_ms);

  // Record the start and end of a full collection.
  void record_full_collection_start();
@ -682,6 +676,10 @@ public:
    return _bytes_copied_during_gc;
  }

  size_t collection_set_bytes_used_before() const {
    return _collection_set_bytes_used_before;
  }

  // Determine whether there are candidate regions so that the
  // next GC should be mixed. The two action strings are used
  // in the ergo output when the method returns true or false.
@ -691,7 +689,7 @@ public:
  // Choose a new collection set. Marks the chosen regions as being
  // "in_collection_set", and links them together. The head and number of
  // the collection set are available via access methods.
  void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);
  void finalize_cset(double target_pause_time_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.

@ -54,17 +54,46 @@ void G1EvacStats::adjust_desired_plab_sz() {
                  _allocated, _wasted, _region_end_waste, _unused, used()));
    _allocated = 1;
  }
  // We account region end waste fully to PLAB allocation. This is not completely fair,
  // but is a conservative assumption because PLABs may be sized flexibly while we
  // cannot adjust direct allocations.
  // In some cases, wasted_frac may become > 1 but that just reflects the problem
  // with region_end_waste.
  double wasted_frac = (double)(_unused + _wasted + _region_end_waste) / (double)_allocated;
  size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  size_t cur_plab_sz = used() / target_refills;
  // The size of the PLAB caps the amount of space that can be wasted at the
  // end of the collection. In the worst case the last PLAB could be completely
  // empty.
  // This allows us to calculate the new PLAB size to achieve the
  // TargetPLABWastePct given the latest memory usage and that the last buffer
  // will be G1LastPLABAverageOccupancy full.
  //
  // E.g. assume that if in the current GC 100 words were allocated and a
  // TargetPLABWastePct of 10 had been set.
  //
  // So we could waste up to 10 words to meet that percentage. Given that we
  // also assume that that buffer is typically half-full, the new desired PLAB
  // size is set to 20 words.
  //
  // The amount of allocation performed should be independent of the number of
  // threads, so should the maximum waste we can spend in total. So if
  // we used n threads to allocate, each of them can spend maximum waste/n words in
  // a first rough approximation. The number of threads only comes into play later
  // when actually retrieving the actual desired PLAB size.
  //
  // After calculating this optimal PLAB size the algorithm applies the usual
  // exponential decaying average over this value to guess the next PLAB size.
  //
  // We account region end waste fully to PLAB allocation (in the calculation of
  // what we consider as "used_for_waste_calculation" below). This is not
  // completely fair, but is a conservative assumption because PLABs may be sized
  // flexibly while we cannot adjust inline allocations.
  // Allocation during GC will try to minimize region end waste so this impact
  // should be minimal.
  //
  // We need to cover overflow when calculating the amount of space actually used
  // by objects in PLABs when subtracting the region end waste.
  // Region end waste may be higher than actual allocation. This may occur if many
  // threads do not allocate anything but a few rather large objects. In this
  // degenerate case the PLAB size would simply quickly tend to minimum PLAB size,
  // which is an okay reaction.
  size_t const used_for_waste_calculation = used() > _region_end_waste ? used() - _region_end_waste : 0;

  size_t const total_waste_allowed = used_for_waste_calculation * TargetPLABWastePct;
  size_t const cur_plab_sz = (double)total_waste_allowed / G1LastPLABAverageOccupancy;
  // Take historical weighted average
  _filter.sample(cur_plab_sz);
  // Clip from above and below, and align to object boundary
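The worked example in the new comment (100 words allocated during the GC, TargetPLABWastePct of 10, last buffer assumed half full) can be checked directly against the two lines above: both flags are percentages, so they cancel and the result comes out in words. A standalone sketch of that arithmetic, with the flag values taken from the example (50.0 is also the default declared for G1LastPLABAverageOccupancy in the g1_globals hunk further down), not from a running VM:

    // Recomputes the example from the comment above; prints 20.
    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t used_for_waste_calculation = 100;  // words allocated in PLABs this GC (example value)
      const double target_plab_waste_pct = 10.0;      // TargetPLABWastePct assumed by the example
      const double last_plab_avg_occupancy = 50.0;    // G1LastPLABAverageOccupancy default

      const double total_waste_allowed = used_for_waste_calculation * target_plab_waste_pct;
      const size_t cur_plab_sz = (size_t)(total_waste_allowed / last_plab_avg_occupancy);

      std::printf("new desired PLAB size: %zu words\n", cur_plab_sz);
      return 0;
    }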
@ -74,7 +74,7 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
  assert(rp != NULL, "should be non-NULL");
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");

  GenMarkSweep::_ref_processor = rp;
  GenMarkSweep::set_ref_processor(rp);
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation Method*s may be moving,
@ -108,7 +108,7 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
  GenMarkSweep::set_ref_processor(NULL);
}


@ -310,9 +310,9 @@ void G1MarkSweep::enable_archive_object_check() {
                HeapRegion::GrainBytes);
}

void G1MarkSweep::mark_range_archive(MemRegion range) {
void G1MarkSweep::set_range_archive(MemRegion range, bool is_archive) {
  assert(_archive_check_enabled, "archive range check not enabled");
  _archive_region_map.set_by_address(range, true);
  _archive_region_map.set_by_address(range, is_archive);
}

bool G1MarkSweep::in_archive_range(oop object) {

@ -58,8 +58,8 @@ class G1MarkSweep : AllStatic {
  // Create the _archive_region_map which is used to identify archive objects.
  static void enable_archive_object_check();

  // Mark the regions containing the specified address range as archive regions.
  static void mark_range_archive(MemRegion range);
  // Set the regions containing the specified address range as archive/non-archive.
  static void set_range_archive(MemRegion range, bool is_archive);

  // Check if an object is in an archive region using the _archive_region_map.
  static bool in_archive_range(oop object);

@ -125,8 +125,7 @@ private:
  template <class T> void do_oop_work(T* p);

public:
  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
                   ReferenceProcessor* rp) :
  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
      G1ParCopyHelper(g1, par_scan_state) {
    assert(_ref_processor == NULL, "sanity");
  }
@ -141,7 +140,6 @@ public:

  G1CollectedHeap* g1() { return _g1; };
  G1ParScanThreadState* pss() { return _par_scan_state; }
  ReferenceProcessor* rp() { return _ref_processor; };
};

typedef G1ParCopyClosure<G1BarrierNone, G1MarkNone> G1ParScanExtRootClosure;

@ -186,6 +186,21 @@ InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop co
  return dest(state);
}

void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
                                                  oop const old, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr,
                                                  const AllocationContext_t context) const {
  G1PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
                                                             dest_state.value() == InCSetState::Old,
                                                             alloc_buf->word_sz());
  } else {
    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz, age,
                                                              dest_state.value() == InCSetState::Old);
  }
}

oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                 oop const old,
                                                 markOop const old_mark) {
@ -219,6 +234,10 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit
      report_promotion_event(dest_state, old, word_sz, age, obj_ptr, context);
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");

@ -173,6 +173,10 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
                                  bool previous_plab_refill_failed);

  inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);

  void report_promotion_event(InCSetState const dest_state,
                              oop const old, size_t word_sz, uint age,
                              HeapWord * const obj_ptr, const AllocationContext_t context) const;
 public:

  oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);

@ -26,6 +26,7 @@
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentG1RefineThread.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
@ -228,12 +229,15 @@ public:
};

void G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
                      CodeBlobClosure* code_root_cl,
                      OopClosure* non_heap_roots,
                      uint worker_i) {
  double rs_time_start = os::elapsedTime();

  G1CodeBlobClosure code_root_cl(non_heap_roots);

  HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);

  ScanRSClosure scanRScl(oc, code_root_cl, worker_i);
  ScanRSClosure scanRScl(oc, &code_root_cl, worker_i);

  _g1->collection_set_iterate_from(startRegion, &scanRScl);
  scanRScl.set_try_claimed();
@ -295,7 +299,7 @@ void G1RemSet::cleanupHRRS() {
}

void G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
                                           CodeBlobClosure* code_root_cl,
                                           OopClosure* non_heap_roots,
                                           uint worker_i) {
#if CARD_REPEAT_HISTO
  ct_freq_update_histo_and_reset();
@ -318,7 +322,7 @@ void G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
  DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());

  updateRS(&into_cset_dcq, worker_i);
  scanRS(oc, code_root_cl, worker_i);
  scanRS(oc, non_heap_roots, worker_i);

  // We now clear the cached values of _cset_rs_update_cl for this worker
  _cset_rs_update_cl[worker_i] = NULL;

@ -85,7 +85,7 @@ public:
  // invoked "blk->set_region" to set the "from" region correctly
  // beforehand.)
  //
  // Invoke code_root_cl->do_code_blob on the unmarked nmethods
  // Apply non_heap_roots on the oops of the unmarked nmethods
  // on the strong code roots list for each region in the
  // collection set.
  //
@ -95,7 +95,7 @@ public:
  // the "i" passed to the calling thread's work(i) function.
  // In the sequential case this param will be ignored.
  void oops_into_collection_set_do(G1ParPushHeapRSClosure* blk,
                                   CodeBlobClosure* code_root_cl,
                                   OopClosure* non_heap_roots,
                                   uint worker_i);

  // Prepare for and cleanup after an oops_into_collection_set_do
@ -107,7 +107,7 @@ public:
  void cleanup_after_oops_into_collection_set_do();

  void scanRS(G1ParPushHeapRSClosure* oc,
              CodeBlobClosure* code_root_cl,
              OopClosure* non_heap_roots,
              uint worker_i);

  void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);

@ -28,6 +28,7 @@
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/bufferingOopClosure.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
@ -40,57 +41,6 @@
#include "runtime/mutex.hpp"
#include "services/management.hpp"

class G1CodeBlobClosure : public CodeBlobClosure {
  class HeapRegionGatheringOopClosure : public OopClosure {
    G1CollectedHeap* _g1h;
    OopClosure* _work;
    nmethod* _nm;

    template <typename T>
    void do_oop_work(T* p) {
      _work->do_oop(p);
      T oop_or_narrowoop = oopDesc::load_heap_oop(p);
      if (!oopDesc::is_null(oop_or_narrowoop)) {
        oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
        HeapRegion* hr = _g1h->heap_region_containing_raw(o);
        assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
        hr->add_strong_code_root(_nm);
      }
    }

  public:
    HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}

    void do_oop(oop* o) {
      do_oop_work(o);
    }

    void do_oop(narrowOop* o) {
      do_oop_work(o);
    }

    void set_nm(nmethod* nm) {
      _nm = nm;
    }
  };

  HeapRegionGatheringOopClosure _oc;
public:
  G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = cb->as_nmethod_or_null();
    if (nm != NULL) {
      if (!nm->test_set_oops_do_mark()) {
        _oc.set_nm(nm);
        nm->oops_do(&_oc);
        nm->fix_oop_relocations();
      }
    }
  }
};


void G1RootProcessor::worker_has_discovered_all_strong_classes() {
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

@ -321,14 +271,6 @@ void G1RootProcessor::process_vm_roots(OopClosure* strong_roots,
  }
}

void G1RootProcessor::scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
                                           OopClosure* scan_non_heap_weak_roots,
                                           uint worker_i) {
  G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);

  _g1h->g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
}

uint G1RootProcessor::n_workers() const {
  return _srs.n_threads();
}

@ -107,13 +107,6 @@ public:
                           CLDClosure* clds,
                           CodeBlobClosure* blobs);

  // Apply scan_rs to all locations in the union of the remembered sets for all
  // regions in the collection set
  // (having done "set_region" to indicate the region in which the root resides),
  void scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
                            OopClosure* scan_non_heap_weak_roots,
                            uint worker_i);

  // Number of worker threads used by the root processor.
  uint n_workers() const;
};

@ -27,6 +27,7 @@
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/satbQueue.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutexLocker.hpp"
@ -108,15 +109,7 @@ void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
  jbyte *const first = byte_for(mr.start());
  jbyte *const last = byte_after(mr.last());

  // Below we may use an explicit loop instead of memset() because on
  // certain platforms memset() can give concurrent readers phantom zeros.
  if (UseMemSetInBOT) {
    memset(first, g1_young_gen, last - first);
  } else {
    for (jbyte* i = first; i < last; i++) {
      *i = g1_young_gen;
    }
  }
  memset_with_concurrent_readers(first, g1_young_gen, last - first);
}

#ifndef PRODUCT
@ -207,7 +200,7 @@ G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
  // Otherwise, log it.
  G1SATBCardTableLoggingModRefBS* g1_bs =
    barrier_set_cast<G1SATBCardTableLoggingModRefBS>(G1CollectedHeap::heap()->barrier_set());
  g1_bs->write_ref_field_work(field, new_val);
  g1_bs->write_ref_field_work(field, new_val, false);
}

void

@ -147,6 +147,10 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
 private:
  G1SATBCardTableLoggingModRefBSChangedListener _listener;
  DirtyCardQueueSet& _dcqs;

 protected:
  virtual void write_ref_field_work(void* field, oop new_val, bool release);

 public:
  static size_t compute_size(size_t mem_region_size_in_words) {
    size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
@ -165,8 +169,6 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {

  virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }

  void write_ref_field_work(void* field, oop new_val, bool release = false);

  // Can be called from static contexts.
  static void write_ref_field_static(void* field, oop new_val);


@ -82,6 +82,11 @@
          "If true, enable reference discovery during concurrent "     \
          "marking and reference processing at the end of remark.")    \
                                                                        \
  experimental(double, G1LastPLABAverageOccupancy, 50.0,                \
               "The expected average occupancy of the last PLAB in "   \
               "percent.")                                              \
               range(0.001, 100.0)                                      \
                                                                        \
  product(size_t, G1SATBBufferSize, 1*K,                                \
          "Number of entries in an SATB log buffer.")                   \
                                                                        \

@ -68,7 +68,7 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
    // or it was allocated after marking finished, then we add it. Otherwise
    // we can safely ignore the object.
    if (!g1h->is_obj_dead(oop(cur), _hr)) {
      oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
      oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
    } else {
      oop_size = _hr->block_size(cur);
    }

@ -426,7 +426,7 @@ uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
         (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
    uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);

    uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
    shrink_at(idx_last_found + num_last_found - to_remove, to_remove);

    cur = idx_last_found;
    removed += to_remove;
@ -437,6 +437,17 @@ uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
  return removed;
}

void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
#ifdef ASSERT
  for (uint i = index; i < (index + num_regions); i++) {
    assert(is_available(i), err_msg("Expected available region at index %u", i));
    assert(at(i)->is_empty(), err_msg("Expected empty region at index %u", i));
    assert(at(i)->is_free(), err_msg("Expected free region at index %u", i));
  }
#endif
  uncommit_regions(index, num_regions);
}

uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
  guarantee(start_idx < _allocated_heapregions_length, "checking");
  guarantee(res_idx != NULL, "checking");

@ -241,6 +241,10 @@ public:
  // Return the actual number of uncommitted regions.
  uint shrink_by(uint num_regions_to_remove);

  // Uncommit a number of regions starting at the specified index, which must be available,
  // empty, and free.
  void shrink_at(uint index, size_t num_regions);

  void verify();

  // Do some sanity checking.

@ -35,7 +35,7 @@ private:
  // We encode the value of the heap region type so the generation can be
  // determined quickly. The tag is split into two parts:
  //
  //   major type (young, humongous)                         : top N-1 bits
  //   major type (young, old, humongous, archive)           : top N-1 bits
  //   minor type (eden / survivor, starts / cont hum, etc.) : bottom 1 bit
  //
  // If there's need to increase the number of minor types in the
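The tag layout described here (major type in the top bits, one minor bit at the bottom) is easy to picture with a toy encoding. The concrete values below are invented for illustration and are not the real HeapRegionType constants:

    // Toy illustration of a "top bits = major type, bottom bit = minor type" tag.
    #include <cstdio>

    enum ToyRegionTag {
      FreeTag       = 0,              // no major bit set
      YoungMask     = 1 << 1,
      EdenTag       = YoungMask | 0,  // young, minor bit 0
      SurvTag       = YoungMask | 1,  // young, minor bit 1
      HumongousMask = 1 << 2,
      StartsHumTag  = HumongousMask | 0,
      ContHumTag    = HumongousMask | 1,
      OldMask       = 1 << 3,
      OldTag        = OldMask | 0
    };

    static bool is_young(int tag)    { return (tag & YoungMask) != 0; } // major test ignores the minor bit
    static bool is_survivor(int tag) { return tag == SurvTag; }         // minor test needs the exact tag

    int main() {
      std::printf("eden young? %d, survivor young? %d, old young? %d\n",
                  is_young(EdenTag), is_young(SurvTag), is_young(OldTag));
      return 0;
    }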
@ -89,7 +89,7 @@ class CheckForUnmarkedObjects : public ObjectClosure {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate_no_header(&object_check);
    if (object_check.has_unmarked_oop()) {
      assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
      guarantee(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};

@ -56,13 +56,7 @@ class CardTableExtension : public CardTableModRefBS {
  CardTableExtension(MemRegion whole_heap) :
    CardTableModRefBS(
      whole_heap,
      // Concrete tag should be BarrierSet::CardTableExtension.
      // That will presently break things in a bunch of places though.
      // The concrete tag is used as a dispatch key in many places, and
      // CardTableExtension does not correctly dispatch in some of those
      // uses. This will be addressed as part of a reorganization of the
      // BarrierSet hierarchy.
      BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableExtension))
      BarrierSet::FakeRtti(BarrierSet::CardTableExtension))
    { }

  // Scavenge support

@ -44,7 +44,7 @@ void ImmutableSpace::oop_iterate(ExtendedOopClosure* cl) {
  HeapWord* t = end();
  // Could call objects iterate, but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(cl);
    obj_addr += oop(obj_addr)->oop_iterate_size(cl);
  }
}


@ -213,15 +213,6 @@ bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
  return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top;
}

void MutableSpace::oop_iterate(ExtendedOopClosure* cl) {
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call objects iterate, but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(cl);
  }
}

void MutableSpace::oop_iterate_no_header(OopClosure* cl) {
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();

@ -134,7 +134,6 @@ class MutableSpace: public ImmutableSpace {
  bool cas_deallocate(HeapWord *obj, size_t size);

  // Iteration.
  void oop_iterate(ExtendedOopClosure* cl);
  void oop_iterate_no_header(OopClosure* cl);
  void object_iterate(ObjectClosure* cl);


@ -30,26 +30,22 @@
#include "gc/parallel/psParallelCompact.hpp"
#include "gc/parallel/psScavenge.hpp"

inline size_t ParallelScavengeHeap::total_invocations()
{
inline size_t ParallelScavengeHeap::total_invocations() {
  return UseParallelOldGC ? PSParallelCompact::total_invocations() :
    PSMarkSweep::total_invocations();
}

inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const
{
inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const {
  const size_t eden_size = young_gen()->eden_space()->capacity_in_words();
  return size < eden_size / 2;
}

inline void ParallelScavengeHeap::invoke_scavenge()
{
inline void ParallelScavengeHeap::invoke_scavenge() {
  PSScavenge::invoke();
}

inline bool ParallelScavengeHeap::is_in_young(oop p) {
  // Assumes the the old gen address range is lower than that of the young gen.
  const void* loc = (void*) p;
  bool result = ((HeapWord*)p) >= young_gen()->reserved().start();
  assert(result == young_gen()->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p)));