Merge
commit 40c4fe8573
hotspot/src
cpu
aarch64/vm
ppc/vm
sparc/vm
x86/vm
jdk.hotspot.agent/share/classes/sun/jvm/hotspot
HSDB.java
oops
ArrayKlass.java
ConstantPool.java
ConstantPoolCache.java
InstanceKlass.java
Klass.java
Metadata.java
MethodData.java
utilities
jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot
os
os_cpu/windows_x86/vm
share/vm
c1
classfile
classFileParser.cpp
compactHashtable.cpp
compactHashtable.hpp
stringTable.cpp
symbolTable.cpp
verifier.cpp
gc
cms
cmsCollectorPolicy.cpp
cmsOopClosures.hpp
cmsOopClosures.inline.hpp
compactibleFreeListSpace.cpp
compactibleFreeListSpace.hpp
concurrentMarkSweepGeneration.cpp
parNewGeneration.cpp
promotionInfo.cpp
promotionInfo.hpp
g1
concurrentMark.cpp
concurrentMark.hpp
concurrentMark.inline.hpp
g1Allocator.cpp
g1CodeCacheRemSet.cpp
g1CollectedHeap.cpp
g1CollectedHeap.hpp
g1CollectorPolicy.cpp
g1CollectorPolicy.hpp
g1EvacFailure.cpp
g1GCPhaseTimes.cpp
g1GCPhaseTimes.hpp
g1HeapTransition.cpp
g1HeapTransition.hpp
g1HeapVerifier.cpp
g1HeapVerifier.hpp
g1OopClosures.hpp
g1OopClosures.inline.hpp
g1RemSet.cpp
g1SATBCardTableModRefBS.cpp
g1SATBCardTableModRefBS.hpp
g1SATBCardTableModRefBS.inline.hpp
heapRegion.cpp
heapRegionRemSet.cpp
parallel
asPSYoungGen.cpp
cardTableExtension.cpp
objectStartArray.cpp
objectStartArray.hpp
objectStartArray.inline.hpp
parMarkBitMap.cpp
parMarkBitMap.hpp
parallelScavengeHeap.cpp
psCompactionManager.cpp
psCompactionManager.hpp
psCompactionManager.inline.hpp
psMarkSweep.cpp
psOldGen.cpp
psParallelCompact.cpp
psParallelCompact.hpp
psParallelCompact.inline.hpp
psPromotionManager.cpp
psPromotionManager.inline.hpp
psScavenge.cpp
psScavenge.hpp
serial
shared
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -898,23 +898,18 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
          "caller must use same register for non-constant itable index as for method");
 
   // Compute start of first itableOffsetEntry (which is at the end of the vtable)
-  int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
+  int vtable_base = in_bytes(Klass::vtable_start_offset());
   int itentry_off = itableMethodEntry::method_offset_in_bytes();
   int scan_step = itableOffsetEntry::size() * wordSize;
-  int vte_size = vtableEntry::size() * wordSize;
+  int vte_size = vtableEntry::size_in_bytes();
   assert(vte_size == wordSize, "else adjust times_vte_scale");
 
-  ldrw(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
+  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
 
   // %%% Could store the aligned, prescaled offset in the klassoop.
   // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
   lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
   add(scan_temp, scan_temp, vtable_base);
-  if (HeapWordsPerLong > 1) {
-    // Round up to align_object_offset boundary
-    // see code for instanceKlass::start_of_itable!
-    round_to(scan_temp, BytesPerLong);
-  }
 
   // Adjust recv_klass by scaled itable_index, so we can free itable_index.
   assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
@@ -963,7 +958,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
 void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                            RegisterOrConstant vtable_index,
                                            Register method_result) {
-  const int base = InstanceKlass::vtable_start_offset() * wordSize;
+  const int base = in_bytes(Klass::vtable_start_offset());
   assert(vtableEntry::size() * wordSize == 8,
          "adjust the scaling in the code below");
   int vtable_offset_in_bytes = base + vtableEntry::method_offset_in_bytes();
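The pattern repeated across every port in this commit is visible above: the vtable start used to be a word-scaled offset from InstanceKlass (`InstanceKlass::vtable_start_offset() * wordSize`) and is now a byte offset from the Klass base (`in_bytes(Klass::vtable_start_offset())`), so the per-use `* wordSize` scaling and the extra itable alignment round-up disappear. A minimal sketch of the new offset arithmetic, with hypothetical stand-in parameters rather than the real HotSpot accessors:

#include <cstddef>

// Hypothetical stand-ins: vtable_start_bytes ~ in_bytes(Klass::vtable_start_offset()),
// entry_size_bytes ~ vtableEntry::size_in_bytes(),
// method_off ~ vtableEntry::method_offset_in_bytes().
static size_t vtable_method_slot_offset(size_t vtable_start_bytes,
                                        size_t entry_size_bytes,
                                        size_t method_off,
                                        int vtable_index) {
  // Byte offset of entry 'vtable_index''s Method* slot from the Klass base;
  // everything is already expressed in bytes, so no word-size scaling is needed.
  return vtable_start_bytes
         + static_cast<size_t>(vtable_index) * entry_size_bytes
         + method_off;
}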
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -73,7 +73,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   if (DebugVtables) {
     Label L;
     // check offset vs vtable length
-    __ ldrw(rscratch1, Address(r19, InstanceKlass::vtable_length_offset() * wordSize));
+    __ ldrw(rscratch1, Address(r19, Klass::vtable_length_offset()));
     __ cmpw(rscratch1, vtable_index * vtableEntry::size());
     __ br(Assembler::GT, L);
     __ enter();
@@ -1583,13 +1583,13 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
          "caller must use same register for non-constant itable index as for method");
 
   // Compute start of first itableOffsetEntry (which is at the end of the vtable).
-  int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
+  int vtable_base = in_bytes(Klass::vtable_start_offset());
   int itentry_off = itableMethodEntry::method_offset_in_bytes();
   int logMEsize = exact_log2(itableMethodEntry::size() * wordSize);
   int scan_step = itableOffsetEntry::size() * wordSize;
-  int log_vte_size= exact_log2(vtableEntry::size() * wordSize);
+  int log_vte_size= exact_log2(vtableEntry::size_in_bytes());
 
-  lwz(scan_temp, InstanceKlass::vtable_length_offset() * wordSize, recv_klass);
+  lwz(scan_temp, in_bytes(Klass::vtable_length_offset()), recv_klass);
   // %%% We should store the aligned, prescaled offset in the klassoop.
   // Then the next several instructions would fold away.
 
@@ -1657,7 +1657,7 @@ void MacroAssembler::lookup_virtual_method(Register recv_klass,
 
   assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
 
-  const int base = InstanceKlass::vtable_start_offset() * wordSize;
+  const int base = in_bytes(Klass::vtable_start_offset());
   assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
 
   if (vtable_index.is_register()) {
@@ -3568,8 +3568,8 @@ encode %{
 
     __ load_klass(R11_scratch1, R3);
 
-    int entry_offset = InstanceKlass::vtable_start_offset() + _vtable_index * vtableEntry::size();
-    int v_off = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
+    int entry_offset = in_bytes(Klass::vtable_start_offset()) + _vtable_index * vtableEntry::size_in_bytes();
+    int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
     __ li(R19_method, v_off);
     __ ldx(R19_method/*method oop*/, R19_method/*method offset*/, R11_scratch1/*class*/);
     // NOTE: for vtable dispatches, the vtable entry will never be
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2013, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -3282,9 +3282,9 @@ void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex,
   const Register Rtarget_method = Rindex;
 
   // Get target method & entry point.
-  const int base = InstanceKlass::vtable_start_offset() * wordSize;
+  const int base = in_bytes(Klass::vtable_start_offset());
   // Calc vtable addr scale the vtable index by 8.
-  __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize));
+  __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size_in_bytes()));
   // Load target.
   __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
   __ ldx(Rtarget_method, Rindex, Rrecv_klass);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -80,14 +80,14 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   __ load_klass(rcvr_klass, R3);
 
   // Set method (in case of interpreted method), and destination address.
-  int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
+  int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index*vtableEntry::size_in_bytes();
 
 #ifndef PRODUCT
   if (DebugVtables) {
     Label L;
     // Check offset vs vtable length.
     const Register vtable_len = R12_scratch2;
-    __ lwz(vtable_len, InstanceKlass::vtable_length_offset()*wordSize, rcvr_klass);
+    __ lwz(vtable_len, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
     __ cmpwi(CCR0, vtable_len, vtable_index*vtableEntry::size());
     __ bge(CCR0, L);
     __ li(R12_scratch2, vtable_index);
@@ -96,7 +96,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   }
 #endif
 
-  int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
+  int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
 
   __ ld(R19_method, v_off, rcvr_klass);
 
@@ -163,13 +163,13 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
   __ load_klass(rcvr_klass, R3_ARG1);
 
   BLOCK_COMMENT("Load start of itable entries into itable_entry.");
-  __ lwz(vtable_len, InstanceKlass::vtable_length_offset() * wordSize, rcvr_klass);
-  __ slwi(vtable_len, vtable_len, exact_log2(vtableEntry::size() * wordSize));
+  __ lwz(vtable_len, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
+  __ slwi(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
   __ add(itable_entry_addr, vtable_len, rcvr_klass);
 
   // Loop over all itable entries until desired interfaceOop(Rinterface) found.
   BLOCK_COMMENT("Increment itable_entry_addr in loop.");
-  const int vtable_base_offset = InstanceKlass::vtable_start_offset() * wordSize;
+  const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
   __ addi(itable_entry_addr, itable_entry_addr, vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes());
 
   const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -180,6 +180,9 @@ static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
 
 typedef void (*_zero_Fn)(HeapWord* to, size_t count);
 
+// Only used for heap objects, so align_object_offset.
+// All other platforms pd_fill_to_aligned_words simply calls pd_fill_to_words, don't
+// know why this one is different.
 static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
   assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
 
@@ -2188,30 +2188,18 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
   }
 
   // Compute start of first itableOffsetEntry (which is at the end of the vtable)
-  int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
+  int vtable_base = in_bytes(Klass::vtable_start_offset());
   int scan_step = itableOffsetEntry::size() * wordSize;
-  int vte_size = vtableEntry::size() * wordSize;
+  int vte_size = vtableEntry::size_in_bytes();
 
-  lduw(recv_klass, InstanceKlass::vtable_length_offset() * wordSize, scan_temp);
+  lduw(recv_klass, in_bytes(Klass::vtable_length_offset()), scan_temp);
   // %%% We should store the aligned, prescaled offset in the klassoop.
   // Then the next several instructions would fold away.
 
-  int round_to_unit = ((HeapWordsPerLong > 1) ? BytesPerLong : 0);
   int itb_offset = vtable_base;
-  if (round_to_unit != 0) {
-    // hoist first instruction of round_to(scan_temp, BytesPerLong):
-    itb_offset += round_to_unit - wordSize;
-  }
-  int itb_scale = exact_log2(vtableEntry::size() * wordSize);
+  int itb_scale = exact_log2(vtableEntry::size_in_bytes());
   sll(scan_temp, itb_scale, scan_temp);
   add(scan_temp, itb_offset, scan_temp);
-  if (round_to_unit != 0) {
-    // Round up to align_object_offset boundary
-    // see code for InstanceKlass::start_of_itable!
-    // Was: round_to(scan_temp, BytesPerLong);
-    // Hoisted: add(scan_temp, BytesPerLong-1, scan_temp);
-    and3(scan_temp, -round_to_unit, scan_temp);
-  }
   add(recv_klass, scan_temp, scan_temp);
 
   // Adjust recv_klass by scaled itable_index, so we can free itable_index.
@@ -2280,16 +2268,16 @@ void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                            Register method_result) {
   assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
   Register sethi_temp = method_result;
-  const int base = (InstanceKlass::vtable_start_offset() * wordSize +
-                    // method pointer offset within the vtable entry:
-                    vtableEntry::method_offset_in_bytes());
+  const int base = in_bytes(Klass::vtable_start_offset()) +
+                   // method pointer offset within the vtable entry:
+                   vtableEntry::method_offset_in_bytes();
   RegisterOrConstant vtable_offset = vtable_index;
   // Each of the following three lines potentially generates an instruction.
   // But the total number of address formation instructions will always be
   // at most two, and will often be zero. In any case, it will be optimal.
   // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x).
   // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t).
-  vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size() * wordSize), vtable_offset);
+  vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size_in_bytes()), vtable_offset);
   vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp);
   Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp));
   ld_ptr(vtable_entry_addr, method_result);
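The SPARC itable scan above used to fold `round_to(scan_temp, BytesPerLong)` into the address arithmetic: add `round_to_unit - wordSize` up front, then mask with `-round_to_unit` (the `and3`). A small sketch of that power-of-two round-up, under the assumption that the unit is a power of two, as BytesPerLong is:

#include <cassert>

// Round x up to a multiple of unit (a power of two): the add/mask pair is
// exactly what the hoisted round_to in the removed assembly computed.
static long round_up_pow2(long x, long unit) {
  assert((unit & (unit - 1)) == 0 && "unit must be a power of two");
  return (x + unit - 1) & -unit;
}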
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -601,8 +601,8 @@ int MachCallDynamicJavaNode::ret_addr_offset() {
            NativeCall::instruction_size);  // sethi; setlo; call; delay slot
   } else {
     assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
-    int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
-    int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
+    int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index*vtableEntry::size_in_bytes();
+    int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
     int klass_load_size;
     if (UseCompressedClassPointers) {
       assert(Universe::heap() != NULL, "java heap should be initialized");
@@ -2658,8 +2658,8 @@ encode %{
     } else {
       klass_load_size = 1*BytesPerInstWord;
     }
-    int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
-    int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
+    int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index*vtableEntry::size_in_bytes();
+    int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
     if (Assembler::is_simm13(v_off)) {
       __ ld_ptr(G3, v_off, G5_method);
     } else {
@@ -3153,14 +3153,11 @@ void TemplateTable::invokeinterface(int byte_no) {
   //
 
   // compute start of first itableOffsetEntry (which is at end of vtable)
-  const int base = InstanceKlass::vtable_start_offset() * wordSize;
+  const int base = in_bytes(Klass::vtable_start_offset());
   Label search;
   Register Rtemp = O1_flags;
 
-  __ ld(O2_Klass, InstanceKlass::vtable_length_offset() * wordSize, Rtemp);
-  if (align_object_offset(1) > 1) {
-    __ round_to(Rtemp, align_object_offset(1));
-  }
+  __ ld(O2_Klass, in_bytes(Klass::vtable_length_offset()), Rtemp);
   __ sll(Rtemp, LogBytesPerWord, Rtemp); // Rscratch *= 4;
   if (Assembler::is_simm13(base)) {
     __ add(Rtemp, base, Rtemp);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,7 +78,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   if (DebugVtables) {
     Label L;
     // check offset vs vtable length
-    __ ld(G3_scratch, InstanceKlass::vtable_length_offset()*wordSize, G5);
+    __ ld(G3_scratch, in_bytes(Klass::vtable_length_offset()), G5);
     __ cmp_and_br_short(G5, vtable_index*vtableEntry::size(), Assembler::greaterUnsigned, Assembler::pt, L);
     __ set(vtable_index, O2);
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), O0, O2);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -5867,22 +5867,17 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
          "caller must use same register for non-constant itable index as for method");
 
   // Compute start of first itableOffsetEntry (which is at the end of the vtable)
-  int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
+  int vtable_base = in_bytes(Klass::vtable_start_offset());
   int itentry_off = itableMethodEntry::method_offset_in_bytes();
   int scan_step = itableOffsetEntry::size() * wordSize;
-  int vte_size = vtableEntry::size() * wordSize;
+  int vte_size = vtableEntry::size_in_bytes();
   Address::ScaleFactor times_vte_scale = Address::times_ptr;
   assert(vte_size == wordSize, "else adjust times_vte_scale");
 
-  movl(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
+  movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
 
   // %%% Could store the aligned, prescaled offset in the klassoop.
   lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
-  if (HeapWordsPerLong > 1) {
-    // Round up to align_object_offset boundary
-    // see code for InstanceKlass::start_of_itable!
-    round_to(scan_temp, BytesPerLong);
-  }
 
   // Adjust recv_klass by scaled itable_index, so we can free itable_index.
   assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
@@ -5930,7 +5925,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
 void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                            RegisterOrConstant vtable_index,
                                            Register method_result) {
-  const int base = InstanceKlass::vtable_start_offset() * wordSize;
+  const int base = in_bytes(Klass::vtable_start_offset());
   assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
   Address vtable_entry_addr(recv_klass,
                             vtable_index, Address::times_ptr,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,7 +85,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   if (DebugVtables) {
     Label L;
     // check offset vs vtable length
-    __ cmpl(Address(rax, InstanceKlass::vtable_length_offset()*wordSize), vtable_index*vtableEntry::size());
+    __ cmpl(Address(rax, Klass::vtable_length_offset()), vtable_index*vtableEntry::size());
     __ jcc(Assembler::greater, L);
     __ movl(rbx, vtable_index);
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), rcx, rbx);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,7 +77,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   if (DebugVtables) {
     Label L;
     // check offset vs vtable length
-    __ cmpl(Address(rax, InstanceKlass::vtable_length_offset() * wordSize),
+    __ cmpl(Address(rax, Klass::vtable_length_offset()),
             vtable_index * vtableEntry::size());
     __ jcc(Assembler::greater, L);
     __ movl(rbx, vtable_index);
@@ -141,16 +141,20 @@ public class HSDB implements ObjectHistogramPanel.Listener, SAListener {
       return;
     }
 
-    agent = new HotSpotAgent();
-    workerThread = new WorkerThread();
-    attachMenuItems = new java.util.ArrayList();
-    detachMenuItems = new java.util.ArrayList();
+    // Create frame first, to catch any GUI creation issues
+    // before we initialize agent
 
     frame = new JFrame("HSDB - HotSpot Debugger");
     frame.setSize(800, 600);
     frame.setDefaultCloseOperation(WindowConstants.DO_NOTHING_ON_CLOSE);
    frame.addWindowListener(new CloseUI());
 
+    agent = new HotSpotAgent();
+    workerThread = new WorkerThread();
+    attachMenuItems = new java.util.ArrayList();
+    detachMenuItems = new java.util.ArrayList();
+
+
     JMenuBar menuBar = new JMenuBar();
 
     //
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,7 +48,6 @@ public class ArrayKlass extends Klass {
     dimension = new CIntField(type.getCIntegerField("_dimension"), 0);
     higherDimension = new MetadataField(type.getAddressField("_higher_dimension"), 0);
     lowerDimension = new MetadataField(type.getAddressField("_lower_dimension"), 0);
-    vtableLen = new CIntField(type.getCIntegerField("_vtable_len"), 0);
     javaLangCloneableName = null;
     javaLangObjectName = null;
     javaIoSerializableName = null;
@@ -61,7 +60,6 @@ public class ArrayKlass extends Klass {
   private static CIntField dimension;
   private static MetadataField higherDimension;
   private static MetadataField lowerDimension;
-  private static CIntField vtableLen;
 
   public Klass getJavaSuper() {
     SystemDictionary sysDict = VM.getVM().getSystemDictionary();
@@ -71,7 +69,6 @@ public class ArrayKlass extends Klass {
   public long getDimension() { return dimension.getValue(this); }
   public Klass getHigherDimension() { return (Klass) higherDimension.getValue(this); }
   public Klass getLowerDimension() { return (Klass) lowerDimension.getValue(this); }
-  public long getVtableLen() { return vtableLen.getValue(this); }
 
   // constant class names - javaLangCloneable, javaIoSerializable, javaLangObject
   // Initialized lazily to avoid initialization ordering dependencies between ArrayKlass and SymbolTable
@@ -140,6 +137,5 @@ public class ArrayKlass extends Klass {
     visitor.doCInt(dimension, true);
     visitor.doMetadata(higherDimension, true);
     visitor.doMetadata(lowerDimension, true);
-    visitor.doCInt(vtableLen, true);
   }
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -696,7 +696,7 @@ public class ConstantPool extends Metadata implements ClassConstants {
   }
 
   public long getSize() {
-    return Oop.alignObjectSize(headerSize + getLength());
+    return alignSize(headerSize + getLength());
   }
 
   //----------------------------------------------------------------------
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,7 @@ public class ConstantPoolCache extends Metadata {
   public ConstantPool getConstants() { return (ConstantPool) constants.getValue(this); }
 
   public long getSize() {
-    return Oop.alignObjectSize(baseOffset + getLength() * elementSize);
+    return alignSize(baseOffset + getLength() * elementSize);
   }
 
   public ConstantPoolCacheEntry getEntryAt(int i) {
@@ -79,8 +79,7 @@ public class ConstantPoolCache extends Metadata {
   }
 
   public int getIntAt(int entry, int fld) {
-    //alignObjectSize ?
-    long offset = baseOffset + /*alignObjectSize*/entry * elementSize + fld * intSize;
+    long offset = baseOffset + entry * elementSize + fld * intSize;
     return (int) getAddress().getCIntegerAt(offset, intSize, true );
   }
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,13 +84,12 @@ public class InstanceKlass extends Klass {
     nonstaticOopMapSize = new CIntField(type.getCIntegerField("_nonstatic_oop_map_size"), 0);
     isMarkedDependent = new CIntField(type.getCIntegerField("_is_marked_dependent"), 0);
     initState = new CIntField(type.getCIntegerField("_init_state"), 0);
-    vtableLen = new CIntField(type.getCIntegerField("_vtable_len"), 0);
     itableLen = new CIntField(type.getCIntegerField("_itable_len"), 0);
     breakpoints = type.getAddressField("_breakpoints");
     genericSignatureIndex = new CIntField(type.getCIntegerField("_generic_signature_index"), 0);
     majorVersion = new CIntField(type.getCIntegerField("_major_version"), 0);
     minorVersion = new CIntField(type.getCIntegerField("_minor_version"), 0);
-    headerSize = Oop.alignObjectOffset(type.getSize());
+    headerSize = type.getSize();
 
     // read field offset constants
     ACCESS_FLAGS_OFFSET = db.lookupIntConstant("FieldInfo::access_flags_offset").intValue();
@@ -143,7 +142,6 @@ public class InstanceKlass extends Klass {
   private static CIntField nonstaticOopMapSize;
   private static CIntField isMarkedDependent;
   private static CIntField initState;
-  private static CIntField vtableLen;
   private static CIntField itableLen;
   private static AddressField breakpoints;
   private static CIntField genericSignatureIndex;
@@ -242,8 +240,7 @@ public class InstanceKlass extends Klass {
   }
 
   public long getSize() {
-    return Oop.alignObjectSize(getHeaderSize() + Oop.alignObjectOffset(getVtableLen()) +
-                               Oop.alignObjectOffset(getItableLen()) + Oop.alignObjectOffset(getNonstaticOopMapSize()));
+    return alignSize(getHeaderSize() + getVtableLen() + getItableLen() + getNonstaticOopMapSize());
   }
 
   public static long getHeaderSize() { return headerSize; }
@@ -352,7 +349,6 @@ public class InstanceKlass extends Klass {
   public long getStaticOopFieldCount() { return staticOopFieldCount.getValue(this); }
   public long getNonstaticOopMapSize() { return nonstaticOopMapSize.getValue(this); }
   public boolean getIsMarkedDependent() { return isMarkedDependent.getValue(this) != 0; }
-  public long getVtableLen() { return vtableLen.getValue(this); }
   public long getItableLen() { return itableLen.getValue(this); }
   public long majorVersion() { return majorVersion.getValue(this); }
   public long minorVersion() { return minorVersion.getValue(this); }
@@ -548,7 +544,6 @@ public class InstanceKlass extends Klass {
     visitor.doCInt(nonstaticOopMapSize, true);
     visitor.doCInt(isMarkedDependent, true);
     visitor.doCInt(initState, true);
-    visitor.doCInt(vtableLen, true);
     visitor.doCInt(itableLen, true);
   }
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,6 +61,7 @@ public class Klass extends Metadata implements ClassConstants {
     }
     subklass = new MetadataField(type.getAddressField("_subklass"), 0);
     nextSibling = new MetadataField(type.getAddressField("_next_sibling"), 0);
+    vtableLen = new CIntField(type.getCIntegerField("_vtable_len"), 0);
 
     LH_INSTANCE_SLOW_PATH_BIT = db.lookupIntConstant("Klass::_lh_instance_slow_path_bit").intValue();
     LH_LOG2_ELEMENT_SIZE_SHIFT = db.lookupIntConstant("Klass::_lh_log2_element_size_shift").intValue();
@@ -71,6 +72,7 @@ public class Klass extends Metadata implements ClassConstants {
     LH_ARRAY_TAG_OBJ_VALUE = db.lookupIntConstant("Klass::_lh_array_tag_obj_value").intValue();
   }
 
+
   public Klass(Address addr) {
     super(addr);
   }
@@ -91,6 +93,7 @@ public class Klass extends Metadata implements ClassConstants {
   private static MetadataField subklass;
   private static MetadataField nextSibling;
   private static sun.jvm.hotspot.types.Field traceIDField;
+  private static CIntField vtableLen;
 
   private Address getValue(AddressField field) {
     return addr.getAddressAt(field.getOffset());
@@ -111,6 +114,7 @@ public class Klass extends Metadata implements ClassConstants {
   public AccessFlags getAccessFlagsObj(){ return new AccessFlags(getAccessFlags()); }
   public Klass getSubklassKlass() { return (Klass) subklass.getValue(this); }
   public Klass getNextSiblingKlass() { return (Klass) nextSibling.getValue(this); }
+  public long getVtableLen() { return vtableLen.getValue(this); }
 
   public long traceID() {
     if (traceIDField == null) return 0;
@@ -179,6 +183,7 @@ public class Klass extends Metadata implements ClassConstants {
     visitor.doCInt(accessFlags, true);
     visitor.doMetadata(subklass, true);
     visitor.doMetadata(nextSibling, true);
+    visitor.doCInt(vtableLen, true);
   }
 
   public long getObjectSize() {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,11 @@ abstract public class Metadata extends VMObject {
     super(addr);
   }
 
+  public static long alignSize(long size) {
+    // natural word size.
+    return VM.getVM().alignUp(size, VM.getVM().getBytesPerWord());
+  }
+
   private static VirtualBaseConstructor<Metadata> metadataConstructor;
 
   private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -252,7 +252,7 @@ public class MethodData extends Metadata implements MethodDataInterface<Klass,Me
   }
 
   int size() {
-    return (int)Oop.alignObjectSize(VM.getVM().alignUp(sizeInBytes(), VM.getVM().getBytesPerWord())/VM.getVM().getBytesPerWord());
+    return (int)alignSize(VM.getVM().alignUp(sizeInBytes(), VM.getVM().getBytesPerWord())/VM.getVM().getBytesPerWord());
   }
 
   ParametersTypeData<Klass,Method> parametersTypeData() {
@@ -35,7 +35,11 @@ public class WorkerThread {
   public WorkerThread() {
     mqb = new MessageQueueBackend();
     mq = mqb.getFirstQueue();
-    new Thread(new MainLoop()).start();
+
+    // Run the message loop in a daemon thread so the worker can be terminated while running.
+    Thread mqthread = new Thread(new MainLoop());
+    mqthread.setDaemon(true);
+    mqthread.start();
   }
 
   /** Runs the given Runnable in the thread represented by this
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -597,7 +597,7 @@ final class HotSpotResolvedJavaMethodImpl extends HotSpotMethod implements HotSp
         }
         HotSpotVMConfig config = config();
         final int vtableIndex = getVtableIndex((HotSpotResolvedObjectTypeImpl) resolved);
-        return config.instanceKlassVtableStartOffset() + vtableIndex * config.vtableEntrySize + config.vtableEntryMethodOffset;
+        return config.klassVtableStartOffset + vtableIndex * config.vtableEntrySize + config.vtableEntryMethodOffset;
     }
 
     @Override
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -472,8 +472,8 @@ final class HotSpotResolvedObjectTypeImpl extends HotSpotResolvedJavaType implem
             /* Everything has the core vtable of java.lang.Object */
             return config.baseVtableLength();
         }
-        int result = UNSAFE.getInt(getMetaspaceKlass() + config.instanceKlassVtableLengthOffset) / (config.vtableEntrySize / config.heapWordSize);
-        assert result >= config.baseVtableLength() : UNSAFE.getInt(getMetaspaceKlass() + config.instanceKlassVtableLengthOffset) + " " + config.vtableEntrySize;
+        int result = UNSAFE.getInt(getMetaspaceKlass() + config.klassVtableLengthOffset) / (config.vtableEntrySize / config.heapWordSize);
+        assert result >= config.baseVtableLength() : UNSAFE.getInt(getMetaspaceKlass() + config.klassVtableLengthOffset) + " " + config.vtableEntrySize;
        return result;
     }
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1031,19 +1031,12 @@ public class HotSpotVMConfig {
     @HotSpotVMField(name = "InstanceKlass::_init_state", type = "u1", get = HotSpotVMField.Type.OFFSET) @Stable public int instanceKlassInitStateOffset;
     @HotSpotVMField(name = "InstanceKlass::_constants", type = "ConstantPool*", get = HotSpotVMField.Type.OFFSET) @Stable public int instanceKlassConstantsOffset;
     @HotSpotVMField(name = "InstanceKlass::_fields", type = "Array<u2>*", get = HotSpotVMField.Type.OFFSET) @Stable public int instanceKlassFieldsOffset;
-    @HotSpotVMField(name = "CompilerToVM::Data::InstanceKlass_vtable_start_offset", type = "int", get = HotSpotVMField.Type.VALUE) @Stable public int instanceKlassVtableStartOffset;
-    @HotSpotVMField(name = "CompilerToVM::Data::InstanceKlass_vtable_length_offset", type = "int", get = HotSpotVMField.Type.VALUE) @Stable public int instanceKlassVtableLengthOffset;
+    @HotSpotVMField(name = "CompilerToVM::Data::Klass_vtable_start_offset", type = "int", get = HotSpotVMField.Type.VALUE) @Stable public int klassVtableStartOffset;
+    @HotSpotVMField(name = "CompilerToVM::Data::Klass_vtable_length_offset", type = "int", get = HotSpotVMField.Type.VALUE) @Stable public int klassVtableLengthOffset;
 
     @HotSpotVMConstant(name = "InstanceKlass::linked") @Stable public int instanceKlassStateLinked;
     @HotSpotVMConstant(name = "InstanceKlass::fully_initialized") @Stable public int instanceKlassStateFullyInitialized;
 
-    /**
-     * See {@code InstanceKlass::vtable_start_offset()}.
-     */
-    public final int instanceKlassVtableStartOffset() {
-        return instanceKlassVtableStartOffset * heapWordSize;
-    }
-
     @HotSpotVMType(name = "arrayOopDesc", get = HotSpotVMType.Type.SIZE) @Stable public int arrayOopDescSize;
 
     /**
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,7 +48,10 @@
         "Load DLLs with executable-stack attribute in the VM Thread") \
                                                                       \
   product(bool, UseSHM, false, \
-          "Use SYSV shared memory for large pages")
+          "Use SYSV shared memory for large pages") \
+                                                                      \
+  diagnostic(bool, UseCpuAllocPath, false, \
+             "Use CPU_ALLOC code path in os::active_processor_count ")
 
 //
 // Defines Linux-specific default values. The flags are available on all
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
 #include "compiler/disassembler.hpp"
 #include "interpreter/interpreter.hpp"
 #include "jvm_linux.h"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
 #include "mutex_linux.inline.hpp"
@@ -106,6 +107,14 @@
 # include <inttypes.h>
 # include <sys/ioctl.h>
 
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#include <sched.h>
+#undef _GNU_SOURCE
+#else
+#include <sched.h>
+#endif
+
 // if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
 // getrusage() is prepared to handle the associated failure.
 #ifndef RUSAGE_THREAD
@@ -4762,12 +4771,72 @@ void os::make_polling_page_readable(void) {
   }
 }
 
+// Get the current number of available processors for this process.
+// This value can change at any time during a process's lifetime.
+// sched_getaffinity gives an accurate answer as it accounts for cpusets.
+// If it appears there may be more than 1024 processors then we do a
+// dynamic check - see 6515172 for details.
+// If anything goes wrong we fallback to returning the number of online
+// processors - which can be greater than the number available to the process.
 int os::active_processor_count() {
-  // Linux doesn't yet have a (official) notion of processor sets,
-  // so just return the number of online processors.
-  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
-  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
-  return online_cpus;
+  cpu_set_t cpus;  // can represent at most 1024 (CPU_SETSIZE) processors
+  cpu_set_t* cpus_p = &cpus;
+  int cpus_size = sizeof(cpu_set_t);
+
+  int configured_cpus = processor_count();  // upper bound on available cpus
+  int cpu_count = 0;
+
+  // To enable easy testing of the dynamic path on different platforms we
+  // introduce a diagnostic flag: UseCpuAllocPath
+  if (configured_cpus >= CPU_SETSIZE || UseCpuAllocPath) {
+    // kernel may use a mask bigger than cpu_set_t
+    log_trace(os)("active_processor_count: using dynamic path %s"
+                  "- configured processors: %d",
+                  UseCpuAllocPath ? "(forced) " : "",
+                  configured_cpus);
+    cpus_p = CPU_ALLOC(configured_cpus);
+    if (cpus_p != NULL) {
+      cpus_size = CPU_ALLOC_SIZE(configured_cpus);
+      // zero it just to be safe
+      CPU_ZERO_S(cpus_size, cpus_p);
+    }
+    else {
+      // failed to allocate so fallback to online cpus
+      int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
+      log_trace(os)("active_processor_count: "
+                    "CPU_ALLOC failed (%s) - using "
+                    "online processor count: %d",
+                    strerror(errno), online_cpus);
+      return online_cpus;
+    }
+  }
+  else {
+    log_trace(os)("active_processor_count: using static path - configured processors: %d",
+                  configured_cpus);
+  }
+
+  // pid 0 means the current thread - which we have to assume represents the process
+  if (sched_getaffinity(0, cpus_size, cpus_p) == 0) {
+    if (cpus_p != &cpus) {
+      cpu_count = CPU_COUNT_S(cpus_size, cpus_p);
+    }
+    else {
+      cpu_count = CPU_COUNT(cpus_p);
+    }
+    log_trace(os)("active_processor_count: sched_getaffinity processor count: %d", cpu_count);
+  }
+  else {
+    cpu_count = ::sysconf(_SC_NPROCESSORS_ONLN);
+    warning("sched_getaffinity failed (%s)- using online processor count (%d) "
+            "which may exceed available processors", strerror(errno), cpu_count);
+  }
+
+  if (cpus_p != &cpus) {
+    CPU_FREE(cpus_p);
+  }
+
+  assert(cpu_count > 0 && cpu_count <= processor_count(), "sanity check");
+  return cpu_count;
 }
 
 void os::set_native_thread_name(const char *name) {
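The core of the new os::active_processor_count() can be shown as a small standalone sketch (an illustration, not VM code): ask sched_getaffinity() for the affinity mask so cpusets are honored, and fall back to the online-processor count if the call fails. The VM version additionally switches to CPU_ALLOC-based masks when more than CPU_SETSIZE (1024) processors may be present.

#define _GNU_SOURCE 1  // for sched_getaffinity and the CPU_* macros
#include <sched.h>
#include <unistd.h>

// Count the CPUs this process is actually allowed to run on.
int available_cpus() {
  cpu_set_t cpus;
  CPU_ZERO(&cpus);
  // pid 0 queries the calling thread, taken as representative of the process.
  if (sched_getaffinity(0, sizeof(cpus), &cpus) == 0) {
    return CPU_COUNT(&cpus);  // honors cpusets and affinity masks
  }
  // Fallback: online CPUs, which may exceed what we may actually use.
  return (int)sysconf(_SC_NPROCESSORS_ONLN);
}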
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -5267,8 +5267,29 @@ bool os::check_heap(bool force) {
 
 
 bool os::find(address addr, outputStream* st) {
-  // Nothing yet
-  return false;
+  int offset = -1;
+  bool result = false;
+  char buf[256];
+  if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
+    st->print(PTR_FORMAT " ", addr);
+    if (strlen(buf) < sizeof(buf) - 1) {
+      char* p = strrchr(buf, '\\');
+      if (p) {
+        st->print("%s", p + 1);
+      } else {
+        st->print("%s", buf);
+      }
+    } else {
+      // The library name is probably truncated. Let's omit the library name.
+      // See also JDK-8147512.
+    }
+    if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
+      st->print("::%s + 0x%x", buf, offset);
+    }
+    st->cr();
+    result = true;
+  }
+  return result;
 }
 
 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
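The only subtle step in the new os::find() is trimming the DLL path to its basename before printing; a one-function sketch of that step, assuming Windows-style backslash separators:

#include <cstring>

// Return the component after the last backslash, or the whole path if none.
static const char* basename_win(const char* path) {
  const char* p = std::strrchr(path, '\\');
  return (p != nullptr) ? p + 1 : path;
}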
@@ -599,6 +599,7 @@ void os::print_register_info(outputStream *st, const void *context) {
   // this is only for the "general purpose" registers
 
 #ifdef AMD64
+  st->print("RIP="); print_location(st, uc->Rip);
   st->print("RAX="); print_location(st, uc->Rax);
   st->print("RBX="); print_location(st, uc->Rbx);
   st->print("RCX="); print_location(st, uc->Rcx);
@@ -616,6 +617,7 @@ void os::print_register_info(outputStream *st, const void *context) {
   st->print("R14="); print_location(st, uc->R14);
   st->print("R15="); print_location(st, uc->R15);
 #else
+  st->print("EIP="); print_location(st, uc->Eip);
   st->print("EAX="); print_location(st, uc->Eax);
   st->print("EBX="); print_location(st, uc->Ebx);
   st->print("ECX="); print_location(st, uc->Ecx);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2972,8 +2972,8 @@ void LIRGenerator::do_Invoke(Invoke* x) {
                               SharedRuntime::get_resolve_virtual_call_stub(),
                               arg_list, info);
       } else {
-        int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
-        int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
+        int entry_offset = in_bytes(Klass::vtable_start_offset()) + x->vtable_index() * vtableEntry::size_in_bytes();
+        int vtable_offset = entry_offset + vtableEntry::method_offset_in_bytes();
         __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
       }
       break;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2705,7 +2705,7 @@ Method* ClassFileParser::parse_method(const ClassFileStream* const cfs,
                                   ConstMethod::NORMAL,
                                   CHECK_NULL);
 
-  ClassLoadingService::add_class_method_size(m->size()*HeapWordSize);
+  ClassLoadingService::add_class_method_size(m->size()*wordSize);
 
   // Fill in information from fixed part (access_flags already set)
   m->set_constants(_cp);
@@ -4602,8 +4602,8 @@ void ClassFileParser::verify_legal_method_modifiers(jint flags,
       }
     } else if (major_gte_15) {
       // Class file version in the interval [JAVA_1_5_VERSION, JAVA_8_VERSION)
-      if (!is_public || is_static || is_final || is_synchronized ||
-          is_native || !is_abstract || is_strict) {
+      if (!is_public || is_private || is_protected || is_static || is_final ||
+          is_synchronized || is_native || !is_abstract || is_strict) {
         is_illegal = true;
       }
     } else {
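The verify_legal_method_modifiers hunk tightens the rule for interface methods in class files with a major version in [JAVA_1_5_VERSION, JAVA_8_VERSION): such a method must now also be neither private nor protected. The same condition, rewritten as a positive predicate (a sketch, with the decoded flag bits passed in rather than extracted from access_flags):

// Legal iff public and abstract, with none of the conflicting modifiers.
static bool legal_pre_java8_interface_method(bool is_public, bool is_private,
                                             bool is_protected, bool is_static,
                                             bool is_final, bool is_synchronized,
                                             bool is_native, bool is_abstract,
                                             bool is_strict) {
  return is_public && !is_private && !is_protected && !is_static &&
         !is_final && !is_synchronized && !is_native && is_abstract &&
         !is_strict;
}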
@@ -365,14 +365,14 @@ bool HashtableTextDump::skip_newline() {
 }
 
 int HashtableTextDump::skip(char must_be_char) {
-  corrupted_if(remain() < 1);
-  corrupted_if(*_p++ != must_be_char);
+  corrupted_if(remain() < 1, "Truncated");
+  corrupted_if(*_p++ != must_be_char, "Unexpected character");
   return 0;
 }
 
 void HashtableTextDump::skip_past(char c) {
   for (;;) {
-    corrupted_if(remain() < 1);
+    corrupted_if(remain() < 1, "Truncated");
     if (*_p++ == c) {
       return;
     }
@@ -381,7 +381,7 @@ void HashtableTextDump::skip_past(char c) {
 
 void HashtableTextDump::check_version(const char* ver) {
   int len = (int)strlen(ver);
-  corrupted_if(remain() < len);
+  corrupted_if(remain() < len, "Truncated");
   if (strncmp(_p, ver, len) != 0) {
     quit("wrong version of hashtable dump file", _filename);
   }
@@ -451,7 +451,7 @@ int HashtableTextDump::scan_symbol_prefix() {
 jchar HashtableTextDump::unescape(const char* from, const char* end, int count) {
   jchar value = 0;
 
-  corrupted_if(from + count > end);
+  corrupted_if(from + count > end, "Truncated");
 
   for (int i=0; i<count; i++) {
     char c = *from++;
@@ -486,7 +486,7 @@ void HashtableTextDump::get_utf8(char* utf8_buffer, int utf8_length) {
     if (*from != '\\') {
       *to++ = *from++;
     } else {
-      corrupted_if(from + 2 > end);
+      corrupted_if(from + 2 > end, "Truncated");
       char c = from[1];
       from += 2;
       switch (c) {
@@ -507,7 +507,7 @@ void HashtableTextDump::get_utf8(char* utf8_buffer, int utf8_length) {
       }
     }
   }
-  corrupted_if(n > 0); // expected more chars but file has ended
+  corrupted_if(n > 0, "Truncated"); // expected more chars but file has ended
   _p = from;
   skip_newline();
 }
@@ -276,9 +276,9 @@ public:
 
   void corrupted(const char *p, const char *msg);
 
-  inline void corrupted_if(bool cond) {
+  inline void corrupted_if(bool cond, const char *msg) {
     if (cond) {
-      corrupted(_p, NULL);
+      corrupted(_p, msg);
     }
   }
 
@@ -287,27 +287,30 @@ public:
   void skip_past(char c);
   void check_version(const char* ver);
 
-  inline bool get_num(char delim, int *utf8_length) {
+  inline void get_num(char delim, int *num) {
     const char* p = _p;
     const char* end = _end;
-    int num = 0;
+    u8 n = 0;
 
     while (p < end) {
      char c = *p ++;
       if ('0' <= c && c <= '9') {
-        num = num * 10 + (c - '0');
+        n = n * 10 + (c - '0');
+        if (n > (u8)INT_MAX) {
+          corrupted(_p, "Num overflow");
+        }
       } else if (c == delim) {
         _p = p;
-        *utf8_length = num;
-        return true;
+        *num = (int)n;
+        return;
      } else {
         // Not [0-9], not 'delim'
-        return false;
+        corrupted(_p, "Unrecognized format");;
       }
     }
 
+    corrupted(_end, "Incorrect format");
     ShouldNotReachHere();
-    return false;
   }
 
   void scan_prefix_type();
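The rewritten get_num fixes two problems at once: a malformed dump now reports corruption instead of silently returning false, and the running value is accumulated in an unsigned 64-bit u8 with an explicit INT_MAX check, so the old int arithmetic can no longer overflow. A self-contained sketch of the same loop, returning a bool where the real code calls corrupted():

#include <climits>
#include <cstdint>

// Parse a non-negative decimal int terminated by 'delim'.  The check fires
// as soon as the value passes INT_MAX, so the accumulator never wraps.
static bool parse_delimited_int(const char* p, const char* end,
                                char delim, int* out) {
  uint64_t n = 0;
  while (p < end) {
    char c = *p++;
    if ('0' <= c && c <= '9') {
      n = n * 10 + static_cast<uint64_t>(c - '0');
      if (n > static_cast<uint64_t>(INT_MAX)) {
        return false;  // "Num overflow"
      }
    } else if (c == delim) {
      *out = static_cast<int>(n);
      return true;
    } else {
      return false;    // "Unrecognized format"
    }
  }
  return false;        // input ended early: "Incorrect format"
}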
@@ -737,7 +737,7 @@ bool StringTable::copy_compact_table(char** top, char *end, GrowableArray<MemReg
     return false;
   }
   ch_table.dump(top, end);
-  *top = (char*)align_pointer_up(*top, sizeof(void*));
+  *top = (char*)align_ptr_up(*top, sizeof(void*));
 
 #endif
   return true;
@@ -760,7 +760,7 @@ const char* StringTable::init_shared_table(FileMapInfo *mapinfo, char *buffer) {
   juint *p = (juint*)buffer;
   const char* end = _shared_table.init(
           CompactHashtable<oop, char>::_string_table, (char*)p);
-  const char* aligned_end = (const char*)align_pointer_up(end, sizeof(void*));
+  const char* aligned_end = (const char*)align_ptr_up(end, sizeof(void*));
 
   if (_ignore_shared_strings) {
     _shared_table.reset();
@@ -544,7 +544,7 @@ bool SymbolTable::copy_compact_table(char** top, char*end) {
 
   ch_table.dump(top, end);
 
-  *top = (char*)align_pointer_up(*top, sizeof(void*));
+  *top = (char*)align_ptr_up(*top, sizeof(void*));
 #endif
   return true;
 }
@@ -552,7 +552,7 @@ bool SymbolTable::copy_compact_table(char** top, char*end) {
 const char* SymbolTable::init_shared_table(const char* buffer) {
   const char* end = _shared_table.init(
           CompactHashtable<Symbol*, char>::_symbol_table, buffer);
-  return (const char*)align_pointer_up(end, sizeof(void*));
+  return (const char*)align_ptr_up(end, sizeof(void*));
 }
 
 //---------------------------------------------------------------------------
@@ -600,7 +600,7 @@ void SymbolTable::print_histogram() {
   tty->print_cr("Symbol Table Histogram:");
   tty->print_cr(" Total number of symbols %7d", total_count);
   tty->print_cr(" Total size in memory %7dK",
-          (total_size*HeapWordSize)/1024);
+          (total_size*wordSize)/1024);
   tty->print_cr(" Total counted %7d", _symbols_counted);
   tty->print_cr(" Total removed %7d", _symbols_removed);
   if (_symbols_counted > 0) {
@@ -617,11 +617,11 @@ void SymbolTable::print_histogram() {
   tty->print_cr(" %6s %10s %10s", "Length", "#Symbols", "Size");
   for (i = 0; i < results_length; i++) {
     if (counts[i] > 0) {
-      tty->print_cr(" %6d %10d %10dK", i, counts[i], (sizes[i]*HeapWordSize)/1024);
+      tty->print_cr(" %6d %10d %10dK", i, counts[i], (sizes[i]*wordSize)/1024);
     }
   }
   tty->print_cr(" >=%6d %10d %10dK\n", results_length,
-          out_of_range_count, (out_of_range_size*HeapWordSize)/1024);
+          out_of_range_count, (out_of_range_size*wordSize)/1024);
 }
 
 void SymbolTable::print() {
@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -651,6 +651,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
int ex_max = -1;
// Look through each item on the exception table. Each of the fields must refer
// to a legal instruction.
if (was_recursively_verified()) return;
verify_exception_handler_table(
code_length, code_data, ex_min, ex_max, CHECK_VERIFY(this));

@ -737,11 +738,14 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
// should be used for this check. So, do the check here before a possible
// local is added to the type state.
if (Bytecodes::is_store_into_local(opcode) && bci >= ex_min && bci < ex_max) {
if (was_recursively_verified()) return;
verify_exception_handler_targets(
bci, this_uninit, &current_frame, &stackmap_table, CHECK_VERIFY(this));
verified_exc_handlers = true;
}

if (was_recursively_verified()) return;

switch (opcode) {
case Bytecodes::_nop :
no_control_flow = false; break;
@ -1730,6 +1734,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
assert(!(verified_exc_handlers && this_uninit),
"Exception handler targets got verified before this_uninit got set");
if (!verified_exc_handlers && bci >= ex_min && bci < ex_max) {
if (was_recursively_verified()) return;
verify_exception_handler_targets(
bci, this_uninit, &current_frame, &stackmap_table, CHECK_VERIFY(this));
}
@ -1767,6 +1772,9 @@ char* ClassVerifier::generate_code_data(const methodHandle& m, u4 code_length, T
return code_data;
}

// Since this method references the constant pool, call was_recursively_verified()
// before calling this method to make sure a prior class load did not cause the
// current class to get verified.
void ClassVerifier::verify_exception_handler_table(u4 code_length, char* code_data, int& min, int& max, TRAPS) {
ExceptionTable exhandlers(_method());
int exlength = exhandlers.length();
@ -1874,7 +1882,11 @@ u2 ClassVerifier::verify_stackmap_table(u2 stackmap_index, u2 bci,
return stackmap_index;
}

void ClassVerifier::verify_exception_handler_targets(u2 bci, bool this_uninit, StackMapFrame* current_frame,
// Since this method references the constant pool, call was_recursively_verified()
// before calling this method to make sure a prior class load did not cause the
// current class to get verified.
void ClassVerifier::verify_exception_handler_targets(u2 bci, bool this_uninit,
StackMapFrame* current_frame,
StackMapTable* stackmap_table, TRAPS) {
constantPoolHandle cp (THREAD, _method->constants());
ExceptionTable exhandlers(_method());
@ -1889,6 +1901,7 @@ void ClassVerifier::verify_exception_handler_targets(u2 bci, bool this_uninit, S
if (this_uninit) { flags |= FLAG_THIS_UNINIT; }
StackMapFrame* new_frame = current_frame->frame_in_exception_handler(flags);
if (catch_type_index != 0) {
if (was_recursively_verified()) return;
// We know that this index refers to a subclass of Throwable
VerificationType catch_type = cp_index_to_type(
catch_type_index, cp, CHECK_VERIFY(this));
@ -2269,6 +2282,7 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
check_protected: {
if (_this_type == stack_object_type)
break; // stack_object_type must be assignable to _current_class_type
if (was_recursively_verified()) return;
Symbol* ref_class_name =
cp->klass_name_at(cp->klass_ref_index_at(index));
if (!name_in_supers(ref_class_name, current_class()))
@ -2531,6 +2545,7 @@ void ClassVerifier::verify_invoke_init(
// Check the exception handler target stackmaps with the locals from the
// incoming stackmap (before initialize_object() changes them to outgoing
// state).
if (was_recursively_verified()) return;
verify_exception_handler_targets(bci, true, current_frame,
stackmap_table, CHECK_VERIFY(this));
} // in_try_block
@ -2548,6 +2563,7 @@ void ClassVerifier::verify_invoke_init(
return;
}
u2 new_class_index = Bytes::get_Java_u2(new_bcp + 1);
if (was_recursively_verified()) return;
verify_cp_class_type(bci, new_class_index, cp, CHECK_VERIFY(this));

// The method must be an <init> method of the indicated class
@ -2567,6 +2583,7 @@ void ClassVerifier::verify_invoke_init(
VerificationType objectref_type = new_class_type;
if (name_in_supers(ref_class_type.name(), current_class())) {
Klass* ref_klass = load_class(ref_class_type.name(), CHECK);
if (was_recursively_verified()) return;
Method* m = InstanceKlass::cast(ref_klass)->uncached_lookup_method(
vmSymbols::object_initializer_name(),
cp->signature_ref_at(bcs->get_index_u2()),
@ -2591,6 +2608,7 @@ void ClassVerifier::verify_invoke_init(
// incoming stackmap (before initialize_object() changes them to outgoing
// state).
if (in_try_block) {
if (was_recursively_verified()) return;
verify_exception_handler_targets(bci, *this_uninit, current_frame,
stackmap_table, CHECK_VERIFY(this));
}
@ -2791,6 +2809,7 @@ void ClassVerifier::verify_invoke_instructions(
verify_invoke_init(bcs, index, ref_class_type, current_frame,
code_length, in_try_block, this_uninit, cp, stackmap_table,
CHECK_VERIFY(this));
if (was_recursively_verified()) return;
} else {  // other methods
// Ensures that target class is assignable to method class.
if (opcode == Bytecodes::_invokespecial) {
@ -2816,6 +2835,7 @@ void ClassVerifier::verify_invoke_instructions(
VerificationType stack_object_type =
current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this));
if (current_type() != stack_object_type) {
if (was_recursively_verified()) return;
assert(cp->cache() == NULL, "not rewritten yet");
Symbol* ref_class_name =
cp->klass_name_at(cp->klass_ref_index_at(index));
@ -2894,6 +2914,7 @@ void ClassVerifier::verify_anewarray(
current_frame->pop_stack(
VerificationType::integer_type(), CHECK_VERIFY(this));

if (was_recursively_verified()) return;
VerificationType component_type =
cp_index_to_type(index, cp, CHECK_VERIFY(this));
int length;
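The change threaded through verify_method() above is one recurring guard: every verification step that can trigger a class load first re-checks whether that load already caused the current class to be verified, and returns early if so. A standalone sketch of the shape of that guard, with illustrative names rather than HotSpot's API:

    // Each step that may recursively load (and thereby verify) the current
    // class bails out once some other path has already finished the job.
    struct Verifier {
      bool _recursively_verified = false;
      bool was_recursively_verified() const { return _recursively_verified; }
      void step_that_may_load_classes() {
        if (was_recursively_verified()) return;  // work already done elsewhere
        // ... resolve constant-pool entries, possibly loading classes ...
      }
    };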
@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,6 +35,7 @@
#include "gc/shared/space.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,15 +40,9 @@ class MarkFromRootsClosure;
class ParMarkFromRootsClosure;

// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_DEFN \
void do_oop(oop obj); \
template <class T> inline void do_oop_work(T* p) { \
T heap_oop = oopDesc::load_heap_oop(p); \
if (!oopDesc::is_null(heap_oop)) { \
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
do_oop(obj); \
} \
}
#define DO_OOP_WORK_DEFN \
void do_oop(oop obj); \
template <class T> inline void do_oop_work(T* p);

// TODO: This duplication of the MetadataAwareOopClosure class is only needed
// because some CMS OopClosures derive from OopsInGenClosure. It would be
@ -131,8 +125,8 @@ class PushAndMarkClosure: public MetadataAwareOopClosure {
bool concurrent_precleaning);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};

// In the parallel case, the bit map and the
@ -157,8 +151,8 @@ class ParPushAndMarkClosure: public MetadataAwareOopClosure {
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { ParPushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};

// The non-parallel version (the parallel version appears further below).
@ -186,8 +180,8 @@ class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
bool concurrent_precleaning);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);

void set_freelistLock(Mutex* m) {
_freelistLock = m;
@ -220,8 +214,8 @@ class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);

void trim_queue(uint size);
};
@ -249,8 +243,8 @@ class PushOrMarkClosure: public MetadataAwareOopClosure {
MarkFromRootsClosure* parent);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);

// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
@ -287,8 +281,8 @@ class ParPushOrMarkClosure: public MetadataAwareOopClosure {
ParMarkFromRootsClosure* parent);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { ParPushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);

// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
@ -318,8 +312,8 @@ class CMSKeepAliveClosure: public MetadataAwareOopClosure {
bool concurrent_precleaning() const { return _concurrent_precleaning; }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};

class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
@ -336,8 +330,8 @@ class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};

// A parallel (MT) version of the above, used when
@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,21 +30,6 @@
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/oop.inline.hpp"

// Trim our work_queue so its length is below max at return
inline void ParMarkRefsIntoAndScanClosure::trim_queue(uint max) {
while (_work_queue->size() > max) {
oop newOop;
if (_work_queue->pop_local(newOop)) {
assert(newOop->is_oop(), "Expected an oop");
assert(_bit_map->isMarked((HeapWord*)newOop),
"only grey objects on this stack");
// iterate over the oops in this oop, marking and pushing
// the ones in CMS heap (i.e. in _span).
newOop->oop_iterate(&_parPushAndMarkClosure);
}
}
}

// MetadataAwareOopClosure and MetadataAwareOopsInGenClosure are duplicated,
// until we get rid of OopsInGenClosure.

@ -61,4 +46,48 @@ inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) {
cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim);
}

// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_IMPL(cls) \
template <class T> void cls::do_oop_work(T* p) { \
T heap_oop = oopDesc::load_heap_oop(p); \
if (!oopDesc::is_null(heap_oop)) { \
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
do_oop(obj); \
} \
}

#define DO_OOP_WORK_NV_IMPL(cls) \
DO_OOP_WORK_IMPL(cls) \
void cls::do_oop_nv(oop* p) { cls::do_oop_work(p); } \
void cls::do_oop_nv(narrowOop* p) { cls::do_oop_work(p); }

DO_OOP_WORK_IMPL(MarkRefsIntoClosure)
DO_OOP_WORK_IMPL(ParMarkRefsIntoClosure)
DO_OOP_WORK_IMPL(MarkRefsIntoVerifyClosure)
DO_OOP_WORK_NV_IMPL(PushAndMarkClosure)
DO_OOP_WORK_NV_IMPL(ParPushAndMarkClosure)
DO_OOP_WORK_NV_IMPL(MarkRefsIntoAndScanClosure)
DO_OOP_WORK_NV_IMPL(ParMarkRefsIntoAndScanClosure)

// Trim our work_queue so its length is below max at return
inline void ParMarkRefsIntoAndScanClosure::trim_queue(uint max) {
while (_work_queue->size() > max) {
oop newOop;
if (_work_queue->pop_local(newOop)) {
assert(newOop->is_oop(), "Expected an oop");
assert(_bit_map->isMarked((HeapWord*)newOop),
"only grey objects on this stack");
// iterate over the oops in this oop, marking and pushing
// the ones in CMS heap (i.e. in _span).
newOop->oop_iterate(&_parPushAndMarkClosure);
}
}
}

DO_OOP_WORK_NV_IMPL(PushOrMarkClosure)
DO_OOP_WORK_NV_IMPL(ParPushOrMarkClosure)
DO_OOP_WORK_NV_IMPL(CMSKeepAliveClosure)
DO_OOP_WORK_NV_IMPL(CMSInnerParMarkAndPushClosure)
DO_OOP_WORK_IMPL(CMSParKeepAliveClosure)

#endif // SHARE_VM_GC_CMS_CMSOOPCLOSURES_INLINE_HPP
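Since DO_OOP_WORK_IMPL and DO_OOP_WORK_NV_IMPL now generate the bodies that used to live in the header, it may help to see one expansion spelled out. Per the macros above, DO_OOP_WORK_NV_IMPL(PushAndMarkClosure) produces roughly:

    template <class T> void PushAndMarkClosure::do_oop_work(T* p) {
      T heap_oop = oopDesc::load_heap_oop(p);               // may be a narrowOop
      if (!oopDesc::is_null(heap_oop)) {
        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
        do_oop(obj);                                        // dispatch to the closure's handler
      }
    }
    void PushAndMarkClosure::do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
    void PushAndMarkClosure::do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }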
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -219,6 +219,10 @@ void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
}
}

size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
return adjustObjectSize(oop(addr)->size());
}

void CompactibleFreeListSpace::resetIndexedFreeListArray() {
for (size_t i = 1; i < IndexSetSize; i++) {
assert(_indexedFreeList[i].size() == (size_t) i,
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -313,9 +313,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
return adjustObjectSize(size);
}

inline size_t obj_size(const HeapWord* addr) const {
return adjustObjectSize(oop(addr)->size());
}
inline size_t obj_size(const HeapWord* addr) const;

protected:
// Reset the indexed free list to its initial empty condition.
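The obj_size() move follows the same pattern as the closure changes above: the declaration stays in the header and the oop-dereferencing body moves to the .cpp, so the header no longer needs heavyweight inline includes. A generic sketch of the split, with illustrative names:

    #include <cstddef>

    // space.hpp — declaration only, so the header stays include-light:
    struct Space {
      size_t obj_size(const unsigned char* addr) const;
    };

    // space.cpp — the body, free to pull in whatever it needs:
    size_t Space::obj_size(const unsigned char* addr) const {
      return static_cast<size_t>(addr[0]);  // stand-in for reading the object's size field
    }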
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3324,6 +3324,8 @@ class ParConcMarkingClosure: public MetadataAwareOopClosure {
}
};

DO_OOP_WORK_IMPL(ParConcMarkingClosure)

// Grey object scanning during work stealing phase --
// the salient assumption here is that any references
// that are in these stolen objects being scanned must
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@
#include "gc/cms/parOopClosures.inline.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
@ -414,7 +414,7 @@ void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
}

void ParScanThreadStateSet::print_taskqueue_stats() {
if (!develop_log_is_enabled(Trace, gc, task, stats)) {
if (!log_develop_is_enabled(Trace, gc, task, stats)) {
return;
}
LogHandle(gc, task, stats) log;
@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,6 +34,31 @@
/////////////////////////////////////////////////////////////////////////

PromotedObject* PromotedObject::next() const {
assert(!((FreeChunk*)this)->is_free(), "Error");
PromotedObject* res;
if (UseCompressedOops) {
// The next pointer is a compressed oop stored in the top 32 bits
res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
} else {
res = (PromotedObject*)(_next & next_mask);
}
assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res)));
return res;
}

inline void PromotedObject::setNext(PromotedObject* x) {
assert(((intptr_t)x & ~next_mask) == 0, "Conflict in bit usage, "
"or insufficient alignment of objects");
if (UseCompressedOops) {
assert(_data._narrow_next == 0, "Overwrite?");
_data._narrow_next = oopDesc::encode_heap_oop(oop(x));
} else {
_next |= (intptr_t)x;
}
assert(!((FreeChunk*)this)->is_free(), "Error");
}

//////////////////////////////////////////////////////////////////////////////
// We go over the list of promoted objects, removing each from the list,
// and applying the closure (this may, in turn, add more elements to
@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,29 +64,8 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
Data _data;
};
public:
inline PromotedObject* next() const {
assert(!((FreeChunk*)this)->is_free(), "Error");
PromotedObject* res;
if (UseCompressedOops) {
// The next pointer is a compressed oop stored in the top 32 bits
res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
} else {
res = (PromotedObject*)(_next & next_mask);
}
assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res)));
return res;
}
inline void setNext(PromotedObject* x) {
assert(((intptr_t)x & ~next_mask) == 0, "Conflict in bit usage, "
"or insufficient alignment of objects");
if (UseCompressedOops) {
assert(_data._narrow_next == 0, "Overwrite?");
_data._narrow_next = oopDesc::encode_heap_oop(oop(x));
} else {
_next |= (intptr_t)x;
}
assert(!((FreeChunk*)this)->is_free(), "Error");
}
PromotedObject* next() const;
void setNext(PromotedObject* x);
inline void setPromotedMark() {
_next |= promoted_mask;
assert(!((FreeChunk*)this)->is_free(), "Error");
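PromotedObject::next()/setNext() above rely on a tagged-pointer layout: the low bits of _next carry flags, and next_mask strips them to recover the pointer (with compressed oops, a narrow oop is kept in the upper half of the word instead). A simplified standalone model of the uncompressed case, with assumed mask values:

    #include <cstdint>

    struct Node {
      // Objects are at least 8-byte aligned, so the low 3 bits are free for flags.
      static const intptr_t promoted_mask = 1;
      static const intptr_t next_mask     = ~(intptr_t)7;
      intptr_t _next = 0;

      Node* next() const        { return (Node*)(_next & next_mask); }
      void  set_next(Node* n)   { _next = (_next & ~next_mask) | (intptr_t)n; }
      void  set_promoted_mark() { _next |= promoted_mask; }
      bool  promoted() const    { return (_next & promoted_mask) != 0; }
    };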
@ -31,6 +31,7 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
@ -1062,7 +1063,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
}
g1h->check_bitmaps("Remark Start");
g1h->verifier()->check_bitmaps("Remark Start");

G1CollectorPolicy* g1p = g1h->g1_policy();
g1p->record_concurrent_mark_remark_start();
@ -1111,7 +1112,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
}
g1h->check_bitmaps("Remark End");
g1h->verifier()->check_bitmaps("Remark End");
assert(!restart_for_overflow(), "sanity");
// Completely reset the marking state since marking completed
set_non_marking_state();
@ -1605,14 +1606,14 @@ void ConcurrentMark::cleanup() {
return;
}

g1h->verify_region_sets_optional();
g1h->verifier()->verify_region_sets_optional();

if (VerifyDuringGC) {
HandleMark hm;  // handle scope
g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
}
g1h->check_bitmaps("Cleanup Start");
g1h->verifier()->check_bitmaps("Cleanup Start");

G1CollectorPolicy* g1p = g1h->g1_policy();
g1p->record_concurrent_mark_cleanup_start();
@ -1702,9 +1703,9 @@ void ConcurrentMark::cleanup() {
Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
}

g1h->check_bitmaps("Cleanup End");
g1h->verifier()->check_bitmaps("Cleanup End");

g1h->verify_region_sets_optional();
g1h->verifier()->verify_region_sets_optional();

// We need to make this be a "collection" so any collection pause that
// races with it goes around and waits for completeCleanup to finish.
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -96,12 +96,7 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
}

// The argument addr should be the start address of a valid object
HeapWord* nextObject(HeapWord* addr) {
oop obj = (oop) addr;
HeapWord* res = addr + obj->size();
assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity");
return res;
}
inline HeapWord* nextObject(HeapWord* addr);

void print_on_error(outputStream* st, const char* prefix) const;

@ -627,14 +622,7 @@ public:
// If marking is not in progress, it's a no-op.
void verify_no_cset_oops() PRODUCT_RETURN;

bool isPrevMarked(oop p) const {
assert(p != NULL && p->is_oop(), "expected an oop");
HeapWord* addr = (HeapWord*)p;
assert(addr >= _prevMarkBitMap->startWord() ||
addr < _prevMarkBitMap->endWord(), "in a region");

return _prevMarkBitMap->isMarked(addr);
}
inline bool isPrevMarked(oop p) const;

inline bool do_yield_check(uint worker_i = 0);
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -185,6 +185,14 @@ inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
return true;
}

// The argument addr should be the start address of a valid object
HeapWord* CMBitMapRO::nextObject(HeapWord* addr) {
oop obj = (oop) addr;
HeapWord* res = addr + obj->size();
assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity");
return res;
}

#define check_mark(addr) \
assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize), \
"outside underlying space?"); \
@ -353,6 +361,15 @@ inline void ConcurrentMark::markPrev(oop p) {
((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
}

bool ConcurrentMark::isPrevMarked(oop p) const {
assert(p != NULL && p->is_oop(), "expected an oop");
HeapWord* addr = (HeapWord*)p;
assert(addr >= _prevMarkBitMap->startWord() ||
addr < _prevMarkBitMap->endWord(), "in a region");

return _prevMarkBitMap->isMarked(addr);
}

inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
uint worker_id, HeapRegion* hr) {
assert(obj != NULL, "pre-condition");
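The now out-of-line nextObject() encodes the basic heap-parsing idiom: objects are laid out back to back, so the next object starts at the current start address plus the current object's size (HotSpot counts in HeapWords; the analogue below uses bytes). A miniature sketch with illustrative types:

    #include <cstddef>

    struct ObjHeader { size_t size_in_bytes; };  // stand-in for the real object header

    inline const unsigned char* next_object(const unsigned char* addr) {
      return addr + reinterpret_cast<const ObjHeader*>(addr)->size_in_bytes;
    }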
@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -438,7 +438,7 @@ void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
// If an end alignment was requested, insert filler objects.
if (end_alignment_in_bytes != 0) {
HeapWord* currtop = _allocation_region->top();
HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
HeapWord* newtop = (HeapWord*)align_ptr_up(currtop, end_alignment_in_bytes);
size_t fill_size = pointer_delta(newtop, currtop);
if (fill_size != 0) {
if (fill_size < CollectedHeap::min_fill_size()) {
@ -447,8 +447,8 @@ void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
// region boundary because the max supported alignment is smaller than the min
// region size, and because the allocation code never leaves space smaller than
// the min_fill_size at the top of the current allocation region.
newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
end_alignment_in_bytes);
newtop = (HeapWord*)align_ptr_up(currtop + CollectedHeap::min_fill_size(),
end_alignment_in_bytes);
fill_size = pointer_delta(newtop, currtop);
}
HeapWord* fill = archive_mem_allocate(fill_size);
@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -74,10 +74,16 @@ class CodeRootSetTable : public Hashtable<nmethod*, mtGC> {
static size_t static_mem_size() {
return sizeof(_purge_list);
}

size_t mem_size();
};

CodeRootSetTable* volatile CodeRootSetTable::_purge_list = NULL;

size_t CodeRootSetTable::mem_size() {
return sizeof(CodeRootSetTable) + (entry_size() * number_of_entries()) + (sizeof(HashtableBucket<mtGC>) * table_size());
}

CodeRootSetTable::Entry* CodeRootSetTable::new_entry(nmethod* nm) {
unsigned int hash = compute_hash(nm);
Entry* entry = (Entry*) new_entry_free_list();
@ -232,7 +238,6 @@ void G1CodeRootSet::move_to_large() {
OrderAccess::release_store_ptr(&_table, temp);
}

void G1CodeRootSet::purge() {
CodeRootSetTable::purge();
}
@ -247,12 +252,13 @@ void G1CodeRootSet::add(nmethod* method) {
allocate_small_table();
}
added = _table->add(method);
if (_length == Threshold) {
move_to_large();
}
if (added) {
if (_length == Threshold) {
move_to_large();
}
++_length;
}
assert(_length == (size_t)_table->number_of_entries(), "sizes should match");
}

bool G1CodeRootSet::remove(nmethod* method) {
@ -266,11 +272,13 @@ bool G1CodeRootSet::remove(nmethod* method) {
clear();
}
}
assert((_length == 0 && _table == NULL) ||
(_length == (size_t)_table->number_of_entries()), "sizes should match");
return removed;
}

bool G1CodeRootSet::contains(nmethod* method) {
CodeRootSetTable* table = load_acquire_table();
CodeRootSetTable* table = load_acquire_table(); // contains() may be called outside of lock, so ensure mem sync.
if (table != NULL) {
return table->contains(method);
}
@ -284,8 +292,7 @@ void G1CodeRootSet::clear() {
}

size_t G1CodeRootSet::mem_size() {
return sizeof(*this) +
(_table != NULL ? sizeof(CodeRootSetTable) + _table->entry_size() * _length : 0);
return sizeof(*this) + (_table != NULL ? _table->mem_size() : 0);
}

void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
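The add() change above is a pure bookkeeping fix: _length (and the grow-to-large decision keyed off it) must track only inserts that actually added a new entry, or the cached count drifts from _table->number_of_entries(). The same fix in a standalone analogue:

    #include <cstddef>
    #include <unordered_set>

    class RootSet {
      std::unordered_set<const void*> _table;
      size_t _length = 0;                        // cached element count
      static const size_t Threshold = 24;
      void move_to_large() { /* switch to the large-table representation */ }
    public:
      void add(const void* nm) {
        bool added = _table.insert(nm).second;   // false if nm was already present
        if (added) {
          if (_length == Threshold) move_to_large();
          ++_length;
        }
        // invariant restored: _length == _table.size()
      }
    };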
File diff suppressed because it is too large
@ -82,6 +82,7 @@ class Ticks;
class WorkGang;
class G1Allocator;
class G1ArchiveAllocator;
class G1HeapVerifier;

typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
@ -118,6 +119,7 @@ class G1CollectedHeap : public CollectedHeap {
friend class VMStructs;
friend class MutatorAllocRegion;
friend class G1GCAllocRegion;
friend class G1HeapVerifier;

// Closures used in implementation.
friend class G1ParScanThreadState;
@ -181,6 +183,9 @@ private:
// Manages all allocations with regions except humongous object allocations.
G1Allocator* _allocator;

// Manages all heap verification.
G1HeapVerifier* _verifier;

// Outside of GC pauses, the number of bytes used in all regions other
// than the current allocation region(s).
size_t _summary_bytes_used;
@ -286,12 +291,6 @@ private:
size_t size,
size_t translation_factor);

double verify(bool guard, const char* msg);
void verify_before_gc();
void verify_after_gc();

void log_gc_footer(jlong pause_time_counter);

void trace_heap(GCWhen::Type when, const GCTracer* tracer);

void process_weak_jni_handles();
@ -527,6 +526,10 @@ public:
return _allocator;
}

G1HeapVerifier* verifier() {
return _verifier;
}

G1MonitoringSupport* g1mm() {
assert(_g1mm != NULL, "should have been initialized");
return _g1mm;
@ -1056,54 +1059,6 @@ public:
// The number of regions that are not completely free.
uint num_used_regions() const { return num_regions() - num_free_regions(); }

void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
void verify_dirty_young_regions() PRODUCT_RETURN;

#ifndef PRODUCT
// Make sure that the given bitmap has no marked objects in the
// range [from,limit). If it does, print an error message and return
// false. Otherwise, just return true. bitmap_name should be "prev"
// or "next".
bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
HeapWord* from, HeapWord* limit);

// Verify that the prev / next bitmap range [tams,end) for the given
// region has no marks. Return true if all is well, false if errors
// are detected.
bool verify_bitmaps(const char* caller, HeapRegion* hr);
#endif // PRODUCT

// If G1VerifyBitmaps is set, verify that the marking bitmaps for
// the given region do not have any spurious marks. If errors are
// detected, print appropriate error messages and crash.
void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN;

// If G1VerifyBitmaps is set, verify that the marking bitmaps do not
// have any spurious marks. If errors are detected, print
// appropriate error messages and crash.
void check_bitmaps(const char* caller) PRODUCT_RETURN;

// Do sanity check on the contents of the in-cset fast test table.
bool check_cset_fast_test() PRODUCT_RETURN_( return true; );

// verify_region_sets() performs verification over the region
// lists. It will be compiled in the product code to be used when
// necessary (i.e., during heap verification).
void verify_region_sets();

// verify_region_sets_optional() is planted in the code for
// list verification in non-product builds (and it can be enabled in
// product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
#if HEAP_REGION_SET_FORCE_VERIFY
void verify_region_sets_optional() {
verify_region_sets();
}
#else // HEAP_REGION_SET_FORCE_VERIFY
void verify_region_sets_optional() { }
#endif // HEAP_REGION_SET_FORCE_VERIFY

#ifdef ASSERT
bool is_on_master_free_list(HeapRegion* hr) {
return _hrm.is_free(hr);
@ -1425,11 +1380,6 @@ public:

inline bool is_obj_ill(const oop obj) const;

bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
bool is_marked(oop obj, VerifyOption vo);
const char* top_at_mark_start_str(VerifyOption vo);

ConcurrentMark* concurrent_mark() const { return _cm; }

// Refinement
@ -117,15 +117,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
_rs_lengths_prediction(0),
_max_survivor_regions(0),

_eden_used_bytes_before_gc(0),
_survivor_used_bytes_before_gc(0),
_old_used_bytes_before_gc(0),
_humongous_used_bytes_before_gc(0),
_heap_used_bytes_before_gc(0),
_metaspace_used_bytes_before_gc(0),
_eden_capacity_bytes_before_gc(0),
_heap_capacity_bytes_before_gc(0),

_eden_cset_region_length(0),
_survivor_cset_region_length(0),
_old_cset_region_length(0),
@ -809,7 +800,6 @@ G1CollectorPolicy::verify_young_ages(HeapRegion* head,

void G1CollectorPolicy::record_full_collection_start() {
_full_collection_start_sec = os::elapsedTime();
record_heap_size_info_at_start(true /* full */);
// Release the future to-space so that it is available for compaction into.
collector_state()->set_full_collection(true);
}
@ -871,8 +861,6 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
_trace_young_gen_time_data.record_start_collection(s_w_t_ms);
_stop_world_start = 0.0;

record_heap_size_info_at_start(false /* full */);

phase_times()->record_cur_collection_start_sec(start_time_sec);
_pending_cards = _g1->pending_card_num();

@ -987,7 +975,7 @@ bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc
// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
double end_time_sec = os::elapsedTime();

size_t cur_used_bytes = _g1->used();
@ -1138,7 +1126,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
}
_rs_length_diff_seq->add((double) rs_length_diff);

size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
double cost_per_byte_ms = 0.0;

@ -1260,51 +1248,8 @@ void G1CollectorPolicy::report_ihop_statistics() {
_ihop_control->print();
}

#define EXT_SIZE_FORMAT "%.1f%s"
#define EXT_SIZE_PARAMS(bytes) \
byte_size_in_proper_unit((double)(bytes)), \
proper_unit_for_byte_size((bytes))

void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
YoungList* young_list = _g1->young_list();
_eden_used_bytes_before_gc = young_list->eden_used_bytes();
_survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
_heap_capacity_bytes_before_gc = _g1->capacity();
_old_used_bytes_before_gc = _g1->old_regions_count() * HeapRegion::GrainBytes;
_humongous_used_bytes_before_gc = _g1->humongous_regions_count() * HeapRegion::GrainBytes;
_heap_used_bytes_before_gc = _g1->used();
_eden_capacity_bytes_before_gc = (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
_metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
}

void G1CollectorPolicy::print_detailed_heap_transition() const {
YoungList* young_list = _g1->young_list();

size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
size_t heap_used_bytes_after_gc = _g1->used();
size_t old_used_bytes_after_gc = _g1->old_regions_count() * HeapRegion::GrainBytes;
size_t humongous_used_bytes_after_gc = _g1->humongous_regions_count() * HeapRegion::GrainBytes;

size_t heap_capacity_bytes_after_gc = _g1->capacity();
size_t eden_capacity_bytes_after_gc =
(_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
size_t survivor_capacity_bytes_after_gc = _max_survivor_regions * HeapRegion::GrainBytes;

log_info(gc, heap)("Eden: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
_eden_used_bytes_before_gc / K, eden_used_bytes_after_gc /K, eden_capacity_bytes_after_gc /K);
log_info(gc, heap)("Survivor: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
_survivor_used_bytes_before_gc / K, survivor_used_bytes_after_gc /K, survivor_capacity_bytes_after_gc /K);
log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
_old_used_bytes_before_gc / K, old_used_bytes_after_gc /K);
log_info(gc, heap)("Humongous: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
_humongous_used_bytes_before_gc / K, humongous_used_bytes_after_gc /K);

MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
}

void G1CollectorPolicy::print_phases(double pause_time_ms) {
phase_times()->print(pause_time_ms);
void G1CollectorPolicy::print_phases() {
phase_times()->print();
}

void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
@ -2310,7 +2255,7 @@ void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
// whether we added any apparently expensive regions or not, to
// avoid generating output per region.
log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
"old %u regions, expensive: %u regions, min %u regions, remaining time: %1.2fms",
"old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
}

@ -2319,7 +2264,7 @@ void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {

stop_incremental_cset_building();

log_debug(gc, ergo, cset)("Finish choosing CSet. old %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
old_cset_region_length(), predicted_old_time_ms, time_remaining_ms);

double non_young_end_time_sec = os::elapsedTime();
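record_collection_pause_end() now receives the pre-GC heap occupancy from its caller instead of reading the field the policy used to cache in record_heap_size_info_at_start(); that cache moves out of the policy entirely (see G1HeapTransition below). The call shape this implies, sketched with assumed caller-side names:

    // Caller samples occupancy before the pause and threads it through:
    size_t heap_used_before = g1h->used();
    // ... evacuation pause runs ...
    g1_policy->record_collection_pause_end(pause_time_ms, cards_scanned, heap_used_before);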
@ -636,7 +636,7 @@ public:

// Record the start and end of an evacuation pause.
void record_collection_pause_start(double start_time_sec);
void record_collection_pause_end(double pause_time_ms, size_t cards_scanned);
void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);

// Record the start and end of a full collection.
void record_full_collection_start();
@ -654,15 +654,7 @@ public:
void record_concurrent_mark_cleanup_end();
void record_concurrent_mark_cleanup_completed();

// Records the information about the heap size for reporting in
// print_detailed_heap_transition
void record_heap_size_info_at_start(bool full);

// Print heap sizing transition (with less and more detail).
void print_detailed_heap_transition() const;

virtual void print_phases(double pause_time_ms);
virtual void print_phases();

void record_stop_world_start();
void record_concurrent_pause();
@ -825,16 +817,6 @@ private:
// The value of _heap_bytes_before_gc is also used to calculate
// the cost of copying.

size_t _eden_used_bytes_before_gc;       // Eden occupancy before GC
size_t _survivor_used_bytes_before_gc;   // Survivor occupancy before GC
size_t _old_used_bytes_before_gc;        // Old occupancy before GC
size_t _humongous_used_bytes_before_gc;  // Humongous occupancy before GC
size_t _heap_used_bytes_before_gc;       // Heap occupancy before GC
size_t _metaspace_used_bytes_before_gc;  // Metaspace occupancy before GC

size_t _eden_capacity_bytes_before_gc;   // Eden capacity before GC
size_t _heap_capacity_bytes_before_gc;   // Heap capacity before GC

// The amount of survivor regions after a collection.
uint _recorded_survivor_regions;
// List of survivor regions.
@ -846,6 +828,10 @@ private:
public:
uint tenuring_threshold() const { return _tenuring_threshold; }

uint max_survivor_regions() {
return _max_survivor_regions;
}

static const uint REGIONS_UNLIMITED = (uint) -1;

uint max_regions(InCSetState dest) const {
@ -28,6 +28,7 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1EvacFailure.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1_globals.hpp"
#include "gc/g1/heapRegion.hpp"
@ -223,7 +224,7 @@ public:
if (hr->evacuation_failed()) {
hr->note_self_forwarding_removal_start(during_initial_mark,
during_conc_mark);
_g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);
_g1h->verifier()->check_bitmaps("Self-Forwarding Ptr Removal", hr);

// In the common case (i.e. when there is no evacuation
// failure) we make sure that the following is done when
@ -30,6 +30,7 @@
#include "gc/g1/workerDataArray.inline.hpp"
#include "memory/allocation.hpp"
#include "logging/log.hpp"
#include "runtime/timer.hpp"
#include "runtime/os.hpp"

// Helper class for avoiding interleaved logging
@ -125,7 +126,7 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup:", true, 2);
_gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup:", true, 2);

_gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty", true, 3);
_gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty:", true, 3);
_redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards:", true, 3);
_gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);
}
@ -133,6 +134,7 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
assert(active_gc_threads > 0, "The number of threads must be > 0");
assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads");
_gc_start_counter = os::elapsed_counter();
_active_gc_threads = active_gc_threads;
_cur_expand_heap_time_ms = 0.0;
_external_accounted_time_ms = 0.0;
@ -146,6 +148,7 @@ void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
}

void G1GCPhaseTimes::note_gc_end() {
_gc_pause_time_ms = TimeHelper::counter_to_millis(os::elapsed_counter() - _gc_start_counter);
for (uint i = 0; i < _active_gc_threads; i++) {
double worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i);
record_time_secs(GCWorkerTotal, i , worker_time);
@ -349,7 +352,7 @@ class G1GCParPhasePrinter : public StackObj {
}
};

void G1GCPhaseTimes::print(double pause_time_ms) {
void G1GCPhaseTimes::print() {
note_gc_end();

G1GCParPhasePrinter par_phase_printer(this);
@ -373,7 +376,7 @@ void G1GCPhaseTimes::print(double pause_time_ms) {
}
print_stats(Indents[1], "Clear CT", _cur_clear_ct_time_ms);
print_stats(Indents[1], "Expand Heap After Collection", _cur_expand_heap_time_ms);
double misc_time_ms = pause_time_ms - accounted_time_ms();
double misc_time_ms = _gc_pause_time_ms - accounted_time_ms();
print_stats(Indents[1], "Other", misc_time_ms);
if (_cur_verify_before_time_ms > 0.0) {
print_stats(Indents[2], "Verify Before", _cur_verify_before_time_ms);
@ -36,6 +36,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {

uint _active_gc_threads;
uint _max_gc_threads;
jlong _gc_start_counter;
double _gc_pause_time_ms;

public:
enum GCParPhases {
@ -126,7 +128,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
public:
G1GCPhaseTimes(uint max_gc_threads);
void note_gc_start(uint active_gc_threads);
void print(double pause_time_ms);
void print();

// record the time a phase took in seconds
void record_time_secs(GCParPhases phase, uint worker_i, double secs);
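With print(double pause_time_ms) replaced by print(), G1GCPhaseTimes times the pause itself from the counters captured in note_gc_start()/note_gc_end(). A standalone analogue of that scheme using the standard clock:

    #include <chrono>

    class PhaseTimes {
      std::chrono::steady_clock::time_point _gc_start;
      double _gc_pause_time_ms = 0.0;
    public:
      void note_gc_start() { _gc_start = std::chrono::steady_clock::now(); }
      void note_gc_end() {
        _gc_pause_time_ms = std::chrono::duration<double, std::milli>(
            std::chrono::steady_clock::now() - _gc_start).count();
      }
      // "Other" time is whatever the pause spent outside the accounted phases.
      double misc_ms(double accounted_ms) const { return _gc_pause_time_ms - accounted_ms; }
    };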
hotspot/src/share/vm/gc/g1/g1HeapTransition.cpp (new file, 122 lines)
@ -0,0 +1,122 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"

G1HeapTransition::Data::Data(G1CollectedHeap* g1_heap) {
YoungList* young_list = g1_heap->young_list();
_eden_length = young_list->eden_length();
_survivor_length = young_list->survivor_length();
_old_length = g1_heap->old_regions_count();
_humongous_length = g1_heap->humongous_regions_count();
_metaspace_used_bytes = MetaspaceAux::used_bytes();
}

G1HeapTransition::G1HeapTransition(G1CollectedHeap* g1_heap) : _g1_heap(g1_heap), _before(g1_heap) { }

struct DetailedUsage : public StackObj {
size_t _eden_used;
size_t _survivor_used;
size_t _old_used;
size_t _humongous_used;

size_t _eden_region_count;
size_t _survivor_region_count;
size_t _old_region_count;
size_t _humongous_region_count;

DetailedUsage() :
_eden_used(0), _survivor_used(0), _old_used(0), _humongous_used(0),
_eden_region_count(0), _survivor_region_count(0), _old_region_count(0), _humongous_region_count(0) {}
};

class DetailedUsageClosure: public HeapRegionClosure {
public:
DetailedUsage _usage;
bool doHeapRegion(HeapRegion* r) {
if (r->is_old()) {
_usage._old_used += r->used();
_usage._old_region_count++;
} else if (r->is_survivor()) {
_usage._survivor_used += r->used();
_usage._survivor_region_count++;
} else if (r->is_eden()) {
_usage._eden_used += r->used();
_usage._eden_region_count++;
} else if (r->is_humongous()) {
_usage._humongous_used += r->used();
_usage._humongous_region_count++;
} else {
assert(r->used() == 0, "Expected used to be 0 but it was " SIZE_FORMAT, r->used());
}
return false;
}
};

void G1HeapTransition::print() {
Data after(_g1_heap);

size_t eden_capacity_bytes_after_gc = _g1_heap->g1_policy()->young_list_target_length() - after._survivor_length;
size_t survivor_capacity_bytes_after_gc = _g1_heap->g1_policy()->max_survivor_regions();

DetailedUsage usage;
if (log_is_enabled(Trace, gc, heap)) {
DetailedUsageClosure blk;
_g1_heap->heap_region_iterate(&blk);
usage = blk._usage;
assert(usage._eden_region_count == 0, "Expected no eden regions, but got " SIZE_FORMAT, usage._eden_region_count);
assert(usage._survivor_region_count == after._survivor_length, "Expected survivors to be " SIZE_FORMAT " but was " SIZE_FORMAT,
after._survivor_length, usage._survivor_region_count);
assert(usage._old_region_count == after._old_length, "Expected old to be " SIZE_FORMAT " but was " SIZE_FORMAT,
after._old_length, usage._old_region_count);
assert(usage._humongous_region_count == after._humongous_length, "Expected humongous to be " SIZE_FORMAT " but was " SIZE_FORMAT,
after._humongous_length, usage._humongous_region_count);
}

log_info(gc, heap)("Eden regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")",
_before._eden_length, after._eden_length, eden_capacity_bytes_after_gc);
log_trace(gc, heap)(" Used: 0K, Waste: 0K");

log_info(gc, heap)("Survivor regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")",
_before._survivor_length, after._survivor_length, survivor_capacity_bytes_after_gc);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._survivor_used / K, ((after._survivor_length * HeapRegion::GrainBytes) - usage._survivor_used) / K);

log_info(gc, heap)("Old regions: " SIZE_FORMAT "->" SIZE_FORMAT,
_before._old_length, after._old_length);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._old_used / K, ((after._old_length * HeapRegion::GrainBytes) - usage._old_used) / K);

log_info(gc, heap)("Humongous regions: " SIZE_FORMAT "->" SIZE_FORMAT,
_before._humongous_length, after._humongous_length);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._humongous_used / K, ((after._humongous_length * HeapRegion::GrainBytes) - usage._humongous_used) / K);

MetaspaceAux::print_metaspace_change(_before._metaspace_used_bytes);
}
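G1HeapTransition reduces to a snapshot-delta pattern: capture the region counts at construction (before the pause), capture them again in print() (after), and log before->after per region type. The shape of it, stripped to standard C++ with illustrative counters:

    #include <cstdio>

    struct Counts { size_t eden = 0, survivor = 0, old_gen = 0; };

    class HeapTransition {
      Counts _before;
      static Counts sample() { return Counts(); }  // stand-in for reading the real heap
    public:
      HeapTransition() : _before(sample()) {}
      void print() const {
        Counts after = sample();
        std::printf("Eden regions: %zu->%zu\n", _before.eden, after.eden);
        std::printf("Survivor regions: %zu->%zu\n", _before.survivor, after.survivor);
        std::printf("Old regions: %zu->%zu\n", _before.old_gen, after.old_gen);
      }
    };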
hotspot/src/share/vm/gc/g1/g1HeapTransition.hpp (new file, 52 lines)
@ -0,0 +1,52 @@
|
||||
/*
|
||||
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_G1_G1HEAPTRANSITION_HPP
|
||||
#define SHARE_VM_GC_G1_G1HEAPTRANSITION_HPP
|
||||
|
||||
#include "gc/shared/plab.hpp"
|
||||
|
||||
class G1CollectedHeap;
|
||||
|
||||
class G1HeapTransition {
|
||||
struct Data {
|
||||
size_t _eden_length;
|
||||
size_t _survivor_length;
|
||||
size_t _old_length;
|
||||
size_t _humongous_length;
|
||||
size_t _metaspace_used_bytes;
|
||||
|
||||
Data(G1CollectedHeap* g1_heap);
|
||||
};
|
||||
|
||||
G1CollectedHeap* _g1_heap;
|
||||
Data _before;
|
||||
|
||||
public:
|
||||
G1HeapTransition(G1CollectedHeap* g1_heap);
|
||||
|
||||
void print();
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_G1_G1HEAPTRANSITION_HPP
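As an aside (editorial, not part of the changeset): given the format strings in G1HeapTransition::print() above, the transition prints region counts as before->after, with the post-GC target in parentheses for the young generations. With purely hypothetical numbers, a young GC would log something like:

Eden regions: 12->0(14)
Survivor regions: 1->2(2)
Old regions: 5->6
Humongous regions: 0->0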
731
hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp
Normal file
@ -0,0 +1,731 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "gc/g1/concurrentMarkThread.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/youngList.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"

class VerifyRootsClosure: public OopClosure {
private:
G1CollectedHeap* _g1h;
VerifyOption _vo;
bool _failures;
public:
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
VerifyRootsClosure(VerifyOption vo) :
_g1h(G1CollectedHeap::heap()),
_vo(vo),
_failures(false) { }

bool failures() { return _failures; }

template <class T> void do_oop_nv(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (_g1h->is_obj_dead_cond(obj, _vo)) {
LogHandle(gc, verify) log;
log.info("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
if (_vo == VerifyOption_G1UseMarkWord) {
log.info(" Mark word: " PTR_FORMAT, p2i(obj->mark()));
}
ResourceMark rm;
obj->print_on(log.info_stream());
_failures = true;
}
}
}

void do_oop(oop* p) { do_oop_nv(p); }
void do_oop(narrowOop* p) { do_oop_nv(p); }
};

class G1VerifyCodeRootOopClosure: public OopClosure {
G1CollectedHeap* _g1h;
OopClosure* _root_cl;
nmethod* _nm;
VerifyOption _vo;
bool _failures;

template <class T> void do_oop_work(T* p) {
// First verify that this root is live
_root_cl->do_oop(p);

if (!G1VerifyHeapRegionCodeRoots) {
// We're not verifying the code roots attached to heap region.
return;
}

// Don't check the code roots during marking verification in a full GC
if (_vo == VerifyOption_G1UseMarkWord) {
return;
}

// Now verify that the current nmethod (which contains p) is
// in the code root list of the heap region containing the
// object referenced by p.

T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

// Now fetch the region containing the object
HeapRegion* hr = _g1h->heap_region_containing(obj);
HeapRegionRemSet* hrrs = hr->rem_set();
// Verify that the strong code root list for this region
// contains the nmethod
if (!hrrs->strong_code_roots_list_contains(_nm)) {
log_info(gc, verify)("Code root location " PTR_FORMAT " "
"from nmethod " PTR_FORMAT " not in strong "
"code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
_failures = true;
}
}
}

public:
G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
_g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}

void do_oop(oop* p) { do_oop_work(p); }
void do_oop(narrowOop* p) { do_oop_work(p); }

void set_nmethod(nmethod* nm) { _nm = nm; }
bool failures() { return _failures; }
};

class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
G1VerifyCodeRootOopClosure* _oop_cl;

public:
G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
_oop_cl(oop_cl) {}

void do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
_oop_cl->set_nmethod(nm);
nm->oops_do(_oop_cl);
}
}
};

class YoungRefCounterClosure : public OopClosure {
G1CollectedHeap* _g1h;
int _count;
public:
YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
void do_oop(oop* p) { if (_g1h->is_in_young(*p)) { _count++; } }
void do_oop(narrowOop* p) { ShouldNotReachHere(); }

int count() { return _count; }
void reset_count() { _count = 0; };
};

class VerifyKlassClosure: public KlassClosure {
YoungRefCounterClosure _young_ref_counter_closure;
OopClosure *_oop_closure;
public:
VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
void do_klass(Klass* k) {
k->oops_do(_oop_closure);

_young_ref_counter_closure.reset_count();
k->oops_do(&_young_ref_counter_closure);
if (_young_ref_counter_closure.count() > 0) {
guarantee(k->has_modified_oops(), "Klass " PTR_FORMAT ", has young refs but is not dirty.", p2i(k));
}
}
};

class VerifyLivenessOopClosure: public OopClosure {
G1CollectedHeap* _g1h;
VerifyOption _vo;
public:
VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
_g1h(g1h), _vo(vo)
{ }
void do_oop(narrowOop *p) { do_oop_work(p); }
void do_oop( oop *p) { do_oop_work(p); }

template <class T> void do_oop_work(T *p) {
oop obj = oopDesc::load_decode_heap_oop(p);
guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
"Dead object referenced by a not dead object");
}
};

class VerifyObjsInRegionClosure: public ObjectClosure {
private:
G1CollectedHeap* _g1h;
size_t _live_bytes;
HeapRegion *_hr;
VerifyOption _vo;
public:
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
: _live_bytes(0), _hr(hr), _vo(vo) {
_g1h = G1CollectedHeap::heap();
}
void do_object(oop o) {
VerifyLivenessOopClosure isLive(_g1h, _vo);
assert(o != NULL, "Huh?");
if (!_g1h->is_obj_dead_cond(o, _vo)) {
// If the object is alive according to the mark word,
// then verify that the marking information agrees.
// Note we can't verify the contra-positive of the
// above: if the object is dead (according to the mark
// word), it may not be marked, or may have been marked
// but has since became dead, or may have been allocated
// since the last marking.
if (_vo == VerifyOption_G1UseMarkWord) {
guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
}

o->oop_iterate_no_header(&isLive);
if (!_hr->obj_allocated_since_prev_marking(o)) {
size_t obj_size = o->size(); // Make sure we don't overflow
_live_bytes += (obj_size * HeapWordSize);
}
}
}
size_t live_bytes() { return _live_bytes; }
};

class VerifyArchiveOopClosure: public OopClosure {
public:
VerifyArchiveOopClosure(HeapRegion *hr) { }
void do_oop(narrowOop *p) { do_oop_work(p); }
void do_oop( oop *p) { do_oop_work(p); }

template <class T> void do_oop_work(T *p) {
oop obj = oopDesc::load_decode_heap_oop(p);
guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
"Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
p2i(p), p2i(obj));
}
};

class VerifyArchiveRegionClosure: public ObjectClosure {
public:
VerifyArchiveRegionClosure(HeapRegion *hr) { }
// Verify that all object pointers are to archive regions.
void do_object(oop o) {
VerifyArchiveOopClosure checkOop(NULL);
assert(o != NULL, "Should not be here for NULL oops");
o->oop_iterate_no_header(&checkOop);
}
};

class VerifyRegionClosure: public HeapRegionClosure {
private:
bool _par;
VerifyOption _vo;
bool _failures;
public:
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
VerifyRegionClosure(bool par, VerifyOption vo)
: _par(par),
_vo(vo),
_failures(false) {}

bool failures() {
return _failures;
}

bool doHeapRegion(HeapRegion* r) {
// For archive regions, verify there are no heap pointers to
// non-pinned regions. For all others, verify liveness info.
if (r->is_archive()) {
VerifyArchiveRegionClosure verify_oop_pointers(r);
r->object_iterate(&verify_oop_pointers);
return true;
}
if (!r->is_continues_humongous()) {
bool failures = false;
r->verify(_vo, &failures);
if (failures) {
_failures = true;
} else if (!r->is_starts_humongous()) {
VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
r->object_iterate(&not_dead_yet_cl);
if (_vo != VerifyOption_G1UseNextMarking) {
if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
log_info(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
_failures = true;
}
} else {
// When vo == UseNextMarking we cannot currently do a sanity
// check on the live bytes as the calculation has not been
// finalized yet.
}
}
}
return false; // stop the region iteration if we hit a failure
}
};

// This is the task used for parallel verification of the heap regions

class G1ParVerifyTask: public AbstractGangTask {
private:
G1CollectedHeap* _g1h;
VerifyOption _vo;
bool _failures;
HeapRegionClaimer _hrclaimer;

public:
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
AbstractGangTask("Parallel verify task"),
_g1h(g1h),
_vo(vo),
_failures(false),
_hrclaimer(g1h->workers()->active_workers()) {}

bool failures() {
return _failures;
}

void work(uint worker_id) {
HandleMark hm;
VerifyRegionClosure blk(true, _vo);
_g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
if (blk.failures()) {
_failures = true;
}
}
};

void G1HeapVerifier::verify(VerifyOption vo) {
if (!SafepointSynchronize::is_at_safepoint()) {
log_info(gc, verify)("Skipping verification. Not at safepoint.");
}

assert(Thread::current()->is_VM_thread(),
"Expected to be executed serially by the VM thread at this point");

log_debug(gc, verify)("Roots");
VerifyRootsClosure rootsCl(vo);
VerifyKlassClosure klassCl(_g1h, &rootsCl);
CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);

// We apply the relevant closures to all the oops in the
// system dictionary, class loader data graph, the string table
// and the nmethods in the code cache.
G1VerifyCodeRootOopClosure codeRootsCl(_g1h, &rootsCl, vo);
G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);

{
G1RootProcessor root_processor(_g1h, 1);
root_processor.process_all_roots(&rootsCl,
&cldCl,
&blobsCl);
}

bool failures = rootsCl.failures() || codeRootsCl.failures();

if (vo != VerifyOption_G1UseMarkWord) {
// If we're verifying during a full GC then the region sets
// will have been torn down at the start of the GC. Therefore
// verifying the region sets will fail. So we only verify
// the region sets when not in a full GC.
log_debug(gc, verify)("HeapRegionSets");
verify_region_sets();
}

log_debug(gc, verify)("HeapRegions");
if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {

G1ParVerifyTask task(_g1h, vo);
_g1h->workers()->run_task(&task);
if (task.failures()) {
failures = true;
}

} else {
VerifyRegionClosure blk(false, vo);
_g1h->heap_region_iterate(&blk);
if (blk.failures()) {
failures = true;
}
}

if (G1StringDedup::is_enabled()) {
log_debug(gc, verify)("StrDedup");
G1StringDedup::verify();
}

if (failures) {
log_info(gc, verify)("Heap after failed verification:");
// It helps to have the per-region information in the output to
// help us track down what went wrong. This is why we call
// print_extended_on() instead of print_on().
LogHandle(gc, verify) log;
ResourceMark rm;
_g1h->print_extended_on(log.info_stream());
}
guarantee(!failures, "there should not have been any failures");
}

// Heap region set verification

class VerifyRegionListsClosure : public HeapRegionClosure {
private:
HeapRegionSet* _old_set;
HeapRegionSet* _humongous_set;
HeapRegionManager* _hrm;

public:
uint _old_count;
uint _humongous_count;
uint _free_count;

VerifyRegionListsClosure(HeapRegionSet* old_set,
HeapRegionSet* humongous_set,
HeapRegionManager* hrm) :
_old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
_old_count(), _humongous_count(), _free_count(){ }

bool doHeapRegion(HeapRegion* hr) {
if (hr->is_young()) {
// TODO
} else if (hr->is_humongous()) {
assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
_humongous_count++;
} else if (hr->is_empty()) {
assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
_free_count++;
} else if (hr->is_old()) {
assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
_old_count++;
} else {
// There are no other valid region types. Check for one invalid
// one we can identify: pinned without old or humongous set.
assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
ShouldNotReachHere();
}
return false;
}

void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
}
};

void G1HeapVerifier::verify_region_sets() {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

// First, check the explicit lists.
_g1h->_hrm.verify();
{
// Given that a concurrent operation might be adding regions to
// the secondary free list we have to take the lock before
// verifying it.
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
_g1h->_secondary_free_list.verify_list();
}

// If a concurrent region freeing operation is in progress it will
// be difficult to correctly attributed any free regions we come
// across to the correct free list given that they might belong to
// one of several (free_list, secondary_free_list, any local lists,
// etc.). So, if that's the case we will skip the rest of the
// verification operation. Alternatively, waiting for the concurrent
// operation to complete will have a non-trivial effect on the GC's
// operation (no concurrent operation will last longer than the
// interval between two calls to verification) and it might hide
// any issues that we would like to catch during testing.
if (_g1h->free_regions_coming()) {
return;
}

// Make sure we append the secondary_free_list on the free_list so
// that all free regions we will come across can be safely
// attributed to the free_list.
_g1h->append_secondary_free_list_if_not_empty_with_lock();

// Finally, make sure that the region accounting in the lists is
// consistent with what we see in the heap.

VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
_g1h->heap_region_iterate(&cl);
cl.verify_counts(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
}

void G1HeapVerifier::prepare_for_verify() {
if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
_g1h->ensure_parsability(false);
}
_g1h->g1_rem_set()->prepare_for_verify();
}

double G1HeapVerifier::verify(bool guard, const char* msg) {
double verify_time_ms = 0.0;

if (guard && _g1h->total_collections() >= VerifyGCStartAt) {
double verify_start = os::elapsedTime();
HandleMark hm; // Discard invalid handles created during verification
prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking, msg);
verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
}

return verify_time_ms;
}

void G1HeapVerifier::verify_before_gc() {
double verify_time_ms = verify(VerifyBeforeGC, "Before GC");
_g1h->g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
}

void G1HeapVerifier::verify_after_gc() {
double verify_time_ms = verify(VerifyAfterGC, "After GC");
_g1h->g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
}


#ifndef PRODUCT
class G1VerifyCardTableCleanup: public HeapRegionClosure {
G1HeapVerifier* _verifier;
G1SATBCardTableModRefBS* _ct_bs;
public:
G1VerifyCardTableCleanup(G1HeapVerifier* verifier, G1SATBCardTableModRefBS* ct_bs)
: _verifier(verifier), _ct_bs(ct_bs) { }
virtual bool doHeapRegion(HeapRegion* r) {
if (r->is_survivor()) {
_verifier->verify_dirty_region(r);
} else {
_verifier->verify_not_dirty_region(r);
}
return false;
}
};

void G1HeapVerifier::verify_card_table_cleanup() {
if (G1VerifyCTCleanup || VerifyAfterGC) {
G1VerifyCardTableCleanup cleanup_verifier(this, _g1h->g1_barrier_set());
_g1h->heap_region_iterate(&cleanup_verifier);
}
}

void G1HeapVerifier::verify_not_dirty_region(HeapRegion* hr) {
// All of the region should be clean.
G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
MemRegion mr(hr->bottom(), hr->end());
ct_bs->verify_not_dirty_region(mr);
}

void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {
// We cannot guarantee that [bottom(),end()] is dirty. Threads
// dirty allocated blocks as they allocate them. The thread that
// retires each region and replaces it with a new one will do a
// maximal allocation to fill in [pre_dummy_top(),end()] but will
// not dirty that area (one less thing to have to do while holding
// a lock). So we can only verify that [bottom(),pre_dummy_top()]
// is dirty.
G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
MemRegion mr(hr->bottom(), hr->pre_dummy_top());
if (hr->is_young()) {
ct_bs->verify_g1_young_region(mr);
} else {
ct_bs->verify_dirty_region(mr);
}
}

void G1HeapVerifier::verify_dirty_young_list(HeapRegion* head) {
G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
verify_dirty_region(hr);
}
}

void G1HeapVerifier::verify_dirty_young_regions() {
verify_dirty_young_list(_g1h->young_list()->first_region());
}

bool G1HeapVerifier::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
HeapWord* tams, HeapWord* end) {
guarantee(tams <= end,
"tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
if (result < end) {
log_info(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
log_info(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
return false;
}
return true;
}

bool G1HeapVerifier::verify_bitmaps(const char* caller, HeapRegion* hr) {
CMBitMapRO* prev_bitmap = _g1h->concurrent_mark()->prevMarkBitMap();
CMBitMapRO* next_bitmap = (CMBitMapRO*) _g1h->concurrent_mark()->nextMarkBitMap();

HeapWord* bottom = hr->bottom();
HeapWord* ptams = hr->prev_top_at_mark_start();
HeapWord* ntams = hr->next_top_at_mark_start();
HeapWord* end = hr->end();

bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);

bool res_n = true;
// We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
// we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
// if we happen to be in that state.
if (_g1h->collector_state()->mark_in_progress() || !_g1h->_cmThread->in_progress()) {
res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
}
if (!res_p || !res_n) {
log_info(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
log_info(gc, verify)("#### Caller: %s", caller);
return false;
}
return true;
}

void G1HeapVerifier::check_bitmaps(const char* caller, HeapRegion* hr) {
if (!G1VerifyBitmaps) return;

guarantee(verify_bitmaps(caller, hr), "bitmap verification");
}

class G1VerifyBitmapClosure : public HeapRegionClosure {
private:
const char* _caller;
G1HeapVerifier* _verifier;
bool _failures;

public:
G1VerifyBitmapClosure(const char* caller, G1HeapVerifier* verifier) :
_caller(caller), _verifier(verifier), _failures(false) { }

bool failures() { return _failures; }

virtual bool doHeapRegion(HeapRegion* hr) {
bool result = _verifier->verify_bitmaps(_caller, hr);
if (!result) {
_failures = true;
}
return false;
}
};

void G1HeapVerifier::check_bitmaps(const char* caller) {
if (!G1VerifyBitmaps) return;

G1VerifyBitmapClosure cl(caller, this);
_g1h->heap_region_iterate(&cl);
guarantee(!cl.failures(), "bitmap verification");
}

class G1CheckCSetFastTableClosure : public HeapRegionClosure {
private:
bool _failures;
public:
G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }

virtual bool doHeapRegion(HeapRegion* hr) {
uint i = hr->hrm_index();
InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
if (hr->is_humongous()) {
if (hr->in_collection_set()) {
log_info(gc, verify)("## humongous region %u in CSet", i);
_failures = true;
return true;
}
if (cset_state.is_in_cset()) {
log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
_failures = true;
return true;
}
if (hr->is_continues_humongous() && cset_state.is_humongous()) {
log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
_failures = true;
return true;
}
} else {
if (cset_state.is_humongous()) {
log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
_failures = true;
return true;
}
if (hr->in_collection_set() != cset_state.is_in_cset()) {
log_info(gc, verify)("## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
hr->in_collection_set(), cset_state.value(), i);
_failures = true;
return true;
}
if (cset_state.is_in_cset()) {
if (hr->is_young() != (cset_state.is_young())) {
log_info(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
hr->is_young(), cset_state.value(), i);
_failures = true;
return true;
}
if (hr->is_old() != (cset_state.is_old())) {
log_info(gc, verify)("## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
hr->is_old(), cset_state.value(), i);
_failures = true;
return true;
}
}
}
return false;
}

bool failures() const { return _failures; }
};

bool G1HeapVerifier::check_cset_fast_test() {
G1CheckCSetFastTableClosure cl;
_g1h->_hrm.iterate(&cl);
return !cl.failures();
}
#endif // PRODUCT
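Usage note (editorial, not from the patch): verify_before_gc()/verify_after_gc() above are driven by the existing VerifyBeforeGC and VerifyAfterGC flags and gated by the VerifyGCStartAt threshold, and the checks log under the gc+verify tags. A debug session would typically run along these lines (whether -XX:+UnlockDiagnosticVMOptions is required depends on the build flavor):

java -XX:+UnlockDiagnosticVMOptions -XX:+VerifyBeforeGC -XX:+VerifyAfterGC -XX:VerifyGCStartAt=0 -Xlog:gc+verify=debug ...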
115
hotspot/src/share/vm/gc/g1/g1HeapVerifier.hpp
Normal file
@ -0,0 +1,115 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_VM_GC_G1_G1HEAPVERIFIER_HPP
#define SHARE_VM_GC_G1_G1HEAPVERIFIER_HPP

#include "gc/g1/heapRegionSet.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

class G1CollectedHeap;

class G1HeapVerifier : public CHeapObj<mtGC> {
private:
G1CollectedHeap* _g1h;

// verify_region_sets() performs verification over the region
// lists. It will be compiled in the product code to be used when
// necessary (i.e., during heap verification).
void verify_region_sets();

public:

G1HeapVerifier(G1CollectedHeap* heap) : _g1h(heap) { }

// Perform verification.

// vo == UsePrevMarking -> use "prev" marking information,
// vo == UseNextMarking -> use "next" marking information
// vo == UseMarkWord -> use the mark word in the object header
//
// NOTE: Only the "prev" marking information is guaranteed to be
// consistent most of the time, so most calls to this should use
// vo == UsePrevMarking.
// Currently, there is only one case where this is called with
// vo == UseNextMarking, which is to verify the "next" marking
// information at the end of remark.
// Currently there is only one place where this is called with
// vo == UseMarkWord, which is to verify the marking during a
// full GC.
void verify(VerifyOption vo);

// verify_region_sets_optional() is planted in the code for
// list verification in non-product builds (and it can be enabled in
// product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
#if HEAP_REGION_SET_FORCE_VERIFY
void verify_region_sets_optional() {
verify_region_sets();
}
#else // HEAP_REGION_SET_FORCE_VERIFY
void verify_region_sets_optional() { }
#endif // HEAP_REGION_SET_FORCE_VERIFY

void prepare_for_verify();
double verify(bool guard, const char* msg);
void verify_before_gc();
void verify_after_gc();

#ifndef PRODUCT
// Make sure that the given bitmap has no marked objects in the
// range [from,limit). If it does, print an error message and return
// false. Otherwise, just return true. bitmap_name should be "prev"
// or "next".
bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
HeapWord* from, HeapWord* limit);

// Verify that the prev / next bitmap range [tams,end) for the given
// region has no marks. Return true if all is well, false if errors
// are detected.
bool verify_bitmaps(const char* caller, HeapRegion* hr);
#endif // PRODUCT

// If G1VerifyBitmaps is set, verify that the marking bitmaps for
// the given region do not have any spurious marks. If errors are
// detected, print appropriate error messages and crash.
void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN;

// If G1VerifyBitmaps is set, verify that the marking bitmaps do not
// have any spurious marks. If errors are detected, print
// appropriate error messages and crash.
void check_bitmaps(const char* caller) PRODUCT_RETURN;

// Do sanity check on the contents of the in-cset fast test table.
bool check_cset_fast_test() PRODUCT_RETURN_( return true; );

void verify_card_table_cleanup() PRODUCT_RETURN;

void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
void verify_dirty_young_regions() PRODUCT_RETURN;
};

#endif // SHARE_VM_GC_G1_G1HEAPVERIFIER_HPP
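For readers outside HotSpot: the PRODUCT_RETURN / PRODUCT_RETURN_ suffixes above make each declaration double as an empty definition in product builds. A simplified sketch of the macros (see utilities/macros.hpp for the real definitions):

#ifdef PRODUCT
#define PRODUCT_RETURN          {}
#define PRODUCT_RETURN_(code)   { code }
#else
#define PRODUCT_RETURN          /* next token must be ; */
#define PRODUCT_RETURN_(code)   /* next token must be ; */
#endif

So check_cset_fast_test() compiles to { return true; } in product builds, and to a plain declaration (with the definition in the #ifndef PRODUCT section of the .cpp above) otherwise.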
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -206,9 +206,9 @@ class G1Mux2Closure : public OopClosure {
OopClosure* _c2;
public:
G1Mux2Closure(OopClosure *c1, OopClosure *c2);
template <class T> void do_oop_work(T* p);
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
template <class T> inline void do_oop_work(T* p);
virtual inline void do_oop(oop* p);
virtual inline void do_oop(narrowOop* p);
};

// A closure that returns true if it is actually applied
@ -219,9 +219,9 @@ class G1TriggerClosure : public OopClosure {
public:
G1TriggerClosure();
bool triggered() const { return _triggered; }
template <class T> void do_oop_work(T* p);
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
template <class T> inline void do_oop_work(T* p);
virtual inline void do_oop(oop* p);
virtual inline void do_oop(narrowOop* p);
};

// A closure which uses a triggering closure to determine
@ -232,9 +232,9 @@ class G1InvokeIfNotTriggeredClosure: public OopClosure {
OopClosure* _oop_cl;
public:
G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t, OopClosure* oc);
template <class T> void do_oop_work(T* p);
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
template <class T> inline void do_oop_work(T* p);
virtual inline void do_oop(oop* p);
virtual inline void do_oop(narrowOop* p);
};

class G1UpdateRSOrPushRefOopClosure: public OopClosure {
@ -263,9 +263,9 @@ public:
return result;
}

template <class T> void do_oop_work(T* p);
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop(oop* p) { do_oop_work(p); }
template <class T> inline void do_oop_work(T* p);
virtual inline void do_oop(narrowOop* p);
virtual inline void do_oop(oop* p);
};

#endif // SHARE_VM_GC_G1_G1OOPCLOSURES_HPP
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -141,12 +141,16 @@ inline void G1Mux2Closure::do_oop_work(T* p) {
_c1->do_oop(p);
_c2->do_oop(p);
}
void G1Mux2Closure::do_oop(oop* p) { do_oop_work(p); }
void G1Mux2Closure::do_oop(narrowOop* p) { do_oop_work(p); }

template <class T>
inline void G1TriggerClosure::do_oop_work(T* p) {
// Record that this closure was actually applied (triggered).
_triggered = true;
}
void G1TriggerClosure::do_oop(oop* p) { do_oop_work(p); }
void G1TriggerClosure::do_oop(narrowOop* p) { do_oop_work(p); }

template <class T>
inline void G1InvokeIfNotTriggeredClosure::do_oop_work(T* p) {
@ -154,6 +158,8 @@ inline void G1InvokeIfNotTriggeredClosure::do_oop_work(T* p) {
_oop_cl->do_oop(p);
}
}
void G1InvokeIfNotTriggeredClosure::do_oop(oop* p) { do_oop_work(p); }
void G1InvokeIfNotTriggeredClosure::do_oop(narrowOop* p) { do_oop_work(p); }

template <class T>
inline void G1UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
@ -224,6 +230,8 @@ inline void G1UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
to->rem_set()->add_reference(p, _worker_i);
}
}
void G1UpdateRSOrPushRefOopClosure::do_oop(oop* p) { do_oop_work(p); }
void G1UpdateRSOrPushRefOopClosure::do_oop(narrowOop* p) { do_oop_work(p); }

template <class T>
void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
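The g1OopClosures hunks above apply a recurring HotSpot refactoring: method bodies that lived in the .hpp become inline declarations, and the definitions move to the matching .inline.hpp so that only translation units which include the inline header instantiate them. A minimal sketch of the shape, with a hypothetical closure name (the patch itself omits the redundant inline keyword on the out-of-line definitions):

// myClosure.hpp
class MyClosure : public OopClosure {
public:
  template <class T> inline void do_oop_work(T* p);
  virtual inline void do_oop(oop* p);
  virtual inline void do_oop(narrowOop* p);
};

// myClosure.inline.hpp
template <class T> inline void MyClosure::do_oop_work(T* p) { /* ... */ }
inline void MyClosure::do_oop(oop* p) { do_oop_work(p); }
inline void MyClosure::do_oop(narrowOop* p) { do_oop_work(p); }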
@ -34,6 +34,7 @@
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,7 +24,7 @@

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/satbMarkQueue.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,20 +58,11 @@ public:

// We export this to make it available in cases where the static
// type of the barrier set is known. Note that it is non-virtual.
template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
T heap_oop = oopDesc::load_heap_oop(field);
if (!oopDesc::is_null(heap_oop)) {
enqueue(oopDesc::decode_heap_oop(heap_oop));
}
}
template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal);

// These are the more general virtual versions.
virtual void write_ref_field_pre_work(oop* field, oop new_val) {
inline_write_ref_field_pre(field, new_val);
}
virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {
inline_write_ref_field_pre(field, new_val);
}
inline virtual void write_ref_field_pre_work(oop* field, oop new_val);
inline virtual void write_ref_field_pre_work(narrowOop* field, oop new_val);
virtual void write_ref_field_pre_work(void* field, oop new_val) {
guarantee(false, "Not needed");
}
@ -98,15 +89,7 @@ public:
return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
}

void set_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
if (val == clean_card_val()) {
val = (jbyte)claimed_card_val();
} else {
val |= (jbyte)claimed_card_val();
}
_byte_map[card_index] = val;
}
inline void set_card_claimed(size_t card_index);

void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
void g1_mark_as_young(const MemRegion& mr);
@ -0,0 +1,58 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
#define SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP

#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "oops/oop.inline.hpp"

// We export this to make it available in cases where the static
// type of the barrier set is known. Note that it is non-virtual.
template <class T> void G1SATBCardTableModRefBS::inline_write_ref_field_pre(T* field, oop newVal) {
T heap_oop = oopDesc::load_heap_oop(field);
if (!oopDesc::is_null(heap_oop)) {
enqueue(oopDesc::decode_heap_oop(heap_oop));
}
}

// These are the more general virtual versions.
void G1SATBCardTableModRefBS::write_ref_field_pre_work(oop* field, oop new_val) {
inline_write_ref_field_pre(field, new_val);
}
void G1SATBCardTableModRefBS::write_ref_field_pre_work(narrowOop* field, oop new_val) {
inline_write_ref_field_pre(field, new_val);
}

void G1SATBCardTableModRefBS::set_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
if (val == clean_card_val()) {
val = (jbyte)claimed_card_val();
} else {
val |= (jbyte)claimed_card_val();
}
_byte_map[card_index] = val;
}

#endif // SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
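Context note (editorial): inline_write_ref_field_pre() above is G1's SATB pre-write barrier. Before a reference field is overwritten, the old value is enqueued so concurrent marking still traces the snapshot-at-the-beginning. As a hedged pseudocode sketch, a reference store with the barrier applied looks roughly like:

// sketch, not patch code
oop pre_val = *field;        // value about to be overwritten
if (pre_val != NULL) {
  enqueue(pre_val);          // what inline_write_ref_field_pre() does
}
*field = new_val;            // the actual store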
@ -143,6 +143,7 @@ void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_hea
// The cast to int is safe, given that we've bounded region_size by
// MIN_REGION_SIZE and MAX_REGION_SIZE.
GrainBytes = region_size;
log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", GrainBytes / M);

guarantee(GrainWords == 0, "we should only set it once");
GrainWords = GrainBytes >> LogHeapWordSize;
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -110,7 +110,9 @@ protected:

public:

HeapRegion* hr() const { return _hr; }
HeapRegion* hr() const {
return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
}

jint occupied() const {
// Overkill, but if we ever need it...
@ -123,10 +125,12 @@ public:
set_next(NULL);
set_prev(NULL);
}
_hr = hr;
_collision_list_next = NULL;
_occupied = 0;
_bm.clear();
// Make sure that the bitmap clearing above has been finished before publishing
// this PRT to concurrent threads.
OrderAccess::release_store_ptr(&_hr, hr);
}

void add_reference(OopOrNarrowOopStar from) {
@ -357,7 +361,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
assert(contains_reference(from), "We just added it!");
assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from));
return;
}

@ -367,7 +371,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {

// If the region is already coarsened, return.
if (_coarse_map.at(from_hrm_ind)) {
assert(contains_reference(from), "We just added it!");
assert(contains_reference(from), "We just found " PTR_FORMAT " in the Coarse table", p2i(from));
return;
}

@ -388,7 +392,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
"Must be in range.");
if (G1HRRSUseSparseTable &&
_sparse_table.add_card(from_hrm_ind, card_index)) {
assert(contains_reference_locked(from), "We just added it!");
assert(contains_reference_locked(from), "We just added " PTR_FORMAT " to the Sparse table", p2i(from));
return;
}

@ -438,7 +442,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
assert(prt != NULL, "Inv");

prt->add_reference(from);
assert(contains_reference(from), "We just added it!");
assert(contains_reference(from), "We just added " PTR_FORMAT " to the PRT", p2i(from));
}

PerRegionTable*
@ -785,6 +789,9 @@ void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,

void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
assert(nm != NULL, "sanity");
assert((!CodeCache_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint()),
"should call add_strong_code_root_locked instead. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s",
BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()));
// Optimistic unlocked contains-check
if (!_code_roots.contains(nm)) {
MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
@ -794,6 +801,12 @@ void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {

void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
assert(nm != NULL, "sanity");
assert((CodeCache_lock->owned_by_self() ||
(SafepointSynchronize::is_at_safepoint() &&
(_m.owned_by_self() || Thread::current()->is_VM_thread()))),
"not safely locked. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s, _m.owned_by_self(): %s, Thread::current()->is_VM_thread(): %s",
BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),
BOOL_TO_STR(_m.owned_by_self()), BOOL_TO_STR(Thread::current()->is_VM_thread()));
_code_roots.add(nm);
}
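The hr()/init change above is a standard release-acquire publication: the PRT's bitmap is fully cleared before the table is published through _hr, and readers load _hr with acquire semantics so they cannot observe a partially initialized bitmap. The same pattern in portable C++11 atomics (a sketch with hypothetical names, not HotSpot code, which uses OrderAccess instead):

#include <atomic>

struct Payload { int data[64]; };           // must be fully initialized...

std::atomic<Payload*> g_published{nullptr};

void publish(Payload* p) {
  // ...before the pointer becomes visible to other threads.
  g_published.store(p, std::memory_order_release);
}

Payload* reader() {
  // Pairs with the release store above; guarantees initialized data.
  return g_published.load(std::memory_order_acquire);
}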
@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
#include "gc/parallel/asPSYoungGen.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/gcUtil.hpp"
#include "gc/shared/spaceDecorator.hpp"
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,7 +23,7 @@
*/

#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
@ -123,7 +123,6 @@ void ObjectStartArray::reset() {
memset(_blocks_region.start(), clean_block, _blocks_region.byte_size());
}


bool ObjectStartArray::object_starts_in_range(HeapWord* start_addr,
HeapWord* end_addr) const {
assert(start_addr <= end_addr,
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -139,23 +139,7 @@ class ObjectStartArray : public CHeapObj<mtGC> {
// a given block. The blocks contain the offset of the last
// object in that block. Scroll backwards by one, and the first
// object hit should be at the beginning of the block
HeapWord* object_start(HeapWord* addr) const {
assert_covered_region_contains(addr);
jbyte* block = block_for_addr(addr);
HeapWord* scroll_forward = offset_addr_for_block(block--);
while (scroll_forward > addr) {
scroll_forward = offset_addr_for_block(block--);
}

HeapWord* next = scroll_forward;
while (next <= addr) {
scroll_forward = next;
next += oop(next)->size();
}
assert(scroll_forward <= addr, "wrong order for current and arg");
assert(addr <= next, "wrong order for arg and next");
return scroll_forward;
}
inline HeapWord* object_start(HeapWord* addr) const;

bool is_block_allocated(HeapWord* addr) {
assert_covered_region_contains(addr);
@ -165,7 +149,6 @@ class ObjectStartArray : public CHeapObj<mtGC> {

return true;
}
#undef assert_covered_region_contains

// Return true if an object starts in the range of heap addresses.
// If an object starts at an address corresponding to
53
hotspot/src/share/vm/gc/parallel/objectStartArray.inline.hpp
Normal file
@ -0,0 +1,53 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_VM_GC_PARALLEL_OBJECTSTARTARRAY_INLINE_HPP
#define SHARE_VM_GC_PARALLEL_OBJECTSTARTARRAY_INLINE_HPP

#include "gc/parallel/objectStartArray.hpp"

// Optimized for finding the first object that crosses into
// a given block. The blocks contain the offset of the last
// object in that block. Scroll backwards by one, and the first
// object hit should be at the beginning of the block
HeapWord* ObjectStartArray::object_start(HeapWord* addr) const {
assert_covered_region_contains(addr);
jbyte* block = block_for_addr(addr);
HeapWord* scroll_forward = offset_addr_for_block(block--);
while (scroll_forward > addr) {
scroll_forward = offset_addr_for_block(block--);
}

HeapWord* next = scroll_forward;
while (next <= addr) {
scroll_forward = next;
next += oop(next)->size();
}
assert(scroll_forward <= addr, "wrong order for current and arg");
assert(addr <= next, "wrong order for arg and next");
return scroll_forward;
}


#endif // SHARE_VM_GC_PARALLEL_OBJECTSTARTARRAY_INLINE_HPP
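object_start() above is the usual block-offset-table search: back up through the per-block entries until one records a start at or below addr, then walk forward object by object until passing addr. A self-contained sketch of the same two-phase search over flat arrays (everything here is a hypothetical simplification; the real table stores encoded per-block offsets, not absolute indices, and asserts cover the edge cases):

#include <cstddef>

// starts[b]: index of the last object start in block b (assume every
// block at or below the target has one). sizes[i]: word size of the
// object starting at index i.
size_t object_start(const size_t* starts, const size_t* sizes,
                    size_t words_per_block, size_t addr) {
  size_t b = addr / words_per_block;
  size_t cur = starts[b];
  while (cur > addr) {             // phase 1: scroll backwards by block
    cur = starts[--b];
  }
  size_t next = cur;
  while (next <= addr) {           // phase 2: walk object sizes forward
    cur = next;
    next += sizes[cur];
  }
  return cur;                      // the object covering addr starts here
}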

hotspot/src/share/vm/gc/parallel/parMarkBitMap.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,8 @@

 #include "precompiled.hpp"
 #include "gc/parallel/parMarkBitMap.hpp"
-#include "gc/parallel/psParallelCompact.hpp"
+#include "gc/parallel/psCompactionManager.inline.hpp"
+#include "gc/parallel/psParallelCompact.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/os.hpp"
@@ -96,7 +97,20 @@ ParMarkBitMap::mark_obj(HeapWord* addr, size_t size)
   return false;
 }

-size_t ParMarkBitMap::live_words_in_range(HeapWord* beg_addr, oop end_obj) const
+inline bool
+ParMarkBitMap::is_live_words_in_range_in_cache(ParCompactionManager* cm, HeapWord* beg_addr) const {
+  return cm->last_query_begin() == beg_addr;
+}
+
+inline void
+ParMarkBitMap::update_live_words_in_range_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_obj, size_t result) const {
+  cm->set_last_query_begin(beg_addr);
+  cm->set_last_query_object(end_obj);
+  cm->set_last_query_return(result);
+}
+
+size_t
+ParMarkBitMap::live_words_in_range_helper(HeapWord* beg_addr, oop end_obj) const
 {
   assert(beg_addr <= (HeapWord*)end_obj, "bad range");
   assert(is_marked(end_obj), "end_obj must be live");
@@ -117,6 +131,42 @@ size_t ParMarkBitMap::live_words_in_range(HeapWord* beg_addr, oop end_obj) const
   return bits_to_words(live_bits);
 }

+size_t
+ParMarkBitMap::live_words_in_range_use_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_obj) const
+{
+  HeapWord* last_beg = cm->last_query_begin();
+  oop last_obj = cm->last_query_object();
+  size_t last_ret = cm->last_query_return();
+  if (end_obj > last_obj) {
+    last_ret = last_ret + live_words_in_range_helper((HeapWord*)last_obj, end_obj);
+    last_obj = end_obj;
+  } else if (end_obj < last_obj) {
+    // The cached value is for an object that is to the left (lower address) of the current
+    // end_obj. Calculate back from that cached value.
+    if (pointer_delta((HeapWord*)end_obj, (HeapWord*)beg_addr) > pointer_delta((HeapWord*)last_obj, (HeapWord*)end_obj)) {
+      last_ret = last_ret - live_words_in_range_helper((HeapWord*)end_obj, last_obj);
+    } else {
+      last_ret = live_words_in_range_helper(beg_addr, end_obj);
+    }
+    last_obj = end_obj;
+  }
+
+  update_live_words_in_range_cache(cm, last_beg, last_obj, last_ret);
+  return last_ret;
+}
+
+size_t
+ParMarkBitMap::live_words_in_range(ParCompactionManager* cm, HeapWord* beg_addr, oop end_obj) const
+{
+  // Try to reuse result from ParCompactionManager cache first.
+  if (is_live_words_in_range_in_cache(cm, beg_addr)) {
+    return live_words_in_range_use_cache(cm, beg_addr, end_obj);
+  }
+  size_t ret = live_words_in_range_helper(beg_addr, end_obj);
+  update_live_words_in_range_cache(cm, beg_addr, end_obj, ret);
+  return ret;
+}
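The cache logic is easier to see stripped of the HotSpot types. A minimal sketch of the same one-entry memoization, where count_range() is a hypothetical stand-in for the expensive bitmap walk: when a query repeats the same begin address, the cached count is extended or shrunk incrementally, and the shorter of the two candidate spans is the one recounted.

#include <cstddef>

typedef size_t Addr;

// Placeholder for the expensive bitmap walk (pretend everything is live).
size_t count_range(Addr beg, Addr end) { return end - beg; }

struct QueryCache {
  Addr   last_beg;
  Addr   last_end;
  size_t last_ret;
  QueryCache() : last_beg(0), last_end(0), last_ret(0) {}

  size_t live_words(Addr beg, Addr end) {
    if (beg == last_beg) {                     // hit: same query origin
      if (end > last_end) {                    // grow: count only the new tail
        last_ret += count_range(last_end, end);
      } else if (end < last_end) {             // shrink: recount the shorter span
        last_ret = (end - beg > last_end - end)
                 ? last_ret - count_range(end, last_end)
                 : count_range(beg, end);
      }
    } else {                                   // miss: full walk
      last_ret = count_range(beg, end);
    }
    last_beg = beg;
    last_end = end;
    return last_ret;
  }
};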

 ParMarkBitMap::IterationStatus
 ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure,
                        idx_t range_beg, idx_t range_end) const

hotspot/src/share/vm/gc/parallel/parMarkBitMap.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@

 class ParMarkBitMapClosure;
 class PSVirtualSpace;
+class ParCompactionManager;

 class ParMarkBitMap: public CHeapObj<mtGC>
 {
@@ -124,7 +125,7 @@ public:
   // the range are included in the result. The end of the range must be a live object,
   // which is the case when updating pointers. This allows a branch to be removed
   // from inside the loop.
-  size_t live_words_in_range(HeapWord* beg_addr, oop end_obj) const;
+  size_t live_words_in_range(ParCompactionManager* cm, HeapWord* beg_addr, oop end_obj) const;

   inline HeapWord* region_start() const;
   inline HeapWord* region_end() const;
@@ -167,6 +168,12 @@ public:
 #endif // #ifdef ASSERT

  private:
+  size_t live_words_in_range_helper(HeapWord* beg_addr, oop end_obj) const;
+
+  bool is_live_words_in_range_in_cache(ParCompactionManager* cm, HeapWord* beg_addr) const;
+  size_t live_words_in_range_use_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_obj) const;
+  void update_live_words_in_range_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_obj, size_t result) const;
+
   // Each bit in the bitmap represents one unit of 'object granularity.' Objects
   // are double-word aligned in 32-bit VMs, but not in 64-bit VMs, so the 32-bit
   // granularity is 2, 64-bit is 1.
hotspot/src/share/vm/gc/parallel/parallelScavengeHeap.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,10 +28,11 @@
 #include "gc/parallel/cardTableExtension.hpp"
 #include "gc/parallel/gcTaskManager.hpp"
 #include "gc/parallel/generationSizer.hpp"
+#include "gc/parallel/objectStartArray.inline.hpp"
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
 #include "gc/parallel/psMarkSweep.hpp"
-#include "gc/parallel/psParallelCompact.hpp"
+#include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psPromotionManager.hpp"
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/parallel/vmPSOperations.hpp"
hotspot/src/share/vm/gc/parallel/psCompactionManager.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,6 +68,8 @@ ParCompactionManager::ParCompactionManager() :

   marking_stack()->initialize();
   _objarray_stack.initialize();
+
+  reset_bitmap_query_cache();
 }

 ParCompactionManager::~ParCompactionManager() {
@@ -124,6 +126,13 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
     "Not initialized?");
 }

+void ParCompactionManager::reset_all_bitmap_query_caches() {
+  uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();
+  for (uint i=0; i<=parallel_gc_threads; i++) {
+    _manager_array[i]->reset_bitmap_query_cache();
+  }
+}
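The inclusive `<=` bound is deliberate: the manager array is sized with one slot more than the number of GC worker threads, and the extra slot serves the VM thread, so a full reset must visit workers + 1 entries. A toy sketch of that indexing convention (hypothetical names, not the HotSpot types):

struct WorkerCache { void reset() { /* clear cached query state */ } };

// Slots [0, workers) belong to GC worker threads; slot [workers] is the
// VM thread's manager, hence the inclusive upper bound.
void reset_all(WorkerCache** managers, unsigned workers) {
  for (unsigned i = 0; i <= workers; i++) {
    managers[i]->reset();
  }
}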
+
 int ParCompactionManager::pop_recycled_stack_index() {
   assert(_recycled_bottom <= _recycled_top, "list is empty");
   // Get the next available index
hotspot/src/share/vm/gc/parallel/psCompactionManager.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -109,6 +109,10 @@ private:

   Action _action;

+  HeapWord* _last_query_beg;
+  oop _last_query_obj;
+  size_t _last_query_ret;
+
   static PSOldGen* old_gen()             { return _old_gen; }
   static ObjectStartArray* start_array() { return _start_array; }
   static OopTaskQueueSet* stack_array()  { return _stack_array; }
@@ -127,9 +131,26 @@ private:
   // marking stack and overflow stack directly.

  public:
+  void reset_bitmap_query_cache() {
+    _last_query_beg = NULL;
+    _last_query_obj = NULL;
+    _last_query_ret = 0;
+  }
+
   Action action() { return _action; }
   void set_action(Action v) { _action = v; }

+  // Bitmap query support, cache last query and result
+  HeapWord* last_query_begin() { return _last_query_beg; }
+  oop last_query_object() { return _last_query_obj; }
+  size_t last_query_return() { return _last_query_ret; }
+
+  void set_last_query_begin(HeapWord *new_beg) { _last_query_beg = new_beg; }
+  void set_last_query_object(oop new_obj) { _last_query_obj = new_obj; }
+  void set_last_query_return(size_t new_ret) { _last_query_ret = new_ret; }
+
+  static void reset_all_bitmap_query_caches();
+
   RegionTaskQueue* region_stack()                { return _region_stack; }
   void set_region_stack(RegionTaskQueue* v)      { _region_stack = v; }
hotspot/src/share/vm/gc/parallel/psCompactionManager.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -144,7 +144,7 @@ inline void ParCompactionManager::follow_contents(objArrayOop obj, int index) {
 }

 inline void ParCompactionManager::update_contents(oop obj) {
-  obj->pc_update_contents();
+  obj->pc_update_contents(this);
 }

 #endif // SHARE_VM_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP
hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -99,7 +99,7 @@ void PSMarkSweep::invoke(bool maximum_heap_compaction) {
     heap->collector_policy()->should_clear_all_soft_refs();

   uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
-  UIntXFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
+  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
   PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
 }
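Both `UIntFlagSetting` here and the matching change in collectorPolicy.cpp further below rely on the same scope-guard idiom: override a VM flag for one collection and restore it automatically on scope exit. A self-contained sketch of the idiom (not the HotSpot class itself):

// Minimal RAII flag guard: the saved value is restored even on early return.
class ScopedUIntSetting {
  unsigned int* _flag;
  unsigned int  _saved;
 public:
  ScopedUIntSetting(unsigned int& flag, unsigned int value)
    : _flag(&flag), _saved(flag) { flag = value; }
  ~ScopedUIntSetting() { *_flag = _saved; }
};

unsigned int g_compact_count = 4;   // stand-in for MarkSweepAlwaysCompactCount

void invoke_sketch(bool maximum_heap_compaction) {
  unsigned int count = maximum_heap_compaction ? 1 : g_compact_count;
  ScopedUIntSetting guard(g_compact_count, count);
  // ... the collection runs with the overridden value ...
}                                   // original value restored here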

hotspot/src/share/vm/gc/parallel/psOldGen.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */

 #include "precompiled.hpp"
+#include "gc/parallel/objectStartArray.inline.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
 #include "gc/parallel/psMarkSweepDecorator.hpp"
hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -195,7 +195,7 @@ const char* PSParallelCompact::space_names[] = {
 };

 void PSParallelCompact::print_region_ranges() {
-  if (!develop_log_is_enabled(Trace, gc, compaction, phases)) {
+  if (!log_develop_is_enabled(Trace, gc, compaction, phases)) {
     return;
   }
   LogHandle(gc, compaction, phases) log;
@@ -265,7 +265,7 @@ void
 print_generic_summary_data(ParallelCompactData& summary_data,
                            SpaceInfo* space_info)
 {
-  if (!develop_log_is_enabled(Trace, gc, compaction, phases)) {
+  if (!log_develop_is_enabled(Trace, gc, compaction, phases)) {
     return;
   }

@@ -360,7 +360,7 @@ print_initial_summary_data(ParallelCompactData& summary_data,
 void
 print_initial_summary_data(ParallelCompactData& summary_data,
                            SpaceInfo* space_info) {
-  if (!develop_log_is_enabled(Trace, gc, compaction, phases)) {
+  if (!log_develop_is_enabled(Trace, gc, compaction, phases)) {
     return;
   }

@@ -641,7 +641,7 @@ ParallelCompactData::summarize_split_space(size_t src_region,
   *target_next = split_destination + partial_obj_size;
   HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;

-  if (develop_log_is_enabled(Trace, gc, compaction, phases)) {
+  if (log_develop_is_enabled(Trace, gc, compaction, phases)) {
     const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
     log_develop_trace(gc, compaction, phases)("%s split: src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
                                               split_type, p2i(source_next), split_region, partial_obj_size);
@@ -751,7 +751,7 @@ bool ParallelCompactData::summarize(SplitInfo& split_info,
   return true;
 }

-HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
+HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) {
   assert(addr != NULL, "Should detect NULL oop earlier");
   assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
   assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
@@ -788,7 +788,7 @@ HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
   const size_t block_offset = addr_to_block_ptr(addr)->offset();

   const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
-  const size_t live = bitmap->live_words_in_range(search_start, oop(addr));
+  const size_t live = bitmap->live_words_in_range(cm, search_start, oop(addr));
   result += block_offset + live;
   DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result));
   return result;
@@ -825,11 +825,9 @@ PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;

 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

-PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
-PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;
-
 void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
-  klass->oops_do(&PSParallelCompact::_adjust_pointer_closure);
+  PSParallelCompact::AdjustPointerClosure closure(_cm);
+  klass->oops_do(&closure);
 }

 void PSParallelCompact::post_initialize() {
@@ -977,6 +975,8 @@ void PSParallelCompact::pre_compact()

   // Have worker threads release resources the next time they run a task.
   gc_task_manager()->release_all_resources();
+
+  ParCompactionManager::reset_all_bitmap_query_caches();
 }

 void PSParallelCompact::post_compact()
@@ -1535,7 +1535,7 @@ PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
     }
   }

-  if (develop_log_is_enabled(Trace, gc, compaction, phases)) {
+  if (log_develop_is_enabled(Trace, gc, compaction, phases)) {
     const size_t region_size = ParallelCompactData::RegionSize;
     HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
     const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
@@ -1801,7 +1801,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {

   // adjust_roots() updates Universe::_intArrayKlassObj which is
   // needed by the compaction for filling holes in the dense prefix.
-  adjust_roots();
+  adjust_roots(vmthread_cm);

   compaction_start.update();
   compact();
@@ -2142,39 +2142,42 @@ public:
 };
 static PSAlwaysTrueClosure always_true;

-void PSParallelCompact::adjust_roots() {
+void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
   // Adjust the pointers to reflect the new locations
   GCTraceTime(Trace, gc, phases) tm("Adjust Roots", &_gc_timer);

   // Need new claim bits when tracing through and adjusting pointers.
   ClassLoaderDataGraph::clear_claimed_marks();

+  PSParallelCompact::AdjustPointerClosure oop_closure(cm);
+  PSParallelCompact::AdjustKlassClosure klass_closure(cm);
+
   // General strong roots.
-  Universe::oops_do(adjust_pointer_closure());
-  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
-  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
-  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
-  ObjectSynchronizer::oops_do(adjust_pointer_closure());
-  FlatProfiler::oops_do(adjust_pointer_closure());
-  Management::oops_do(adjust_pointer_closure());
-  JvmtiExport::oops_do(adjust_pointer_closure());
-  SystemDictionary::oops_do(adjust_pointer_closure());
-  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
+  Universe::oops_do(&oop_closure);
+  JNIHandles::oops_do(&oop_closure);   // Global (strong) JNI handles
+  CLDToOopClosure adjust_from_cld(&oop_closure);
+  Threads::oops_do(&oop_closure, &adjust_from_cld, NULL);
+  ObjectSynchronizer::oops_do(&oop_closure);
+  FlatProfiler::oops_do(&oop_closure);
+  Management::oops_do(&oop_closure);
+  JvmtiExport::oops_do(&oop_closure);
+  SystemDictionary::oops_do(&oop_closure);
+  ClassLoaderDataGraph::oops_do(&oop_closure, &klass_closure, true);

   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
-  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
+  JNIHandles::weak_oops_do(&always_true, &oop_closure);

-  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
+  CodeBlobToOopClosure adjust_from_blobs(&oop_closure, CodeBlobToOopClosure::FixRelocations);
   CodeCache::blobs_do(&adjust_from_blobs);
-  StringTable::oops_do(adjust_pointer_closure());
-  ref_processor()->weak_oops_do(adjust_pointer_closure());
+  StringTable::oops_do(&oop_closure);
+  ref_processor()->weak_oops_do(&oop_closure);
   // Roots were visited so references into the young gen in roots
   // may have been scanned.  Process them also.
   // Should the reference processor have a span that excludes
   // young gen objects?
-  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
+  PSScavenge::reference_processor()->weak_oops_do(&oop_closure);
 }
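The rewrite above replaces two shared static closures with closures built on the caller's stack, so the ParCompactionManager (and with it the per-thread bitmap-query cache) travels with each root scan instead of living in global state. The shape of that pattern, reduced to hypothetical standalone types:

// Per-call visitor carrying thread-local context instead of a global.
struct Context { /* per-thread query cache would live here */ };

struct PointerVisitor {
  Context* _ctx;
  explicit PointerVisitor(Context* ctx) : _ctx(ctx) {}
  void do_pointer(void** p) { /* adjust *p using _ctx, never a global */ }
};

void adjust_roots_sketch(Context* ctx) {
  PointerVisitor visitor(ctx);   // stack-allocated, one per invocation
  // ... pass &visitor to every root-iteration entry point ...
}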

 // Helper class to print 8 region numbers per line and then print the total at the end.
@@ -2187,7 +2190,7 @@ private:
   bool _enabled;
   size_t _total_regions;
 public:
-  FillableRegionLogger() : _next_index(0), _total_regions(0), _enabled(develop_log_is_enabled(Trace, gc, compaction)) { }
+  FillableRegionLogger() : _next_index(0), _total_regions(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)) { }
   ~FillableRegionLogger() {
     log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
   }
@@ -2378,7 +2381,7 @@ void PSParallelCompact::enqueue_region_stealing_tasks(
 // region.
 void PSParallelCompact::write_block_fill_histogram()
 {
-  if (!develop_log_is_enabled(Trace, gc, compaction)) {
+  if (!log_develop_is_enabled(Trace, gc, compaction)) {
     return;
   }

@@ -3062,18 +3065,20 @@ void MoveAndUpdateClosure::copy_partial_obj()
   update_state(words);
 }

-void InstanceKlass::oop_pc_update_pointers(oop obj) {
-  oop_oop_iterate_oop_maps<true>(obj, PSParallelCompact::adjust_pointer_closure());
+void InstanceKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
+  PSParallelCompact::AdjustPointerClosure closure(cm);
+  oop_oop_iterate_oop_maps<true>(obj, &closure);
 }

-void InstanceMirrorKlass::oop_pc_update_pointers(oop obj) {
-  InstanceKlass::oop_pc_update_pointers(obj);
+void InstanceMirrorKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
+  InstanceKlass::oop_pc_update_pointers(obj, cm);

-  oop_oop_iterate_statics<true>(obj, PSParallelCompact::adjust_pointer_closure());
+  PSParallelCompact::AdjustPointerClosure closure(cm);
+  oop_oop_iterate_statics<true>(obj, &closure);
 }

-void InstanceClassLoaderKlass::oop_pc_update_pointers(oop obj) {
-  InstanceKlass::oop_pc_update_pointers(obj);
+void InstanceClassLoaderKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
+  InstanceKlass::oop_pc_update_pointers(obj, cm);
 }

 #ifdef ASSERT
@@ -3092,33 +3097,34 @@ template <class T> static void trace_reference_gc(const char *s, oop obj,
 #endif

 template <class T>
-static void oop_pc_update_pointers_specialized(oop obj) {
+static void oop_pc_update_pointers_specialized(oop obj, ParCompactionManager* cm) {
   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
-  PSParallelCompact::adjust_pointer(referent_addr);
+  PSParallelCompact::adjust_pointer(referent_addr, cm);
   T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
-  PSParallelCompact::adjust_pointer(next_addr);
+  PSParallelCompact::adjust_pointer(next_addr, cm);
   T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
-  PSParallelCompact::adjust_pointer(discovered_addr);
+  PSParallelCompact::adjust_pointer(discovered_addr, cm);
   debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj,
                                 referent_addr, next_addr, discovered_addr);)
 }

-void InstanceRefKlass::oop_pc_update_pointers(oop obj) {
-  InstanceKlass::oop_pc_update_pointers(obj);
+void InstanceRefKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
+  InstanceKlass::oop_pc_update_pointers(obj, cm);

   if (UseCompressedOops) {
-    oop_pc_update_pointers_specialized<narrowOop>(obj);
+    oop_pc_update_pointers_specialized<narrowOop>(obj, cm);
   } else {
-    oop_pc_update_pointers_specialized<oop>(obj);
+    oop_pc_update_pointers_specialized<oop>(obj, cm);
   }
 }

-void ObjArrayKlass::oop_pc_update_pointers(oop obj) {
+void ObjArrayKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
   assert(obj->is_objArray(), "obj must be obj array");
-  oop_oop_iterate_elements<true>(objArrayOop(obj), PSParallelCompact::adjust_pointer_closure());
+  PSParallelCompact::AdjustPointerClosure closure(cm);
+  oop_oop_iterate_elements<true>(objArrayOop(obj), &closure);
 }

-void TypeArrayKlass::oop_pc_update_pointers(oop obj) {
+void TypeArrayKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
   assert(obj->is_typeArray(),"must be a type array");
 }

@@ -3128,7 +3134,7 @@ MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
   assert(bitmap()->obj_size(addr) == words, "bad size");

   _source = addr;
-  assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
+  assert(PSParallelCompact::summary_data().calc_new_pointer(source(), compaction_manager()) ==
          destination(), "wrong destination");

   if (words > words_remaining()) {
@@ -3169,3 +3175,14 @@ UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
   do_addr(addr);
   return ParMarkBitMap::incomplete;
 }
+
+ParMarkBitMapClosure::IterationStatus
+FillClosure::do_addr(HeapWord* addr, size_t size) {
+  CollectedHeap::fill_with_objects(addr, size);
+  HeapWord* const end = addr + size;
+  do {
+    _start_array->allocate_block(addr);
+    addr += oop(addr)->size();
+  } while (addr < end);
+  return ParMarkBitMap::incomplete;
+}
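FillClosure::do_addr, now moved out of line, formats a dead gap as one or more filler objects and then records each filler's start so that later object_start() queries inside the gap still land on a valid object header. A toy model of that walk, with the heap-specific helpers passed in as stand-ins (hypothetical signatures, not the HotSpot API):

#include <cstddef>

// fill() may lay down several dummy objects (filler sizes are capped), so
// the loop re-derives each dummy's extent and records every start address.
void fill_and_register(size_t addr, size_t size,
                       void   (*fill)(size_t addr, size_t size),
                       size_t (*obj_size)(size_t addr),
                       void   (*record_start)(size_t addr)) {
  fill(addr, size);
  const size_t end = addr + size;
  do {
    record_start(addr);
    addr += obj_size(addr);
  } while (addr < end);
}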

hotspot/src/share/vm/gc/parallel/psParallelCompact.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -451,10 +451,10 @@ public:
   HeapWord* partial_obj_end(size_t region_idx) const;

   // Return the location of the object after compaction.
-  HeapWord* calc_new_pointer(HeapWord* addr);
+  HeapWord* calc_new_pointer(HeapWord* addr, ParCompactionManager* cm);

-  HeapWord* calc_new_pointer(oop p) {
-    return calc_new_pointer((HeapWord*) p);
+  HeapWord* calc_new_pointer(oop p, ParCompactionManager* cm) {
+    return calc_new_pointer((HeapWord*) p, cm);
   }

 #ifdef ASSERT
@@ -937,17 +937,29 @@ class PSParallelCompact : AllStatic {

   class AdjustPointerClosure: public ExtendedOopClosure {
    public:
+    AdjustPointerClosure(ParCompactionManager* cm) {
+      assert(cm != NULL, "associated ParCompactionManager should not be NULL");
+      _cm = cm;
+    }
     template <typename T> void do_oop_nv(T* p);
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);

     // This closure provides its own oop verification code.
     debug_only(virtual bool should_verify_oops() { return false; })
+   private:
+    ParCompactionManager* _cm;
   };

   class AdjustKlassClosure : public KlassClosure {
    public:
+    AdjustKlassClosure(ParCompactionManager* cm) {
+      assert(cm != NULL, "associated ParCompactionManager should not be NULL");
+      _cm = cm;
+    }
     void do_klass(Klass* klass);
+   private:
+    ParCompactionManager* _cm;
   };

   friend class AdjustPointerClosure;
@@ -966,8 +978,6 @@ class PSParallelCompact : AllStatic {
   static ParallelCompactData _summary_data;
   static IsAliveClosure _is_alive_closure;
   static SpaceInfo _space_info[last_space_id];
-  static AdjustPointerClosure _adjust_pointer_closure;
-  static AdjustKlassClosure _adjust_klass_closure;

   // Reference processing (used in ...follow_contents)
   static ReferenceProcessor* _ref_processor;
@@ -1063,7 +1073,7 @@ class PSParallelCompact : AllStatic {
   static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);

   // Adjust addresses in roots.  Does not adjust addresses in heap.
-  static void adjust_roots();
+  static void adjust_roots(ParCompactionManager* cm);

   DEBUG_ONLY(static void write_block_fill_histogram();)

@@ -1109,10 +1119,6 @@ class PSParallelCompact : AllStatic {
   static bool initialize();

   // Closure accessors
-  static PSParallelCompact::AdjustPointerClosure* adjust_pointer_closure() {
-    return &_adjust_pointer_closure;
-  }
-  static KlassClosure* adjust_klass_closure() { return (KlassClosure*)&_adjust_klass_closure; }
   static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }

   // Public accessors
@@ -1127,7 +1133,7 @@ class PSParallelCompact : AllStatic {
   static inline bool mark_obj(oop obj);
   static inline bool is_marked(oop obj);

-  template <class T> static inline void adjust_pointer(T* p);
+  template <class T> static inline void adjust_pointer(T* p, ParCompactionManager* cm);

   // Compaction support.
   // Return true if p is in the range [beg_addr, end_addr).
@@ -1242,16 +1248,6 @@ class PSParallelCompact : AllStatic {
 #endif // #ifdef ASSERT
 };

-inline bool PSParallelCompact::mark_obj(oop obj) {
-  const int obj_size = obj->size();
-  if (mark_bitmap()->mark_obj(obj, obj_size)) {
-    _summary_data.add_obj(obj, obj_size);
-    return true;
-  } else {
-    return false;
-  }
-}
-
 inline bool PSParallelCompact::is_marked(oop obj) {
   return mark_bitmap()->is_marked(obj);
 }
@@ -1386,9 +1382,8 @@ class UpdateOnlyClosure: public ParMarkBitMapClosure {
   inline void do_addr(HeapWord* addr);
 };

-class FillClosure: public ParMarkBitMapClosure
-{
-public:
+class FillClosure: public ParMarkBitMapClosure {
+ public:
   FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
     ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
     _start_array(PSParallelCompact::start_array(space_id))
@@ -1397,17 +1392,9 @@ public:
            "cannot use FillClosure in the young gen");
   }

-  virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
-    CollectedHeap::fill_with_objects(addr, size);
-    HeapWord* const end = addr + size;
-    do {
-      _start_array->allocate_block(addr);
-      addr += oop(addr)->size();
-    } while (addr < end);
-    return ParMarkBitMap::incomplete;
-  }
+  virtual IterationStatus do_addr(HeapWord* addr, size_t size);

-private:
+ private:
   ObjectStartArray* const _start_array;
 };
hotspot/src/share/vm/gc/parallel/psParallelCompact.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,14 +31,24 @@
 #include "oops/klass.hpp"
 #include "oops/oop.inline.hpp"

+inline bool PSParallelCompact::mark_obj(oop obj) {
+  const int obj_size = obj->size();
+  if (mark_bitmap()->mark_obj(obj, obj_size)) {
+    _summary_data.add_obj(obj, obj_size);
+    return true;
+  } else {
+    return false;
+  }
+}
+
 template <class T>
-inline void PSParallelCompact::adjust_pointer(T* p) {
+inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
     assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");

-    oop new_obj = (oop)summary_data().calc_new_pointer(obj);
+    oop new_obj = (oop)summary_data().calc_new_pointer(obj, cm);
     assert(new_obj != NULL,  // is forwarding ptr?
            "should be forwarded");
     // Just always do the update unconditionally?
@@ -52,7 +62,7 @@ inline void PSParallelCompact::adjust_pointer(T* p) {

 template <typename T>
 void PSParallelCompact::AdjustPointerClosure::do_oop_nv(T* p) {
-  adjust_pointer(p);
+  adjust_pointer(p, _cm);
 }

 inline void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { do_oop_nv(p); }
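adjust_pointer is the whole pointer-update phase in miniature: load a (possibly compressed) reference, look up where its target will live after compaction, and store the rewritten reference back. A standalone model with hypothetical types:

#include <cstdint>

struct ForwardingTable {
  // Stand-in for the summary-data lookup; identity placeholder here.
  uintptr_t new_location(uintptr_t old_addr) const { return old_addr; }
};

// Rewrite one reference slot in place; null slots are left untouched.
inline void adjust_pointer_sketch(uintptr_t* slot, const ForwardingTable& fwd) {
  uintptr_t obj = *slot;
  if (obj != 0) {
    *slot = fwd.new_location(obj);  // unconditional store is cheapest
  }
}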

hotspot/src/share/vm/gc/parallel/psPromotionManager.cpp
@@ -130,7 +130,7 @@ static const char* const pm_stats_hdr[] = {

 void
 PSPromotionManager::print_taskqueue_stats() {
-  if (!develop_log_is_enabled(Trace, gc, task, stats)) {
+  if (!log_develop_is_enabled(Trace, gc, task, stats)) {
     return;
   }
   LogHandle(gc, task, stats) log;

hotspot/src/share/vm/gc/parallel/psPromotionManager.inline.hpp
@@ -284,7 +284,7 @@ inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {

   // This code must come after the CAS test, or it will print incorrect
   // information.
-  if (develop_log_is_enabled(Trace, gc, scavenge) && o->is_forwarded()) {
+  if (log_develop_is_enabled(Trace, gc, scavenge) && o->is_forwarded()) {
     log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                     "forwarding",
                                     new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
hotspot/src/share/vm/gc/parallel/psScavenge.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
 #include "gc/parallel/psMarkSweep.hpp"
-#include "gc/parallel/psParallelCompact.hpp"
+#include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psScavenge.inline.hpp"
 #include "gc/parallel/psTasks.hpp"
 #include "gc/shared/collectorPolicy.hpp"
@@ -763,6 +763,15 @@ GCTaskManager* const PSScavenge::gc_task_manager() {
   return ParallelScavengeHeap::gc_task_manager();
 }

+// Adaptive size policy support. When the young generation/old generation
+// boundary moves, _young_generation_boundary must be reset.
+void PSScavenge::set_young_generation_boundary(HeapWord* v) {
+  _young_generation_boundary = v;
+  if (UseCompressedOops) {
+    _young_generation_boundary_compressed = (uintptr_t)oopDesc::encode_heap_oop((oop)v);
+  }
+}
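Moving this setter out of line also makes its purpose easier to see: the boundary is kept in two forms, raw and compressed, so the hot in-young-generation test can compare a compressed reference directly without decoding it first. A sketch of the dual-form cache, assuming a hypothetical monotonic encode():

#include <cstdint>

// Placeholder compressed-oop encoding; assumed monotonic like the real one.
uint32_t encode(uintptr_t addr) { return (uint32_t)(addr >> 3); }

static uintptr_t boundary_raw;
static uint32_t  boundary_compressed;

void set_boundary(uintptr_t v) {
  boundary_raw        = v;
  boundary_compressed = encode(v);   // recomputed only when the boundary moves
}

// Fast path: with the young gen above the boundary, one compare suffices
// in either representation, with no decode needed.
inline bool is_in_young(uint32_t compressed_ref) {
  return compressed_ref >= boundary_compressed;
}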
+
 void PSScavenge::initialize() {
   // Arguments must have been parsed
hotspot/src/share/vm/gc/parallel/psScavenge.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -117,12 +117,7 @@ class PSScavenge: AllStatic {
   }
   // Adaptive size policy support. When the young generation/old generation
   // boundary moves, _young_generation_boundary must be reset.
-  static void set_young_generation_boundary(HeapWord* v) {
-    _young_generation_boundary = v;
-    if (UseCompressedOops) {
-      _young_generation_boundary_compressed = (uintptr_t)oopDesc::encode_heap_oop((oop)v);
-    }
-  }
+  static void set_young_generation_boundary(HeapWord* v);

   // Called by parallelScavengeHeap to init the tenuring threshold
   static void initialize();
hotspot/src/share/vm/gc/serial/defNewGeneration.cpp
@@ -24,6 +24,7 @@

 #include "precompiled.hpp"
 #include "gc/serial/defNewGeneration.inline.hpp"
+#include "gc/shared/ageTable.inline.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
hotspot/src/share/vm/gc/shared/ageTable.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,15 +23,16 @@
  */

 #include "precompiled.hpp"
-#include "gc/shared/ageTable.hpp"
+#include "gc/shared/ageTable.inline.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/collectorPolicy.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "memory/resourceArea.hpp"
+#include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/copy.hpp"

-/* Copyright (c) 1992, 2015, Oracle and/or its affiliates, and Stanford University.
+/* Copyright (c) 1992, 2016, Oracle and/or its affiliates, and Stanford University.
    See the LICENSE file for license information. */

 AgeTable::AgeTable(bool global) {
hotspot/src/share/vm/gc/shared/ageTable.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@

 class GCPolicyCounters;

-/* Copyright (c) 1992, 2015, Oracle and/or its affiliates, and Stanford University.
+/* Copyright (c) 1992, 2016, Oracle and/or its affiliates, and Stanford University.
    See the LICENSE file for license information. */

 // Age table for adaptive feedback-mediated tenuring (scavenging)
@@ -56,9 +56,7 @@ class AgeTable VALUE_OBJ_CLASS_SPEC {
   void clear();

   // add entry
-  void add(oop p, size_t oop_size) {
-    add(p->age(), oop_size);
-  }
+  inline void add(oop p, size_t oop_size);

   void add(uint age, size_t oop_size) {
     assert(age > 0 && age < table_size, "invalid age of object");
hotspot/src/share/vm/gc/shared/ageTable.inline.hpp (new file)
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_AGETABLE_INLINE_HPP
+#define SHARE_VM_GC_SHARED_AGETABLE_INLINE_HPP
+
+#include "gc/shared/ageTable.hpp"
+#include "oops/oop.inline.hpp"
+
+// add entry
+void AgeTable::add(oop p, size_t oop_size) {
+  add(p->age(), oop_size);
+}
+
+#endif // SHARE_VM_GC_SHARED_AGETABLE_INLINE_HPP
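For context, the table this inline add() feeds is a per-age histogram of surviving words; at the end of a scavenge the tenuring threshold is chosen as the first age at which the accumulated size would exceed the desired survivor occupancy. A toy version of both halves (the 50% target mirrors the default TargetSurvivorRatio, but is an assumption of this sketch):

#include <cstddef>

const unsigned table_size = 16;

struct ToyAgeTable {
  size_t sizes[table_size];             // words surviving at each age

  void add(unsigned age, size_t words) { sizes[age] += words; }

  unsigned compute_threshold(size_t survivor_capacity_words) const {
    const size_t desired = survivor_capacity_words / 2;  // assumed 50% target
    size_t total = 0;
    unsigned age = 1;
    while (age < table_size) {
      total += sizes[age];
      if (total > desired) break;       // tenure everything at or above this age
      age++;
    }
    return age;
  }
};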

hotspot/src/share/vm/gc/shared/collectedHeap.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 #include "memory/universe.hpp"
 #include "oops/arrayOop.hpp"
+#include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.inline.hpp"
@@ -248,7 +249,7 @@ inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
   assert(is_size_aligned(alignment_in_bytes, HeapWordSize),
          "Alignment size %u is incorrect.", alignment_in_bytes);

-  HeapWord* new_addr = (HeapWord*) align_pointer_up(addr, alignment_in_bytes);
+  HeapWord* new_addr = (HeapWord*) align_ptr_up(addr, alignment_in_bytes);
   size_t padding = pointer_delta(new_addr, addr);

   if (padding == 0) {
hotspot/src/share/vm/gc/shared/collectorPolicy.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -774,7 +774,7 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
   // free memory should be here, especially if they are expensive. If this
   // attempt fails, an OOM exception will be thrown.
   {
-    UIntXFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
+    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

     gch->do_collection(true,   // full
                        true,   // clear_all_soft_refs
hotspot/src/share/vm/gc/shared/genOopClosures.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -146,23 +146,15 @@ class FilteringClosure: public ExtendedOopClosure {
   HeapWord* _boundary;
   ExtendedOopClosure* _cl;
  protected:
-  template <class T> inline void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      if ((HeapWord*)obj < _boundary) {
-        _cl->do_oop(p);
-      }
-    }
-  }
+  template <class T> inline void do_oop_work(T* p);
  public:
   FilteringClosure(HeapWord* boundary, ExtendedOopClosure* cl) :
     ExtendedOopClosure(cl->ref_processor()), _boundary(boundary),
     _cl(cl) {}
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
-  inline void do_oop_nv(oop* p)       { FilteringClosure::do_oop_work(p); }
-  inline void do_oop_nv(narrowOop* p) { FilteringClosure::do_oop_work(p); }
+  inline void do_oop_nv(oop* p);
+  inline void do_oop_nv(narrowOop* p);
   virtual bool do_metadata()          { return do_metadata_nv(); }
   inline bool do_metadata_nv()        { assert(!_cl->do_metadata(), "assumption broken, must change to 'return _cl->do_metadata()'"); return false; }
 };
hotspot/src/share/vm/gc/shared/genOopClosures.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -124,6 +124,19 @@ template <class T> inline void FastScanClosure::do_oop_work(T* p) {
 inline void FastScanClosure::do_oop_nv(oop* p)       { FastScanClosure::do_oop_work(p); }
 inline void FastScanClosure::do_oop_nv(narrowOop* p) { FastScanClosure::do_oop_work(p); }

+template <class T> void FilteringClosure::do_oop_work(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if ((HeapWord*)obj < _boundary) {
+      _cl->do_oop(p);
+    }
+  }
+}
+
+void FilteringClosure::do_oop_nv(oop* p)       { FilteringClosure::do_oop_work(p); }
+void FilteringClosure::do_oop_nv(narrowOop* p) { FilteringClosure::do_oop_work(p); }
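The filtering closure moved here is a decorator: it forwards a reference to the wrapped closure only when the target lies below a boundary, which is how a generation-local scan skips oops pointing outside the generation. The same shape in standalone form (hypothetical types, not the HotSpot hierarchy):

#include <cstdint>

struct Visitor {
  virtual void do_ref(uintptr_t* p) = 0;
  virtual ~Visitor() {}
};

// Decorator: only non-null, below-boundary targets reach the inner visitor.
struct FilteringVisitor : Visitor {
  uintptr_t _boundary;
  Visitor*  _inner;
  FilteringVisitor(uintptr_t boundary, Visitor* inner)
    : _boundary(boundary), _inner(inner) {}
  virtual void do_ref(uintptr_t* p) {
    uintptr_t obj = *p;
    if (obj != 0 && obj < _boundary) {
      _inner->do_ref(p);
    }
  }
};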
+
 // Note similarity to ScanClosure; the difference is that
 // the barrier set is taken care of outside this closure.
 template <class T> inline void ScanWeakRefClosure::do_oop_work(T* p) {
hotspot/src/share/vm/gc/shared/referenceProcessor.cpp
@@ -30,7 +30,7 @@
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/referencePolicy.hpp"
-#include "gc/shared/referenceProcessor.hpp"
+#include "gc/shared/referenceProcessor.inline.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "oops/oop.inline.hpp"
hotspot/src/share/vm/gc/shared/referenceProcessor.hpp
@@ -58,23 +58,13 @@ class AbstractRefProcTaskExecutor;
 class DiscoveredList {
 public:
   DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
-  oop head() const {
-    return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
-                                _oop_head;
-  }
+  inline oop head() const;
   HeapWord* adr_head() {
     return UseCompressedOops ? (HeapWord*)&_compressed_head :
                                (HeapWord*)&_oop_head;
   }
-  void set_head(oop o) {
-    if (UseCompressedOops) {
-      // Must compress the head ptr.
-      _compressed_head = oopDesc::encode_heap_oop(o);
-    } else {
-      _oop_head = o;
-    }
-  }
-  bool is_empty() const { return head() == NULL; }
+  inline void set_head(oop o);
+  inline bool is_empty() const;
   size_t length() { return _len; }
   void set_length(size_t len) { _len = len; }
   void inc_length(size_t inc)  { _len += inc; assert(_len > 0, "Error"); }
@@ -113,22 +103,7 @@ private:
 public:
   inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                 OopClosure*        keep_alive,
-                                BoolObjectClosure* is_alive):
-    _refs_list(refs_list),
-    _prev_next(refs_list.adr_head()),
-    _prev(NULL),
-    _ref(refs_list.head()),
-#ifdef ASSERT
-    _first_seen(refs_list.head()),
-#endif
-#ifndef PRODUCT
-    _processed(0),
-    _removed(0),
-#endif
-    _next(NULL),
-    _keep_alive(keep_alive),
-    _is_alive(is_alive)
-  { }
+                                BoolObjectClosure* is_alive);

   // End Of List.
   inline bool has_next() const { return _ref != NULL; }
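The bodies moved out of this header all hide the same run-time choice: a DiscoveredList stores its head either as a compressed or an uncompressed reference depending on UseCompressedOops, and every accessor dispatches on that mode. A toy model of the dual representation (hypothetical encode/decode, not the HotSpot helpers):

#include <cstdint>

bool use_compressed = true;            // stand-in for UseCompressedOops
// Placeholder compressed-oop helpers (shift-by-3 encoding for illustration).
uint32_t  encode(uintptr_t ref) { return (uint32_t)(ref >> 3); }
uintptr_t decode(uint32_t ref)  { return (uintptr_t)ref << 3; }

struct ToyDiscoveredList {
  uint32_t  compressed_head;
  uintptr_t raw_head;

  uintptr_t head() const {
    return use_compressed ? decode(compressed_head) : raw_head;
  }
  void set_head(uintptr_t o) {
    if (use_compressed) {
      compressed_head = encode(o);     // must compress before storing
    } else {
      raw_head = o;
    }
  }
  bool is_empty() const { return head() == 0; }
};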