Merge
commit 6b3281d3e6
@@ -156,3 +156,4 @@ cc771d92284f71765eca14d6d08703c4af254c04 jdk8-b21
 88176171e940f02916a312c265a34c32552a8376 jdk8-b32
 42f275168fa5d9e7c70b246614dca8cf81f52c2e jdk8-b33
 894a478d2c4819a1a0f230bd7bdd09f3b2de9a8c jdk8-b34
+5285317ebb4e8e4f6d8d52b5616fa801e2ea844d jdk8-b35
@@ -239,3 +239,5 @@ cd47da9383cd932cb2b659064057feafa2a91134 hs24-b06
 785bcf415ead2eaa5f6677aaf528481008140bac jdk8-b33
 7c6aba65acd2c334f1c3512b574f9038cddac24b hs24-b07
 f284b08835584517c1ca3dd67341f569e763841f jdk8-b34
+f621660a297baa48fab9dca28e99d318826e8304 jdk8-b35
+dff6e3459210f8dd0430b9b03ccc99280560da30 hs24-b08
@@ -440,7 +440,7 @@ static bool sort_map_array(struct ps_prochandle* ph) {
 int j = 0;
 print_debug("---- sorted virtual address map ----\n");
 for (j = 0; j < ph->core->num_maps; j++) {
-print_debug("base = 0x%lx\tsize = %d\n", ph->core->map_array[j]->vaddr,
+print_debug("base = 0x%lx\tsize = %zd\n", ph->core->map_array[j]->vaddr,
 ph->core->map_array[j]->memsz);
 }
 }
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@ import sun.jvm.hotspot.types.TypeDataBase;
 public class HeapRegionSeq extends VMObject {
 // HeapRegion** _regions;
 static private AddressField regionsField;
-// size_t _length;
+// uint _length;
 static private CIntegerField lengthField;

 static {
@@ -40,9 +40,9 @@ import sun.jvm.hotspot.types.TypeDataBase;
 // Mirror class for HeapRegionSetBase. Represents a group of regions.

 public class HeapRegionSetBase extends VMObject {
-// size_t _length;
+// uint _length;
 static private CIntegerField lengthField;
-// size_t _region_num;
+// uint _region_num;
 static private CIntegerField regionNumField;
 // size_t _total_used_bytes;
 static private CIntegerField totalUsedBytesField;
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -889,15 +889,9 @@ public class VirtualMachineImpl extends MirrorImpl implements PathSearchingVirtu
 Klass kls = ((ReferenceTypeImpl)type).ref();
 if (kls instanceof InstanceKlass) {
 InstanceKlass ik = (InstanceKlass) kls;
-if (ik.isInterface()) {
-if (ik.nofImplementors() == 0L) {
-return new ArrayList(0);
-}
-} else {
-// if the Klass is final or if there are no subklasses loaded yet
-if (ik.getAccessFlagsObj().isFinal() || ik.getSubklassKlass() == null) {
-includeSubtypes = false;
-}
-}
+// if the Klass is final or if there are no subklasses loaded yet
+if (ik.getAccessFlagsObj().isFinal() || ik.getSubklassKlass() == null) {
+includeSubtypes = false;
+}
 }
 } else {
 // no subtypes for primitive array types
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,6 @@ public class InstanceKlass extends Klass {
 private static int HIGH_OFFSET;
 private static int GENERIC_SIGNATURE_INDEX_OFFSET;
 private static int FIELD_SLOTS;
-public static int IMPLEMENTORS_LIMIT;

 // ClassState constants
 private static int CLASS_STATE_UNPARSABLE_BY_GC;
@@ -70,13 +69,6 @@ public class InstanceKlass extends Klass {
 methodOrdering = new OopField(type.getOopField("_method_ordering"), Oop.getHeaderSize());
 localInterfaces = new OopField(type.getOopField("_local_interfaces"), Oop.getHeaderSize());
 transitiveInterfaces = new OopField(type.getOopField("_transitive_interfaces"), Oop.getHeaderSize());
-nofImplementors = new CIntField(type.getCIntegerField("_nof_implementors"), Oop.getHeaderSize());
-IMPLEMENTORS_LIMIT = db.lookupIntConstant("instanceKlass::implementors_limit").intValue();
-implementors = new OopField[IMPLEMENTORS_LIMIT];
-for (int i = 0; i < IMPLEMENTORS_LIMIT; i++) {
-long arrayOffset = Oop.getHeaderSize() + (i * db.getAddressSize());
-implementors[i] = new OopField(type.getOopField("_implementors[0]"), arrayOffset);
-}
 fields = new OopField(type.getOopField("_fields"), Oop.getHeaderSize());
 javaFieldsCount = new CIntField(type.getCIntegerField("_java_fields_count"), Oop.getHeaderSize());
 constants = new OopField(type.getOopField("_constants"), Oop.getHeaderSize());
@@ -136,8 +128,6 @@ public class InstanceKlass extends Klass {
 private static OopField methodOrdering;
 private static OopField localInterfaces;
 private static OopField transitiveInterfaces;
-private static CIntField nofImplementors;
-private static OopField[] implementors;
 private static OopField fields;
 private static CIntField javaFieldsCount;
 private static OopField constants;
@@ -317,9 +307,6 @@ public class InstanceKlass extends Klass {
 public TypeArray getMethodOrdering() { return (TypeArray) methodOrdering.getValue(this); }
 public ObjArray getLocalInterfaces() { return (ObjArray) localInterfaces.getValue(this); }
 public ObjArray getTransitiveInterfaces() { return (ObjArray) transitiveInterfaces.getValue(this); }
-public long nofImplementors() { return nofImplementors.getValue(this); }
-public Klass getImplementor() { return (Klass) implementors[0].getValue(this); }
-public Klass getImplementor(int i) { return (Klass) implementors[i].getValue(this); }
 public TypeArray getFields() { return (TypeArray) fields.getValue(this); }
 public int getJavaFieldsCount() { return (int) javaFieldsCount.getValue(this); }
 public int getAllFieldsCount() { return (int)getFields().getLength() / FIELD_SLOTS; }
@@ -527,9 +514,6 @@ public class InstanceKlass extends Klass {
 visitor.doOop(methodOrdering, true);
 visitor.doOop(localInterfaces, true);
 visitor.doOop(transitiveInterfaces, true);
-visitor.doCInt(nofImplementors, true);
-for (int i = 0; i < IMPLEMENTORS_LIMIT; i++)
-visitor.doOop(implementors[i], true);
 visitor.doOop(fields, true);
 visitor.doOop(constants, true);
 visitor.doOop(classLoader, true);
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011

 HS_MAJOR_VER=24
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=07
+HS_BUILD_NUMBER=08

 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
@@ -2651,56 +2651,49 @@ void TemplateTable::jvmti_post_fast_field_mod() {
 // Check to see if a field modification watch has been set before we take
 // the time to call into the VM.
 Label L2;
 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
 __ testl(rcx,rcx);
 __ jcc(Assembler::zero, L2);
 __ pop_ptr(rbx); // copy the object pointer from tos
 __ verify_oop(rbx);
 __ push_ptr(rbx); // put the object pointer back on tos
-__ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
-__ mov(rcx, rsp);
-__ push_ptr(rbx); // save object pointer so we can steal rbx,
-__ xorptr(rbx, rbx);
-const Address lo_value(rcx, rbx, Address::times_1, 0*wordSize);
-const Address hi_value(rcx, rbx, Address::times_1, 1*wordSize);
-switch (bytecode()) { // load values into the jvalue object
-case Bytecodes::_fast_bputfield: __ movb(lo_value, rax); break;
-case Bytecodes::_fast_sputfield: __ movw(lo_value, rax); break;
-case Bytecodes::_fast_cputfield: __ movw(lo_value, rax); break;
-case Bytecodes::_fast_iputfield: __ movl(lo_value, rax); break;
-case Bytecodes::_fast_lputfield:
-NOT_LP64(__ movptr(hi_value, rdx));
-__ movptr(lo_value, rax);
-break;

-// need to call fld_s() after fstp_s() to restore the value for below
-case Bytecodes::_fast_fputfield: __ fstp_s(lo_value); __ fld_s(lo_value); break;
+// Save tos values before call_VM() clobbers them. Since we have
+// to do it for every data type, we use the saved values as the
+// jvalue object.
+switch (bytecode()) { // load values into the jvalue object
+case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
+case Bytecodes::_fast_bputfield: // fall through
+case Bytecodes::_fast_sputfield: // fall through
+case Bytecodes::_fast_cputfield: // fall through
+case Bytecodes::_fast_iputfield: __ push_i(rax); break;
+case Bytecodes::_fast_dputfield: __ push_d(); break;
+case Bytecodes::_fast_fputfield: __ push_f(); break;
+case Bytecodes::_fast_lputfield: __ push_l(rax); break;

-// need to call fld_d() after fstp_d() to restore the value for below
-case Bytecodes::_fast_dputfield: __ fstp_d(lo_value); __ fld_d(lo_value); break;
+default:
+ShouldNotReachHere();
+}
+__ mov(rcx, rsp); // points to jvalue on the stack
+// access constant pool cache entry
+__ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
+__ verify_oop(rbx);
+// rbx,: object pointer copied above
+// rax,: cache entry pointer
+// rcx: jvalue object on the stack
+__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);

-// since rcx is not an object we don't call store_check() here
-case Bytecodes::_fast_aputfield: __ movptr(lo_value, rax); break;
-default: ShouldNotReachHere();
-}
-__ pop_ptr(rbx); // restore copy of object pointer
-// Save rax, and sometimes rdx because call_VM() will clobber them,
-// then use them for JVM/DI purposes
-__ push(rax);
-if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
-// access constant pool cache entry
-__ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
-__ verify_oop(rbx);
-// rbx,: object pointer copied above
-// rax,: cache entry pointer
-// rcx: jvalue object on the stack
-__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
-if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // restore high value
-__ pop(rax); // restore lower value
-__ addptr(rsp, sizeof(jvalue)); // release jvalue object space
-__ bind(L2);
+switch (bytecode()) { // restore tos values
+case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
+case Bytecodes::_fast_bputfield: // fall through
+case Bytecodes::_fast_sputfield: // fall through
+case Bytecodes::_fast_cputfield: // fall through
+case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
+case Bytecodes::_fast_dputfield: __ pop_d(); break;
+case Bytecodes::_fast_fputfield: __ pop_f(); break;
+case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
+}
+__ bind(L2);
 }
 }

@@ -2685,26 +2685,23 @@ void TemplateTable::jvmti_post_fast_field_mod() {
 __ pop_ptr(rbx); // copy the object pointer from tos
 __ verify_oop(rbx);
 __ push_ptr(rbx); // put the object pointer back on tos
-__ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
-__ mov(c_rarg3, rsp);
-const Address field(c_rarg3, 0);
-
+// Save tos values before call_VM() clobbers them. Since we have
+// to do it for every data type, we use the saved values as the
+// jvalue object.
 switch (bytecode()) { // load values into the jvalue object
-case Bytecodes::_fast_aputfield: __ movq(field, rax); break;
-case Bytecodes::_fast_lputfield: __ movq(field, rax); break;
-case Bytecodes::_fast_iputfield: __ movl(field, rax); break;
-case Bytecodes::_fast_bputfield: __ movb(field, rax); break;
+case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
+case Bytecodes::_fast_bputfield: // fall through
 case Bytecodes::_fast_sputfield: // fall through
-case Bytecodes::_fast_cputfield: __ movw(field, rax); break;
-case Bytecodes::_fast_fputfield: __ movflt(field, xmm0); break;
-case Bytecodes::_fast_dputfield: __ movdbl(field, xmm0); break;
+case Bytecodes::_fast_cputfield: // fall through
+case Bytecodes::_fast_iputfield: __ push_i(rax); break;
+case Bytecodes::_fast_dputfield: __ push_d(); break;
+case Bytecodes::_fast_fputfield: __ push_f(); break;
+case Bytecodes::_fast_lputfield: __ push_l(rax); break;

 default:
 ShouldNotReachHere();
 }
-// Save rax because call_VM() will clobber it, then use it for
-// JVMTI purposes
-__ push(rax);
+__ mov(c_rarg3, rsp); // points to jvalue on the stack
 // access constant pool cache entry
 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
 __ verify_oop(rbx);
@@ -2715,8 +2712,17 @@ void TemplateTable::jvmti_post_fast_field_mod() {
 CAST_FROM_FN_PTR(address,
 InterpreterRuntime::post_field_modification),
 rbx, c_rarg2, c_rarg3);
-__ pop(rax); // restore lower value
-__ addptr(rsp, sizeof(jvalue)); // release jvalue object space
+switch (bytecode()) { // restore tos values
+case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
+case Bytecodes::_fast_bputfield: // fall through
+case Bytecodes::_fast_sputfield: // fall through
+case Bytecodes::_fast_cputfield: // fall through
+case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
+case Bytecodes::_fast_dputfield: __ pop_d(); break;
+case Bytecodes::_fast_fputfield: __ pop_f(); break;
+case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
+}
 __ bind(L2);
 }
 }
@@ -72,15 +72,18 @@

 #ifdef _ALLBSD_SOURCE
 #ifdef __APPLE__
+static size_t thread_id_size() { return sizeof(thread_t); }
 thread_t thread_id() const {
 return _thread_id;
 }
 #else
+static size_t thread_id_size() { return sizeof(pthread_t); }
 pthread_t thread_id() const {
 return _thread_id;
 }
 #endif
 #else
+static size_t thread_id_size() { return sizeof(pid_t); }
 pid_t thread_id() const {
 return _thread_id;
 }
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -56,6 +56,8 @@
 sigset_t caller_sigmask() const { return _caller_sigmask; }
 void set_caller_sigmask(sigset_t sigmask) { _caller_sigmask = sigmask; }

+static size_t thread_id_size() { return sizeof(pid_t); }
+
 pid_t thread_id() const {
 return _thread_id;
 }
@@ -36,6 +36,7 @@
 bool _vm_created_thread; // true if the VM created this thread,
 // false if primary thread or attached thread
 public:
+static size_t thread_id_size() { return sizeof(thread_t); }
 thread_t thread_id() const { return _thread_id; }
 uint lwp_id() const { return _lwp_id; }
 int native_priority() const { return _native_priority; }
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -42,6 +42,8 @@ typedef void* HANDLE;
 HANDLE interrupt_event() const { return _interrupt_event; }
 void set_interrupt_event(HANDLE interrupt_event) { _interrupt_event = interrupt_event; }

+
+static size_t thread_id_size() { return sizeof(unsigned long); }
 unsigned long thread_id() const { return _thread_id; }
 #ifndef PRODUCT
 // Used for debugging, return a unique integer for each thread.
@@ -1694,7 +1694,9 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
 // they are roughly equivalent to Object.
 ciInstanceKlass* singleton = NULL;
 if (target->holder()->nof_implementors() == 1) {
-singleton = target->holder()->implementor(0);
+singleton = target->holder()->implementor();
+assert(singleton != NULL && singleton != target->holder(),
+"just checking");

 assert(holder->is_interface(), "invokeinterface to non interface?");
 ciInstanceKlass* decl_interface = (ciInstanceKlass*)holder;
@@ -3130,10 +3132,23 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
 bool cantrap = true;
 vmIntrinsics::ID id = callee->intrinsic_id();
 switch (id) {
-case vmIntrinsics::_arraycopy :
+case vmIntrinsics::_arraycopy:
 if (!InlineArrayCopy) return false;
 break;

+#ifdef TRACE_HAVE_INTRINSICS
+case vmIntrinsics::_classID:
+case vmIntrinsics::_threadID:
+preserves_state = true;
+cantrap = true;
+break;
+
+case vmIntrinsics::_counterTime:
+preserves_state = true;
+cantrap = false;
+break;
+#endif
+
 case vmIntrinsics::_currentTimeMillis:
 case vmIntrinsics::_nanoTime:
 preserves_state = true;
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -2879,6 +2879,50 @@ void LIRGenerator::do_IfOp(IfOp* x) {
 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
 }

+void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
+assert(x->number_of_arguments() == expected_arguments, "wrong type");
+LIR_Opr reg = result_register_for(x->type());
+__ call_runtime_leaf(routine, getThreadTemp(),
+reg, new LIR_OprList());
+LIR_Opr result = rlock_result(x);
+__ move(reg, result);
+}
+
+#ifdef TRACE_HAVE_INTRINSICS
+void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
+LIR_Opr thread = getThreadPointer();
+LIR_Opr osthread = new_pointer_register();
+__ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
+size_t thread_id_size = OSThread::thread_id_size();
+if (thread_id_size == (size_t) BytesPerLong) {
+LIR_Opr id = new_register(T_LONG);
+__ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
+__ convert(Bytecodes::_l2i, id, rlock_result(x));
+} else if (thread_id_size == (size_t) BytesPerInt) {
+__ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
+} else {
+ShouldNotReachHere();
+}
+}
+
+void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
+CodeEmitInfo* info = state_for(x);
+CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
+assert(info != NULL, "must have info");
+LIRItem arg(x->argument_at(1), this);
+arg.load_item();
+LIR_Opr klass = new_register(T_OBJECT);
+__ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_OBJECT), klass, info);
+LIR_Opr id = new_register(T_LONG);
+ByteSize offset = TRACE_ID_OFFSET;
+LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
+__ move(trace_id_addr, id);
+__ logical_or(id, LIR_OprFact::longConst(0x01l), id);
+__ store(id, trace_id_addr);
+__ logical_and(id, LIR_OprFact::longConst(~0x3l), id);
+__ move(id, rlock_result(x));
+}
+#endif

 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
 switch (x->id()) {
@@ -2890,25 +2934,21 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
 break;
 }

-case vmIntrinsics::_currentTimeMillis: {
-assert(x->number_of_arguments() == 0, "wrong type");
-LIR_Opr reg = result_register_for(x->type());
-__ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeMillis), getThreadTemp(),
-reg, new LIR_OprList());
-LIR_Opr result = rlock_result(x);
-__ move(reg, result);
-break;
-}
+#ifdef TRACE_HAVE_INTRINSICS
+case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
+case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
+case vmIntrinsics::_counterTime:
+do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x);
+break;
+#endif

-case vmIntrinsics::_nanoTime: {
-assert(x->number_of_arguments() == 0, "wrong type");
-LIR_Opr reg = result_register_for(x->type());
-__ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeNanos), getThreadTemp(),
-reg, new LIR_OprList());
-LIR_Opr result = rlock_result(x);
-__ move(reg, result);
-break;
-}
+case vmIntrinsics::_currentTimeMillis:
+do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
+break;
+
+case vmIntrinsics::_nanoTime:
+do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
+break;

 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
 case vmIntrinsics::_getClass: do_getClass(x); break;
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -426,6 +426,12 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
 SwitchRangeArray* create_lookup_ranges(LookupSwitch* x);
 void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux);

+void do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x);
+#ifdef TRACE_HAVE_INTRINSICS
+void do_ThreadIDIntrinsic(Intrinsic* x);
+void do_ClassIDIntrinsic(Intrinsic* x);
+#endif
+
 public:
 Compilation* compilation() const { return _compilation; }
 FrameMap* frame_map() const { return _compilation->frame_map(); }
@@ -295,6 +295,9 @@ const char* Runtime1::name_for_address(address entry) {
 FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
 FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
 FUNCTION_CASE(entry, trace_block_entry);
+#ifdef TRACE_HAVE_INTRINSICS
+FUNCTION_CASE(entry, TRACE_TIME_METHOD);
+#endif

 #undef FUNCTION_CASE

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -59,10 +59,7 @@ ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) :
 _has_nonstatic_fields = ik->has_nonstatic_fields();
 _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:

-_nof_implementors = ik->nof_implementors();
-for (int i = 0; i < implementors_limit; i++) {
-_implementors[i] = NULL; // we will fill these lazily
-}
+_implementor = NULL; // we will fill these lazily

 Thread *thread = Thread::current();
 if (ciObjectFactory::is_initialized()) {
@@ -102,7 +99,6 @@ ciInstanceKlass::ciInstanceKlass(ciSymbol* name,
 _nonstatic_field_size = -1;
 _has_nonstatic_fields = false;
 _nonstatic_fields = NULL;
-_nof_implementors = -1;
 _loader = loader;
 _protection_domain = protection_domain;
 _is_shared = false;
@@ -132,17 +128,6 @@ bool ciInstanceKlass::compute_shared_has_subklass() {
 )
 }

-// ------------------------------------------------------------------
-// ciInstanceKlass::compute_shared_nof_implementors
-int ciInstanceKlass::compute_shared_nof_implementors() {
-// We requery this property, since it is a very old ciObject.
-GUARDED_VM_ENTRY(
-instanceKlass* ik = get_instanceKlass();
-_nof_implementors = ik->nof_implementors();
-return _nof_implementors;
-)
-}
-
 // ------------------------------------------------------------------
 // ciInstanceKlass::loader
 oop ciInstanceKlass::loader() {
@@ -540,7 +525,7 @@ bool ciInstanceKlass::is_leaf_type() {
 if (is_shared()) {
 return is_final(); // approximately correct
 } else {
-return !_has_subklass && (_nof_implementors == 0);
+return !_has_subklass && (nof_implementors() == 0);
 }
 }

@@ -548,35 +533,31 @@ bool ciInstanceKlass::is_leaf_type() {
 // ciInstanceKlass::implementor
 //
 // Report an implementor of this interface.
-// Returns NULL if exact information is not available.
 // Note that there are various races here, since my copy
 // of _nof_implementors might be out of date with respect
 // to results returned by instanceKlass::implementor.
 // This is OK, since any dependencies we decide to assert
 // will be checked later under the Compile_lock.
-ciInstanceKlass* ciInstanceKlass::implementor(int n) {
-if (n >= implementors_limit) {
-return NULL;
-}
-ciInstanceKlass* impl = _implementors[n];
+ciInstanceKlass* ciInstanceKlass::implementor() {
+ciInstanceKlass* impl = _implementor;
 if (impl == NULL) {
-if (_nof_implementors > implementors_limit) {
-return NULL;
-}
 // Go into the VM to fetch the implementor.
 {
 VM_ENTRY_MARK;
-klassOop k = get_instanceKlass()->implementor(n);
+klassOop k = get_instanceKlass()->implementor();
 if (k != NULL) {
-impl = CURRENT_THREAD_ENV->get_object(k)->as_instance_klass();
+if (k == get_instanceKlass()->as_klassOop()) {
+// More than one implementors. Use 'this' in this case.
+impl = this;
+} else {
+impl = CURRENT_THREAD_ENV->get_object(k)->as_instance_klass();
+}
 }
 }
 // Memoize this result.
 if (!is_shared()) {
-_implementors[n] = (impl == NULL)? this: impl;
+_implementor = impl;
 }
-} else if (impl == this) {
-impl = NULL; // memoized null result from a VM query
 }
 return impl;
 }
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -65,9 +65,11 @@ private:
 ciConstantPoolCache* _field_cache; // cached map index->field
 GrowableArray<ciField*>* _nonstatic_fields;

-enum { implementors_limit = instanceKlass::implementors_limit };
-ciInstanceKlass* _implementors[implementors_limit];
-jint _nof_implementors;
+// The possible values of the _implementor fall into following three cases:
+// NULL: no implementor.
+// A ciInstanceKlass that's not itself: one implementor.
+// Itsef: more than one implementors.
+ciInstanceKlass* _implementor;

 GrowableArray<ciField*>* _non_static_fields;

@@ -97,7 +99,6 @@ protected:

 void compute_shared_init_state();
 bool compute_shared_has_subklass();
-int compute_shared_nof_implementors();
 int compute_nonstatic_fields();
 GrowableArray<ciField*>* compute_nonstatic_fields_impl(GrowableArray<ciField*>* super_fields);

@@ -158,10 +159,17 @@ public:
 assert(is_loaded(), "must be loaded");
 return _nonstatic_oop_map_size; }
 ciInstanceKlass* super();
 jint nof_implementors() {
+ciInstanceKlass* impl;
 assert(is_loaded(), "must be loaded");
-if (_is_shared) return compute_shared_nof_implementors();
-return _nof_implementors;
+impl = implementor();
+if (impl == NULL) {
+return 0;
+} else if (impl != this) {
+return 1;
+} else {
+return 2;
+}
 }

 ciInstanceKlass* get_canonical_holder(int offset);
@@ -207,7 +215,7 @@ public:
 // but consider adding to vmSymbols.hpp instead.

 bool is_leaf_type();
-ciInstanceKlass* implementor(int n);
+ciInstanceKlass* implementor();

 // Is the defining class loader of this class the default loader?
 bool uses_default_loader();
@@ -3354,6 +3354,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
 klassOop ik = oopFactory::new_instanceKlass(name, vtable_size, itable_size,
 static_field_size,
 total_oop_map_count,
+access_flags,
 rt, CHECK_(nullHandle));
 instanceKlassHandle this_klass (THREAD, ik);

@@ -3362,7 +3363,6 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
 "sanity");

 // Fill in information already parsed
-this_klass->set_access_flags(access_flags);
 this_klass->set_should_verify_class(verify);
 jint lh = Klass::instance_layout_helper(instance_size, false);
 this_klass->set_layout_helper(lh);
@@ -27,6 +27,7 @@

 #include "oops/symbol.hpp"
 #include "memory/iterator.hpp"
+#include "trace/traceMacros.hpp"

 // The class vmSymbols is a name space for fast lookup of
 // symbols commonly used in the VM.
@@ -424,6 +425,7 @@
 template(throwable_throwable_signature, "(Ljava/lang/Throwable;)Ljava/lang/Throwable;") \
 template(class_void_signature, "(Ljava/lang/Class;)V") \
 template(class_int_signature, "(Ljava/lang/Class;)I") \
+template(class_long_signature, "(Ljava/lang/Class;)J") \
 template(class_boolean_signature, "(Ljava/lang/Class;)Z") \
 template(throwable_string_void_signature, "(Ljava/lang/Throwable;Ljava/lang/String;)V") \
 template(string_array_void_signature, "([Ljava/lang/String;)V") \
@@ -539,10 +541,12 @@
 template(serializePropertiesToByteArray_signature, "()[B") \
 template(serializeAgentPropertiesToByteArray_name, "serializeAgentPropertiesToByteArray") \
 template(classRedefinedCount_name, "classRedefinedCount") \
+\
+/* trace signatures */ \
+TRACE_TEMPLATES(template) \
+\
 /*end*/

-
-
 // Here are all the intrinsics known to the runtime and the CI.
 // Each intrinsic consists of a public enum name (like _hashCode),
 // followed by a specification of its klass, name, and signature:
@@ -648,6 +652,8 @@
 do_intrinsic(_nanoTime, java_lang_System, nanoTime_name, void_long_signature, F_S) \
 do_name( nanoTime_name, "nanoTime") \
 \
+TRACE_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias) \
+\
 do_intrinsic(_arraycopy, java_lang_System, arraycopy_name, arraycopy_signature, F_S) \
 do_name( arraycopy_name, "arraycopy") \
 do_signature(arraycopy_signature, "(Ljava/lang/Object;ILjava/lang/Object;II)V") \
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -1033,21 +1033,25 @@ klassOop ClassHierarchyWalker::find_witness_anywhere(klassOop context_type,
 // (Old CHA had the same limitation.)
 return context_type;
 }
-for (int i = 0; i < nof_impls; i++) {
-klassOop impl = instanceKlass::cast(context_type)->implementor(i);
-if (impl == NULL) {
-// implementors array overflowed => no exact info.
+if (nof_impls > 0) {
+klassOop impl = instanceKlass::cast(context_type)->implementor();
+assert(impl != NULL, "just checking");
+// If impl is the same as the context_type, then more than one
+// implementor has seen. No exact info in this case.
+if (impl == context_type) {
 return context_type; // report an inexact witness to this sad affair
 }
 if (do_counts)
 { NOT_PRODUCT(deps_find_witness_steps++); }
 if (is_participant(impl)) {
-if (participants_hide_witnesses) continue;
-// else fall through to process this guy's subclasses
+if (!participants_hide_witnesses) {
+ADD_SUBCLASS_CHAIN(impl);
+}
 } else if (is_witness(impl) && !ignore_witness(impl)) {
 return impl;
+} else {
+ADD_SUBCLASS_CHAIN(impl);
 }
-ADD_SUBCLASS_CHAIN(impl);
 }

 // Recursively process each non-trivial sibling chain.
@@ -1174,8 +1178,9 @@ klassOop Dependencies::check_leaf_type(klassOop ctxk) {
 } else if (ctx->nof_implementors() != 0) {
 // if it is an interface, it must be unimplemented
 // (if it is not an interface, nof_implementors is always zero)
-klassOop impl = ctx->implementor(0);
-return (impl != NULL)? impl: ctxk;
+klassOop impl = ctx->implementor();
+assert(impl != NULL, "must be set");
+return impl;
 } else {
 return NULL;
 }
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -2444,7 +2444,7 @@ class VerifyAllOopsClosure: public OopClosure {
 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
 };

-void CompactibleFreeListSpace::verify(bool ignored) const {
+void CompactibleFreeListSpace::verify() const {
 assert_lock_strong(&_freelistLock);
 verify_objects_initialized();
 MemRegion span = _collector->_span;
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -492,7 +492,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 void print() const;
 void print_on(outputStream* st) const;
 void prepare_for_verify();
-void verify(bool allow_dirty) const;
+void verify() const;
 void verifyFreeLists() const PRODUCT_RETURN;
 void verifyIndexedFreeLists() const;
 void verifyIndexedFreeList(size_t size) const;
@@ -3109,21 +3109,21 @@ ConcurrentMarkSweepGeneration::prepare_for_verify() {
 }

 void
-ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
+ConcurrentMarkSweepGeneration::verify() {
 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
 // are not called when the heap is verified during universe initialization and
 // at vm shutdown.
 if (freelistLock()->owned_by_self()) {
-cmsSpace()->verify(false /* ignored */);
+cmsSpace()->verify();
 } else {
 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
-cmsSpace()->verify(false /* ignored */);
+cmsSpace()->verify();
 }
 }

-void CMSCollector::verify(bool allow_dirty /* ignored */) {
-_cmsGen->verify(allow_dirty);
-_permGen->verify(allow_dirty);
+void CMSCollector::verify() {
+_cmsGen->verify();
+_permGen->verify();
 }

 #ifndef PRODUCT
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -988,7 +988,7 @@ class CMSCollector: public CHeapObj {
 CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

 // debugging
-void verify(bool);
+void verify();
 bool verify_after_remark();
 void verify_ok_to_terminate() const PRODUCT_RETURN;
 void verify_work_stacks_empty() const PRODUCT_RETURN;
@@ -1279,7 +1279,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {

 // Debugging
 void prepare_for_verify();
-void verify(bool allow_dirty);
+void verify();
 void print_statistics() PRODUCT_RETURN;

 // Performance Counters support
@@ -29,102 +29,6 @@
 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
 #include "memory/space.inline.hpp"

-CSetChooserCache::CSetChooserCache() {
-for (int i = 0; i < CacheLength; ++i)
-_cache[i] = NULL;
-clear();
-}
-
-void CSetChooserCache::clear() {
-_occupancy = 0;
-_first = 0;
-for (int i = 0; i < CacheLength; ++i) {
-HeapRegion *hr = _cache[i];
-if (hr != NULL)
-hr->set_sort_index(-1);
-_cache[i] = NULL;
-}
-}
-
-#ifndef PRODUCT
-bool CSetChooserCache::verify() {
-guarantee(false, "CSetChooserCache::verify(): don't call this any more");
-
-int index = _first;
-HeapRegion *prev = NULL;
-for (int i = 0; i < _occupancy; ++i) {
-guarantee(_cache[index] != NULL, "cache entry should not be empty");
-HeapRegion *hr = _cache[index];
-guarantee(!hr->is_young(), "should not be young!");
-if (prev != NULL) {
-guarantee(prev->gc_efficiency() >= hr->gc_efficiency(),
-"cache should be correctly ordered");
-}
-guarantee(hr->sort_index() == get_sort_index(index),
-"sort index should be correct");
-index = trim_index(index + 1);
-prev = hr;
-}
-
-for (int i = 0; i < (CacheLength - _occupancy); ++i) {
-guarantee(_cache[index] == NULL, "cache entry should be empty");
-index = trim_index(index + 1);
-}
-
-guarantee(index == _first, "we should have reached where we started from");
-return true;
-}
-#endif // PRODUCT
-
-void CSetChooserCache::insert(HeapRegion *hr) {
-guarantee(false, "CSetChooserCache::insert(): don't call this any more");
-
-assert(!is_full(), "cache should not be empty");
-hr->calc_gc_efficiency();
-
-int empty_index;
-if (_occupancy == 0) {
-empty_index = _first;
-} else {
-empty_index = trim_index(_first + _occupancy);
-assert(_cache[empty_index] == NULL, "last slot should be empty");
-int last_index = trim_index(empty_index - 1);
-HeapRegion *last = _cache[last_index];
-assert(last != NULL,"as the cache is not empty, last should not be empty");
-while (empty_index != _first &&
-last->gc_efficiency() < hr->gc_efficiency()) {
-_cache[empty_index] = last;
-last->set_sort_index(get_sort_index(empty_index));
-empty_index = last_index;
-last_index = trim_index(last_index - 1);
-last = _cache[last_index];
-}
-}
-_cache[empty_index] = hr;
-hr->set_sort_index(get_sort_index(empty_index));
-
-++_occupancy;
-assert(verify(), "cache should be consistent");
-}
-
-HeapRegion *CSetChooserCache::remove_first() {
-guarantee(false, "CSetChooserCache::remove_first(): "
-"don't call this any more");
-
-if (_occupancy > 0) {
-assert(_cache[_first] != NULL, "cache should have at least one region");
-HeapRegion *ret = _cache[_first];
-_cache[_first] = NULL;
-ret->set_sort_index(-1);
---_occupancy;
-_first = trim_index(_first + 1);
-assert(verify(), "cache should be consistent");
-return ret;
-} else {
-return NULL;
-}
-}
-
 // Even though we don't use the GC efficiency in our heuristics as
 // much as we used to, we still order according to GC efficiency. This
 // will cause regions with a lot of live objects and large RSets to
@@ -134,7 +38,7 @@ HeapRegion *CSetChooserCache::remove_first() {
 // the ones we'll skip are ones with both large RSets and a lot of
 // live objects, not the ones with just a lot of live objects if we
 // ordered according to the amount of reclaimable bytes per region.
-static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
+static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
 if (hr1 == NULL) {
 if (hr2 == NULL) {
 return 0;
@@ -156,8 +60,8 @@ static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
 }
 }

-static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
-return orderRegions(*hr1p, *hr2p);
+static int order_regions(HeapRegion** hr1p, HeapRegion** hr2p) {
+return order_regions(*hr1p, *hr2p);
 }

 CollectionSetChooser::CollectionSetChooser() :
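The guarantees visible elsewhere in this patch (the removed cache verify's "prev->gc_efficiency() >= hr->gc_efficiency()" and the chooser verify's "order_regions(prev, curr) != 1") suggest that order_regions() keeps candidate regions sorted by descending GC efficiency, with NULL entries pushed to the end. The following is a self-contained sketch of that ordering, not the patch's code; Region is a stand-in for HeapRegion and the NULL handling here is an assumption inferred from the hunks above.

#include <cstddef>

struct Region { double gc_efficiency; };   // stand-in for HeapRegion

// qsort-style comparator: negative means r1 sorts before r2.
static int order_regions_sketch(const Region* r1, const Region* r2) {
  if (r1 == NULL) {
    return (r2 == NULL) ? 0 : 1;           // NULL entries sort last (assumed)
  } else if (r2 == NULL) {
    return -1;
  }
  if (r1->gc_efficiency > r2->gc_efficiency) return -1;  // more efficient first
  if (r1->gc_efficiency < r2->gc_efficiency) return 1;
  return 0;
}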
@@ -175,105 +79,74 @@ CollectionSetChooser::CollectionSetChooser() :
 //
 // Note: containing object is allocated on C heap since it is CHeapObj.
 //
-_markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
+_regions((ResourceObj::set_allocation_type((address) &_regions,
 ResourceObj::C_HEAP),
 100), true /* C_Heap */),
-_curr_index(0), _length(0),
-_regionLiveThresholdBytes(0), _remainingReclaimableBytes(0),
-_first_par_unreserved_idx(0) {
-_regionLiveThresholdBytes =
+_curr_index(0), _length(0), _first_par_unreserved_idx(0),
+_region_live_threshold_bytes(0), _remaining_reclaimable_bytes(0) {
+_region_live_threshold_bytes =
 HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100;
 }

 #ifndef PRODUCT
-bool CollectionSetChooser::verify() {
-guarantee(_length >= 0, err_msg("_length: %d", _length));
-guarantee(0 <= _curr_index && _curr_index <= _length,
-err_msg("_curr_index: %d _length: %d", _curr_index, _length));
-int index = 0;
+void CollectionSetChooser::verify() {
+guarantee(_length <= regions_length(),
+err_msg("_length: %u regions length: %u", _length, regions_length()));
+guarantee(_curr_index <= _length,
+err_msg("_curr_index: %u _length: %u", _curr_index, _length));
+uint index = 0;
 size_t sum_of_reclaimable_bytes = 0;
 while (index < _curr_index) {
-guarantee(_markedRegions.at(index) == NULL,
+guarantee(regions_at(index) == NULL,
 "all entries before _curr_index should be NULL");
 index += 1;
 }
 HeapRegion *prev = NULL;
 while (index < _length) {
-HeapRegion *curr = _markedRegions.at(index++);
-guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
-int si = curr->sort_index();
+HeapRegion *curr = regions_at(index++);
+guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
 guarantee(!curr->is_young(), "should not be young!");
 guarantee(!curr->isHumongous(), "should not be humongous!");
-guarantee(si > -1 && si == (index-1), "sort index invariant");
 if (prev != NULL) {
-guarantee(orderRegions(prev, curr) != 1,
+guarantee(order_regions(prev, curr) != 1,
 err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
 prev->gc_efficiency(), curr->gc_efficiency()));
 }
 sum_of_reclaimable_bytes += curr->reclaimable_bytes();
 prev = curr;
 }
-guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes,
+guarantee(sum_of_reclaimable_bytes == _remaining_reclaimable_bytes,
 err_msg("reclaimable bytes inconsistent, "
 "remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT,
-_remainingReclaimableBytes, sum_of_reclaimable_bytes));
-return true;
+_remaining_reclaimable_bytes, sum_of_reclaimable_bytes));
 }
-#endif
+#endif // !PRODUCT

-void CollectionSetChooser::fillCache() {
-guarantee(false, "fillCache: don't call this any more");
-
-while (!_cache.is_full() && (_curr_index < _length)) {
-HeapRegion* hr = _markedRegions.at(_curr_index);
-assert(hr != NULL,
-err_msg("Unexpected NULL hr in _markedRegions at index %d",
-_curr_index));
-_curr_index += 1;
-assert(!hr->is_young(), "should not be young!");
-assert(hr->sort_index() == _curr_index-1, "sort_index invariant");
-_markedRegions.at_put(hr->sort_index(), NULL);
-_cache.insert(hr);
-assert(!_cache.is_empty(), "cache should not be empty");
-}
-assert(verify(), "cache should be consistent");
-}
-
-void CollectionSetChooser::sortMarkedHeapRegions() {
+void CollectionSetChooser::sort_regions() {
 // First trim any unused portion of the top in the parallel case.
 if (_first_par_unreserved_idx > 0) {
-if (G1PrintParCleanupStats) {
-gclog_or_tty->print(" Truncating _markedRegions from %d to %d.\n",
-_markedRegions.length(), _first_par_unreserved_idx);
-}
-assert(_first_par_unreserved_idx <= _markedRegions.length(),
+assert(_first_par_unreserved_idx <= regions_length(),
 "Or we didn't reserved enough length");
-_markedRegions.trunc_to(_first_par_unreserved_idx);
+regions_trunc_to(_first_par_unreserved_idx);
 }
-_markedRegions.sort(orderRegions);
-assert(_length <= _markedRegions.length(), "Requirement");
-assert(_length == 0 || _markedRegions.at(_length - 1) != NULL,
-"Testing _length");
-assert(_length == _markedRegions.length() ||
-_markedRegions.at(_length) == NULL, "Testing _length");
-if (G1PrintParCleanupStats) {
-gclog_or_tty->print_cr(" Sorted %d marked regions.", _length);
-}
-for (int i = 0; i < _length; i++) {
-assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
-_markedRegions.at(i)->set_sort_index(i);
+_regions.sort(order_regions);
+assert(_length <= regions_length(), "Requirement");
+#ifdef ASSERT
+for (uint i = 0; i < _length; i++) {
+assert(regions_at(i) != NULL, "Should be true by sorting!");
 }
+#endif // ASSERT
 if (G1PrintRegionLivenessInfo) {
 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
-for (int i = 0; i < _length; ++i) {
-HeapRegion* r = _markedRegions.at(i);
+for (uint i = 0; i < _length; ++i) {
+HeapRegion* r = regions_at(i);
 cl.doHeapRegion(r);
 }
 }
-assert(verify(), "CSet chooser verification");
+verify();
 }

-size_t CollectionSetChooser::calcMinOldCSetLength() {
+uint CollectionSetChooser::calc_min_old_cset_length() {
 // The min old CSet region bound is based on the maximum desired
 // number of mixed GCs after a cycle. I.e., even if some old regions
 // look expensive, we should add them to the CSet anyway to make
@@ -291,10 +164,10 @@ size_t CollectionSetChooser::calcMinOldCSetLength() {
 if (result * gc_num < region_num) {
 result += 1;
 }
-return result;
+return (uint) result;
 }

-size_t CollectionSetChooser::calcMaxOldCSetLength() {
+uint CollectionSetChooser::calc_max_old_cset_length() {
 // The max old CSet region bound is based on the threshold expressed
 // as a percentage of the heap size. I.e., it should bound the
 // number of old regions added to the CSet irrespective of how many
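The "result += 1" checks in these two hunks (presumably applied to an integer quotient computed just above each hunk) and the "(n_regions + chunk_size - 1) / chunk_size * chunk_size" expression further down are the usual integer round-up idioms. A small self-contained sketch, with illustrative helper names that are not part of the patch:

#include <cassert>
#include <cstdint>

// Integer ceiling division: the effect of "result = n / d; if (result * d < n) result += 1;".
static uint32_t ceil_div(uint32_t n, uint32_t d) {
  assert(d > 0);
  return (n + d - 1) / d;                 // e.g. ceil_div(10, 4) == 3
}

// Round n up to the next multiple of m: the effect of "(n + m - 1) / m * m".
static uint32_t round_up_to_multiple(uint32_t n, uint32_t m) {
  assert(m > 0);
  return ceil_div(n, m) * m;              // e.g. round_up_to_multiple(10, 4) == 12
}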
@@ -308,23 +181,23 @@ size_t CollectionSetChooser::calcMaxOldCSetLength() {
 if (100 * result < region_num * perc) {
 result += 1;
 }
-return result;
+return (uint) result;
 }

-void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
+void CollectionSetChooser::add_region(HeapRegion* hr) {
 assert(!hr->isHumongous(),
 "Humongous regions shouldn't be added to the collection set");
 assert(!hr->is_young(), "should not be young!");
-_markedRegions.append(hr);
+_regions.append(hr);
 _length++;
-_remainingReclaimableBytes += hr->reclaimable_bytes();
+_remaining_reclaimable_bytes += hr->reclaimable_bytes();
 hr->calc_gc_efficiency();
 }

-void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions,
-size_t chunkSize) {
+void CollectionSetChooser::prepare_for_par_region_addition(uint n_regions,
+uint chunk_size) {
 _first_par_unreserved_idx = 0;
-int n_threads = ParallelGCThreads;
+uint n_threads = (uint) ParallelGCThreads;
 if (UseDynamicNumberOfGCThreads) {
 assert(G1CollectedHeap::heap()->workers()->active_workers() > 0,
 "Should have been set earlier");
@@ -335,57 +208,46 @@ void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions,
 n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
 1U);
 }
-size_t max_waste = n_threads * chunkSize;
-// it should be aligned with respect to chunkSize
-size_t aligned_n_regions =
-(n_regions + (chunkSize - 1)) / chunkSize * chunkSize;
-assert( aligned_n_regions % chunkSize == 0, "should be aligned" );
-_markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL);
+uint max_waste = n_threads * chunk_size;
+// it should be aligned with respect to chunk_size
+uint aligned_n_regions = (n_regions + chunk_size - 1) / chunk_size * chunk_size;
+assert(aligned_n_regions % chunk_size == 0, "should be aligned");
+regions_at_put_grow(aligned_n_regions + max_waste - 1, NULL);
 }

-jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
-// Don't do this assert because this can be called at a point
-// where the loop up stream will not execute again but might
-// try to claim more chunks (loop test has not been done yet).
-// assert(_markedRegions.length() > _first_par_unreserved_idx,
-// "Striding beyond the marked regions");
-jint res = Atomic::add(n_regions, &_first_par_unreserved_idx);
-assert(_markedRegions.length() > res + n_regions - 1,
+uint CollectionSetChooser::claim_array_chunk(uint chunk_size) {
+uint res = (uint) Atomic::add((jint) chunk_size,
+(volatile jint*) &_first_par_unreserved_idx);
+assert(regions_length() > res + chunk_size - 1,
 "Should already have been expanded");
-return res - n_regions;
+return res - chunk_size;
 }

-void CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
-assert(_markedRegions.at(index) == NULL, "precondition");
+void CollectionSetChooser::set_region(uint index, HeapRegion* hr) {
+assert(regions_at(index) == NULL, "precondition");
 assert(!hr->is_young(), "should not be young!");
-_markedRegions.at_put(index, hr);
+regions_at_put(index, hr);
 hr->calc_gc_efficiency();
 }

-void CollectionSetChooser::updateTotals(jint region_num,
+void CollectionSetChooser::update_totals(uint region_num,
 size_t reclaimable_bytes) {
 // Only take the lock if we actually need to update the totals.
 if (region_num > 0) {
 assert(reclaimable_bytes > 0, "invariant");
 // We could have just used atomics instead of taking the
 // lock. However, we currently don't have an atomic add for size_t.
 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-_length += (int) region_num;
-_remainingReclaimableBytes += reclaimable_bytes;
+_length += region_num;
+_remaining_reclaimable_bytes += reclaimable_bytes;
 } else {
 assert(reclaimable_bytes == 0, "invariant");
 }
 }

-void CollectionSetChooser::clearMarkedHeapRegions() {
-for (int i = 0; i < _markedRegions.length(); i++) {
-HeapRegion* r = _markedRegions.at(i);
-if (r != NULL) {
-r->set_sort_index(-1);
-}
-}
-_markedRegions.clear();
+void CollectionSetChooser::clear() {
+_regions.clear();
 _curr_index = 0;
 _length = 0;
-_remainingReclaimableBytes = 0;
+_remaining_reclaimable_bytes = 0;
 };
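The new claim_array_chunk() above hands out disjoint index chunks to parallel workers with a single atomic add on _first_par_unreserved_idx. Below is a self-contained sketch of the same idiom in standard C++, not HotSpot code; the class and member names are illustrative. Note the return-value convention: HotSpot's Atomic::add() returns the new value, which is why the patch returns "res - chunk_size", whereas std::atomic's fetch_add() already returns the previous value.

#include <atomic>
#include <cstdint>

class ChunkClaimer {
  std::atomic<uint32_t> _next_unreserved{0};   // plays the role of _first_par_unreserved_idx
public:
  // Each caller receives the starting index of a disjoint chunk_size-sized chunk.
  uint32_t claim(uint32_t chunk_size) {
    return _next_unreserved.fetch_add(chunk_size, std::memory_order_relaxed);
  }
};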
@@ -28,77 +28,42 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "utilities/growableArray.hpp"

-class CSetChooserCache VALUE_OBJ_CLASS_SPEC {
-private:
-enum {
-CacheLength = 16
-} PrivateConstants;
-
-HeapRegion* _cache[CacheLength];
-int _occupancy; // number of regions in cache
-int _first; // (index of) "first" region in the cache
-
-// adding CacheLength to deal with negative values
-inline int trim_index(int index) {
-return (index + CacheLength) % CacheLength;
-}
-
-inline int get_sort_index(int index) {
-return -index-2;
-}
-inline int get_index(int sort_index) {
-return -sort_index-2;
-}
-
-public:
-CSetChooserCache(void);
-
-inline int occupancy(void) { return _occupancy; }
-inline bool is_full() { return _occupancy == CacheLength; }
-inline bool is_empty() { return _occupancy == 0; }
-
-void clear(void);
-void insert(HeapRegion *hr);
-HeapRegion *remove_first(void);
-inline HeapRegion *get_first(void) {
-return _cache[_first];
-}
-
-#ifndef PRODUCT
-bool verify (void);
-bool region_in_cache(HeapRegion *hr) {
-int sort_index = hr->sort_index();
-if (sort_index < -1) {
-int index = get_index(sort_index);
-guarantee(index < CacheLength, "should be within bounds");
-return _cache[index] == hr;
-} else
-return 0;
-}
-#endif // PRODUCT
-};
-
 class CollectionSetChooser: public CHeapObj {

-GrowableArray<HeapRegion*> _markedRegions;
+GrowableArray<HeapRegion*> _regions;

+// Unfortunately, GrowableArray uses ints for length and indexes. To
+// avoid excessive casting in the rest of the class the following
+// wrapper methods are provided that use uints.
+
+uint regions_length() { return (uint) _regions.length(); }
+HeapRegion* regions_at(uint i) { return _regions.at((int) i); }
+void regions_at_put(uint i, HeapRegion* hr) {
+_regions.at_put((int) i, hr);
+}
+void regions_at_put_grow(uint i, HeapRegion* hr) {
+_regions.at_put_grow((int) i, hr);
+}
+void regions_trunc_to(uint i) { _regions.trunc_to((uint) i); }

 // The index of the next candidate old region to be considered for
 // addition to the CSet.
-int _curr_index;
+uint _curr_index;

 // The number of candidate old regions added to the CSet chooser.
-int _length;
+uint _length;

-CSetChooserCache _cache;
-jint _first_par_unreserved_idx;
+// Keeps track of the start of the next array chunk to be claimed by
+// parallel GC workers.
+uint _first_par_unreserved_idx;

 // If a region has more live bytes than this threshold, it will not
 // be added to the CSet chooser and will not be a candidate for
 // collection.
-size_t _regionLiveThresholdBytes;
+size_t _region_live_threshold_bytes;

 // The sum of reclaimable bytes over all the regions in the CSet chooser.
-size_t _remainingReclaimableBytes;
+size_t _remaining_reclaimable_bytes;

 public:

@@ -107,9 +72,9 @@ public:
 HeapRegion* peek() {
 HeapRegion* res = NULL;
 if (_curr_index < _length) {
-res = _markedRegions.at(_curr_index);
+res = regions_at(_curr_index);
 assert(res != NULL,
-err_msg("Unexpected NULL hr in _markedRegions at index %d",
+err_msg("Unexpected NULL hr in _regions at index %u",
 _curr_index));
 }
 return res;
@@ -121,90 +86,71 @@ public:
 void remove_and_move_to_next(HeapRegion* hr) {
 assert(hr != NULL, "pre-condition");
 assert(_curr_index < _length, "pre-condition");
-assert(_markedRegions.at(_curr_index) == hr, "pre-condition");
-hr->set_sort_index(-1);
-_markedRegions.at_put(_curr_index, NULL);
-assert(hr->reclaimable_bytes() <= _remainingReclaimableBytes,
+assert(regions_at(_curr_index) == hr, "pre-condition");
+regions_at_put(_curr_index, NULL);
+assert(hr->reclaimable_bytes() <= _remaining_reclaimable_bytes,
 err_msg("remaining reclaimable bytes inconsistent "
 "from region: "SIZE_FORMAT" remaining: "SIZE_FORMAT,
-hr->reclaimable_bytes(), _remainingReclaimableBytes));
-_remainingReclaimableBytes -= hr->reclaimable_bytes();
+hr->reclaimable_bytes(), _remaining_reclaimable_bytes));
+_remaining_reclaimable_bytes -= hr->reclaimable_bytes();
 _curr_index += 1;
 }

 CollectionSetChooser();

-void sortMarkedHeapRegions();
-void fillCache();
+void sort_regions();

 // Determine whether to add the given region to the CSet chooser or
 // not. Currently, we skip humongous regions (we never add them to
 // the CSet, we only reclaim them during cleanup) and regions whose
 // live bytes are over the threshold.
-bool shouldAdd(HeapRegion* hr) {
+bool should_add(HeapRegion* hr) {
 assert(hr->is_marked(), "pre-condition");
 assert(!hr->is_young(), "should never consider young regions");
 return !hr->isHumongous() &&
-hr->live_bytes() < _regionLiveThresholdBytes;
+hr->live_bytes() < _region_live_threshold_bytes;
 }

 // Calculate the minimum number of old regions we'll add to the CSet
 // during a mixed GC.
-size_t calcMinOldCSetLength();
+uint calc_min_old_cset_length();

 // Calculate the maximum number of old regions we'll add to the CSet
 // during a mixed GC.
-size_t calcMaxOldCSetLength();
+uint calc_max_old_cset_length();

 // Serial version.
-void addMarkedHeapRegion(HeapRegion *hr);
+void add_region(HeapRegion *hr);

-// Must be called before calls to getParMarkedHeapRegionChunk.
-// "n_regions" is the number of regions, "chunkSize" the chunk size.
-void prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize);
-// Returns the first index in a contiguous chunk of "n_regions" indexes
+// Must be called before calls to claim_array_chunk().
+// n_regions is the number of regions, chunk_size the chunk size.
+void prepare_for_par_region_addition(uint n_regions, uint chunk_size);
+// Returns the first index in a contiguous chunk of chunk_size indexes
 // that the calling thread has reserved. These must be set by the
-// calling thread using "setMarkedHeapRegion" (to NULL if necessary).
-jint getParMarkedHeapRegionChunk(jint n_regions);
+// calling thread using set_region() (to NULL if necessary).
+uint claim_array_chunk(uint chunk_size);
 // Set the marked array entry at index to hr. Careful to claim the index
 // first if in parallel.
-void setMarkedHeapRegion(jint index, HeapRegion* hr);
+void set_region(uint index, HeapRegion* hr);
 // Atomically increment the number of added regions by region_num
 // and the amount of reclaimable bytes by reclaimable_bytes.
-void updateTotals(jint region_num, size_t reclaimable_bytes);
+void update_totals(uint region_num, size_t reclaimable_bytes);

-void clearMarkedHeapRegions();
+void clear();

 // Return the number of candidate regions that remain to be collected.
-size_t remainingRegions() { return _length - _curr_index; }
+uint remaining_regions() { return _length - _curr_index; }

 // Determine whether the CSet chooser has more candidate regions or not.
-bool isEmpty() { return remainingRegions() == 0; }
+bool is_empty() { return remaining_regions() == 0; }

 // Return the reclaimable bytes that remain to be collected on
 // all the candidate regions in the CSet chooser.
-size_t remainingReclaimableBytes () { return _remainingReclaimableBytes; }
+size_t remaining_reclaimable_bytes() { return _remaining_reclaimable_bytes; }

-// Returns true if the used portion of "_markedRegions" is properly
+// Returns true if the used portion of "_regions" is properly
 // sorted, otherwise asserts false.
-#ifndef PRODUCT
-bool verify(void);
-bool regionProperlyOrdered(HeapRegion* r) {
-int si = r->sort_index();
-if (si > -1) {
-guarantee(_curr_index <= si && si < _length,
-err_msg("curr: %d sort index: %d: length: %d",
-_curr_index, si, _length));
-guarantee(_markedRegions.at(si) == r,
-err_msg("sort index: %d at: "PTR_FORMAT" r: "PTR_FORMAT,
-si, _markedRegions.at(si), r));
-} else {
-guarantee(si == -1, err_msg("sort index: %d", si));
-}
-return true;
-}
-#endif
+void verify() PRODUCT_RETURN;

 };

 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_COLLECTIONSETCHOOSER_HPP
@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
+#include "gc_implementation/g1/g1Log.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegion.inline.hpp"
@@ -402,8 +403,7 @@ uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
 return MAX2((n_par_threads + 2) / 4, 1U);
 }

-ConcurrentMark::ConcurrentMark(ReservedSpace rs,
-int max_regions) :
+ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
 _markBitMap1(rs, MinObjAlignment - 1),
 _markBitMap2(rs, MinObjAlignment - 1),

@@ -414,7 +414,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
 _cleanup_sleep_factor(0.0),
 _cleanup_task_overhead(1.0),
 _cleanup_list("Cleanup List"),
-_region_bm(max_regions, false /* in_resource_area*/),
+_region_bm((BitMap::idx_t) max_regions, false /* in_resource_area*/),
 _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
 CardTableModRefBS::card_shift,
 false /* in_resource_area*/),
@@ -496,7 +496,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
 _task_queues->register_queue(i, task_queue);

 _count_card_bitmaps[i] = BitMap(card_bm_size, false);
-_count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions);
+_count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions);

 _tasks[i] = new CMTask(i, this,
 _count_marked_bytes[i],
@@ -846,7 +846,7 @@ void ConcurrentMark::enter_first_sync_barrier(int task_num) {
 clear_marking_state(concurrent() /* clear_overflow */);
 force_overflow()->update();

-if (PrintGC) {
+if (G1Log::fine()) {
 gclog_or_tty->date_stamp(PrintGCDateStamps);
 gclog_or_tty->stamp(PrintGCTimeStamps);
 gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
@@ -1119,8 +1119,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
 HandleMark hm; // handle scope
 gclog_or_tty->print(" VerifyDuringGC:(before)");
 Universe::heap()->prepare_for_verify();
-Universe::verify(/* allow dirty */ true,
-/* silent */ false,
+Universe::verify(/* silent */ false,
 /* option */ VerifyOption_G1UsePrevMarking);
 }

@@ -1159,8 +1158,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
 HandleMark hm; // handle scope
 gclog_or_tty->print(" VerifyDuringGC:(after)");
 Universe::heap()->prepare_for_verify();
-Universe::verify(/* allow dirty */ true,
-/* silent */ false,
+Universe::verify(/* silent */ false,
 /* option */ VerifyOption_G1UseNextMarking);
 }
 assert(!restart_for_overflow(), "sanity");
@@ -1194,11 +1192,6 @@ class CalcLiveObjectsClosure: public HeapRegionClosure {
 BitMap* _region_bm;
 BitMap* _card_bm;

-// Debugging
-size_t _tot_words_done;
-size_t _tot_live;
-size_t _tot_used;
-
 size_t _region_marked_bytes;

 intptr_t _bottom_card_num;
@@ -1217,9 +1210,7 @@ public:
 CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
 BitMap* region_bm, BitMap* card_bm) :
 _bm(bm), _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
-_region_marked_bytes(0), _tot_words_done(0),
-_tot_live(0), _tot_used(0),
-_bottom_card_num(cm->heap_bottom_card_num()) { }
+_region_marked_bytes(0), _bottom_card_num(cm->heap_bottom_card_num()) { }

 // It takes a region that's not empty (i.e., it has at least one
 // live object in it and sets its corresponding bit on the region
@@ -1229,18 +1220,17 @@ public:
 void set_bit_for_region(HeapRegion* hr) {
 assert(!hr->continuesHumongous(), "should have filtered those out");

-size_t index = hr->hrs_index();
+BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
 if (!hr->startsHumongous()) {
 // Normal (non-humongous) case: just set the bit.
-_region_bm->par_at_put((BitMap::idx_t) index, true);
+_region_bm->par_at_put(index, true);
 } else {
 // Starts humongous case: calculate how many regions are part of
 // this humongous region and then set the bit range.
 G1CollectedHeap* g1h = G1CollectedHeap::heap();
 HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
-size_t end_index = last_hr->hrs_index() + 1;
-_region_bm->par_at_put_range((BitMap::idx_t) index,
-(BitMap::idx_t) end_index, true);
+BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1;
+_region_bm->par_at_put_range(index, end_index, true);
 }
 }

@@ -1265,9 +1255,6 @@ public:
 "start: "PTR_FORMAT", nextTop: "PTR_FORMAT", end: "PTR_FORMAT,
 start, nextTop, hr->end()));

-// Record the number of word's we'll examine.
-size_t words_done = (nextTop - start);
-
 // Find the first marked object at or after "start".
 start = _bm->getNextMarkedWordAddress(start, nextTop);

@@ -1346,19 +1333,10 @@ public:
 // it can be queried by a calling verificiation routine
 _region_marked_bytes = marked_bytes;

-_tot_live += hr->next_live_bytes();
-_tot_used += hr->used();
-_tot_words_done = words_done;
-
 return false;
 }

 size_t region_marked_bytes() const { return _region_marked_bytes; }

-// Debugging
-size_t tot_words_done() const { return _tot_words_done; }
-size_t tot_live() const { return _tot_live; }
-size_t tot_used() const { return _tot_used; }
 };

 // Heap region closure used for verifying the counting data
@@ -1419,7 +1397,7 @@ public:
 // Verify that _top_at_conc_count == ntams
 if (hr->top_at_conc_mark_count() != hr->next_top_at_mark_start()) {
 if (_verbose) {
-gclog_or_tty->print_cr("Region " SIZE_FORMAT ": top at conc count incorrect: "
+gclog_or_tty->print_cr("Region %u: top at conc count incorrect: "
 "expected " PTR_FORMAT ", actual: " PTR_FORMAT,
 hr->hrs_index(), hr->next_top_at_mark_start(),
 hr->top_at_conc_mark_count());
@@ -1435,7 +1413,7 @@ public:
 // we have missed accounting some objects during the actual marking.
 if (exp_marked_bytes > act_marked_bytes) {
 if (_verbose) {
-gclog_or_tty->print_cr("Region " SIZE_FORMAT ": marked bytes mismatch: "
+gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
 "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
 hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
 }
@@ -1446,15 +1424,16 @@ public:
 // (which was just calculated) region bit maps.
 // We're not OK if the bit in the calculated expected region
 // bitmap is set and the bit in the actual region bitmap is not.
-BitMap::idx_t index = (BitMap::idx_t)hr->hrs_index();
+BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();

 bool expected = _exp_region_bm->at(index);
 bool actual = _region_bm->at(index);
 if (expected && !actual) {
 if (_verbose) {
-gclog_or_tty->print_cr("Region " SIZE_FORMAT ": region bitmap mismatch: "
-"expected: %d, actual: %d",
-hr->hrs_index(), expected, actual);
+gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
+"expected: %s, actual: %s",
+hr->hrs_index(),
+BOOL_TO_STR(expected), BOOL_TO_STR(actual));
 }
 failures += 1;
 }
@@ -1472,9 +1451,10 @@ public:

 if (expected && !actual) {
 if (_verbose) {
-gclog_or_tty->print_cr("Region " SIZE_FORMAT ": card bitmap mismatch at " SIZE_FORMAT ": "
-"expected: %d, actual: %d",
-hr->hrs_index(), i, expected, actual);
+gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
+"expected: %s, actual: %s",
+hr->hrs_index(), i,
+BOOL_TO_STR(expected), BOOL_TO_STR(actual));
 }
 failures += 1;
 }
@@ -1575,10 +1555,6 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
 BitMap* _region_bm;
 BitMap* _card_bm;

-size_t _total_live_bytes;
-size_t _total_used_bytes;
-size_t _total_words_done;
-
 void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
 assert(start_idx <= last_idx, "sanity");

@@ -1604,18 +1580,17 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
 void set_bit_for_region(HeapRegion* hr) {
 assert(!hr->continuesHumongous(), "should have filtered those out");

-size_t index = hr->hrs_index();
+BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
 if (!hr->startsHumongous()) {
 // Normal (non-humongous) case: just set the bit.
-_region_bm->par_set_bit((BitMap::idx_t) index);
+_region_bm->par_set_bit(index);
 } else {
 // Starts humongous case: calculate how many regions are part of
 // this humongous region and then set the bit range.
 G1CollectedHeap* g1h = G1CollectedHeap::heap();
 HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
-size_t end_index = last_hr->hrs_index() + 1;
-_region_bm->par_at_put_range((BitMap::idx_t) index,
-(BitMap::idx_t) end_index, true);
+BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1;
+_region_bm->par_at_put_range(index, end_index, true);
 }
 }

@@ -1623,8 +1598,7 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
 FinalCountDataUpdateClosure(ConcurrentMark* cm,
 BitMap* region_bm,
 BitMap* card_bm) :
-_cm(cm), _region_bm(region_bm), _card_bm(card_bm),
-_total_words_done(0), _total_live_bytes(0), _total_used_bytes(0) { }
+_cm(cm), _region_bm(region_bm), _card_bm(card_bm) { }

 bool doHeapRegion(HeapRegion* hr) {

@@ -1646,8 +1620,6 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
 assert(hr->bottom() <= start && start <= hr->end() &&
 hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");

-size_t words_done = ntams - hr->bottom();
-
 if (start < ntams) {
 // Region was changed between remark and cleanup pauses
 // We need to add (ntams - start) to the marked bytes
@@ -1678,16 +1650,8 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
 set_bit_for_region(hr);
 }

-_total_words_done += words_done;
-_total_used_bytes += hr->used();
-_total_live_bytes += hr->next_marked_bytes();
-
 return false;
 }

-size_t total_words_done() const { return _total_words_done; }
-size_t total_live_bytes() const { return _total_live_bytes; }
-size_t total_used_bytes() const { return _total_used_bytes; }
 };

 class G1ParFinalCountTask: public AbstractGangTask {
@@ -1699,9 +1663,6 @@ protected:

 uint _n_workers;

-size_t *_live_bytes;
-size_t *_used_bytes;
-
 public:
 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
 : AbstractGangTask("G1 final counting"),
@@ -1709,8 +1670,7 @@ public:
 _actual_region_bm(region_bm), _actual_card_bm(card_bm),
 _n_workers(0) {
 // Use the value already set as the number of active threads
-// in the call to run_task(). Needed for the allocation of
-// _live_bytes and _used_bytes.
+// in the call to run_task().
 if (G1CollectedHeap::use_parallel_gc_threads()) {
 assert( _g1h->workers()->active_workers() > 0,
 "Should have been previously set");
@@ -1718,14 +1678,6 @@ public:
 } else {
 _n_workers = 1;
 }
-
-_live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
-_used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
-}
-
-~G1ParFinalCountTask() {
-FREE_C_HEAP_ARRAY(size_t, _live_bytes);
-FREE_C_HEAP_ARRAY(size_t, _used_bytes);
 }

 void work(uint worker_id) {
@@ -1743,23 +1695,6 @@ public:
 } else {
 _g1h->heap_region_iterate(&final_update_cl);
 }
-
-_live_bytes[worker_id] = final_update_cl.total_live_bytes();
-_used_bytes[worker_id] = final_update_cl.total_used_bytes();
-}
-
-size_t live_bytes() {
-size_t live_bytes = 0;
-for (uint i = 0; i < _n_workers; ++i)
-live_bytes += _live_bytes[i];
-return live_bytes;
-}
-
-size_t used_bytes() {
-size_t used_bytes = 0;
-for (uint i = 0; i < _n_workers; ++i)
-used_bytes += _used_bytes[i];
-return used_bytes;
 }
 };

@@ -1769,7 +1704,7 @@ class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
 G1CollectedHeap* _g1;
 int _worker_num;
 size_t _max_live_bytes;
-size_t _regions_claimed;
+uint _regions_claimed;
 size_t _freed_bytes;
 FreeRegionList* _local_cleanup_list;
 OldRegionSet* _old_proxy_set;
@@ -1822,7 +1757,7 @@ public:
 }

 size_t max_live_bytes() { return _max_live_bytes; }
-size_t regions_claimed() { return _regions_claimed; }
+uint regions_claimed() { return _regions_claimed; }
 double claimed_region_time_sec() { return _claimed_region_time; }
 double max_region_time_sec() { return _max_region_time; }
 };
@@ -1894,15 +1829,6 @@ public:

 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
 }
-double end = os::elapsedTime();
-if (G1PrintParCleanupStats) {
-gclog_or_tty->print(" Worker thread %d [%8.3f..%8.3f = %8.3f ms] "
-"claimed %u regions (tot = %8.3f ms, max = %8.3f ms).\n",
-worker_id, start, end, (end-start)*1000.0,
-g1_note_end.regions_claimed(),
-g1_note_end.claimed_region_time_sec()*1000.0,
-g1_note_end.max_region_time_sec()*1000.0);
-}
 }
 size_t max_live_bytes() { return _max_live_bytes; }
 size_t freed_bytes() { return _freed_bytes; }
@@ -1949,8 +1875,7 @@ void ConcurrentMark::cleanup() {
 HandleMark hm; // handle scope
 gclog_or_tty->print(" VerifyDuringGC:(before)");
 Universe::heap()->prepare_for_verify();
-Universe::verify(/* allow dirty */ true,
-/* silent */ false,
+Universe::verify(/* silent */ false,
 /* option */ VerifyOption_G1UsePrevMarking);
 }

@@ -2014,29 +1939,11 @@ void ConcurrentMark::cleanup() {
 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
 }

-size_t known_garbage_bytes =
-g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
-g1p->set_known_garbage_bytes(known_garbage_bytes);
-
 size_t start_used_bytes = g1h->used();
 g1h->set_marking_complete();

-ergo_verbose4(ErgoConcCycles,
-"finish cleanup",
-ergo_format_byte("occupancy")
-ergo_format_byte("capacity")
-ergo_format_byte_perc("known garbage"),
-start_used_bytes, g1h->capacity(),
-known_garbage_bytes,
-((double) known_garbage_bytes / (double) g1h->capacity()) * 100.0);
-
 double count_end = os::elapsedTime();
 double this_final_counting_time = (count_end - start);
-if (G1PrintParCleanupStats) {
-gclog_or_tty->print_cr("Cleanup:");
-gclog_or_tty->print_cr(" Finalize counting: %8.3f ms",
-this_final_counting_time*1000.0);
-}
 _total_counting_time += this_final_counting_time;

 if (G1PrintRegionLivenessInfo) {
@@ -2050,7 +1957,6 @@ void ConcurrentMark::cleanup() {
 g1h->reset_gc_time_stamp();

 // Note end of marking in all heap regions.
-double note_end_start = os::elapsedTime();
 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
 if (G1CollectedHeap::use_parallel_gc_threads()) {
 g1h->set_par_threads((int)n_workers);
@@ -2069,11 +1975,6 @@ void ConcurrentMark::cleanup() {
 // regions that there will be more free regions coming soon.
 g1h->set_free_regions_coming();
 }
-double note_end_end = os::elapsedTime();
-if (G1PrintParCleanupStats) {
-gclog_or_tty->print_cr(" note end of marking: %8.3f ms.",
-(note_end_end - note_end_start)*1000.0);
-}

 // call below, since it affects the metric by which we sort the heap
 // regions.
@@ -2105,16 +2006,13 @@ void ConcurrentMark::cleanup() {
 double end = os::elapsedTime();
 _cleanup_times.add((end - start) * 1000.0);

-if (PrintGC || PrintGCDetails) {
+if (G1Log::fine()) {
 g1h->print_size_transition(gclog_or_tty,
 start_used_bytes,
 g1h->used(),
 g1h->capacity());
 }

-size_t cleaned_up_bytes = start_used_bytes - g1h->used();
-g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
-
 // Clean up will have freed any regions completely full of garbage.
 // Update the soft reference policy with the new heap occupancy.
 Universe::update_heap_info_at_gc();
@@ -2131,8 +2029,7 @@ void ConcurrentMark::cleanup() {
 HandleMark hm; // handle scope
 gclog_or_tty->print(" VerifyDuringGC:(after)");
 Universe::heap()->prepare_for_verify();
-Universe::verify(/* allow dirty */ true,
-/* silent */ false,
+Universe::verify(/* silent */ false,
 /* option */ VerifyOption_G1UsePrevMarking);
 }

@@ -2149,7 +2046,7 @@ void ConcurrentMark::completeCleanup() {

 if (G1ConcRegionFreeingVerbose) {
 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
-"cleanup list has "SIZE_FORMAT" entries",
+"cleanup list has %u entries",
 _cleanup_list.length());
 }

@@ -2171,9 +2068,8 @@ void ConcurrentMark::completeCleanup() {
 _cleanup_list.is_empty()) {
 if (G1ConcRegionFreeingVerbose) {
 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
-"appending "SIZE_FORMAT" entries to the "
-"secondary_free_list, clean list still has "
-SIZE_FORMAT" entries",
+"appending %u entries to the secondary_free_list, "
+"cleanup list still has %u entries",
 tmp_free_list.length(),
 _cleanup_list.length());
 }
@ -2446,11 +2342,10 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
|
|||||||
// Inner scope to exclude the cleaning of the string and symbol
|
// Inner scope to exclude the cleaning of the string and symbol
|
||||||
// tables from the displayed time.
|
// tables from the displayed time.
|
||||||
{
|
{
|
||||||
bool verbose = PrintGC && PrintGCDetails;
|
if (G1Log::finer()) {
|
||||||
if (verbose) {
|
|
||||||
gclog_or_tty->put(' ');
|
gclog_or_tty->put(' ');
|
||||||
}
|
}
|
||||||
TraceTime t("GC ref-proc", verbose, false, gclog_or_tty);
|
TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty);
|
||||||
|
|
||||||
ReferenceProcessor* rp = g1h->ref_processor_cm();
|
ReferenceProcessor* rp = g1h->ref_processor_cm();
|
||||||
|
|
||||||
@ -3144,7 +3039,7 @@ class AggregateCountDataHRClosure: public HeapRegionClosure {
|
|||||||
assert(limit_idx <= end_idx, "or else use atomics");
|
assert(limit_idx <= end_idx, "or else use atomics");
|
||||||
|
|
||||||
// Aggregate the "stripe" in the count data associated with hr.
|
// Aggregate the "stripe" in the count data associated with hr.
|
||||||
size_t hrs_index = hr->hrs_index();
|
uint hrs_index = hr->hrs_index();
|
||||||
size_t marked_bytes = 0;
|
size_t marked_bytes = 0;
|
||||||
|
|
||||||
for (int i = 0; (size_t)i < _max_task_num; i += 1) {
|
for (int i = 0; (size_t)i < _max_task_num; i += 1) {
|
||||||
@ -3252,7 +3147,7 @@ void ConcurrentMark::clear_all_count_data() {
|
|||||||
// of the final counting task.
|
// of the final counting task.
|
||||||
_region_bm.clear();
|
_region_bm.clear();
|
||||||
|
|
||||||
size_t max_regions = _g1h->max_regions();
|
uint max_regions = _g1h->max_regions();
|
||||||
assert(_max_task_num != 0, "unitialized");
|
assert(_max_task_num != 0, "unitialized");
|
||||||
|
|
||||||
for (int i = 0; (size_t) i < _max_task_num; i += 1) {
|
for (int i = 0; (size_t) i < _max_task_num; i += 1) {
|
||||||
@ -3262,7 +3157,7 @@ void ConcurrentMark::clear_all_count_data() {
|
|||||||
assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
|
assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
|
||||||
assert(marked_bytes_array != NULL, "uninitialized");
|
assert(marked_bytes_array != NULL, "uninitialized");
|
||||||
|
|
||||||
memset(marked_bytes_array, 0, (max_regions * sizeof(size_t)));
|
memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
|
||||||
task_card_bm->clear();
|
task_card_bm->clear();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -636,7 +636,7 @@ public:
  return _task_queues->steal(task_num, hash_seed, obj);
  }

- ConcurrentMark(ReservedSpace rs, int max_regions);
+ ConcurrentMark(ReservedSpace rs, uint max_regions);
  ~ConcurrentMark();

  ConcurrentMarkThread* cmThread() { return _cmThread; }
@@ -49,7 +49,7 @@ inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
  HeapWord* start = mr.start();
  HeapWord* last = mr.last();
  size_t region_size_bytes = mr.byte_size();
- size_t index = hr->hrs_index();
+ uint index = hr->hrs_index();

  assert(!hr->continuesHumongous(), "should not be HC region");
  assert(hr == g1h->heap_region_containing(start), "sanity");
@@ -26,6 +26,7 @@
  #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+ #include "gc_implementation/g1/g1Log.hpp"
  #include "gc_implementation/g1/g1MMUTracker.hpp"
  #include "gc_implementation/g1/vm_operations_g1.hpp"
  #include "memory/resourceArea.hpp"
@@ -104,7 +105,7 @@ void ConcurrentMarkThread::run() {

  double scan_start = os::elapsedTime();
  if (!cm()->has_aborted()) {
- if (PrintGC) {
+ if (G1Log::fine()) {
  gclog_or_tty->date_stamp(PrintGCDateStamps);
  gclog_or_tty->stamp(PrintGCTimeStamps);
  gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
@@ -113,7 +114,7 @@ void ConcurrentMarkThread::run() {
  _cm->scanRootRegions();

  double scan_end = os::elapsedTime();
- if (PrintGC) {
+ if (G1Log::fine()) {
  gclog_or_tty->date_stamp(PrintGCDateStamps);
  gclog_or_tty->stamp(PrintGCTimeStamps);
  gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf]",
@@ -122,7 +123,7 @@ void ConcurrentMarkThread::run() {
  }

  double mark_start_sec = os::elapsedTime();
- if (PrintGC) {
+ if (G1Log::fine()) {
  gclog_or_tty->date_stamp(PrintGCDateStamps);
  gclog_or_tty->stamp(PrintGCTimeStamps);
  gclog_or_tty->print_cr("[GC concurrent-mark-start]");
@@ -146,7 +147,7 @@ void ConcurrentMarkThread::run() {
  os::sleep(current_thread, sleep_time_ms, false);
  }

- if (PrintGC) {
+ if (G1Log::fine()) {
  gclog_or_tty->date_stamp(PrintGCDateStamps);
  gclog_or_tty->stamp(PrintGCTimeStamps);
  gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf sec]",
@@ -165,7 +166,7 @@ void ConcurrentMarkThread::run() {
  }

  if (cm()->restart_for_overflow()) {
- if (PrintGC) {
+ if (G1Log::fine()) {
  gclog_or_tty->date_stamp(PrintGCDateStamps);
  gclog_or_tty->stamp(PrintGCTimeStamps);
  gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]");
@@ -211,7 +212,7 @@ void ConcurrentMarkThread::run() {
  // reclaimed by cleanup.

  double cleanup_start_sec = os::elapsedTime();
- if (PrintGC) {
+ if (G1Log::fine()) {
  gclog_or_tty->date_stamp(PrintGCDateStamps);
  gclog_or_tty->stamp(PrintGCTimeStamps);
  gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
@@ -232,7 +233,7 @@ void ConcurrentMarkThread::run() {
  g1h->reset_free_regions_coming();

  double cleanup_end_sec = os::elapsedTime();
- if (PrintGC) {
+ if (G1Log::fine()) {
  gclog_or_tty->date_stamp(PrintGCDateStamps);
  gclog_or_tty->stamp(PrintGCTimeStamps);
  gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf]",
@@ -273,7 +274,7 @@ void ConcurrentMarkThread::run() {
  _sts.leave();

  if (cm()->has_aborted()) {
- if (PrintGC) {
+ if (G1Log::fine()) {
  gclog_or_tty->date_stamp(PrintGCDateStamps);
  gclog_or_tty->stamp(PrintGCTimeStamps);
  gclog_or_tty->print_cr("[GC concurrent-mark-abort]");
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -140,7 +140,7 @@ HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
  }

  void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) {
- msg->append("[%s] %s c: "SIZE_FORMAT" b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
+ msg->append("[%s] %s c: %u b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
  _name, message, _count, BOOL_TO_STR(_bot_updates),
  _alloc_region, _used_bytes_before);
  }
@@ -215,7 +215,7 @@ void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) {
  jio_snprintf(rest_buffer, buffer_length, "");
  }

- tty->print_cr("[%s] "SIZE_FORMAT" %s : %s %s",
+ tty->print_cr("[%s] %u %s : %s %s",
  _name, _count, hr_buffer, str, rest_buffer);
  }
  }
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@ private:
  // the region that is re-used using the set() method. This count can
  // be used in any heuristics that might want to bound how many
  // distinct regions this object can used during an active interval.
- size_t _count;
+ uint _count;

  // When we set up a new active region we save its used bytes in this
  // field so that, when we retire it, we can calculate how much space
@@ -136,7 +136,7 @@ public:
  return (_alloc_region == _dummy_region) ? NULL : _alloc_region;
  }

- size_t count() { return _count; }
+ uint count() { return _count; }

  // The following two are the building blocks for the allocation method.

@@ -33,6 +33,7 @@
  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  #include "gc_implementation/g1/g1EvacFailure.hpp"
+ #include "gc_implementation/g1/g1Log.hpp"
  #include "gc_implementation/g1/g1MarkSweep.hpp"
  #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  #include "gc_implementation/g1/g1RemSet.inline.hpp"
@@ -233,7 +234,7 @@ void YoungList::empty_list() {
  bool YoungList::check_list_well_formed() {
  bool ret = true;

- size_t length = 0;
+ uint length = 0;
  HeapRegion* curr = _head;
  HeapRegion* last = NULL;
  while (curr != NULL) {
@@ -252,7 +253,7 @@ bool YoungList::check_list_well_formed() {

  if (!ret) {
  gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
- gclog_or_tty->print_cr("### list has %d entries, _length is %d",
+ gclog_or_tty->print_cr("### list has %u entries, _length is %u",
  length, _length);
  }

@@ -263,7 +264,7 @@ bool YoungList::check_list_empty(bool check_sample) {
  bool ret = true;

  if (_length != 0) {
- gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
+ gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
  _length);
  ret = false;
  }
@@ -336,8 +337,7 @@ YoungList::reset_auxilary_lists() {
  _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
  young_index_in_cset += 1;
  }
- assert((size_t) young_index_in_cset == _survivor_length,
- "post-condition");
+ assert((uint) young_index_in_cset == _survivor_length, "post-condition");
  _g1h->g1_policy()->note_stop_adding_survivor_regions();

  _head = _survivor_head;
@@ -532,7 +532,7 @@ G1CollectedHeap::new_region_try_secondary_free_list() {
  if (!_secondary_free_list.is_empty()) {
  if (G1ConcRegionFreeingVerbose) {
  gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
- "secondary_free_list has "SIZE_FORMAT" entries",
+ "secondary_free_list has %u entries",
  _secondary_free_list.length());
  }
  // It looks as if there are free regions available on the
@@ -618,12 +618,12 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
  return res;
  }

- size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
+ uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
  size_t word_size) {
  assert(isHumongous(word_size), "word_size should be humongous");
  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

- size_t first = G1_NULL_HRS_INDEX;
+ uint first = G1_NULL_HRS_INDEX;
  if (num_regions == 1) {
  // Only one region to allocate, no need to go through the slower
  // path. The caller will attempt the expasion if this fails, so
@@ -649,7 +649,7 @@ size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
  if (free_regions() >= num_regions) {
  first = _hrs.find_contiguous(num_regions);
  if (first != G1_NULL_HRS_INDEX) {
- for (size_t i = first; i < first + num_regions; ++i) {
+ for (uint i = first; i < first + num_regions; ++i) {
  HeapRegion* hr = region_at(i);
  assert(hr->is_empty(), "sanity");
  assert(is_on_master_free_list(hr), "sanity");
@@ -663,15 +663,15 @@ size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
  }

  HeapWord*
- G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
- size_t num_regions,
+ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
+ uint num_regions,
  size_t word_size) {
  assert(first != G1_NULL_HRS_INDEX, "pre-condition");
  assert(isHumongous(word_size), "word_size should be humongous");
  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

  // Index of last region in the series + 1.
- size_t last = first + num_regions;
+ uint last = first + num_regions;

  // We need to initialize the region(s) we just discovered. This is
  // a bit tricky given that it can happen concurrently with
@@ -682,7 +682,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
  // a specific order.

  // The word size sum of all the regions we will allocate.
- size_t word_size_sum = num_regions * HeapRegion::GrainWords;
+ size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
  assert(word_size <= word_size_sum, "sanity");

  // This will be the "starts humongous" region.
@@ -721,7 +721,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
  // Then, if there are any, we will set up the "continues
  // humongous" regions.
  HeapRegion* hr = NULL;
- for (size_t i = first + 1; i < last; ++i) {
+ for (uint i = first + 1; i < last; ++i) {
  hr = region_at(i);
  hr->set_continuesHumongous(first_hr);
  }
@@ -767,7 +767,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
  // last one) is actually used when we will free up the humongous
  // region in free_humongous_region().
  hr = NULL;
- for (size_t i = first + 1; i < last; ++i) {
+ for (uint i = first + 1; i < last; ++i) {
  hr = region_at(i);
  if ((i + 1) == last) {
  // last continues humongous region
@@ -803,14 +803,14 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {

  verify_region_sets_optional();

- size_t num_regions =
- round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
- size_t x_size = expansion_regions();
- size_t fs = _hrs.free_suffix();
- size_t first = humongous_obj_allocate_find_first(num_regions, word_size);
+ size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
+ uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
+ uint x_num = expansion_regions();
+ uint fs = _hrs.free_suffix();
+ uint first = humongous_obj_allocate_find_first(num_regions, word_size);
  if (first == G1_NULL_HRS_INDEX) {
  // The only thing we can do now is attempt expansion.
- if (fs + x_size >= num_regions) {
+ if (fs + x_num >= num_regions) {
  // If the number of regions we're trying to allocate for this
  // object is at most the number of regions in the free suffix,
  // then the call to humongous_obj_allocate_find_first() above
@@ -1255,10 +1255,10 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
  // Timing
  bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
  assert(!system_gc || explicit_gc, "invariant");
- gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
- TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+ gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
+ TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
- PrintGC, true, gclog_or_tty);
+ G1Log::fine(), true, gclog_or_tty);

  TraceCollectorStats tcs(g1mm()->full_collection_counters());
  TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
@@ -1290,8 +1290,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
  HandleMark hm; // Discard invalid handles created during verification
  gclog_or_tty->print(" VerifyBeforeGC:");
  prepare_for_verify();
- Universe::verify(/* allow dirty */ true,
- /* silent */ false,
+ Universe::verify(/* silent */ false,
  /* option */ VerifyOption_G1UsePrevMarking);

  }
@@ -1365,8 +1364,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
  HandleMark hm; // Discard invalid handles created during verification
  gclog_or_tty->print(" VerifyAfterGC:");
  prepare_for_verify();
- Universe::verify(/* allow dirty */ false,
- /* silent */ false,
+ Universe::verify(/* silent */ false,
  /* option */ VerifyOption_G1UsePrevMarking);

  }
@@ -1444,7 +1442,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
  heap_region_iterate(&rebuild_rs);
  }

- if (PrintGC) {
+ if (G1Log::fine()) {
  print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
  }

@@ -1782,7 +1780,7 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
  ReservedSpace::page_align_size_down(shrink_bytes);
  aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
  HeapRegion::GrainBytes);
- size_t num_regions_deleted = 0;
+ uint num_regions_deleted = 0;
  MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
  HeapWord* old_end = (HeapWord*) _g1_storage.high();
  assert(mr.end() == old_end, "post-condition");
@@ -1917,6 +1915,8 @@ jint G1CollectedHeap::initialize() {
  CollectedHeap::pre_initialize();
  os::enable_vtime();

+ G1Log::init();
+
  // Necessary to satisfy locking discipline assertions.

  MutexLocker x(Heap_lock);
@@ -2003,7 +2003,7 @@ jint G1CollectedHeap::initialize() {
  _reserved.set_start((HeapWord*)heap_rs.base());
  _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));

- _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
+ _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);

  // Create the gen rem set (and barrier set) for the entire reserved region.
  _rem_set = collector_policy()->create_rem_set(_reserved, 2);
@@ -2040,7 +2040,7 @@ jint G1CollectedHeap::initialize() {

  // 6843694 - ensure that the maximum region index can fit
  // in the remembered set structures.
- const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
+ const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  guarantee((max_regions() - 1) <= max_region_idx, "too many regions");

  size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
@@ -2056,13 +2056,14 @@ jint G1CollectedHeap::initialize() {
  _g1h = this;

  _in_cset_fast_test_length = max_regions();
- _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
+ _in_cset_fast_test_base =
+ NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length);

  // We're biasing _in_cset_fast_test to avoid subtracting the
  // beginning of the heap every time we want to index; basically
  // it's the same with what we do with the card table.
  _in_cset_fast_test = _in_cset_fast_test_base -
- ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
+ ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);

  // Clear the _cset_fast_test bitmap in anticipation of adding
  // regions to the incremental collection set for the first
@@ -2071,7 +2072,7 @@ jint G1CollectedHeap::initialize() {

  // Create the ConcurrentMark data structure and thread.
  // (Must do this late, so that "max_regions" is defined.)
- _cm = new ConcurrentMark(heap_rs, (int) max_regions());
+ _cm = new ConcurrentMark(heap_rs, max_regions());
  _cmThread = _cm->cmThread();

  // Initialize the from_card cache structure of HeapRegionRemSet.
@@ -2580,7 +2581,7 @@ G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  uint worker,
  uint no_of_par_workers,
  jint claim_value) {
- const size_t regions = n_regions();
+ const uint regions = n_regions();
  const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  no_of_par_workers :
  1);
@@ -2588,11 +2589,11 @@ G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  no_of_par_workers == workers()->total_workers(),
  "Non dynamic should use fixed number of workers");
  // try to spread out the starting points of the workers
- const size_t start_index = regions / max_workers * (size_t) worker;
+ const uint start_index = regions / max_workers * worker;

  // each worker will actually look at all regions
- for (size_t count = 0; count < regions; ++count) {
- const size_t index = (start_index + count) % regions;
+ for (uint count = 0; count < regions; ++count) {
+ const uint index = (start_index + count) % regions;
  assert(0 <= index && index < regions, "sanity");
  HeapRegion* r = region_at(index);
  // we'll ignore "continues humongous" regions (we'll process them
@@ -2614,7 +2615,7 @@ G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  // result, we might end up processing them twice. So, we'll do
  // them first (notice: most closures will ignore them anyway) and
  // then we'll do the "starts humongous" region.
- for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
+ for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
  HeapRegion* chr = region_at(ch_index);

  // if the region has already been claimed or it's not
@@ -2682,8 +2683,9 @@ void G1CollectedHeap::reset_cset_heap_region_claim_values() {
  class CheckClaimValuesClosure : public HeapRegionClosure {
  private:
  jint _claim_value;
- size_t _failures;
+ uint _failures;
  HeapRegion* _sh_region;
+
  public:
  CheckClaimValuesClosure(jint claim_value) :
  _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
@@ -2711,9 +2713,7 @@ public:
  }
  return false;
  }
- size_t failures() {
- return _failures;
- }
+ uint failures() { return _failures; }
  };

  bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
@@ -2723,17 +2723,15 @@ bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  }

  class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
- jint _claim_value;
- size_t _failures;
+ private:
+ jint _claim_value;
+ uint _failures;

  public:
  CheckClaimValuesInCSetHRClosure(jint claim_value) :
- _claim_value(claim_value),
- _failures(0) { }
+ _claim_value(claim_value), _failures(0) { }

- size_t failures() {
- return _failures;
- }
+ uint failures() { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
  assert(hr->in_collection_set(), "how?");
@@ -2800,14 +2798,14 @@ HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {

  result = g1_policy()->collection_set();
  if (G1CollectedHeap::use_parallel_gc_threads()) {
- size_t cs_size = g1_policy()->cset_region_length();
+ uint cs_size = g1_policy()->cset_region_length();
  uint active_workers = workers()->active_workers();
  assert(UseDynamicNumberOfGCThreads ||
  active_workers == workers()->total_workers(),
  "Unless dynamic should use total workers");

- size_t end_ind = (cs_size * worker_i) / active_workers;
- size_t start_ind = 0;
+ uint end_ind = (cs_size * worker_i) / active_workers;
+ uint start_ind = 0;

  if (worker_i > 0 &&
  _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
@@ -2817,7 +2815,7 @@ HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
  result = _worker_cset_start_region[worker_i - 1];
  }

- for (size_t i = start_ind; i < end_ind; i++) {
+ for (uint i = start_ind; i < end_ind; i++) {
  result = result->next_in_collection_set();
  }
  }
@@ -3033,7 +3031,6 @@ public:

  class VerifyRegionClosure: public HeapRegionClosure {
  private:
- bool _allow_dirty;
  bool _par;
  VerifyOption _vo;
  bool _failures;
@@ -3041,9 +3038,8 @@ public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord -> use mark word from object header.
- VerifyRegionClosure(bool allow_dirty, bool par, VerifyOption vo)
- : _allow_dirty(allow_dirty),
- _par(par),
+ VerifyRegionClosure(bool par, VerifyOption vo)
+ : _par(par),
  _vo(vo),
  _failures(false) {}

@@ -3056,7 +3052,7 @@ public:
  "Should be unclaimed at verify points.");
  if (!r->continuesHumongous()) {
  bool failures = false;
- r->verify(_allow_dirty, _vo, &failures);
+ r->verify(_vo, &failures);
  if (failures) {
  _failures = true;
  } else {
@@ -3124,7 +3120,6 @@ public:
  class G1ParVerifyTask: public AbstractGangTask {
  private:
  G1CollectedHeap* _g1h;
- bool _allow_dirty;
  VerifyOption _vo;
  bool _failures;

@@ -3132,10 +3127,9 @@ public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord -> use mark word from object header.
- G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, VerifyOption vo) :
+ G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
  AbstractGangTask("Parallel verify task"),
  _g1h(g1h),
- _allow_dirty(allow_dirty),
  _vo(vo),
  _failures(false) { }

@@ -3145,7 +3139,7 @@ public:

  void work(uint worker_id) {
  HandleMark hm;
- VerifyRegionClosure blk(_allow_dirty, true, _vo);
+ VerifyRegionClosure blk(true, _vo);
  _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
  _g1h->workers()->active_workers(),
  HeapRegion::ParVerifyClaimValue);
@@ -3155,12 +3149,11 @@ public:
  }
  };

- void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
- verify(allow_dirty, silent, VerifyOption_G1UsePrevMarking);
+ void G1CollectedHeap::verify(bool silent) {
+ verify(silent, VerifyOption_G1UsePrevMarking);
  }

- void G1CollectedHeap::verify(bool allow_dirty,
- bool silent,
+ void G1CollectedHeap::verify(bool silent,
  VerifyOption vo) {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
@@ -3212,7 +3205,7 @@ void G1CollectedHeap::verify(bool allow_dirty,
  assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  "sanity check");

- G1ParVerifyTask task(this, allow_dirty, vo);
+ G1ParVerifyTask task(this, vo);
  assert(UseDynamicNumberOfGCThreads ||
  workers()->active_workers() == workers()->total_workers(),
  "If not dynamic should be using all the workers");
@@ -3234,7 +3227,7 @@ void G1CollectedHeap::verify(bool allow_dirty,
  assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  "sanity check");
  } else {
- VerifyRegionClosure blk(allow_dirty, false, vo);
+ VerifyRegionClosure blk(false, vo);
  heap_region_iterate(&blk);
  if (blk.failures()) {
  failures = true;
@@ -3284,12 +3277,12 @@ void G1CollectedHeap::print_on(outputStream* st) const {
  _g1_storage.high_boundary());
  st->cr();
  st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
- size_t young_regions = _young_list->length();
- st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ",
- young_regions, young_regions * HeapRegion::GrainBytes / K);
- size_t survivor_regions = g1_policy()->recorded_survivor_regions();
- st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)",
- survivor_regions, survivor_regions * HeapRegion::GrainBytes / K);
+ uint young_regions = _young_list->length();
+ st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
+ (size_t) young_regions * HeapRegion::GrainBytes / K);
+ uint survivor_regions = g1_policy()->recorded_survivor_regions();
+ st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
+ (size_t) survivor_regions * HeapRegion::GrainBytes / K);
  st->cr();
  perm()->as_gen()->print_on(st);
  }
@@ -3299,7 +3292,11 @@ void G1CollectedHeap::print_extended_on(outputStream* st) const {

  // Print the per-region information.
  st->cr();
- st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), HS=humongous(starts), HC=humongous(continues), CS=collection set, F=free, TS=gc time stamp, PTAMS=previous top-at-mark-start, NTAMS=next top-at-mark-start)");
+ st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
+ "HS=humongous(starts), HC=humongous(continues), "
+ "CS=collection set, F=free, TS=gc time stamp, "
+ "PTAMS=previous top-at-mark-start, "
+ "NTAMS=next top-at-mark-start)");
  PrintRegionClosure blk(st);
  heap_region_iterate(&blk);
  }
@@ -3477,16 +3474,16 @@ size_t G1CollectedHeap::cards_scanned() {

  void
  G1CollectedHeap::setup_surviving_young_words() {
- guarantee( _surviving_young_words == NULL, "pre-condition" );
- size_t array_length = g1_policy()->young_cset_region_length();
- _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
+ assert(_surviving_young_words == NULL, "pre-condition");
+ uint array_length = g1_policy()->young_cset_region_length();
+ _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length);
  if (_surviving_young_words == NULL) {
  vm_exit_out_of_memory(sizeof(size_t) * array_length,
  "Not enough space for young surv words summary.");
  }
- memset(_surviving_young_words, 0, array_length * sizeof(size_t));
+ memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
  #ifdef ASSERT
- for (size_t i = 0; i < array_length; ++i) {
+ for (uint i = 0; i < array_length; ++i) {
  assert( _surviving_young_words[i] == 0, "memset above" );
  }
  #endif // !ASSERT
@@ -3495,9 +3492,10 @@ G1CollectedHeap::setup_surviving_young_words() {
  void
  G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
- size_t array_length = g1_policy()->young_cset_region_length();
- for (size_t i = 0; i < array_length; ++i)
+ uint array_length = g1_policy()->young_cset_region_length();
+ for (uint i = 0; i < array_length; ++i) {
  _surviving_young_words[i] += surv_young_words[i];
+ }
  }

  void
@@ -3609,12 +3607,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  increment_total_full_collections();
  }

- // if PrintGCDetails is on, we'll print long statistics information
+ // if the log level is "finer" is on, we'll print long statistics information
  // in the collector policy code, so let's not print this as the output
  // is messy if we do.
- gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
- TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
- TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
+ gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
+ TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
+ TraceTime t(verbose_str, G1Log::fine() && !G1Log::finer(), true, gclog_or_tty);

  TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
  TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
@@ -3647,8 +3645,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  HandleMark hm; // Discard invalid handles created during verification
  gclog_or_tty->print(" VerifyBeforeGC:");
  prepare_for_verify();
- Universe::verify(/* allow dirty */ false,
- /* silent */ false,
+ Universe::verify(/* silent */ false,
  /* option */ VerifyOption_G1UsePrevMarking);
  }

@@ -3892,8 +3889,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  HandleMark hm; // Discard invalid handles created during verification
  gclog_or_tty->print(" VerifyAfterGC:");
  prepare_for_verify();
- Universe::verify(/* allow dirty */ true,
- /* silent */ false,
+ Universe::verify(/* silent */ false,
  /* option */ VerifyOption_G1UsePrevMarking);
  }

@@ -3931,8 +3927,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  }

  // The closing of the inner scope, immediately above, will complete
- // the PrintGC logging output. The record_collection_pause_end() call
- // above will complete the logging output of PrintGCDetails.
+ // logging at the "fine" level. The record_collection_pause_end() call
+ // above will complete logging at the "finer" level.
  //
  // It is not yet to safe, however, to tell the concurrent mark to
  // start as we have some optional output below. We don't want the
@@ -4068,7 +4064,6 @@ void G1CollectedHeap::finalize_for_evac_failure() {

  void G1CollectedHeap::remove_self_forwarding_pointers() {
  assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
- assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");

  G1ParRemoveSelfForwardPtrsTask rsfp_task(this);

@@ -4086,7 +4081,6 @@ void G1CollectedHeap::remove_self_forwarding_pointers() {
  reset_cset_heap_region_claim_values();

  assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
- assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");

  // Now restore saved marks, if any.
  if (_objs_with_preserved_marks != NULL) {
@ -4248,16 +4242,16 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
|
|||||||
// non-young regions (where the age is -1)
|
// non-young regions (where the age is -1)
|
||||||
// We also add a few elements at the beginning and at the end in
|
// We also add a few elements at the beginning and at the end in
|
||||||
// an attempt to eliminate cache contention
|
// an attempt to eliminate cache contention
|
||||||
size_t real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
|
uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
|
||||||
size_t array_length = PADDING_ELEM_NUM +
|
uint array_length = PADDING_ELEM_NUM +
|
||||||
real_length +
|
real_length +
|
||||||
PADDING_ELEM_NUM;
|
PADDING_ELEM_NUM;
|
||||||
_surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
|
_surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
|
||||||
if (_surviving_young_words_base == NULL)
|
if (_surviving_young_words_base == NULL)
|
||||||
vm_exit_out_of_memory(array_length * sizeof(size_t),
|
vm_exit_out_of_memory(array_length * sizeof(size_t),
|
||||||
"Not enough space for young surv histo.");
|
"Not enough space for young surv histo.");
|
||||||
_surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
|
_surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
|
||||||
memset(_surviving_young_words, 0, real_length * sizeof(size_t));
|
memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
|
||||||
|
|
||||||
_alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
|
_alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
|
||||||
_alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
|
_alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
|
||||||
@ -4394,7 +4388,7 @@ void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
|
|||||||
template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
|
template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
|
||||||
oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
|
oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
|
||||||
::copy_to_survivor_space(oop old) {
|
::copy_to_survivor_space(oop old) {
|
||||||
size_t word_sz = old->size();
|
size_t word_sz = old->size();
|
||||||
HeapRegion* from_region = _g1->heap_region_containing_raw(old);
|
HeapRegion* from_region = _g1->heap_region_containing_raw(old);
|
||||||
// +1 to make the -1 indexes valid...
|
// +1 to make the -1 indexes valid...
|
||||||
int young_index = from_region->young_index_in_cset()+1;
|
int young_index = from_region->young_index_in_cset()+1;
|
||||||
@ -5514,9 +5508,9 @@ void G1CollectedHeap::evacuate_collection_set() {
|
|||||||
|
|
||||||
if (evacuation_failed()) {
|
if (evacuation_failed()) {
|
||||||
remove_self_forwarding_pointers();
|
remove_self_forwarding_pointers();
|
||||||
if (PrintGCDetails) {
|
if (G1Log::finer()) {
|
||||||
gclog_or_tty->print(" (to-space overflow)");
|
gclog_or_tty->print(" (to-space overflow)");
|
||||||
} else if (PrintGC) {
|
} else if (G1Log::fine()) {
|
||||||
gclog_or_tty->print("--");
|
gclog_or_tty->print("--");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -5591,8 +5585,8 @@ void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
|
|||||||
hr->set_notHumongous();
|
hr->set_notHumongous();
|
||||||
free_region(hr, &hr_pre_used, free_list, par);
|
free_region(hr, &hr_pre_used, free_list, par);
|
||||||
|
|
||||||
size_t i = hr->hrs_index() + 1;
|
uint i = hr->hrs_index() + 1;
|
||||||
size_t num = 1;
|
uint num = 1;
|
||||||
while (i < n_regions()) {
|
while (i < n_regions()) {
|
||||||
HeapRegion* curr_hr = region_at(i);
|
HeapRegion* curr_hr = region_at(i);
|
||||||
if (!curr_hr->continuesHumongous()) {
|
if (!curr_hr->continuesHumongous()) {
|
||||||
@@ -5801,7 +5795,7 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
     if (cur->is_young()) {
       int index = cur->young_index_in_cset();
       assert(index != -1, "invariant");
-      assert((size_t) index < policy->young_cset_region_length(), "invariant");
+      assert((uint) index < policy->young_cset_region_length(), "invariant");
       size_t words_survived = _surviving_young_words[index];
       cur->record_surv_words_in_group(words_survived);

@@ -6141,7 +6135,7 @@ void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
 // Methods for the GC alloc regions

 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
-                                                 size_t count,
+                                                 uint count,
                                                  GCAllocPurpose ap) {
   assert(FreeList_lock->owned_by_self(), "pre-condition");

@@ -6213,7 +6207,7 @@ private:
   FreeRegionList*     _free_list;
   OldRegionSet*       _old_set;
   HumongousRegionSet* _humongous_set;
-  size_t              _region_count;
+  uint                _region_count;

 public:
   VerifyRegionListsClosure(OldRegionSet* old_set,
@@ -6222,7 +6216,7 @@ public:
     _old_set(old_set), _humongous_set(humongous_set),
     _free_list(free_list), _region_count(0) { }

-  size_t region_count() { return _region_count; }
+  uint region_count() { return _region_count; }

   bool doHeapRegion(HeapRegion* hr) {
     _region_count += 1;
@@ -6244,7 +6238,7 @@ public:
   }
 };

-HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index,
+HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
                                              HeapWord* bottom) {
   HeapWord* end = bottom + HeapRegion::GrainWords;
   MemRegion mr(bottom, end);
@@ -85,8 +85,8 @@ private:

   HeapRegion* _curr;

-  size_t        _length;
+  uint          _length;
-  size_t        _survivor_length;
+  uint          _survivor_length;

   size_t        _last_sampled_rs_lengths;
   size_t        _sampled_rs_lengths;
@@ -101,8 +101,8 @@ public:

   void         empty_list();
   bool         is_empty() { return _length == 0; }
-  size_t       length() { return _length; }
+  uint         length() { return _length; }
-  size_t       survivor_length() { return _survivor_length; }
+  uint         survivor_length() { return _survivor_length; }

   // Currently we do not keep track of the used byte sum for the
   // young list and the survivors and it'd be quite a lot of work to
@@ -111,10 +111,10 @@ public:
   // we'll report the more accurate information then.
   size_t eden_used_bytes() {
     assert(length() >= survivor_length(), "invariant");
-    return (length() - survivor_length()) * HeapRegion::GrainBytes;
+    return (size_t) (length() - survivor_length()) * HeapRegion::GrainBytes;
   }
   size_t survivor_used_bytes() {
-    return survivor_length() * HeapRegion::GrainBytes;
+    return (size_t) survivor_length() * HeapRegion::GrainBytes;
   }

   void rs_length_sampling_init();
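The two casts above matter because the young-list lengths are now 32-bit counts: if both operands of the multiply are 32-bit, the product wraps at 2^32 before it is ever widened, even when the result is stored in a 64-bit size_t. Widening one operand first performs the arithmetic at the wider type. A small self-contained illustration of the idiom, using hypothetical constants rather than G1's actual region size:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t region_count = 5000;            // hypothetical number of young regions
  const uint32_t grain_bytes  = 1024 * 1024;     // hypothetical 1 MB region size

  // 32-bit multiply wraps at 2^32, even though the result is stored in 64 bits.
  uint64_t wrong = region_count * grain_bytes;

  // Widening one operand first performs the multiply in 64-bit arithmetic.
  uint64_t right = (uint64_t) region_count * grain_bytes;

  std::printf("wrapped: %llu  widened: %llu\n",
              (unsigned long long) wrong, (unsigned long long) right);
  return 0;
}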
@@ -247,7 +247,7 @@ private:
   MasterHumongousRegionSet  _humongous_set;

   // The number of regions we could create by expansion.
-  size_t _expansion_regions;
+  uint _expansion_regions;

   // The block offset table for the G1 heap.
   G1BlockOffsetSharedArray* _bot_shared;
@@ -339,7 +339,7 @@ private:
   bool* _in_cset_fast_test_base;

   // The length of the _in_cset_fast_test_base array.
-  size_t _in_cset_fast_test_length;
+  uint _in_cset_fast_test_length;

   volatile unsigned _gc_time_stamp;

@@ -458,14 +458,14 @@ protected:
   // length and remove them from the master free list. Return the
   // index of the first region or G1_NULL_HRS_INDEX if the search
   // was unsuccessful.
-  size_t humongous_obj_allocate_find_first(size_t num_regions,
+  uint humongous_obj_allocate_find_first(uint num_regions,
                                            size_t word_size);

   // Initialize a contiguous set of free regions of length num_regions
   // and starting at index first so that they appear as a single
   // humongous region.
-  HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
+  HeapWord* humongous_obj_allocate_initialize_regions(uint first,
-                                                      size_t num_regions,
+                                                      uint num_regions,
                                                       size_t word_size);

   // Attempt to allocate a humongous object of the given size. Return
@@ -574,7 +574,7 @@ protected:
                                    size_t allocated_bytes);

   // For GC alloc regions.
-  HeapRegion* new_gc_alloc_region(size_t word_size, size_t count,
+  HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
                                   GCAllocPurpose ap);
   void retire_gc_alloc_region(HeapRegion* alloc_region,
                               size_t allocated_bytes, GCAllocPurpose ap);
@@ -641,7 +641,7 @@ public:
   void register_region_with_in_cset_fast_test(HeapRegion* r) {
     assert(_in_cset_fast_test_base != NULL, "sanity");
     assert(r->in_collection_set(), "invariant");
-    size_t index = r->hrs_index();
+    uint index = r->hrs_index();
     assert(index < _in_cset_fast_test_length, "invariant");
     assert(!_in_cset_fast_test_base[index], "invariant");
     _in_cset_fast_test_base[index] = true;
@@ -655,7 +655,7 @@ public:
     if (_g1_committed.contains((HeapWord*) obj)) {
       // no need to subtract the bottom of the heap from obj,
       // _in_cset_fast_test is biased
-      size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
+      uintx index = (uintx) obj >> HeapRegion::LogOfHRGrainBytes;
       bool ret = _in_cset_fast_test[index];
       // let's make sure the result is consistent with what the slower
       // test returns
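The fast test above derives a region index by shifting an object address right by log2(region size); the table is "biased" so that the heap's bottom maps to slot 0 without an explicit subtraction. A standalone sketch of the idea, with illustrative names and sizes rather than the G1 data structure (the sketch keeps the explicit bottom subtraction for clarity, where G1 instead biases the table pointer):

#include <cstdint>
#include <vector>

// Collection-set membership table keyed by region index, illustrative only.
class InCSetFastTest {
  static const unsigned kLogRegionBytes = 20;   // hypothetical 1 MB regions
  uintptr_t _heap_bottom;                       // lowest heap address
  std::vector<bool> _table;                     // one entry per region
public:
  InCSetFastTest(uintptr_t heap_bottom, uint32_t num_regions)
    : _heap_bottom(heap_bottom), _table(num_regions, false) {}

  // Mark the region with the given index as being in the collection set.
  void set_in_cset(uint32_t region_index) { _table[region_index] = true; }

  // O(1) test: derive the region index directly from the address.
  bool is_in_cset(uintptr_t addr) const {
    uint32_t index = (uint32_t)((addr - _heap_bottom) >> kLogRegionBytes);
    return _table[index];
  }
};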
@@ -670,7 +670,7 @@ public:
   void clear_cset_fast_test() {
     assert(_in_cset_fast_test_base != NULL, "sanity");
     memset(_in_cset_fast_test_base, false,
-           _in_cset_fast_test_length * sizeof(bool));
+           (size_t) _in_cset_fast_test_length * sizeof(bool));
   }

   // This is called at the end of either a concurrent cycle or a Full
@@ -1101,23 +1101,23 @@ public:
   }

   // The total number of regions in the heap.
-  size_t n_regions() { return _hrs.length(); }
+  uint n_regions() { return _hrs.length(); }

   // The max number of regions in the heap.
-  size_t max_regions() { return _hrs.max_length(); }
+  uint max_regions() { return _hrs.max_length(); }

   // The number of regions that are completely free.
-  size_t free_regions() { return _free_list.length(); }
+  uint free_regions() { return _free_list.length(); }

   // The number of regions that are not completely free.
-  size_t used_regions() { return n_regions() - free_regions(); }
+  uint used_regions() { return n_regions() - free_regions(); }

   // The number of regions available for "regular" expansion.
-  size_t expansion_regions() { return _expansion_regions; }
+  uint expansion_regions() { return _expansion_regions; }

   // Factory method for HeapRegion instances. It will return NULL if
   // the allocation fails.
-  HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
+  HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom);

   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
@@ -1301,7 +1301,7 @@ public:
   void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;

   // Return the region with the given index. It assumes the index is valid.
-  HeapRegion* region_at(size_t index) const { return _hrs.at(index); }
+  HeapRegion* region_at(uint index) const { return _hrs.at(index); }

   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
@@ -1504,10 +1504,10 @@ public:
   // Currently there is only one place where this is called with
   // vo == UseMarkWord, which is to verify the marking during a
   // full GC.
-  void verify(bool allow_dirty, bool silent, VerifyOption vo);
+  void verify(bool silent, VerifyOption vo);

   // Override; it uses the "prev" marking information
-  virtual void verify(bool allow_dirty, bool silent);
+  virtual void verify(bool silent);
   virtual void print_on(outputStream* st) const;
   virtual void print_extended_on(outputStream* st) const;

@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
+#include "gc_implementation/g1/g1Log.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/shared/gcPolicyCounters.hpp"
 #include "runtime/arguments.hpp"
@@ -191,11 +192,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _in_marking_window(false),
   _in_marking_window_im(false),

-  _known_garbage_ratio(0.0),
-  _known_garbage_bytes(0),
-
-  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
-
   _recent_prev_end_times_for_all_gcs_sec(
                                 new TruncatedSeq(NumPrevPausesForHeuristics)),

@@ -430,31 +426,36 @@ G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(
   }

   if (FLAG_IS_CMDLINE(NewSize)) {
-    _min_desired_young_length = MAX2((size_t) 1, NewSize / HeapRegion::GrainBytes);
+    _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
+                                     1U);
     if (FLAG_IS_CMDLINE(MaxNewSize)) {
-      _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
+      _max_desired_young_length =
+          MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
+               1U);
       _sizer_kind = SizerMaxAndNewSize;
       _adaptive_size = _min_desired_young_length == _max_desired_young_length;
     } else {
       _sizer_kind = SizerNewSizeOnly;
     }
   } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
-    _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
+    _max_desired_young_length =
+        MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
+             1U);
     _sizer_kind = SizerMaxNewSizeOnly;
   }
 }

-size_t G1YoungGenSizer::calculate_default_min_length(size_t new_number_of_heap_regions) {
-  size_t default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
-  return MAX2((size_t)1, default_value);
+uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
+  uint default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
+  return MAX2(1U, default_value);
 }

-size_t G1YoungGenSizer::calculate_default_max_length(size_t new_number_of_heap_regions) {
-  size_t default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
-  return MAX2((size_t)1, default_value);
+uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
+  uint default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
+  return MAX2(1U, default_value);
 }

-void G1YoungGenSizer::heap_size_changed(size_t new_number_of_heap_regions) {
+void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
   assert(new_number_of_heap_regions > 0, "Heap must be initialized");

   switch (_sizer_kind) {
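The sizer above converts byte-valued command-line sizes (NewSize/MaxNewSize) into region counts, clamping to at least one region so that an explicitly tiny setting still yields a usable young generation. A minimal sketch of that conversion, with hypothetical values in place of the VM flags and region size:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Convert a byte-valued size request into a whole number of regions,
// never returning fewer than one region.
static uint32_t bytes_to_regions(uint64_t bytes, uint64_t region_bytes) {
  return std::max((uint32_t)(bytes / region_bytes), 1u);
}

int main() {
  const uint64_t region_bytes = 1 * 1024 * 1024;   // hypothetical 1 MB regions
  std::printf("16 MB  -> %u regions\n", bytes_to_regions(16u << 20, region_bytes));
  std::printf("512 KB -> %u regions\n", bytes_to_regions(512u << 10, region_bytes)); // rounds to 0, clamped to 1
  return 0;
}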
@@ -511,16 +512,16 @@ void G1CollectorPolicy::initialize_gc_policy_counters() {
   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
 }

-bool G1CollectorPolicy::predict_will_fit(size_t young_length,
+bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                          double base_time_ms,
-                                         size_t base_free_regions,
+                                         uint base_free_regions,
                                          double target_pause_time_ms) {
   if (young_length >= base_free_regions) {
     // end condition 1: not enough space for the young regions
     return false;
   }

-  double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
+  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
   size_t bytes_to_copy =
                (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
   double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
@@ -532,7 +533,7 @@ bool G1CollectorPolicy::predict_will_fit(size_t young_length,
   }

   size_t free_bytes =
                  (base_free_regions - young_length) * HeapRegion::GrainBytes;
   if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
     // end condition 3: out-of-space (conservatively!)
     return false;
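predict_will_fit answers one question: can a candidate young length be evacuated within the pause target, given the free regions left after the reserve? A compact standalone sketch of the three checks it layers, with hypothetical prediction inputs instead of the policy's real cost model:

#include <cstdint>

// Returns true when 'young_length' regions are predicted to fit the pause goal.
// All inputs are illustrative; the real policy derives them from runtime history.
bool predict_will_fit(uint32_t young_length,
                      double base_time_ms,            // fixed per-pause overhead prediction
                      uint32_t base_free_regions,     // free regions minus the reserve
                      double target_pause_time_ms,
                      double bytes_to_copy_per_region,
                      double copy_cost_ms_per_byte,
                      double safety_factor /* e.g. 2 * sigma */) {
  if (young_length >= base_free_regions) {
    return false;                                     // 1: no room for the regions themselves
  }

  double bytes_to_copy = young_length * bytes_to_copy_per_region;
  double pause_time_ms = base_time_ms + bytes_to_copy * copy_cost_ms_per_byte;
  if (pause_time_ms > target_pause_time_ms) {
    return false;                                     // 2: predicted pause exceeds the goal
  }

  double free_bytes = (double)(base_free_regions - young_length) * (1 << 20); // 1 MB regions assumed
  if (safety_factor * bytes_to_copy > free_bytes) {
    return false;                                     // 3: not enough slack to absorb survivors
  }
  return true;                                        // all end conditions passed
}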
@@ -542,25 +543,25 @@ bool G1CollectorPolicy::predict_will_fit(size_t young_length,
   return true;
 }

-void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
+void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
   // re-calculate the necessary reserve
   double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
   // We use ceiling so that if reserve_regions_d is > 0.0 (but
   // smaller than 1.0) we'll get 1.
-  _reserve_regions = (size_t) ceil(reserve_regions_d);
+  _reserve_regions = (uint) ceil(reserve_regions_d);

   _young_gen_sizer->heap_size_changed(new_number_of_regions);
 }

-size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
-                                                size_t base_min_length) {
-  size_t desired_min_length = 0;
+uint G1CollectorPolicy::calculate_young_list_desired_min_length(
+                                                uint base_min_length) {
+  uint desired_min_length = 0;
   if (adaptive_young_list_length()) {
     if (_alloc_rate_ms_seq->num() > 3) {
       double now_sec = os::elapsedTime();
       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
       double alloc_rate_ms = predict_alloc_rate_ms();
-      desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
+      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
     } else {
       // otherwise we don't have enough info to make the prediction
     }
@@ -570,7 +571,7 @@ size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
   return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
 }

-size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
+uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
   // Here, we might want to also take into account any additional
   // constraints (i.e., user-defined minimum bound). Currently, we
   // effectively don't set this bound.
@@ -587,11 +588,11 @@ void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
   // Calculate the absolute and desired min bounds.

   // This is how many young regions we already have (currently: the survivors).
-  size_t base_min_length = recorded_survivor_regions();
+  uint base_min_length = recorded_survivor_regions();
   // This is the absolute minimum young length, which ensures that we
   // can allocate one eden region in the worst-case.
-  size_t absolute_min_length = base_min_length + 1;
-  size_t desired_min_length =
+  uint absolute_min_length = base_min_length + 1;
+  uint desired_min_length =
                      calculate_young_list_desired_min_length(base_min_length);
   if (desired_min_length < absolute_min_length) {
     desired_min_length = absolute_min_length;
@@ -600,16 +601,16 @@ void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
   // Calculate the absolute and desired max bounds.

   // We will try our best not to "eat" into the reserve.
-  size_t absolute_max_length = 0;
+  uint absolute_max_length = 0;
   if (_free_regions_at_end_of_collection > _reserve_regions) {
     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
   }
-  size_t desired_max_length = calculate_young_list_desired_max_length();
+  uint desired_max_length = calculate_young_list_desired_max_length();
   if (desired_max_length > absolute_max_length) {
     desired_max_length = absolute_max_length;
   }

-  size_t young_list_target_length = 0;
+  uint young_list_target_length = 0;
   if (adaptive_young_list_length()) {
     if (gcs_are_young()) {
       young_list_target_length =
@@ -647,11 +648,11 @@ void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
   update_max_gc_locker_expansion();
 }

-size_t
+uint
 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
-                                                   size_t base_min_length,
-                                                   size_t desired_min_length,
-                                                   size_t desired_max_length) {
+                                                   uint base_min_length,
+                                                   uint desired_min_length,
+                                                   uint desired_max_length) {
   assert(adaptive_young_list_length(), "pre-condition");
   assert(gcs_are_young(), "only call this for young GCs");

@@ -666,9 +667,9 @@ G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
   // will be reflected in the predictions by the
   // survivor_regions_evac_time prediction.
   assert(desired_min_length > base_min_length, "invariant");
-  size_t min_young_length = desired_min_length - base_min_length;
+  uint min_young_length = desired_min_length - base_min_length;
   assert(desired_max_length > base_min_length, "invariant");
-  size_t max_young_length = desired_max_length - base_min_length;
+  uint max_young_length = desired_max_length - base_min_length;

   double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
   double survivor_regions_evac_time = predict_survivor_regions_evac_time();
@@ -678,8 +679,8 @@ G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
   double base_time_ms =
     predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
     survivor_regions_evac_time;
-  size_t available_free_regions = _free_regions_at_end_of_collection;
-  size_t base_free_regions = 0;
+  uint available_free_regions = _free_regions_at_end_of_collection;
+  uint base_free_regions = 0;
   if (available_free_regions > _reserve_regions) {
     base_free_regions = available_free_regions - _reserve_regions;
   }
@@ -716,9 +717,9 @@ G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
   // the new max. This way we maintain the loop invariants.

   assert(min_young_length < max_young_length, "invariant");
-  size_t diff = (max_young_length - min_young_length) / 2;
+  uint diff = (max_young_length - min_young_length) / 2;
   while (diff > 0) {
-    size_t young_length = min_young_length + diff;
+    uint young_length = min_young_length + diff;
     if (predict_will_fit(young_length, base_time_ms,
                          base_free_regions, target_pause_time_ms)) {
       min_young_length = young_length;
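The loop above is a binary search over the young length: min_young_length always satisfies the predicate, max_young_length never does, and the step halves until the two bounds meet. A standalone sketch of the same narrowing pattern against an arbitrary monotonic predicate (illustrative code, not the policy itself):

#include <cstdint>
#include <functional>

// Largest value in [lo, hi) for which 'fits' holds, assuming 'fits' is
// monotonically decreasing (true for small values, false beyond a threshold)
// and fits(lo) is already known to be true while fits(hi) is false.
uint32_t narrow(uint32_t lo, uint32_t hi, const std::function<bool(uint32_t)>& fits) {
  uint32_t diff = (hi - lo) / 2;
  while (diff > 0) {
    uint32_t candidate = lo + diff;
    if (fits(candidate)) {
      lo = candidate;          // candidate still fits: raise the lower bound
    } else {
      hi = candidate;          // candidate too big: lower the upper bound
    }
    diff = (hi - lo) / 2;      // step shrinks until lo and hi are adjacent
  }
  return lo;                   // largest value known to fit
}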
@@ -862,8 +863,6 @@ void G1CollectorPolicy::record_full_collection_end() {
   _last_young_gc = false;
   clear_initiate_conc_mark_if_possible();
   clear_during_initial_mark_pause();
-  _known_garbage_bytes = 0;
-  _known_garbage_ratio = 0.0;
   _in_marking_window = false;
   _in_marking_window_im = false;

@@ -876,7 +875,7 @@ void G1CollectorPolicy::record_full_collection_end() {
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_target_length();
-  _collectionSetChooser->clearMarkedHeapRegions();
+  _collectionSetChooser->clear();
 }

 void G1CollectorPolicy::record_stop_world_start() {
@@ -885,7 +884,7 @@ void G1CollectorPolicy::record_stop_world_start() {

 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                       size_t start_used) {
-  if (PrintGCDetails) {
+  if (G1Log::finer()) {
     gclog_or_tty->stamp(PrintGCTimeStamps);
     gclog_or_tty->print("[GC pause");
     gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
@@ -1022,11 +1021,16 @@ void G1CollectorPolicy::print_par_stats(int level,
     if (val > max)
       max = val;
     total += val;
-    buf.append(" %3.1lf", val);
+    if (G1Log::finest()) {
+      buf.append(" %.1lf", val);
+    }
+  }

+  if (G1Log::finest()) {
+    buf.append_and_print_cr("");
   }
-  buf.append_and_print_cr("");
   double avg = total / (double) no_of_gc_threads();
-  buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
+  buf.append_and_print_cr(" Avg: %.1lf Min: %.1lf Max: %.1lf Diff: %.1lf]",
     avg, min, max, max - min);
 }

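print_par_stats folds per-worker timings into a min/avg/max/diff summary line and only emits the raw per-thread values at the highest verbosity. A standalone sketch of that summarization, using a hypothetical logger rather than the HotSpot line buffer:

#include <algorithm>
#include <cstdio>
#include <vector>

// Print a summary of per-worker times; list the raw values only when verbose.
void print_par_stats(const char* name, const std::vector<double>& times_ms, bool verbose) {
  double min = times_ms[0], max = times_ms[0], total = 0.0;
  std::printf("  [%s (ms):", name);
  for (double val : times_ms) {
    min = std::min(min, val);
    max = std::max(max, val);
    total += val;
    if (verbose) {
      std::printf(" %.1lf", val);          // raw per-thread value, finest level only
    }
  }
  double avg = total / (double) times_ms.size();
  std::printf("  Avg: %.1lf Min: %.1lf Max: %.1lf Diff: %.1lf]\n",
              avg, min, max, max - min);
}

int main() {
  print_par_stats("Object Copy", {12.1, 13.4, 11.9, 12.8}, /*verbose=*/true);
  return 0;
}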
@@ -1223,7 +1227,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {

   // These values are used to update the summary information that is
   // displayed when TraceGen0Time is enabled, and are output as part
-  // of the PrintGCDetails output, in the non-parallel case.
+  // of the "finer" output, in the non-parallel case.

   double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
   double satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
@@ -1316,7 +1320,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
     // given that humongous object allocations do not really affect
     // either the pause's duration nor when the next pause will take
     // place we can safely ignore them here.
-    size_t regions_allocated = eden_cset_region_length();
+    uint regions_allocated = eden_cset_region_length();
     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
     _alloc_rate_ms_seq->add(alloc_rate_ms);

@@ -1356,8 +1360,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
     }
   }

-  // PrintGCDetails output
-  if (PrintGCDetails) {
+  if (G1Log::finer()) {
     bool print_marking_info =
       _g1->mark_in_progress() && !last_pause_included_initial_mark;

@@ -1376,11 +1379,15 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
       print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
     }
     print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
-    print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
+    if (G1Log::finest()) {
+      print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
+    }
     print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
     print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
     print_par_stats(2, "Termination", _par_last_termination_times_ms);
-    print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
+    if (G1Log::finest()) {
+      print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
+    }

     for (int i = 0; i < _parallel_gc_threads; i++) {
       _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] -
@@ -1406,7 +1413,9 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
       print_stats(1, "SATB Filtering", satb_filtering_time);
     }
     print_stats(1, "Update RS", update_rs_time);
-    print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
+    if (G1Log::finest()) {
+      print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
+    }
     print_stats(1, "Scan RS", scan_rs_time);
     print_stats(1, "Object Copying", obj_copy_time);
   }
@@ -1440,16 +1449,6 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
     }
   }

-  // Update the efficiency-since-mark vars.
-  double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
-  if (elapsed_ms < MIN_TIMER_GRANULARITY) {
-    // This usually happens due to the timer not having the required
-    // granularity. Some Linuxes are the usual culprits.
-    // We'll just set it to something (arbitrarily) small.
-    proc_ms = 1.0;
-  }
-  double cur_efficiency = (double) freed_bytes / proc_ms;
-
   bool new_in_marking_window = _in_marking_window;
   bool new_in_marking_window_im = false;
   if (during_initial_mark_pause()) {
@@ -1484,10 +1483,6 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
     }
   }

-  if (_last_gc_was_young && !_during_marking) {
-    _young_gc_eff_seq->add(cur_efficiency);
-  }
-
   _short_lived_surv_rate_group->start_adding_regions();
   // do that for any other surv rate groupsx

@@ -1495,8 +1490,9 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
   double pause_time_ms = elapsed_ms;

   size_t diff = 0;
-  if (_max_pending_cards >= _pending_cards)
+  if (_max_pending_cards >= _pending_cards) {
     diff = _max_pending_cards - _pending_cards;
+  }
   _pending_card_diff_seq->add((double) diff);

   double cost_per_card_ms = 0.0;
@@ -1601,7 +1597,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);

-  assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
+  _collectionSetChooser->verify();
 }

 #define EXT_SIZE_FORMAT "%d%s"
@@ -1610,7 +1606,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
   proper_unit_for_byte_size((bytes))

 void G1CollectorPolicy::print_heap_transition() {
-  if (PrintGCDetails) {
+  if (G1Log::finer()) {
     YoungList* young_list = _g1->young_list();
     size_t eden_bytes = young_list->eden_used_bytes();
     size_t survivor_bytes = young_list->survivor_used_bytes();
@@ -1637,7 +1633,7 @@ void G1CollectorPolicy::print_heap_transition() {
       EXT_SIZE_PARAMS(capacity));

     _prev_eden_capacity = eden_capacity;
-  } else if (PrintGC) {
+  } else if (G1Log::fine()) {
     _g1->print_size_transition(gclog_or_tty,
                                _cur_collection_pause_used_at_start_bytes,
                                _g1->used(), _g1->capacity());
@@ -1730,8 +1726,7 @@ G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
   return region_elapsed_time_ms;
 }

-size_t
-G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
+size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
   size_t bytes_to_copy;
   if (hr->is_marked())
     bytes_to_copy = hr->max_live_bytes();
@@ -1745,8 +1740,8 @@ G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
 }

 void
-G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length,
-                                            size_t survivor_cset_region_length) {
+G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
+                                            uint survivor_cset_region_length) {
   _eden_cset_region_length     = eden_cset_region_length;
   _survivor_cset_region_length = survivor_cset_region_length;
   _old_cset_region_length      = 0;
@@ -2010,7 +2005,7 @@ region_num_to_mbs(int length) {
 }
 #endif // PRODUCT

-size_t G1CollectorPolicy::max_regions(int purpose) {
+uint G1CollectorPolicy::max_regions(int purpose) {
   switch (purpose) {
     case GCAllocForSurvived:
       return _max_survivor_regions;
@@ -2023,13 +2018,13 @@ size_t G1CollectorPolicy::max_regions(int purpose) {
 }

 void G1CollectorPolicy::update_max_gc_locker_expansion() {
-  size_t expansion_region_num = 0;
+  uint expansion_region_num = 0;
   if (GCLockerEdenExpansionPercent > 0) {
     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
     double expansion_region_num_d = perc * (double) _young_list_target_length;
     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
     // less than 1.0) we'll get 1.
-    expansion_region_num = (size_t) ceil(expansion_region_num_d);
+    expansion_region_num = (uint) ceil(expansion_region_num_d);
   } else {
     assert(expansion_region_num == 0, "sanity");
   }
@@ -2043,34 +2038,12 @@ void G1CollectorPolicy::update_survivors_policy() {
     (double) _young_list_target_length / (double) SurvivorRatio;
   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
   // smaller than 1.0) we'll get 1.
-  _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
+  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);

   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
                                    HeapRegion::GrainWords * _max_survivor_regions);
 }

-#ifndef PRODUCT
-class HRSortIndexIsOKClosure: public HeapRegionClosure {
-  CollectionSetChooser* _chooser;
-public:
-  HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
-    _chooser(chooser) {}
-
-  bool doHeapRegion(HeapRegion* r) {
-    if (!r->continuesHumongous()) {
-      assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
-    }
-    return false;
-  }
-};
-
-bool G1CollectorPolicy::assertMarkedBytesDataOK() {
-  HRSortIndexIsOKClosure cl(_collectionSetChooser);
-  _g1->heap_region_iterate(&cl);
-  return true;
-}
-#endif
-
 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
                                                       GCCause::Cause gc_cause) {
   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
@@ -2168,8 +2141,8 @@ public:
       // We will skip any region that's currently used as an old GC
       // alloc region (we should not consider those for collection
       // before we fill them up).
-      if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
-        _hrSorted->addMarkedHeapRegion(r);
+      if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
+        _hrSorted->add_region(r);
       }
     }
     return false;
@@ -2179,16 +2152,14 @@ public:
 class ParKnownGarbageHRClosure: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
   CollectionSetChooser* _hrSorted;
-  jint _marked_regions_added;
+  uint _marked_regions_added;
   size_t _reclaimable_bytes_added;
-  jint _chunk_size;
-  jint _cur_chunk_idx;
-  jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
-  int _worker;
-  int _invokes;
+  uint _chunk_size;
+  uint _cur_chunk_idx;
+  uint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)

   void get_new_chunk() {
-    _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
+    _cur_chunk_idx = _hrSorted->claim_array_chunk(_chunk_size);
     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
   }
   void add_region(HeapRegion* r) {
@@ -2196,7 +2167,7 @@ class ParKnownGarbageHRClosure: public HeapRegionClosure {
       get_new_chunk();
     }
     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
-    _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
+    _hrSorted->set_region(_cur_chunk_idx, r);
     _marked_regions_added++;
     _reclaimable_bytes_added += r->reclaimable_bytes();
     _cur_chunk_idx++;
@@ -2204,104 +2175,79 @@ class ParKnownGarbageHRClosure: public HeapRegionClosure {

 public:
   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
-                           jint chunk_size,
-                           int worker) :
+                           uint chunk_size) :
     _g1h(G1CollectedHeap::heap()),
-    _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
+    _hrSorted(hrSorted), _chunk_size(chunk_size),
     _marked_regions_added(0), _reclaimable_bytes_added(0),
-    _cur_chunk_idx(0), _cur_chunk_end(0), _invokes(0) { }
+    _cur_chunk_idx(0), _cur_chunk_end(0) { }

   bool doHeapRegion(HeapRegion* r) {
-    // We only include humongous regions in collection
-    // sets when concurrent mark shows that their contained object is
-    // unreachable.
-    _invokes++;
-
     // Do we have any marking information for this region?
     if (r->is_marked()) {
       // We will skip any region that's currently used as an old GC
       // alloc region (we should not consider those for collection
       // before we fill them up).
-      if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
+      if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
         add_region(r);
       }
     }
     return false;
   }
-  jint marked_regions_added() { return _marked_regions_added; }
+  uint marked_regions_added() { return _marked_regions_added; }
   size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
-  int invokes() { return _invokes; }
 };

 class ParKnownGarbageTask: public AbstractGangTask {
   CollectionSetChooser* _hrSorted;
-  jint _chunk_size;
+  uint _chunk_size;
   G1CollectedHeap* _g1;
 public:
-  ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
+  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
     AbstractGangTask("ParKnownGarbageTask"),
     _hrSorted(hrSorted), _chunk_size(chunk_size),
     _g1(G1CollectedHeap::heap()) { }

   void work(uint worker_id) {
-    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
-                                               _chunk_size,
-                                               worker_id);
+    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
     // Back to zero for the claim value.
     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
                                          _g1->workers()->active_workers(),
                                          HeapRegion::InitialClaimValue);
-    jint regions_added = parKnownGarbageCl.marked_regions_added();
+    uint regions_added = parKnownGarbageCl.marked_regions_added();
     size_t reclaimable_bytes_added =
                                    parKnownGarbageCl.reclaimable_bytes_added();
-    _hrSorted->updateTotals(regions_added, reclaimable_bytes_added);
-    if (G1PrintParCleanupStats) {
-      gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
-                             worker_id, parKnownGarbageCl.invokes(), regions_added);
-    }
+    _hrSorted->update_totals(regions_added, reclaimable_bytes_added);
   }
 };

 void
 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
-  double start_sec;
+  _collectionSetChooser->clear();
-  if (G1PrintParCleanupStats) {
-    start_sec = os::elapsedTime();
-  }
-
-  _collectionSetChooser->clearMarkedHeapRegions();
-  double clear_marked_end_sec;
-  if (G1PrintParCleanupStats) {
-    clear_marked_end_sec = os::elapsedTime();
-    gclog_or_tty->print_cr("  clear marked regions: %8.3f ms.",
-                           (clear_marked_end_sec - start_sec) * 1000.0);
-  }

+  uint region_num = _g1->n_regions();
   if (G1CollectedHeap::use_parallel_gc_threads()) {
-    const size_t OverpartitionFactor = 4;
-    size_t WorkUnit;
+    const uint OverpartitionFactor = 4;
+    uint WorkUnit;
     // The use of MinChunkSize = 8 in the original code
     // causes some assertion failures when the total number of
     // region is less than 8.  The code here tries to fix that.
     // Should the original code also be fixed?
     if (no_of_gc_threads > 0) {
-      const size_t MinWorkUnit =
-        MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U);
-      WorkUnit =
-        MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor),
-             MinWorkUnit);
+      const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
+      WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
+                      MinWorkUnit);
     } else {
       assert(no_of_gc_threads > 0,
         "The active gc workers should be greater than 0");
       // In a product build do something reasonable to avoid a crash.
-      const size_t MinWorkUnit =
-        MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U);
+      const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
       WorkUnit =
-        MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
+        MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
             MinWorkUnit);
     }
-    _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
+    _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
                                                              WorkUnit);
     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                             (int) WorkUnit);
     _g1->workers()->run_task(&parKnownGarbageTask);
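The WorkUnit computation above sizes the chunks that GC workers claim from the shared region array: an overpartitioned share of the regions, clamped from below by a per-thread minimum so the chunk size stays sane when there are very few regions or threads. A standalone sketch of the same arithmetic with illustrative constants, not the HotSpot code:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Chunk size for dynamically claimed work: the even split divided by an
// overpartition factor, clamped from below by the per-thread even split
// (and never below one region).
uint32_t work_unit(uint32_t num_regions, uint32_t num_workers) {
  const uint32_t kOverpartitionFactor = 4;
  uint32_t min_unit = std::max(num_regions / num_workers, 1u);
  return std::max(num_regions / (num_workers * kOverpartitionFactor), min_unit);
}

int main() {
  std::printf("2048 regions, 8 workers -> chunk = %u\n", work_unit(2048, 8));
  std::printf("6 regions, 8 workers    -> chunk = %u\n", work_unit(6, 8));  // clamps to 1
  return 0;
}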
@@ -2312,20 +2258,10 @@ G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
     _g1->heap_region_iterate(&knownGarbagecl);
   }
-  double known_garbage_end_sec;
-  if (G1PrintParCleanupStats) {
-    known_garbage_end_sec = os::elapsedTime();
-    gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
-                           (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
-  }

-  _collectionSetChooser->sortMarkedHeapRegions();
+  _collectionSetChooser->sort_regions();

   double end_sec = os::elapsedTime();
-  if (G1PrintParCleanupStats) {
-    gclog_or_tty->print_cr("  sorting: %8.3f ms.",
-                           (end_sec - known_garbage_end_sec) * 1000.0);
-  }

   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
   _cur_mark_stop_world_time_ms += elapsed_time_ms;
@@ -2541,13 +2477,13 @@ void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream
 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                 const char* false_action_str) {
   CollectionSetChooser* cset_chooser = _collectionSetChooser;
-  if (cset_chooser->isEmpty()) {
+  if (cset_chooser->is_empty()) {
     ergo_verbose0(ErgoMixedGCs,
                   false_action_str,
                   ergo_format_reason("candidate old regions not available"));
     return false;
   }
-  size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes();
+  size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
   size_t capacity_bytes = _g1->capacity();
   double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
   double threshold = (double) G1HeapWastePercent;
@@ -2558,7 +2494,7 @@ bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                   ergo_format_region("candidate old regions")
                   ergo_format_byte_perc("reclaimable")
                   ergo_format_perc("threshold"),
-                  cset_chooser->remainingRegions(),
+                  cset_chooser->remaining_regions(),
                   reclaimable_bytes, perc, threshold);
     return false;
   }
@@ -2569,7 +2505,7 @@ bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                 ergo_format_region("candidate old regions")
                 ergo_format_byte_perc("reclaimable")
                 ergo_format_perc("threshold"),
-                cset_chooser->remainingRegions(),
+                cset_chooser->remaining_regions(),
                 reclaimable_bytes, perc, threshold);
   return true;
 }
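next_gc_should_be_mixed boils down to one ratio: keep scheduling mixed collections only while the bytes reclaimable from candidate old regions are a large enough percentage of the heap, with G1HeapWastePercent as the cut-off. A standalone sketch of that decision, with hypothetical numbers and none of the policy's logging or bookkeeping:

#include <cstdint>
#include <cstdio>

// Decide whether another mixed GC is worthwhile: true while the candidate old
// regions still hold more reclaimable space than the allowed waste percentage.
bool next_gc_should_be_mixed(uint64_t reclaimable_bytes,
                             uint64_t capacity_bytes,
                             double heap_waste_percent) {
  if (reclaimable_bytes == 0) {
    return false;                         // no candidate old regions left
  }
  double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
  return perc > heap_waste_percent;       // at or below the threshold: stop doing mixed GCs
}

int main() {
  const uint64_t capacity = 8ull << 30;   // hypothetical 8 GB heap
  std::printf("1 GB reclaimable   -> %d\n", next_gc_should_be_mixed(1ull << 30, capacity, 5.0));
  std::printf("100 MB reclaimable -> %d\n", next_gc_should_be_mixed(100ull << 20, capacity, 5.0));
  return 0;
}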
@@ -2613,8 +2549,8 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   // pause are appended to the RHS of the young list, i.e.
   //   [Newly Young Regions ++ Survivors from last pause].

-  size_t survivor_region_length = young_list->survivor_length();
-  size_t eden_region_length = young_list->length() - survivor_region_length;
+  uint survivor_region_length = young_list->survivor_length();
+  uint eden_region_length = young_list->length() - survivor_region_length;
   init_cset_region_lengths(eden_region_length, survivor_region_length);
   hr = young_list->first_survivor_region();
   while (hr != NULL) {
@@ -2652,11 +2588,11 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {

   if (!gcs_are_young()) {
     CollectionSetChooser* cset_chooser = _collectionSetChooser;
-    assert(cset_chooser->verify(), "CSet Chooser verification - pre");
-    const size_t min_old_cset_length = cset_chooser->calcMinOldCSetLength();
-    const size_t max_old_cset_length = cset_chooser->calcMaxOldCSetLength();
+    cset_chooser->verify();
+    const uint min_old_cset_length = cset_chooser->calc_min_old_cset_length();
+    const uint max_old_cset_length = cset_chooser->calc_max_old_cset_length();

-    size_t expensive_region_num = 0;
+    uint expensive_region_num = 0;
     bool check_time_remaining = adaptive_young_list_length();
     HeapRegion* hr = cset_chooser->peek();
     while (hr != NULL) {
@@ -2741,7 +2677,7 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
                             time_remaining_ms);
     }

-    assert(cset_chooser->verify(), "CSet Chooser verification - post");
+    cset_chooser->verify();
   }

   stop_incremental_cset_building();
|
@ -128,19 +128,19 @@ private:
SizerNewRatio
};
SizerKind _sizer_kind;
- size_t _min_desired_young_length;
+ uint _min_desired_young_length;
- size_t _max_desired_young_length;
+ uint _max_desired_young_length;
bool _adaptive_size;
- size_t calculate_default_min_length(size_t new_number_of_heap_regions);
+ uint calculate_default_min_length(uint new_number_of_heap_regions);
- size_t calculate_default_max_length(size_t new_number_of_heap_regions);
+ uint calculate_default_max_length(uint new_number_of_heap_regions);

public:
G1YoungGenSizer();
- void heap_size_changed(size_t new_number_of_heap_regions);
+ void heap_size_changed(uint new_number_of_heap_regions);
- size_t min_desired_young_length() {
+ uint min_desired_young_length() {
return _min_desired_young_length;
}
- size_t max_desired_young_length() {
+ uint max_desired_young_length() {
return _max_desired_young_length;
}
bool adaptive_young_list_length() {
@ -175,7 +175,7 @@ private:

double _cur_collection_start_sec;
size_t _cur_collection_pause_used_at_start_bytes;
- size_t _cur_collection_pause_used_regions_at_start;
+ uint _cur_collection_pause_used_regions_at_start;
double _cur_collection_par_time_ms;

double _cur_collection_code_root_fixup_time_ms;
@ -233,13 +233,13 @@ private:
// indicates whether we are in young or mixed GC mode
bool _gcs_are_young;

- size_t _young_list_target_length;
+ uint _young_list_target_length;
- size_t _young_list_fixed_length;
+ uint _young_list_fixed_length;
size_t _prev_eden_capacity; // used for logging

// The max number of regions we can extend the eden by while the GC
// locker is active. This should be >= _young_list_target_length;
- size_t _young_list_max_length;
+ uint _young_list_max_length;

bool _last_gc_was_young;

@ -257,7 +257,7 @@ private:
double _gc_overhead_perc;

double _reserve_factor;
- size_t _reserve_regions;
+ uint _reserve_regions;

bool during_marking() {
return _during_marking;
@ -288,22 +288,20 @@ private:

TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

- TruncatedSeq* _young_gc_eff_seq;

G1YoungGenSizer* _young_gen_sizer;

- size_t _eden_cset_region_length;
+ uint _eden_cset_region_length;
- size_t _survivor_cset_region_length;
+ uint _survivor_cset_region_length;
- size_t _old_cset_region_length;
+ uint _old_cset_region_length;

- void init_cset_region_lengths(size_t eden_cset_region_length,
+ void init_cset_region_lengths(uint eden_cset_region_length,
- size_t survivor_cset_region_length);
+ uint survivor_cset_region_length);

- size_t eden_cset_region_length() { return _eden_cset_region_length; }
+ uint eden_cset_region_length() { return _eden_cset_region_length; }
- size_t survivor_cset_region_length() { return _survivor_cset_region_length; }
+ uint survivor_cset_region_length() { return _survivor_cset_region_length; }
- size_t old_cset_region_length() { return _old_cset_region_length; }
+ uint old_cset_region_length() { return _old_cset_region_length; }

- size_t _free_regions_at_end_of_collection;
+ uint _free_regions_at_end_of_collection;

size_t _recorded_rs_lengths;
size_t _max_rs_lengths;
@ -315,9 +313,6 @@ private:

size_t _rs_lengths_prediction;

- size_t _known_garbage_bytes;
- double _known_garbage_ratio;

double sigma() { return _sigma; }

// A function that prevents us putting too much stock in small sample
@ -496,10 +491,10 @@ public:

void set_recorded_rs_lengths(size_t rs_lengths);

- size_t cset_region_length() { return young_cset_region_length() +
+ uint cset_region_length() { return young_cset_region_length() +
old_cset_region_length(); }
- size_t young_cset_region_length() { return eden_cset_region_length() +
+ uint young_cset_region_length() { return eden_cset_region_length() +
survivor_cset_region_length(); }

void record_young_free_cset_time_ms(double time_ms) {
_recorded_young_free_cset_time_ms = time_ms;
@ -509,10 +504,6 @@ public:
_recorded_non_young_free_cset_time_ms = time_ms;
}

- double predict_young_gc_eff() {
- return get_new_neg_prediction(_young_gc_eff_seq);
- }

double predict_survivor_regions_evac_time();

void cset_regions_freed() {
@ -522,20 +513,6 @@ public:
// also call it on any more surv rate groups
}

- void set_known_garbage_bytes(size_t known_garbage_bytes) {
- _known_garbage_bytes = known_garbage_bytes;
- size_t heap_bytes = _g1->capacity();
- _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
- }
-
- void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
- guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
-
- _known_garbage_bytes -= known_garbage_bytes;
- size_t heap_bytes = _g1->capacity();
- _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
- }

G1MMUTracker* mmu_tracker() {
return _mmu_tracker;
}
@ -720,12 +697,12 @@ private:
// Calculate and return the minimum desired young list target
// length. This is the minimum desired young list length according
// to the user's inputs.
- size_t calculate_young_list_desired_min_length(size_t base_min_length);
+ uint calculate_young_list_desired_min_length(uint base_min_length);

// Calculate and return the maximum desired young list target
// length. This is the maximum desired young list length according
// to the user's inputs.
- size_t calculate_young_list_desired_max_length();
+ uint calculate_young_list_desired_max_length();

// Calculate and return the maximum young list target length that
// can fit into the pause time goal. The parameters are: rs_lengths
@ -733,18 +710,18 @@ private:
// be, base_min_length is the alreay existing number of regions in
// the young list, min_length and max_length are the desired min and
// max young list length according to the user's inputs.
- size_t calculate_young_list_target_length(size_t rs_lengths,
+ uint calculate_young_list_target_length(size_t rs_lengths,
- size_t base_min_length,
+ uint base_min_length,
- size_t desired_min_length,
+ uint desired_min_length,
- size_t desired_max_length);
+ uint desired_max_length);

// Check whether a given young length (young_length) fits into the
// given target pause time and whether the prediction for the amount
// of objects to be copied for the given length will fit into the
// given free space (expressed by base_free_regions). It is used by
// calculate_young_list_target_length().
- bool predict_will_fit(size_t young_length, double base_time_ms,
+ bool predict_will_fit(uint young_length, double base_time_ms,
- size_t base_free_regions, double target_pause_time_ms);
+ uint base_free_regions, double target_pause_time_ms);

// Count the number of bytes used in the CS.
void count_CS_bytes_used();
@ -773,7 +750,7 @@ public:
}

// This should be called after the heap is resized.
- void record_new_heap_size(size_t new_number_of_regions);
+ void record_new_heap_size(uint new_number_of_regions);

void init();

@ -1026,12 +1003,6 @@ public:
// exceeded the desired limit, return an amount to expand by.
size_t expansion_amount();

- #ifndef PRODUCT
- // Check any appropriate marked bytes info, asserting false if
- // something's wrong, else returning "true".
- bool assertMarkedBytesDataOK();
- #endif

// Print tracing information.
void print_tracing_info() const;

@ -1048,18 +1019,18 @@ public:
}

bool is_young_list_full() {
- size_t young_list_length = _g1->young_list()->length();
+ uint young_list_length = _g1->young_list()->length();
- size_t young_list_target_length = _young_list_target_length;
+ uint young_list_target_length = _young_list_target_length;
return young_list_length >= young_list_target_length;
}

bool can_expand_young_list() {
- size_t young_list_length = _g1->young_list()->length();
+ uint young_list_length = _g1->young_list()->length();
- size_t young_list_max_length = _young_list_max_length;
+ uint young_list_max_length = _young_list_max_length;
return young_list_length < young_list_max_length;
}

- size_t young_list_max_length() {
+ uint young_list_max_length() {
return _young_list_max_length;
}

@ -1074,19 +1045,6 @@ public:
return _young_gen_sizer->adaptive_young_list_length();
}

- inline double get_gc_eff_factor() {
- double ratio = _known_garbage_ratio;
-
- double square = ratio * ratio;
- // square = square * square;
- double ret = square * 9.0 + 1.0;
- #if 0
- gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
- #endif // 0
- guarantee(0.0 <= ret && ret < 10.0, "invariant!");
- return ret;
- }

private:
//
// Survivor regions policy.
@ -1097,7 +1055,7 @@ private:
int _tenuring_threshold;

// The limit on the number of regions allocated for survivors.
- size_t _max_survivor_regions;
+ uint _max_survivor_regions;

// For reporting purposes.
size_t _eden_bytes_before_gc;
@ -1105,7 +1063,7 @@ private:
size_t _capacity_before_gc;

// The amount of survor regions after a collection.
- size_t _recorded_survivor_regions;
+ uint _recorded_survivor_regions;
// List of survivor regions.
HeapRegion* _recorded_survivor_head;
HeapRegion* _recorded_survivor_tail;
@ -1127,9 +1085,9 @@ public:
return purpose == GCAllocForSurvived;
}

- static const size_t REGIONS_UNLIMITED = ~(size_t)0;
+ static const uint REGIONS_UNLIMITED = (uint) -1;

- size_t max_regions(int purpose);
+ uint max_regions(int purpose);

// The limit on regions for a particular purpose is reached.
void note_alloc_region_limit_reached(int purpose) {
@ -1146,7 +1104,7 @@ public:
_survivor_surv_rate_group->stop_adding_regions();
}

- void record_survivor_regions(size_t regions,
+ void record_survivor_regions(uint regions,
HeapRegion* head,
HeapRegion* tail) {
_recorded_survivor_regions = regions;
@ -1154,12 +1112,11 @@ public:
_recorded_survivor_tail = tail;
}

- size_t recorded_survivor_regions() {
+ uint recorded_survivor_regions() {
return _recorded_survivor_regions;
}

- void record_thread_age_table(ageTable* age_table)
- {
+ void record_thread_age_table(ageTable* age_table) {
_survivors_age_table.merge_par(age_table);
}

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -120,7 +120,7 @@ public:

// Single parameter format strings
#define ergo_format_str(_name_) ", " _name_ ": %s"
- #define ergo_format_region(_name_) ", " _name_ ": "SIZE_FORMAT" regions"
+ #define ergo_format_region(_name_) ", " _name_ ": %u regions"
#define ergo_format_byte(_name_) ", " _name_ ": "SIZE_FORMAT" bytes"
#define ergo_format_double(_name_) ", " _name_ ": %1.2f"
#define ergo_format_perc(_name_) ", " _name_ ": %1.2f %%"
hotspot/src/share/vm/gc_implementation/g1/g1Log.cpp (new file, 56 lines)
@ -0,0 +1,56 @@
+ /*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+ #include "precompiled.hpp"
+ #include "gc_implementation/g1/g1_globals.hpp"
+ #include "gc_implementation/g1/g1Log.hpp"
+ #include "runtime/globals.hpp"
+
+ G1Log::LogLevel G1Log::_level = G1Log::LevelNone;
+
+ // If G1LogLevel has not been set up we will use the values of PrintGC
+ // and PrintGCDetails for the logging level.
+ // - PrintGC maps to "fine".
+ // - PrintGCDetails maps to "finer".
+ void G1Log::init() {
+ if (G1LogLevel != NULL && G1LogLevel[0] != '\0') {
+ if (strncmp("none", G1LogLevel, 4) == 0 && G1LogLevel[4] == '\0') {
+ _level = LevelNone;
+ } else if (strncmp("fine", G1LogLevel, 4) == 0 && G1LogLevel[4] == '\0') {
+ _level = LevelFine;
+ } else if (strncmp("finer", G1LogLevel, 5) == 0 && G1LogLevel[5] == '\0') {
+ _level = LevelFiner;
+ } else if (strncmp("finest", G1LogLevel, 6) == 0 && G1LogLevel[6] == '\0') {
+ _level = LevelFinest;
+ } else {
+ warning("Unknown logging level '%s', should be one of 'fine', 'finer' or 'finest'.", G1LogLevel);
+ }
+ } else {
+ if (PrintGCDetails) {
+ _level = LevelFiner;
+ } else if (PrintGC) {
+ _level = LevelFine;
+ }
+ }
+ }
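In practice the new level would be selected on the command line; since G1LogLevel is declared as an experimental flag later in this changeset, an invocation along the lines of -XX:+UnlockExperimentalVMOptions -XX:G1LogLevel=finest would be expected to pick the most verbose level, while leaving the flag unset falls back to the PrintGC / PrintGCDetails mapping shown in init() above (this example invocation is an illustration, not part of the changeset).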
hotspot/src/share/vm/gc_implementation/g1/g1Log.hpp (new file, 56 lines)
@ -0,0 +1,56 @@
+ /*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1LOG_HPP
+ #define SHARE_VM_GC_IMPLEMENTATION_G1_G1LOG_HPP
+
+ #include "memory/allocation.hpp"
+
+ class G1Log : public AllStatic {
+ typedef enum {
+ LevelNone,
+ LevelFine,
+ LevelFiner,
+ LevelFinest
+ } LogLevel;
+
+ static LogLevel _level;
+
+ public:
+ inline static bool fine() {
+ return _level >= LevelFine;
+ }
+
+ inline static bool finer() {
+ return _level >= LevelFiner;
+ }
+
+ inline static bool finest() {
+ return _level == LevelFinest;
+ }
+
+ static void init();
+ };
+
+ #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1LOG_HPP
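A minimal sketch of how callers are meant to use these predicates (illustrative only; g1h and gclog_or_tty stand for the usual G1 heap handle and HotSpot log stream):

  if (G1Log::finer()) {
    gclog_or_tty->print_cr("[G1] verbose details enabled");
  }

which mirrors the TraceTime guards switched from PrintGC to G1Log::fine() in g1MarkSweep.cpp below.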
@ -29,6 +29,7 @@
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
+ #include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "memory/gcLocker.hpp"
#include "memory/genCollectedHeap.hpp"
@ -126,7 +127,7 @@ void G1MarkSweep::allocate_stacks() {
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
- TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
+ TraceTime tm("phase 1", G1Log::fine() && Verbose, true, gclog_or_tty);
GenMarkSweep::trace(" 1");

SharedHeap* sh = SharedHeap::heap();
@ -192,8 +193,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
// fail. At the end of the GC, the orginal mark word values
// (including hash values) are restored to the appropriate
// objects.
- Universe::heap()->verify(/* allow dirty */ true,
- /* silent */ false,
+ Universe::heap()->verify(/* silent */ false,
/* option */ VerifyOption_G1UseMarkWord);

G1CollectedHeap* g1h = G1CollectedHeap::heap();
@ -291,7 +291,7 @@ void G1MarkSweep::mark_sweep_phase2() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
Generation* pg = g1h->perm_gen();

- TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
+ TraceTime tm("phase 2", G1Log::fine() && Verbose, true, gclog_or_tty);
GenMarkSweep::trace("2");

FindFirstRegionClosure cl;
@ -335,7 +335,7 @@ void G1MarkSweep::mark_sweep_phase3() {
Generation* pg = g1h->perm_gen();

// Adjust the pointers to reflect the new locations
- TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
+ TraceTime tm("phase 3", G1Log::fine() && Verbose, true, gclog_or_tty);
GenMarkSweep::trace("3");

SharedHeap* sh = SharedHeap::heap();
@ -399,7 +399,7 @@ void G1MarkSweep::mark_sweep_phase4() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
Generation* pg = g1h->perm_gen();

- TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
+ TraceTime tm("phase 4", G1Log::fine() && Verbose, true, gclog_or_tty);
GenMarkSweep::trace("4");

pg->compact();
@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -177,19 +177,19 @@ void G1MonitoringSupport::recalculate_sizes() {
// values we read here are possible (i.e., at a STW phase at the end
// of a GC).

- size_t young_list_length = g1->young_list()->length();
+ uint young_list_length = g1->young_list()->length();
- size_t survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
+ uint survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
assert(young_list_length >= survivor_list_length, "invariant");
- size_t eden_list_length = young_list_length - survivor_list_length;
+ uint eden_list_length = young_list_length - survivor_list_length;
// Max length includes any potential extensions to the young gen
// we'll do when the GC locker is active.
- size_t young_list_max_length = g1->g1_policy()->young_list_max_length();
+ uint young_list_max_length = g1->g1_policy()->young_list_max_length();
assert(young_list_max_length >= survivor_list_length, "invariant");
- size_t eden_list_max_length = young_list_max_length - survivor_list_length;
+ uint eden_list_max_length = young_list_max_length - survivor_list_length;

_overall_used = g1->used_unlocked();
- _eden_used = eden_list_length * HeapRegion::GrainBytes;
+ _eden_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
- _survivor_used = survivor_list_length * HeapRegion::GrainBytes;
+ _survivor_used = (size_t) survivor_list_length * HeapRegion::GrainBytes;
_young_region_num = young_list_length;
_old_used = subtract_up_to_zero(_overall_used, _eden_used + _survivor_used);

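The (size_t) casts added above are worth a note: with region counts now plain 32-bit uints, widening the count before multiplying by HeapRegion::GrainBytes makes it explicit that the byte arithmetic is done in size_t rather than in 32-bit unsigned arithmetic. As a rough, hypothetical illustration (numbers not from this changeset): 200 eden regions of 8 MB each give

  _eden_used = (size_t) 200 * 8 * M;  // about 1.6 GB, comfortably a size_t quantity

so the result stays a byte count even when the region count type shrinks.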
@ -207,7 +207,7 @@ void G1MonitoringSupport::recalculate_sizes() {
committed -= _survivor_committed + _old_committed;

// Next, calculate and remove the committed size for the eden.
- _eden_committed = eden_list_max_length * HeapRegion::GrainBytes;
+ _eden_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes;
// Somewhat defensive: be robust in case there are inaccuracies in
// the calculations
_eden_committed = MIN2(_eden_committed, committed);
@ -237,10 +237,10 @@ void G1MonitoringSupport::recalculate_eden_size() {
// When a new eden region is allocated, only the eden_used size is
// affected (since we have recalculated everything else at the last GC).

- size_t young_region_num = g1h()->young_list()->length();
+ uint young_region_num = g1h()->young_list()->length();
if (young_region_num > _young_region_num) {
- size_t diff = young_region_num - _young_region_num;
+ uint diff = young_region_num - _young_region_num;
- _eden_used += diff * HeapRegion::GrainBytes;
+ _eden_used += (size_t) diff * HeapRegion::GrainBytes;
// Somewhat defensive: cap the eden used size to make sure it
// never exceeds the committed size.
_eden_used = MIN2(_eden_used, _eden_committed);
@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -147,7 +147,7 @@ class G1MonitoringSupport : public CHeapObj {
size_t _overall_committed;
size_t _overall_used;

- size_t _young_region_num;
+ uint _young_region_num;
size_t _young_gen_committed;
size_t _eden_committed;
size_t _eden_used;
@ -26,7 +26,6 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1_GLOBALS_HPP

#include "runtime/globals.hpp"

//
// Defines all globals flags used by the garbage-first compiler.
//
@ -128,9 +127,6 @@
"Prints the liveness information for all regions in the heap " \
"at the end of a marking cycle.") \
\
- develop(bool, G1PrintParCleanupStats, false, \
- "When true, print extra stats about parallel cleanup.") \
- \
product(intx, G1UpdateBufferSize, 256, \
"Size of an update buffer") \
\
@ -309,7 +305,10 @@
\
develop(uintx, G1OldCSetRegionThresholdPercent, 10, \
"An upper bound for the number of old CSet regions expressed " \
- "as a percentage of the heap size.")
+ "as a percentage of the heap size.") \
+ \
+ experimental(ccstr, G1LogLevel, NULL, \
+ "Log level for G1 logging: fine, finer, finest")

G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)

@ -334,7 +334,7 @@ void HeapRegion::setup_heap_region_size(uintx min_heap_size) {

guarantee(GrainWords == 0, "we should only set it once");
GrainWords = GrainBytes >> LogHeapWordSize;
- guarantee((size_t)(1 << LogOfHRGrainWords) == GrainWords, "sanity");
+ guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

guarantee(CardsPerRegion == 0, "we should only set it once");
CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
@ -370,7 +370,6 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
_claimed = InitialClaimValue;
}
zero_marked_bytes();
- set_sort_index(-1);

_offsets.resize(HeapRegion::GrainWords);
init_top_at_mark_start();
@ -482,17 +481,16 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
#endif // _MSC_VER


- HeapRegion::
- HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray,
- MemRegion mr, bool is_zeroed)
- : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
+ HeapRegion::HeapRegion(uint hrs_index,
+ G1BlockOffsetSharedArray* sharedOffsetArray,
+ MemRegion mr, bool is_zeroed) :
+ G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
_hrs_index(hrs_index),
_humongous_type(NotHumongous), _humongous_start_region(NULL),
_in_collection_set(false),
_next_in_special_set(NULL), _orig_end(NULL),
_claimed(InitialClaimValue), _evacuation_failed(false),
- _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
- _gc_efficiency(0.0),
+ _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
_young_type(NotYoung), _next_young_region(NULL),
_next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
@ -779,16 +777,15 @@ void HeapRegion::print_on(outputStream* st) const {
G1OffsetTableContigSpace::print_on(st);
}

- void HeapRegion::verify(bool allow_dirty) const {
+ void HeapRegion::verify() const {
bool dummy = false;
- verify(allow_dirty, VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
+ verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

- void HeapRegion::verify(bool allow_dirty,
- VerifyOption vo,
+ void HeapRegion::verify(VerifyOption vo,
bool* failures) const {
G1CollectedHeap* g1 = G1CollectedHeap::heap();
*failures = false;
@ -52,12 +52,15 @@ class HeapRegionRemSetIterator;
class HeapRegion;
class HeapRegionSetBase;

- #define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
+ #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(_hr_) \
(_hr_)->hrs_index(), \
(_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
(_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()

+ // sentinel value for hrs_index
+ #define G1_NULL_HRS_INDEX ((uint) -1)
+
// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
// in the concurrent marker used by G1 to filter remembered
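For reference, the two macros above are designed to be used as a pair in format strings; a minimal, illustrative call (hr is assumed to be a HeapRegion*) would look like

  gclog_or_tty->print_cr("region " HR_FORMAT, HR_FORMAT_PARAMS(hr));

so switching HR_FORMAT from SIZE_FORMAT to %u keeps the format in step with hrs_index() now returning uint.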
@ -235,7 +238,7 @@ class HeapRegion: public G1OffsetTableContigSpace {

protected:
// The index of this region in the heap region sequence.
- size_t _hrs_index;
+ uint _hrs_index;

HumongousType _humongous_type;
// For a humongous region, region in which it starts.
@ -278,12 +281,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.

- // See "sort_index" method. -1 means is not in the array.
+ // The calculated GC efficiency of the region.
- int _sort_index;

- // <PREDICTION>
double _gc_efficiency;
- // </PREDICTION>

enum YoungType {
NotYoung, // a region is not young
@ -342,7 +341,7 @@ class HeapRegion: public G1OffsetTableContigSpace {

public:
// If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
- HeapRegion(size_t hrs_index,
+ HeapRegion(uint hrs_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr, bool is_zeroed);

@ -389,7 +388,7 @@ class HeapRegion: public G1OffsetTableContigSpace {

// If this region is a member of a HeapRegionSeq, the index in that
// sequence, otherwise -1.
- size_t hrs_index() const { return _hrs_index; }
+ uint hrs_index() const { return _hrs_index; }

// The number of bytes marked live in the region in the last marking phase.
size_t marked_bytes() { return _prev_marked_bytes; }
@ -626,16 +625,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
// last mark phase ended.
bool is_marked() { return _prev_top_at_mark_start != bottom(); }

- // If "is_marked()" is true, then this is the index of the region in
- // an array constructed at the end of marking of the regions in a
- // "desirability" order.
- int sort_index() {
- return _sort_index;
- }
- void set_sort_index(int i) {
- _sort_index = i;
- }

void init_top_at_conc_mark_count() {
_top_at_conc_mark_count = bottom();
}
@ -823,10 +812,10 @@ class HeapRegion: public G1OffsetTableContigSpace {
// Currently there is only one place where this is called with
// vo == UseMarkWord, which is to verify the marking during a
// full GC.
- void verify(bool allow_dirty, VerifyOption vo, bool *failures) const;
+ void verify(VerifyOption vo, bool *failures) const;

// Override; it uses the "prev" marking information
- virtual void verify(bool allow_dirty) const;
+ virtual void verify() const;
};

// HeapRegionClosure is used for iterating over regions.
@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -577,7 +577,7 @@ void OtherRegionsTable::print_from_card_cache() {
#endif

void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
- size_t cur_hrs_ind = hr()->hrs_index();
+ size_t cur_hrs_ind = (size_t) hr()->hrs_index();

#if HRRS_VERBOSE
gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
@ -841,7 +841,7 @@ PosParPRT* OtherRegionsTable::delete_region_table() {
#endif

// Set the corresponding coarse bit.
- size_t max_hrs_index = max->hr()->hrs_index();
+ size_t max_hrs_index = (size_t) max->hr()->hrs_index();
if (!_coarse_map.at(max_hrs_index)) {
_coarse_map.at_put(max_hrs_index, true);
_n_coarse_entries++;
@ -866,17 +866,20 @@ PosParPRT* OtherRegionsTable::delete_region_table() {
void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
BitMap* region_bm, BitMap* card_bm) {
// First eliminated garbage regions from the coarse map.
- if (G1RSScrubVerbose)
- gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":",
- hr()->hrs_index());
+ if (G1RSScrubVerbose) {
+ gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrs_index());
+ }

assert(_coarse_map.size() == region_bm->size(), "Precondition");
- if (G1RSScrubVerbose)
- gclog_or_tty->print(" Coarse map: before = %d...", _n_coarse_entries);
+ if (G1RSScrubVerbose) {
+ gclog_or_tty->print(" Coarse map: before = "SIZE_FORMAT"...",
+ _n_coarse_entries);
+ }
_coarse_map.set_intersection(*region_bm);
_n_coarse_entries = _coarse_map.count_one_bits();
- if (G1RSScrubVerbose)
- gclog_or_tty->print_cr(" after = %d.", _n_coarse_entries);
+ if (G1RSScrubVerbose) {
+ gclog_or_tty->print_cr(" after = "SIZE_FORMAT".", _n_coarse_entries);
+ }

// Now do the fine-grained maps.
for (size_t i = 0; i < _max_fine_entries; i++) {
@ -885,23 +888,27 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
while (cur != NULL) {
PosParPRT* nxt = cur->next();
// If the entire region is dead, eliminate.
- if (G1RSScrubVerbose)
- gclog_or_tty->print_cr(" For other region "SIZE_FORMAT":",
+ if (G1RSScrubVerbose) {
+ gclog_or_tty->print_cr(" For other region %u:",
cur->hr()->hrs_index());
- if (!region_bm->at(cur->hr()->hrs_index())) {
+ }
+ if (!region_bm->at((size_t) cur->hr()->hrs_index())) {
*prev = nxt;
cur->set_next(NULL);
_n_fine_entries--;
- if (G1RSScrubVerbose)
+ if (G1RSScrubVerbose) {
gclog_or_tty->print_cr(" deleted via region map.");
+ }
PosParPRT::free(cur);
} else {
// Do fine-grain elimination.
- if (G1RSScrubVerbose)
+ if (G1RSScrubVerbose) {
gclog_or_tty->print(" occ: before = %4d.", cur->occupied());
+ }
cur->scrub(ctbs, card_bm);
- if (G1RSScrubVerbose)
+ if (G1RSScrubVerbose) {
gclog_or_tty->print_cr(" after = %4d.", cur->occupied());
+ }
// Did that empty the table completely?
if (cur->occupied() == 0) {
*prev = nxt;
@ -1003,7 +1010,7 @@ void OtherRegionsTable::clear() {

void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
- size_t hrs_ind = from_hr->hrs_index();
+ size_t hrs_ind = (size_t) from_hr->hrs_index();
size_t ind = hrs_ind & _mod_max_fine_entries_mask;
if (del_single_region_table(ind, from_hr)) {
assert(!_coarse_map.at(hrs_ind), "Inv");
@ -1011,7 +1018,7 @@ void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
_coarse_map.par_at_put(hrs_ind, 0);
}
// Check to see if any of the fcc entries come from here.
- size_t hr_ind = hr()->hrs_index();
+ size_t hr_ind = (size_t) hr()->hrs_index();
for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
int fcc_ent = _from_card_cache[tid][hr_ind];
if (fcc_ent != -1) {
@ -1223,7 +1230,7 @@ bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
_coarse_cur_region_cur_card = 0;
HeapWord* r_bot =
- _g1h->region_at(_coarse_cur_region_index)->bottom();
+ _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
_cur_region_card_offset = _bosa->index_for(r_bot);
} else {
return false;
@ -329,13 +329,13 @@ public:

// Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
// (Uses it to initialize from_card_cache).
- static void init_heap(size_t max_regions) {
- OtherRegionsTable::init_from_card_cache(max_regions);
+ static void init_heap(uint max_regions) {
+ OtherRegionsTable::init_from_card_cache((size_t) max_regions);
}

// Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
- static void shrink_heap(size_t new_n_regs) {
- OtherRegionsTable::shrink_from_card_cache(new_n_regs);
+ static void shrink_heap(uint new_n_regs) {
+ OtherRegionsTable::shrink_from_card_cache((size_t) new_n_regs);
}

#ifndef PRODUCT
@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,16 +31,15 @@

// Private

- size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
- size_t len = length();
+ uint HeapRegionSeq::find_contiguous_from(uint from, uint num) {
+ uint len = length();
assert(num > 1, "use this only for sequences of length 2 or greater");
assert(from <= len,
- err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT,
- from, len));
+ err_msg("from: %u should be valid and <= than %u", from, len));

- size_t curr = from;
- size_t first = G1_NULL_HRS_INDEX;
- size_t num_so_far = 0;
+ uint curr = from;
+ uint first = G1_NULL_HRS_INDEX;
+ uint num_so_far = 0;
while (curr < len && num_so_far < num) {
if (at(curr)->is_empty()) {
if (first == G1_NULL_HRS_INDEX) {
@ -60,7 +59,7 @@ size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
// we found enough space for the humongous object
assert(from <= first && first < len, "post-condition");
assert(first < curr && (curr - first) == num, "post-condition");
- for (size_t i = first; i < first + num; ++i) {
+ for (uint i = first; i < first + num; ++i) {
assert(at(i)->is_empty(), "post-condition");
}
return first;
@ -73,10 +72,10 @@ size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
// Public

void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
- size_t max_length) {
- assert((size_t) bottom % HeapRegion::GrainBytes == 0,
+ uint max_length) {
+ assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
"bottom should be heap region aligned");
- assert((size_t) end % HeapRegion::GrainBytes == 0,
+ assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
"end should be heap region aligned");

_length = 0;
@ -88,8 +87,8 @@ void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
|
|||||||
_max_length = max_length;
|
_max_length = max_length;
|
||||||
|
|
||||||
_regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
|
_regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
|
||||||
memset(_regions, 0, max_length * sizeof(HeapRegion*));
|
memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
|
||||||
_regions_biased = _regions - ((size_t) bottom >> _region_shift);
|
_regions_biased = _regions - ((uintx) bottom >> _region_shift);
|
||||||
|
|
||||||
assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
|
assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
|
||||||
"bottom should be included in the region with index 0");
|
"bottom should be included in the region with index 0");
|
||||||
@@ -105,7 +104,7 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
|
|||||||
assert(_heap_bottom <= next_bottom, "invariant");
|
assert(_heap_bottom <= next_bottom, "invariant");
|
||||||
while (next_bottom < new_end) {
|
while (next_bottom < new_end) {
|
||||||
assert(next_bottom < _heap_end, "invariant");
|
assert(next_bottom < _heap_end, "invariant");
|
||||||
size_t index = length();
|
uint index = length();
|
||||||
|
|
||||||
assert(index < _max_length, "otherwise we cannot expand further");
|
assert(index < _max_length, "otherwise we cannot expand further");
|
||||||
if (index == 0) {
|
if (index == 0) {
|
||||||
@@ -139,9 +138,9 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
|
|||||||
return MemRegion(old_end, next_bottom);
|
return MemRegion(old_end, next_bottom);
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t HeapRegionSeq::free_suffix() {
|
uint HeapRegionSeq::free_suffix() {
|
||||||
size_t res = 0;
|
uint res = 0;
|
||||||
size_t index = length();
|
uint index = length();
|
||||||
while (index > 0) {
|
while (index > 0) {
|
||||||
index -= 1;
|
index -= 1;
|
||||||
if (!at(index)->is_empty()) {
|
if (!at(index)->is_empty()) {
|
||||||
@@ -152,27 +151,24 @@ size_t HeapRegionSeq::free_suffix() {
|
|||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t HeapRegionSeq::find_contiguous(size_t num) {
|
uint HeapRegionSeq::find_contiguous(uint num) {
|
||||||
assert(num > 1, "use this only for sequences of length 2 or greater");
|
assert(num > 1, "use this only for sequences of length 2 or greater");
|
||||||
assert(_next_search_index <= length(),
|
assert(_next_search_index <= length(),
|
||||||
err_msg("_next_search_indeex: "SIZE_FORMAT" "
|
err_msg("_next_search_index: %u should be valid and <= than %u",
|
||||||
"should be valid and <= than "SIZE_FORMAT,
|
|
||||||
_next_search_index, length()));
|
_next_search_index, length()));
|
||||||
|
|
||||||
size_t start = _next_search_index;
|
uint start = _next_search_index;
|
||||||
size_t res = find_contiguous_from(start, num);
|
uint res = find_contiguous_from(start, num);
|
||||||
if (res == G1_NULL_HRS_INDEX && start > 0) {
|
if (res == G1_NULL_HRS_INDEX && start > 0) {
|
||||||
// Try starting from the beginning. If _next_search_index was 0,
|
// Try starting from the beginning. If _next_search_index was 0,
|
||||||
// no point in doing this again.
|
// no point in doing this again.
|
||||||
res = find_contiguous_from(0, num);
|
res = find_contiguous_from(0, num);
|
||||||
}
|
}
|
||||||
if (res != G1_NULL_HRS_INDEX) {
|
if (res != G1_NULL_HRS_INDEX) {
|
||||||
assert(res < length(),
|
assert(res < length(), err_msg("res: %u should be valid", res));
|
||||||
err_msg("res: "SIZE_FORMAT" should be valid", res));
|
|
||||||
_next_search_index = res + num;
|
_next_search_index = res + num;
|
||||||
assert(_next_search_index <= length(),
|
assert(_next_search_index <= length(),
|
||||||
err_msg("_next_search_indeex: "SIZE_FORMAT" "
|
err_msg("_next_search_index: %u should be valid and <= than %u",
|
||||||
"should be valid and <= than "SIZE_FORMAT,
|
|
||||||
_next_search_index, length()));
|
_next_search_index, length()));
|
||||||
}
|
}
|
||||||
return res;
|
return res;
|
||||||
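Illustrative sketch (editor's addition, not part of the changeset): find_contiguous() above is a linear scan for num adjacent empty regions that starts at the _next_search_index hint and retries once from index 0 if that first pass fails. The stripped-down model below shows the same search over a plain bool array; all names and values in it are hypothetical.

#include <cstddef>
#include <cstdio>
#include <vector>

static const size_t NOT_FOUND = (size_t) -1;  // plays the role of G1_NULL_HRS_INDEX

// Scan [from, len) for `num` consecutive empty slots; return the first index or NOT_FOUND.
static size_t find_from(const std::vector<bool>& empty, size_t from, size_t num) {
  size_t first = NOT_FOUND;
  size_t found = 0;
  for (size_t i = from; i < empty.size() && found < num; ++i) {
    if (empty[i]) {
      if (first == NOT_FOUND) first = i;
      found += 1;
    } else {
      first = NOT_FOUND;   // the run is broken, start over from the next slot
      found = 0;
    }
  }
  return (found == num) ? first : NOT_FOUND;
}

int main() {
  std::vector<bool> empty = {false, true, true, false, true, true, true};
  size_t hint = 3;                                      // like _next_search_index
  size_t res = find_from(empty, hint, 3);               // finds the run at indices 4..6
  if (res == NOT_FOUND) res = find_from(empty, 0, 3);   // wrap-around retry from the start
  std::printf("first index of the run: %zu\n", res);
  return 0;
}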
@@ -183,20 +179,20 @@ void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
|
void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
|
||||||
size_t hr_index = 0;
|
uint hr_index = 0;
|
||||||
if (hr != NULL) {
|
if (hr != NULL) {
|
||||||
hr_index = (size_t) hr->hrs_index();
|
hr_index = hr->hrs_index();
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t len = length();
|
uint len = length();
|
||||||
for (size_t i = hr_index; i < len; i += 1) {
|
for (uint i = hr_index; i < len; i += 1) {
|
||||||
bool res = blk->doHeapRegion(at(i));
|
bool res = blk->doHeapRegion(at(i));
|
||||||
if (res) {
|
if (res) {
|
||||||
blk->incomplete();
|
blk->incomplete();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for (size_t i = 0; i < hr_index; i += 1) {
|
for (uint i = 0; i < hr_index; i += 1) {
|
||||||
bool res = blk->doHeapRegion(at(i));
|
bool res = blk->doHeapRegion(at(i));
|
||||||
if (res) {
|
if (res) {
|
||||||
blk->incomplete();
|
blk->incomplete();
|
||||||
@@ -206,7 +202,7 @@ void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
|
|||||||
}
|
}
|
||||||
|
|
||||||
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
|
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
|
||||||
size_t* num_regions_deleted) {
|
uint* num_regions_deleted) {
|
||||||
// Reset this in case it's currently pointing into the regions that
|
// Reset this in case it's currently pointing into the regions that
|
||||||
// we just removed.
|
// we just removed.
|
||||||
_next_search_index = 0;
|
_next_search_index = 0;
|
||||||
@@ -218,7 +214,7 @@ MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
|
|||||||
assert(_allocated_length > 0, "we should have at least one region committed");
|
assert(_allocated_length > 0, "we should have at least one region committed");
|
||||||
|
|
||||||
// around the loop, i will be the next region to be removed
|
// around the loop, i will be the next region to be removed
|
||||||
size_t i = length() - 1;
|
uint i = length() - 1;
|
||||||
assert(i > 0, "we should never remove all regions");
|
assert(i > 0, "we should never remove all regions");
|
||||||
// [last_start, end) is the MemRegion that covers the regions we will remove.
|
// [last_start, end) is the MemRegion that covers the regions we will remove.
|
||||||
HeapWord* end = at(i)->end();
|
HeapWord* end = at(i)->end();
|
||||||
@@ -249,29 +245,24 @@ MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
|
|||||||
#ifndef PRODUCT
|
#ifndef PRODUCT
|
||||||
void HeapRegionSeq::verify_optional() {
|
void HeapRegionSeq::verify_optional() {
|
||||||
guarantee(_length <= _allocated_length,
|
guarantee(_length <= _allocated_length,
|
||||||
err_msg("invariant: _length: "SIZE_FORMAT" "
|
err_msg("invariant: _length: %u _allocated_length: %u",
|
||||||
"_allocated_length: "SIZE_FORMAT,
|
|
||||||
_length, _allocated_length));
|
_length, _allocated_length));
|
||||||
guarantee(_allocated_length <= _max_length,
|
guarantee(_allocated_length <= _max_length,
|
||||||
err_msg("invariant: _allocated_length: "SIZE_FORMAT" "
|
err_msg("invariant: _allocated_length: %u _max_length: %u",
|
||||||
"_max_length: "SIZE_FORMAT,
|
|
||||||
_allocated_length, _max_length));
|
_allocated_length, _max_length));
|
||||||
guarantee(_next_search_index <= _length,
|
guarantee(_next_search_index <= _length,
|
||||||
err_msg("invariant: _next_search_index: "SIZE_FORMAT" "
|
err_msg("invariant: _next_search_index: %u _length: %u",
|
||||||
"_length: "SIZE_FORMAT,
|
|
||||||
_next_search_index, _length));
|
_next_search_index, _length));
|
||||||
|
|
||||||
HeapWord* prev_end = _heap_bottom;
|
HeapWord* prev_end = _heap_bottom;
|
||||||
for (size_t i = 0; i < _allocated_length; i += 1) {
|
for (uint i = 0; i < _allocated_length; i += 1) {
|
||||||
HeapRegion* hr = _regions[i];
|
HeapRegion* hr = _regions[i];
|
||||||
guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i));
|
guarantee(hr != NULL, err_msg("invariant: i: %u", i));
|
||||||
guarantee(hr->bottom() == prev_end,
|
guarantee(hr->bottom() == prev_end,
|
||||||
err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" "
|
err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
|
||||||
"prev_end: "PTR_FORMAT,
|
|
||||||
i, HR_FORMAT_PARAMS(hr), prev_end));
|
i, HR_FORMAT_PARAMS(hr), prev_end));
|
||||||
guarantee(hr->hrs_index() == i,
|
guarantee(hr->hrs_index() == i,
|
||||||
err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT,
|
err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
|
||||||
i, hr->hrs_index()));
|
|
||||||
if (i < _length) {
|
if (i < _length) {
|
||||||
// Asserts will fire if i is >= _length
|
// Asserts will fire if i is >= _length
|
||||||
HeapWord* addr = hr->bottom();
|
HeapWord* addr = hr->bottom();
|
||||||
@@ -290,8 +281,8 @@ void HeapRegionSeq::verify_optional() {
|
|||||||
prev_end = hr->end();
|
prev_end = hr->end();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for (size_t i = _allocated_length; i < _max_length; i += 1) {
|
for (uint i = _allocated_length; i < _max_length; i += 1) {
|
||||||
guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i));
|
guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif // PRODUCT
|
#endif // PRODUCT
|
||||||
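Illustrative sketch (editor's addition, not part of the changeset): the hunks above narrow G1's region counts and indices from size_t to uint, which is why every err_msg()/print format in this file drops SIZE_FORMAT in favour of %u so that the varargs stay in sync with the argument type. In plain C++ the same pairing rule looks like this; the variable names are made up for the example.

#include <cstddef>
#include <cstdio>

int main() {
  size_t       old_style_index = 42;   // region index as it used to be stored
  unsigned int new_style_index = 42;   // region index after the size_t -> uint change
  std::printf("old: %zu\n", old_style_index);   // %zu (SIZE_FORMAT) matches size_t
  std::printf("new: %u\n",  new_style_index);   // %u matches unsigned int
  return 0;
}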
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -29,8 +29,6 @@ class HeapRegion;
|
|||||||
class HeapRegionClosure;
|
class HeapRegionClosure;
|
||||||
class FreeRegionList;
|
class FreeRegionList;
|
||||||
|
|
||||||
#define G1_NULL_HRS_INDEX ((size_t) -1)
|
|
||||||
|
|
||||||
// This class keeps track of the region metadata (i.e., HeapRegion
|
// This class keeps track of the region metadata (i.e., HeapRegion
|
||||||
// instances). They are kept in the _regions array in address
|
// instances). They are kept in the _regions array in address
|
||||||
// order. A region's index in the array corresponds to its index in
|
// order. A region's index in the array corresponds to its index in
|
||||||
@@ -65,7 +63,7 @@ class HeapRegionSeq: public CHeapObj {
|
|||||||
HeapRegion** _regions_biased;
|
HeapRegion** _regions_biased;
|
||||||
|
|
||||||
// The number of regions committed in the heap.
|
// The number of regions committed in the heap.
|
||||||
size_t _length;
|
uint _length;
|
||||||
|
|
||||||
// The address of the first reserved word in the heap.
|
// The address of the first reserved word in the heap.
|
||||||
HeapWord* _heap_bottom;
|
HeapWord* _heap_bottom;
|
||||||
@@ -74,32 +72,32 @@ class HeapRegionSeq: public CHeapObj {
|
|||||||
HeapWord* _heap_end;
|
HeapWord* _heap_end;
|
||||||
|
|
||||||
// The log of the region byte size.
|
// The log of the region byte size.
|
||||||
size_t _region_shift;
|
uint _region_shift;
|
||||||
|
|
||||||
// A hint for which index to start searching from for humongous
|
// A hint for which index to start searching from for humongous
|
||||||
// allocations.
|
// allocations.
|
||||||
size_t _next_search_index;
|
uint _next_search_index;
|
||||||
|
|
||||||
// The number of regions for which we have allocated HeapRegions for.
|
// The number of regions for which we have allocated HeapRegions for.
|
||||||
size_t _allocated_length;
|
uint _allocated_length;
|
||||||
|
|
||||||
// The maximum number of regions in the heap.
|
// The maximum number of regions in the heap.
|
||||||
size_t _max_length;
|
uint _max_length;
|
||||||
|
|
||||||
// Find a contiguous set of empty regions of length num, starting
|
// Find a contiguous set of empty regions of length num, starting
|
||||||
// from the given index.
|
// from the given index.
|
||||||
size_t find_contiguous_from(size_t from, size_t num);
|
uint find_contiguous_from(uint from, uint num);
|
||||||
|
|
||||||
// Map a heap address to a biased region index. Assume that the
|
// Map a heap address to a biased region index. Assume that the
|
||||||
// address is valid.
|
// address is valid.
|
||||||
inline size_t addr_to_index_biased(HeapWord* addr) const;
|
inline uintx addr_to_index_biased(HeapWord* addr) const;
|
||||||
|
|
||||||
void increment_length(size_t* length) {
|
void increment_length(uint* length) {
|
||||||
assert(*length < _max_length, "pre-condition");
|
assert(*length < _max_length, "pre-condition");
|
||||||
*length += 1;
|
*length += 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
void decrement_length(size_t* length) {
|
void decrement_length(uint* length) {
|
||||||
assert(*length > 0, "pre-condition");
|
assert(*length > 0, "pre-condition");
|
||||||
*length -= 1;
|
*length -= 1;
|
||||||
}
|
}
|
||||||
@@ -108,11 +106,11 @@ class HeapRegionSeq: public CHeapObj {
|
|||||||
// Empty contructor, we'll initialize it with the initialize() method.
|
// Empty contructor, we'll initialize it with the initialize() method.
|
||||||
HeapRegionSeq() { }
|
HeapRegionSeq() { }
|
||||||
|
|
||||||
void initialize(HeapWord* bottom, HeapWord* end, size_t max_length);
|
void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
|
||||||
|
|
||||||
// Return the HeapRegion at the given index. Assume that the index
|
// Return the HeapRegion at the given index. Assume that the index
|
||||||
// is valid.
|
// is valid.
|
||||||
inline HeapRegion* at(size_t index) const;
|
inline HeapRegion* at(uint index) const;
|
||||||
|
|
||||||
// If addr is within the committed space return its corresponding
|
// If addr is within the committed space return its corresponding
|
||||||
// HeapRegion, otherwise return NULL.
|
// HeapRegion, otherwise return NULL.
|
||||||
@@ -123,10 +121,10 @@ class HeapRegionSeq: public CHeapObj {
|
|||||||
inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
|
inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
|
||||||
|
|
||||||
// Return the number of regions that have been committed in the heap.
|
// Return the number of regions that have been committed in the heap.
|
||||||
size_t length() const { return _length; }
|
uint length() const { return _length; }
|
||||||
|
|
||||||
// Return the maximum number of regions in the heap.
|
// Return the maximum number of regions in the heap.
|
||||||
size_t max_length() const { return _max_length; }
|
uint max_length() const { return _max_length; }
|
||||||
|
|
||||||
// Expand the sequence to reflect that the heap has grown from
|
// Expand the sequence to reflect that the heap has grown from
|
||||||
// old_end to new_end. Either create new HeapRegions, or re-use
|
// old_end to new_end. Either create new HeapRegions, or re-use
|
||||||
@@ -139,12 +137,12 @@ class HeapRegionSeq: public CHeapObj {
|
|||||||
|
|
||||||
// Return the number of contiguous regions at the end of the sequence
|
// Return the number of contiguous regions at the end of the sequence
|
||||||
// that are available for allocation.
|
// that are available for allocation.
|
||||||
size_t free_suffix();
|
uint free_suffix();
|
||||||
|
|
||||||
// Find a contiguous set of empty regions of length num and return
|
// Find a contiguous set of empty regions of length num and return
|
||||||
// the index of the first region or G1_NULL_HRS_INDEX if the
|
// the index of the first region or G1_NULL_HRS_INDEX if the
|
||||||
// search was unsuccessful.
|
// search was unsuccessful.
|
||||||
size_t find_contiguous(size_t num);
|
uint find_contiguous(uint num);
|
||||||
|
|
||||||
// Apply blk->doHeapRegion() on all committed regions in address order,
|
// Apply blk->doHeapRegion() on all committed regions in address order,
|
||||||
// terminating the iteration early if doHeapRegion() returns true.
|
// terminating the iteration early if doHeapRegion() returns true.
|
||||||
@@ -159,7 +157,7 @@ class HeapRegionSeq: public CHeapObj {
|
|||||||
// sequence. Return a MemRegion that corresponds to the address
|
// sequence. Return a MemRegion that corresponds to the address
|
||||||
// range of the uncommitted regions. Assume shrink_bytes is page and
|
// range of the uncommitted regions. Assume shrink_bytes is page and
|
||||||
// heap region aligned.
|
// heap region aligned.
|
||||||
MemRegion shrink_by(size_t shrink_bytes, size_t* num_regions_deleted);
|
MemRegion shrink_by(size_t shrink_bytes, uint* num_regions_deleted);
|
||||||
|
|
||||||
// Do some sanity checking.
|
// Do some sanity checking.
|
||||||
void verify_optional() PRODUCT_RETURN;
|
void verify_optional() PRODUCT_RETURN;
|
||||||
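Illustrative sketch (editor's addition, not part of the changeset): the _regions_biased field declared above is pre-offset so that a heap address shifted right by the region-size log gives a usable array index directly, with no subtraction of the heap bottom on the hot path. The toy program below reproduces that arithmetic with hypothetical numbers; note that, exactly as in the VM code, the biased pointer itself points outside the array and is only ever re-offset before use.

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t region_shift = 20;           // assume 1 MB regions
  const uintptr_t heap_bottom  = 0x40000000;   // hypothetical, region-aligned heap base
  int regions[16] = {0};                       // stands in for the HeapRegion*[] array

  // Bias the base pointer the same way HeapRegionSeq::initialize() does.
  int* regions_biased = regions - (heap_bottom >> region_shift);

  // An address somewhere inside region 3 of this toy heap.
  uintptr_t addr = heap_bottom + 3 * ((uintptr_t)1 << region_shift) + 123;

  // addr_to_index_biased(): a shift is enough, no subtraction needed.
  uintptr_t biased_index = addr >> region_shift;
  assert(&regions_biased[biased_index] == &regions[3]);
  return 0;
}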
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -28,11 +28,11 @@
|
|||||||
#include "gc_implementation/g1/heapRegion.hpp"
|
#include "gc_implementation/g1/heapRegion.hpp"
|
||||||
#include "gc_implementation/g1/heapRegionSeq.hpp"
|
#include "gc_implementation/g1/heapRegionSeq.hpp"
|
||||||
|
|
||||||
inline size_t HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
|
inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
|
||||||
assert(_heap_bottom <= addr && addr < _heap_end,
|
assert(_heap_bottom <= addr && addr < _heap_end,
|
||||||
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
|
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
|
||||||
addr, _heap_bottom, _heap_end));
|
addr, _heap_bottom, _heap_end));
|
||||||
size_t index = (size_t) addr >> _region_shift;
|
uintx index = (uintx) addr >> _region_shift;
|
||||||
return index;
|
return index;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -40,7 +40,7 @@ inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
|
|||||||
assert(_heap_bottom <= addr && addr < _heap_end,
|
assert(_heap_bottom <= addr && addr < _heap_end,
|
||||||
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
|
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
|
||||||
addr, _heap_bottom, _heap_end));
|
addr, _heap_bottom, _heap_end));
|
||||||
size_t index_biased = addr_to_index_biased(addr);
|
uintx index_biased = addr_to_index_biased(addr);
|
||||||
HeapRegion* hr = _regions_biased[index_biased];
|
HeapRegion* hr = _regions_biased[index_biased];
|
||||||
assert(hr != NULL, "invariant");
|
assert(hr != NULL, "invariant");
|
||||||
return hr;
|
return hr;
|
||||||
@@ -55,7 +55,7 @@ inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
inline HeapRegion* HeapRegionSeq::at(size_t index) const {
|
inline HeapRegion* HeapRegionSeq::at(uint index) const {
|
||||||
assert(index < length(), "pre-condition");
|
assert(index < length(), "pre-condition");
|
||||||
HeapRegion* hr = _regions[index];
|
HeapRegion* hr = _regions[index];
|
||||||
assert(hr != NULL, "sanity");
|
assert(hr != NULL, "sanity");
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -25,28 +25,26 @@
|
|||||||
#include "precompiled.hpp"
|
#include "precompiled.hpp"
|
||||||
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
|
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
|
||||||
|
|
||||||
size_t HeapRegionSetBase::_unrealistically_long_length = 0;
|
uint HeapRegionSetBase::_unrealistically_long_length = 0;
|
||||||
HRSPhase HeapRegionSetBase::_phase = HRSPhaseNone;
|
HRSPhase HeapRegionSetBase::_phase = HRSPhaseNone;
|
||||||
|
|
||||||
//////////////////// HeapRegionSetBase ////////////////////
|
//////////////////// HeapRegionSetBase ////////////////////
|
||||||
|
|
||||||
void HeapRegionSetBase::set_unrealistically_long_length(size_t len) {
|
void HeapRegionSetBase::set_unrealistically_long_length(uint len) {
|
||||||
guarantee(_unrealistically_long_length == 0, "should only be set once");
|
guarantee(_unrealistically_long_length == 0, "should only be set once");
|
||||||
_unrealistically_long_length = len;
|
_unrealistically_long_length = len;
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
|
uint HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
|
||||||
assert(hr->startsHumongous(), "pre-condition");
|
assert(hr->startsHumongous(), "pre-condition");
|
||||||
assert(hr->capacity() % HeapRegion::GrainBytes == 0, "invariant");
|
assert(hr->capacity() % HeapRegion::GrainBytes == 0, "invariant");
|
||||||
size_t region_num = hr->capacity() >> HeapRegion::LogOfHRGrainBytes;
|
uint region_num = (uint) (hr->capacity() >> HeapRegion::LogOfHRGrainBytes);
|
||||||
assert(region_num > 0, "sanity");
|
assert(region_num > 0, "sanity");
|
||||||
return region_num;
|
return region_num;
|
||||||
}
|
}
|
||||||
|
|
||||||
void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) {
|
void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) {
|
||||||
msg->append("[%s] %s "
|
msg->append("[%s] %s ln: %u rn: %u cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
|
||||||
"ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" "
|
|
||||||
"cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
|
|
||||||
name(), message, length(), region_num(),
|
name(), message, length(), region_num(),
|
||||||
total_capacity_bytes(), total_used_bytes());
|
total_capacity_bytes(), total_used_bytes());
|
||||||
fill_in_ext_msg_extra(msg);
|
fill_in_ext_msg_extra(msg);
|
||||||
@@ -170,13 +168,11 @@ void HeapRegionSetBase::verify_end() {
|
|||||||
hrs_ext_msg(this, "verification should be in progress"));
|
hrs_ext_msg(this, "verification should be in progress"));
|
||||||
|
|
||||||
guarantee(length() == _calc_length,
|
guarantee(length() == _calc_length,
|
||||||
hrs_err_msg("[%s] length: "SIZE_FORMAT" should be == "
|
hrs_err_msg("[%s] length: %u should be == calc length: %u",
|
||||||
"calc length: "SIZE_FORMAT,
|
|
||||||
name(), length(), _calc_length));
|
name(), length(), _calc_length));
|
||||||
|
|
||||||
guarantee(region_num() == _calc_region_num,
|
guarantee(region_num() == _calc_region_num,
|
||||||
hrs_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
|
hrs_err_msg("[%s] region num: %u should be == calc region num: %u",
|
||||||
"calc region num: "SIZE_FORMAT,
|
|
||||||
name(), region_num(), _calc_region_num));
|
name(), region_num(), _calc_region_num));
|
||||||
|
|
||||||
guarantee(total_capacity_bytes() == _calc_total_capacity_bytes,
|
guarantee(total_capacity_bytes() == _calc_total_capacity_bytes,
|
||||||
@@ -211,8 +207,8 @@ void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) {
|
|||||||
out->print_cr(" humongous : %s", BOOL_TO_STR(regions_humongous()));
|
out->print_cr(" humongous : %s", BOOL_TO_STR(regions_humongous()));
|
||||||
out->print_cr(" empty : %s", BOOL_TO_STR(regions_empty()));
|
out->print_cr(" empty : %s", BOOL_TO_STR(regions_empty()));
|
||||||
out->print_cr(" Attributes");
|
out->print_cr(" Attributes");
|
||||||
out->print_cr(" length : "SIZE_FORMAT_W(14), length());
|
out->print_cr(" length : %14u", length());
|
||||||
out->print_cr(" region num : "SIZE_FORMAT_W(14), region_num());
|
out->print_cr(" region num : %14u", region_num());
|
||||||
out->print_cr(" total capacity : "SIZE_FORMAT_W(14)" bytes",
|
out->print_cr(" total capacity : "SIZE_FORMAT_W(14)" bytes",
|
||||||
total_capacity_bytes());
|
total_capacity_bytes());
|
||||||
out->print_cr(" total used : "SIZE_FORMAT_W(14)" bytes",
|
out->print_cr(" total used : "SIZE_FORMAT_W(14)" bytes",
|
||||||
@@ -243,14 +239,12 @@ void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
|
|||||||
if (proxy_set->is_empty()) return;
|
if (proxy_set->is_empty()) return;
|
||||||
|
|
||||||
assert(proxy_set->length() <= _length,
|
assert(proxy_set->length() <= _length,
|
||||||
hrs_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
|
hrs_err_msg("[%s] proxy set length: %u should be <= length: %u",
|
||||||
"should be <= length: "SIZE_FORMAT,
|
|
||||||
name(), proxy_set->length(), _length));
|
name(), proxy_set->length(), _length));
|
||||||
_length -= proxy_set->length();
|
_length -= proxy_set->length();
|
||||||
|
|
||||||
assert(proxy_set->region_num() <= _region_num,
|
assert(proxy_set->region_num() <= _region_num,
|
||||||
hrs_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
|
hrs_err_msg("[%s] proxy set region num: %u should be <= region num: %u",
|
||||||
"should be <= region num: "SIZE_FORMAT,
|
|
||||||
name(), proxy_set->region_num(), _region_num));
|
name(), proxy_set->region_num(), _region_num));
|
||||||
_region_num -= proxy_set->region_num();
|
_region_num -= proxy_set->region_num();
|
||||||
|
|
||||||
@@ -369,17 +363,17 @@ void HeapRegionLinkedList::remove_all() {
|
|||||||
verify_optional();
|
verify_optional();
|
||||||
}
|
}
|
||||||
|
|
||||||
void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
|
void HeapRegionLinkedList::remove_all_pending(uint target_count) {
|
||||||
hrs_assert_mt_safety_ok(this);
|
hrs_assert_mt_safety_ok(this);
|
||||||
assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
|
assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
|
||||||
assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));
|
assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));
|
||||||
|
|
||||||
verify_optional();
|
verify_optional();
|
||||||
DEBUG_ONLY(size_t old_length = length();)
|
DEBUG_ONLY(uint old_length = length();)
|
||||||
|
|
||||||
HeapRegion* curr = _head;
|
HeapRegion* curr = _head;
|
||||||
HeapRegion* prev = NULL;
|
HeapRegion* prev = NULL;
|
||||||
size_t count = 0;
|
uint count = 0;
|
||||||
while (curr != NULL) {
|
while (curr != NULL) {
|
||||||
hrs_assert_region_ok(this, curr, this);
|
hrs_assert_region_ok(this, curr, this);
|
||||||
HeapRegion* next = curr->next();
|
HeapRegion* next = curr->next();
|
||||||
@@ -387,7 +381,7 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
|
|||||||
if (curr->pending_removal()) {
|
if (curr->pending_removal()) {
|
||||||
assert(count < target_count,
|
assert(count < target_count,
|
||||||
hrs_err_msg("[%s] should not come across more regions "
|
hrs_err_msg("[%s] should not come across more regions "
|
||||||
"pending for removal than target_count: "SIZE_FORMAT,
|
"pending for removal than target_count: %u",
|
||||||
name(), target_count));
|
name(), target_count));
|
||||||
|
|
||||||
if (prev == NULL) {
|
if (prev == NULL) {
|
||||||
@@ -422,12 +416,11 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
assert(count == target_count,
|
assert(count == target_count,
|
||||||
hrs_err_msg("[%s] count: "SIZE_FORMAT" should be == "
|
hrs_err_msg("[%s] count: %u should be == target_count: %u",
|
||||||
"target_count: "SIZE_FORMAT, name(), count, target_count));
|
name(), count, target_count));
|
||||||
assert(length() + target_count == old_length,
|
assert(length() + target_count == old_length,
|
||||||
hrs_err_msg("[%s] new length should be consistent "
|
hrs_err_msg("[%s] new length should be consistent "
|
||||||
"new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" "
|
"new length: %u old length: %u target_count: %u",
|
||||||
"target_count: "SIZE_FORMAT,
|
|
||||||
name(), length(), old_length, target_count));
|
name(), length(), old_length, target_count));
|
||||||
|
|
||||||
verify_optional();
|
verify_optional();
|
||||||
@@ -444,16 +437,16 @@ void HeapRegionLinkedList::verify() {
|
|||||||
HeapRegion* curr = _head;
|
HeapRegion* curr = _head;
|
||||||
HeapRegion* prev1 = NULL;
|
HeapRegion* prev1 = NULL;
|
||||||
HeapRegion* prev0 = NULL;
|
HeapRegion* prev0 = NULL;
|
||||||
size_t count = 0;
|
uint count = 0;
|
||||||
while (curr != NULL) {
|
while (curr != NULL) {
|
||||||
verify_next_region(curr);
|
verify_next_region(curr);
|
||||||
|
|
||||||
count += 1;
|
count += 1;
|
||||||
guarantee(count < _unrealistically_long_length,
|
guarantee(count < _unrealistically_long_length,
|
||||||
hrs_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
|
hrs_err_msg("[%s] the calculated length: %u "
|
||||||
"seems very long, is there maybe a cycle? "
|
"seems very long, is there maybe a cycle? "
|
||||||
"curr: "PTR_FORMAT" prev0: "PTR_FORMAT" "
|
"curr: "PTR_FORMAT" prev0: "PTR_FORMAT" "
|
||||||
"prev1: "PTR_FORMAT" length: "SIZE_FORMAT,
|
"prev1: "PTR_FORMAT" length: %u",
|
||||||
name(), count, curr, prev0, prev1, length()));
|
name(), count, curr, prev0, prev1, length()));
|
||||||
|
|
||||||
prev1 = prev0;
|
prev1 = prev0;
|
||||||
|
@@ -62,20 +62,20 @@ class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
|
|||||||
friend class VMStructs;
|
friend class VMStructs;
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
static size_t calculate_region_num(HeapRegion* hr);
|
static uint calculate_region_num(HeapRegion* hr);
|
||||||
|
|
||||||
static size_t _unrealistically_long_length;
|
static uint _unrealistically_long_length;
|
||||||
|
|
||||||
// The number of regions added to the set. If the set contains
|
// The number of regions added to the set. If the set contains
|
||||||
// only humongous regions, this reflects only 'starts humongous'
|
// only humongous regions, this reflects only 'starts humongous'
|
||||||
// regions and does not include 'continues humongous' ones.
|
// regions and does not include 'continues humongous' ones.
|
||||||
size_t _length;
|
uint _length;
|
||||||
|
|
||||||
// The total number of regions represented by the set. If the set
|
// The total number of regions represented by the set. If the set
|
||||||
// does not contain humongous regions, this should be the same as
|
// does not contain humongous regions, this should be the same as
|
||||||
// _length. If the set contains only humongous regions, this will
|
// _length. If the set contains only humongous regions, this will
|
||||||
// include the 'continues humongous' regions.
|
// include the 'continues humongous' regions.
|
||||||
size_t _region_num;
|
uint _region_num;
|
||||||
|
|
||||||
// We don't keep track of the total capacity explicitly, we instead
|
// We don't keep track of the total capacity explicitly, we instead
|
||||||
// recalculate it based on _region_num and the heap region size.
|
// recalculate it based on _region_num and the heap region size.
|
||||||
@@ -86,8 +86,8 @@ protected:
|
|||||||
const char* _name;
|
const char* _name;
|
||||||
|
|
||||||
bool _verify_in_progress;
|
bool _verify_in_progress;
|
||||||
size_t _calc_length;
|
uint _calc_length;
|
||||||
size_t _calc_region_num;
|
uint _calc_region_num;
|
||||||
size_t _calc_total_capacity_bytes;
|
size_t _calc_total_capacity_bytes;
|
||||||
size_t _calc_total_used_bytes;
|
size_t _calc_total_used_bytes;
|
||||||
|
|
||||||
@@ -153,18 +153,18 @@ protected:
|
|||||||
HeapRegionSetBase(const char* name);
|
HeapRegionSetBase(const char* name);
|
||||||
|
|
||||||
public:
|
public:
|
||||||
static void set_unrealistically_long_length(size_t len);
|
static void set_unrealistically_long_length(uint len);
|
||||||
|
|
||||||
const char* name() { return _name; }
|
const char* name() { return _name; }
|
||||||
|
|
||||||
size_t length() { return _length; }
|
uint length() { return _length; }
|
||||||
|
|
||||||
bool is_empty() { return _length == 0; }
|
bool is_empty() { return _length == 0; }
|
||||||
|
|
||||||
size_t region_num() { return _region_num; }
|
uint region_num() { return _region_num; }
|
||||||
|
|
||||||
size_t total_capacity_bytes() {
|
size_t total_capacity_bytes() {
|
||||||
return region_num() << HeapRegion::LogOfHRGrainBytes;
|
return (size_t) region_num() << HeapRegion::LogOfHRGrainBytes;
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t total_used_bytes() { return _total_used_bytes; }
|
size_t total_used_bytes() { return _total_used_bytes; }
|
||||||
@@ -341,7 +341,7 @@ public:
|
|||||||
// of regions that are pending for removal in the list, and
|
// of regions that are pending for removal in the list, and
|
||||||
// target_count should be > 1 (currently, we never need to remove a
|
// target_count should be > 1 (currently, we never need to remove a
|
||||||
// single region using this).
|
// single region using this).
|
||||||
void remove_all_pending(size_t target_count);
|
void remove_all_pending(uint target_count);
|
||||||
|
|
||||||
virtual void verify();
|
virtual void verify();
|
||||||
|
|
||||||
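Illustrative sketch (editor's addition, not part of the changeset): once region_num() returns a 32-bit uint, total_capacity_bytes() above has to widen to size_t before shifting by LogOfHRGrainBytes, otherwise the product can wrap for large heaps. A standalone version of that concern, with made-up numbers and a 64-bit size_t assumed:

#include <cstddef>
#include <cstdio>

int main() {
  unsigned int region_num      = 8192;  // e.g. an 8 GB heap made of 1 MB regions
  unsigned int log_grain_bytes = 20;    // log2(1 MB)

  size_t capacity = (size_t) region_num << log_grain_bytes;  // widen first: 8 GB
  // unsigned int wrong = region_num << log_grain_bytes;     // 2^33 would wrap in 32 bits

  std::printf("capacity = %zu bytes\n", capacity);
  return 0;
}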
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -54,15 +54,15 @@ inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) {
|
|||||||
assert(_length > 0, hrs_ext_msg(this, "pre-condition"));
|
assert(_length > 0, hrs_ext_msg(this, "pre-condition"));
|
||||||
_length -= 1;
|
_length -= 1;
|
||||||
|
|
||||||
size_t region_num_diff;
|
uint region_num_diff;
|
||||||
if (!hr->isHumongous()) {
|
if (!hr->isHumongous()) {
|
||||||
region_num_diff = 1;
|
region_num_diff = 1;
|
||||||
} else {
|
} else {
|
||||||
region_num_diff = calculate_region_num(hr);
|
region_num_diff = calculate_region_num(hr);
|
||||||
}
|
}
|
||||||
assert(region_num_diff <= _region_num,
|
assert(region_num_diff <= _region_num,
|
||||||
hrs_err_msg("[%s] region's region num: "SIZE_FORMAT" "
|
hrs_err_msg("[%s] region's region num: %u "
|
||||||
"should be <= region num: "SIZE_FORMAT,
|
"should be <= region num: %u",
|
||||||
name(), region_num_diff, _region_num));
|
name(), region_num_diff, _region_num));
|
||||||
_region_num -= region_num_diff;
|
_region_num -= region_num_diff;
|
||||||
|
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -481,8 +481,7 @@ size_t SparsePRT::mem_size() const {
|
|||||||
|
|
||||||
bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
|
bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
|
||||||
#if SPARSE_PRT_VERBOSE
|
#if SPARSE_PRT_VERBOSE
|
||||||
gclog_or_tty->print_cr(" Adding card %d from region %d to region "
|
gclog_or_tty->print_cr(" Adding card %d from region %d to region %u sparse.",
|
||||||
SIZE_FORMAT" sparse.",
|
|
||||||
card_index, region_id, _hr->hrs_index());
|
card_index, region_id, _hr->hrs_index());
|
||||||
#endif
|
#endif
|
||||||
if (_next->occupied_entries() * 2 > _next->capacity()) {
|
if (_next->occupied_entries() * 2 > _next->capacity()) {
|
||||||
@@ -534,7 +533,7 @@ void SparsePRT::expand() {
|
|||||||
_next = new RSHashTable(last->capacity() * 2);
|
_next = new RSHashTable(last->capacity() * 2);
|
||||||
|
|
||||||
#if SPARSE_PRT_VERBOSE
|
#if SPARSE_PRT_VERBOSE
|
||||||
gclog_or_tty->print_cr(" Expanded sparse table for "SIZE_FORMAT" to %d.",
|
gclog_or_tty->print_cr(" Expanded sparse table for %u to %d.",
|
||||||
_hr->hrs_index(), _next->capacity());
|
_hr->hrs_index(), _next->capacity());
|
||||||
#endif
|
#endif
|
||||||
for (size_t i = 0; i < last->capacity(); i++) {
|
for (size_t i = 0; i < last->capacity(); i++) {
|
||||||
|
@@ -34,7 +34,7 @@
|
|||||||
static_field(HeapRegion, GrainBytes, size_t) \
|
static_field(HeapRegion, GrainBytes, size_t) \
|
||||||
\
|
\
|
||||||
nonstatic_field(HeapRegionSeq, _regions, HeapRegion**) \
|
nonstatic_field(HeapRegionSeq, _regions, HeapRegion**) \
|
||||||
nonstatic_field(HeapRegionSeq, _length, size_t) \
|
nonstatic_field(HeapRegionSeq, _length, uint) \
|
||||||
\
|
\
|
||||||
nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \
|
nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \
|
||||||
nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \
|
nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \
|
||||||
@@ -50,8 +50,8 @@
|
|||||||
nonstatic_field(G1MonitoringSupport, _old_committed, size_t) \
|
nonstatic_field(G1MonitoringSupport, _old_committed, size_t) \
|
||||||
nonstatic_field(G1MonitoringSupport, _old_used, size_t) \
|
nonstatic_field(G1MonitoringSupport, _old_used, size_t) \
|
||||||
\
|
\
|
||||||
nonstatic_field(HeapRegionSetBase, _length, size_t) \
|
nonstatic_field(HeapRegionSetBase, _length, uint) \
|
||||||
nonstatic_field(HeapRegionSetBase, _region_num, size_t) \
|
nonstatic_field(HeapRegionSetBase, _region_num, uint) \
|
||||||
nonstatic_field(HeapRegionSetBase, _total_used_bytes, size_t) \
|
nonstatic_field(HeapRegionSetBase, _total_used_bytes, size_t) \
|
||||||
|
|
||||||
|
|
||||||
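Illustrative sketch (editor's addition, not part of the changeset): the vmStructs entries above have to quote the same C++ type as the field they describe, so the _length and _region_num declarations switch to uint together with the classes themselves. The compile-time check below expresses the same constraint against a stand-in struct; all names here are hypothetical.

#include <cstddef>
#include <type_traits>

struct HeapRegionSetBaseModel {    // stand-in for the real class layout
  unsigned int _length;            // now uint
  unsigned int _region_num;        // now uint
  size_t       _total_used_bytes;  // still size_t
};

static_assert(std::is_same<decltype(HeapRegionSetBaseModel::_length), unsigned int>::value,
              "table must declare _length as uint");
static_assert(std::is_same<decltype(HeapRegionSetBaseModel::_region_num), unsigned int>::value,
              "table must declare _region_num as uint");
static_assert(std::is_same<decltype(HeapRegionSetBaseModel::_total_used_bytes), size_t>::value,
              "table must declare _total_used_bytes as size_t");

int main() { return 0; }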
|
@@ -26,6 +26,7 @@
|
|||||||
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
|
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
|
||||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||||
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
|
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
|
||||||
|
#include "gc_implementation/g1/g1Log.hpp"
|
||||||
#include "gc_implementation/g1/vm_operations_g1.hpp"
|
#include "gc_implementation/g1/vm_operations_g1.hpp"
|
||||||
#include "gc_implementation/shared/isGCActiveMark.hpp"
|
#include "gc_implementation/shared/isGCActiveMark.hpp"
|
||||||
#include "gc_implementation/g1/vm_operations_g1.hpp"
|
#include "gc_implementation/g1/vm_operations_g1.hpp"
|
||||||
@@ -223,9 +224,9 @@ void VM_CGC_Operation::release_and_notify_pending_list_lock() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void VM_CGC_Operation::doit() {
|
void VM_CGC_Operation::doit() {
|
||||||
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
|
gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
|
||||||
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
|
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
|
||||||
TraceTime t(_printGCMessage, PrintGC, true, gclog_or_tty);
|
TraceTime t(_printGCMessage, G1Log::fine(), true, gclog_or_tty);
|
||||||
SharedHeap* sh = SharedHeap::heap();
|
SharedHeap* sh = SharedHeap::heap();
|
||||||
// This could go away if CollectedHeap gave access to _gc_is_active...
|
// This could go away if CollectedHeap gave access to _gc_is_active...
|
||||||
if (sh != NULL) {
|
if (sh != NULL) {
|
||||||
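Illustrative sketch (editor's addition, not part of the changeset): the doit() hunk above replaces the raw PrintGC/PrintGCDetails checks with the G1Log fine/finer levels. The snippet below models that level-based gating with a hypothetical logger; it is only meant to show the shape of the change, not the real G1Log API.

#include <cstdio>

enum class GCLogLevel { none, fine, finer };

static GCLogLevel g_level = GCLogLevel::fine;   // assume this was set from a command-line flag

static bool log_fine()  { return g_level >= GCLogLevel::fine;  }   // like G1Log::fine()
static bool log_finer() { return g_level >= GCLogLevel::finer; }   // like G1Log::finer()

static void trace_pause(const char* msg) {
  if (log_fine())  std::printf("[GC] %s\n", msg);          // coarse, date-stamp style output
  if (log_finer()) std::printf("[GC]   cpu time ...\n");   // extra detail only at 'finer'
}

int main() {
  trace_pause("concurrent mark cleanup");
  return 0;
}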
|
@@ -42,7 +42,7 @@ class CheckForUnmarkedOops : public OopClosure {
|
|||||||
|
|
||||||
protected:
|
protected:
|
||||||
template <class T> void do_oop_work(T* p) {
|
template <class T> void do_oop_work(T* p) {
|
||||||
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
|
oop obj = oopDesc::load_decode_heap_oop(p);
|
||||||
if (_young_gen->is_in_reserved(obj) &&
|
if (_young_gen->is_in_reserved(obj) &&
|
||||||
!_card_table->addr_is_marked_imprecise(p)) {
|
!_card_table->addr_is_marked_imprecise(p)) {
|
||||||
// Don't overwrite the first missing card mark
|
// Don't overwrite the first missing card mark
|
||||||
|
@@ -911,23 +911,23 @@ void ParallelScavengeHeap::print_tracing_info() const {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
|
void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
|
||||||
// Why do we need the total_collections()-filter below?
|
// Why do we need the total_collections()-filter below?
|
||||||
if (total_collections() > 0) {
|
if (total_collections() > 0) {
|
||||||
if (!silent) {
|
if (!silent) {
|
||||||
gclog_or_tty->print("permanent ");
|
gclog_or_tty->print("permanent ");
|
||||||
}
|
}
|
||||||
perm_gen()->verify(allow_dirty);
|
perm_gen()->verify();
|
||||||
|
|
||||||
if (!silent) {
|
if (!silent) {
|
||||||
gclog_or_tty->print("tenured ");
|
gclog_or_tty->print("tenured ");
|
||||||
}
|
}
|
||||||
old_gen()->verify(allow_dirty);
|
old_gen()->verify();
|
||||||
|
|
||||||
if (!silent) {
|
if (!silent) {
|
||||||
gclog_or_tty->print("eden ");
|
gclog_or_tty->print("eden ");
|
||||||
}
|
}
|
||||||
young_gen()->verify(allow_dirty);
|
young_gen()->verify();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
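Illustrative sketch (editor's addition, not part of the changeset): the verify() hunks in this changeset all drop the unused allow_dirty flag. Because verify() is virtual, the base declaration, every override and every call site have to move in one step; the cut-down model below shows why, using invented class names.

#include <cstdio>

class SpaceModel {                    // stand-in for the Space/Generation hierarchy
 public:
  virtual ~SpaceModel() {}
  virtual void verify() {             // was: virtual void verify(bool allow_dirty)
    std::printf("space verified\n");
  }
};

class NumaSpaceModel : public SpaceModel {
 public:
  void verify() override {            // must match the new signature or it no longer overrides
    std::printf("ensure parsability, then ");
    SpaceModel::verify();
  }
};

int main() {
  NumaSpaceModel s;
  SpaceModel* base = &s;
  base->verify();                     // callers simply stop passing the flag
  return 0;
}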
|
@@ -257,7 +257,7 @@ CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector
|
|||||||
virtual void gc_threads_do(ThreadClosure* tc) const;
|
virtual void gc_threads_do(ThreadClosure* tc) const;
|
||||||
virtual void print_tracing_info() const;
|
virtual void print_tracing_info() const;
|
||||||
|
|
||||||
void verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */);
|
void verify(bool silent, VerifyOption option /* ignored */);
|
||||||
|
|
||||||
void print_heap_change(size_t prev_used);
|
void print_heap_change(size_t prev_used);
|
||||||
|
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -477,8 +477,8 @@ void PSOldGen::space_invariants() {
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
void PSOldGen::verify(bool allow_dirty) {
|
void PSOldGen::verify() {
|
||||||
object_space()->verify(allow_dirty);
|
object_space()->verify();
|
||||||
}
|
}
|
||||||
class VerifyObjectStartArrayClosure : public ObjectClosure {
|
class VerifyObjectStartArrayClosure : public ObjectClosure {
|
||||||
PSOldGen* _gen;
|
PSOldGen* _gen;
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -174,7 +174,7 @@ class PSOldGen : public CHeapObj {
|
|||||||
virtual void print_on(outputStream* st) const;
|
virtual void print_on(outputStream* st) const;
|
||||||
void print_used_change(size_t prev_used) const;
|
void print_used_change(size_t prev_used) const;
|
||||||
|
|
||||||
void verify(bool allow_dirty);
|
void verify();
|
||||||
void verify_object_start_array();
|
void verify_object_start_array();
|
||||||
|
|
||||||
// These should not used
|
// These should not used
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -937,10 +937,10 @@ void PSYoungGen::update_counters() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void PSYoungGen::verify(bool allow_dirty) {
|
void PSYoungGen::verify() {
|
||||||
eden_space()->verify(allow_dirty);
|
eden_space()->verify();
|
||||||
from_space()->verify(allow_dirty);
|
from_space()->verify();
|
||||||
to_space()->verify(allow_dirty);
|
to_space()->verify();
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifndef PRODUCT
|
#ifndef PRODUCT
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -181,7 +181,7 @@ class PSYoungGen : public CHeapObj {
|
|||||||
void print_used_change(size_t prev_used) const;
|
void print_used_change(size_t prev_used) const;
|
||||||
virtual const char* name() const { return "PSYoungGen"; }
|
virtual const char* name() const { return "PSYoungGen"; }
|
||||||
|
|
||||||
void verify(bool allow_dirty);
|
void verify();
|
||||||
|
|
||||||
// Space boundary invariant checker
|
// Space boundary invariant checker
|
||||||
void space_invariants() PRODUCT_RETURN;
|
void space_invariants() PRODUCT_RETURN;
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -70,7 +70,7 @@ void ImmutableSpace::print() const {
|
|||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
void ImmutableSpace::verify(bool allow_dirty) {
|
void ImmutableSpace::verify() {
|
||||||
HeapWord* p = bottom();
|
HeapWord* p = bottom();
|
||||||
HeapWord* t = end();
|
HeapWord* t = end();
|
||||||
HeapWord* prev_p = NULL;
|
HeapWord* prev_p = NULL;
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -65,7 +65,7 @@ class ImmutableSpace: public CHeapObj {
|
|||||||
// Debugging
|
// Debugging
|
||||||
virtual void print() const PRODUCT_RETURN;
|
virtual void print() const PRODUCT_RETURN;
|
||||||
virtual void print_short() const PRODUCT_RETURN;
|
virtual void print_short() const PRODUCT_RETURN;
|
||||||
virtual void verify(bool allow_dirty);
|
virtual void verify();
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_IMMUTABLESPACE_HPP
|
#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_IMMUTABLESPACE_HPP
|
||||||
|
@@ -891,12 +891,12 @@ void MutableNUMASpace::print_on(outputStream* st) const {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void MutableNUMASpace::verify(bool allow_dirty) {
|
void MutableNUMASpace::verify() {
|
||||||
// This can be called after setting an arbitary value to the space's top,
|
// This can be called after setting an arbitary value to the space's top,
|
||||||
// so an object can cross the chunk boundary. We ensure the parsablity
|
// so an object can cross the chunk boundary. We ensure the parsablity
|
||||||
// of the space and just walk the objects in linear fashion.
|
// of the space and just walk the objects in linear fashion.
|
||||||
ensure_parsability();
|
ensure_parsability();
|
||||||
MutableSpace::verify(allow_dirty);
|
MutableSpace::verify();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Scan pages and gather stats about page placement and size.
|
// Scan pages and gather stats about page placement and size.
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -225,7 +225,7 @@ class MutableNUMASpace : public MutableSpace {
|
|||||||
// Debugging
|
// Debugging
|
||||||
virtual void print_on(outputStream* st) const;
|
virtual void print_on(outputStream* st) const;
|
||||||
virtual void print_short_on(outputStream* st) const;
|
virtual void print_short_on(outputStream* st) const;
|
||||||
virtual void verify(bool allow_dirty);
|
virtual void verify();
|
||||||
|
|
||||||
virtual void set_top(HeapWord* value);
|
virtual void set_top(HeapWord* value);
|
||||||
};
|
};
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -246,7 +246,7 @@ void MutableSpace::print_on(outputStream* st) const {
|
|||||||
bottom(), top(), end());
|
bottom(), top(), end());
|
||||||
}
|
}
|
||||||
|
|
||||||
void MutableSpace::verify(bool allow_dirty) {
|
void MutableSpace::verify() {
|
||||||
HeapWord* p = bottom();
|
HeapWord* p = bottom();
|
||||||
HeapWord* t = top();
|
HeapWord* t = top();
|
||||||
HeapWord* prev_p = NULL;
|
HeapWord* prev_p = NULL;
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -141,7 +141,7 @@ class MutableSpace: public ImmutableSpace {
|
|||||||
virtual void print_on(outputStream* st) const;
|
virtual void print_on(outputStream* st) const;
|
||||||
virtual void print_short() const;
|
virtual void print_short() const;
|
||||||
virtual void print_short_on(outputStream* st) const;
|
virtual void print_short_on(outputStream* st) const;
|
||||||
virtual void verify(bool allow_dirty);
|
virtual void verify();
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_MUTABLESPACE_HPP
|
#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_MUTABLESPACE_HPP
|
||||||
|
@@ -659,7 +659,7 @@ class CollectedHeap : public CHeapObj {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Heap verification
|
// Heap verification
|
||||||
virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0;
|
virtual void verify(bool silent, VerifyOption option) = 0;
|
||||||
|
|
||||||
// Non product verification and debugging.
|
// Non product verification and debugging.
|
||||||
#ifndef PRODUCT
|
#ifndef PRODUCT
|
||||||
|
@@ -444,11 +444,11 @@ void CompactingPermGenGen::invalidate_remembered_set() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void CompactingPermGenGen::verify(bool allow_dirty) {
|
void CompactingPermGenGen::verify() {
|
||||||
the_space()->verify(allow_dirty);
|
the_space()->verify();
|
||||||
if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
|
if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
|
||||||
ro_space()->verify(allow_dirty);
|
ro_space()->verify();
|
||||||
rw_space()->verify(allow_dirty);
|
rw_space()->verify();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -230,7 +230,7 @@ public:
|
|||||||
void* new_vtable_start,
|
void* new_vtable_start,
|
||||||
void* obj);
|
void* obj);
|
||||||
|
|
||||||
void verify(bool allow_dirty);
|
void verify();
|
||||||
|
|
||||||
// Serialization
|
// Serialization
|
||||||
static void initialize_oops() KERNEL_RETURN;
|
static void initialize_oops() KERNEL_RETURN;
|
||||||
|
@@ -939,10 +939,10 @@ void DefNewGeneration::update_counters() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void DefNewGeneration::verify(bool allow_dirty) {
|
void DefNewGeneration::verify() {
|
||||||
eden()->verify(allow_dirty);
|
eden()->verify();
|
||||||
from()->verify(allow_dirty);
|
from()->verify();
|
||||||
to()->verify(allow_dirty);
|
to()->verify();
|
||||||
}
|
}
|
||||||
|
|
||||||
void DefNewGeneration::print_on(outputStream* st) const {
|
void DefNewGeneration::print_on(outputStream* st) const {
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -340,7 +340,7 @@ protected:
|
|||||||
// PrintHeapAtGC support.
|
// PrintHeapAtGC support.
|
||||||
void print_on(outputStream* st) const;
|
void print_on(outputStream* st) const;
|
||||||
|
|
||||||
void verify(bool allow_dirty);
|
void verify();
|
||||||
|
|
||||||
bool promo_failure_scan_is_complete() const {
|
bool promo_failure_scan_is_complete() const {
|
||||||
return _promo_failure_scan_stack.is_empty();
|
return _promo_failure_scan_stack.is_empty();
|
||||||
|
@@ -1247,18 +1247,18 @@ GCStats* GenCollectedHeap::gc_stats(int level) const {
|
|||||||
return _gens[level]->gc_stats();
|
return _gens[level]->gc_stats();
|
||||||
}
|
}
|
||||||
|
|
||||||
void GenCollectedHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
|
void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
|
||||||
if (!silent) {
|
if (!silent) {
|
||||||
gclog_or_tty->print("permgen ");
|
gclog_or_tty->print("permgen ");
|
||||||
}
|
}
|
||||||
perm_gen()->verify(allow_dirty);
|
perm_gen()->verify();
|
||||||
for (int i = _n_gens-1; i >= 0; i--) {
|
for (int i = _n_gens-1; i >= 0; i--) {
|
||||||
Generation* g = _gens[i];
|
Generation* g = _gens[i];
|
||||||
if (!silent) {
|
if (!silent) {
|
||||||
gclog_or_tty->print(g->name());
|
gclog_or_tty->print(g->name());
|
||||||
gclog_or_tty->print(" ");
|
gclog_or_tty->print(" ");
|
||||||
}
|
}
|
||||||
g->verify(allow_dirty);
|
g->verify();
|
||||||
}
|
}
|
||||||
if (!silent) {
|
if (!silent) {
|
||||||
gclog_or_tty->print("remset ");
|
gclog_or_tty->print("remset ");
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -357,7 +357,7 @@ public:
|
|||||||
void prepare_for_verify();
|
void prepare_for_verify();
|
||||||
|
|
||||||
// Override.
|
// Override.
|
||||||
void verify(bool allow_dirty, bool silent, VerifyOption option);
|
void verify(bool silent, VerifyOption option);
|
||||||
|
|
||||||
// Override.
|
// Override.
|
||||||
virtual void print_on(outputStream* st) const;
|
virtual void print_on(outputStream* st) const;
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -696,8 +696,8 @@ void OneContigSpaceCardGeneration::record_spaces_top() {
|
|||||||
the_space()->set_top_for_allocations();
|
the_space()->set_top_for_allocations();
|
||||||
}
|
}
|
||||||
|
|
||||||
void OneContigSpaceCardGeneration::verify(bool allow_dirty) {
|
void OneContigSpaceCardGeneration::verify() {
|
||||||
the_space()->verify(allow_dirty);
|
the_space()->verify();
|
||||||
}
|
}
|
||||||
|
|
||||||
void OneContigSpaceCardGeneration::print_on(outputStream* st) const {
|
void OneContigSpaceCardGeneration::print_on(outputStream* st) const {
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -599,7 +599,7 @@ class Generation: public CHeapObj {
|
|||||||
virtual void print() const;
|
virtual void print() const;
|
||||||
virtual void print_on(outputStream* st) const;
|
virtual void print_on(outputStream* st) const;
|
||||||
|
|
||||||
virtual void verify(bool allow_dirty) = 0;
|
virtual void verify() = 0;
|
||||||
|
|
||||||
struct StatRecord {
|
struct StatRecord {
|
||||||
int invocations;
|
int invocations;
|
||||||
@ -753,7 +753,7 @@ class OneContigSpaceCardGeneration: public CardGeneration {
|
|||||||
|
|
||||||
virtual void record_spaces_top();
|
virtual void record_spaces_top();
|
||||||
|
|
||||||
virtual void verify(bool allow_dirty);
|
virtual void verify();
|
||||||
virtual void print_on(outputStream* st) const;
|
virtual void print_on(outputStream* st) const;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -127,9 +127,12 @@ constantPoolCacheOop oopFactory::new_constantPoolCache(int length,
|
|||||||
klassOop oopFactory::new_instanceKlass(Symbol* name, int vtable_len, int itable_len,
|
klassOop oopFactory::new_instanceKlass(Symbol* name, int vtable_len, int itable_len,
|
||||||
int static_field_size,
|
int static_field_size,
|
||||||
unsigned int nonstatic_oop_map_count,
|
unsigned int nonstatic_oop_map_count,
|
||||||
|
AccessFlags access_flags,
|
||||||
ReferenceType rt, TRAPS) {
|
ReferenceType rt, TRAPS) {
|
||||||
instanceKlassKlass* ikk = instanceKlassKlass::cast(Universe::instanceKlassKlassObj());
|
instanceKlassKlass* ikk = instanceKlassKlass::cast(Universe::instanceKlassKlassObj());
|
||||||
return ikk->allocate_instance_klass(name, vtable_len, itable_len, static_field_size, nonstatic_oop_map_count, rt, CHECK_NULL);
|
return ikk->allocate_instance_klass(name, vtable_len, itable_len,
|
||||||
|
static_field_size, nonstatic_oop_map_count,
|
||||||
|
access_flags, rt, CHECK_NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
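Note on the hunk above: with the extra AccessFlags parameter threaded through oopFactory::new_instanceKlass, a call site now supplies the parsed flags as well. A minimal illustrative call, sketched here with placeholder variable names that are not taken from this patch:

    klassOop k = oopFactory::new_instanceKlass(class_name, vtable_size, itable_size,
                                               static_field_size, nonstatic_oop_map_count,
                                               access_flags, rt, CHECK_NULL);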
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -77,6 +77,7 @@ class oopFactory: AllStatic {
|
|||||||
int vtable_len, int itable_len,
|
int vtable_len, int itable_len,
|
||||||
int static_field_size,
|
int static_field_size,
|
||||||
unsigned int nonstatic_oop_map_count,
|
unsigned int nonstatic_oop_map_count,
|
||||||
|
AccessFlags access_flags,
|
||||||
ReferenceType rt, TRAPS);
|
ReferenceType rt, TRAPS);
|
||||||
|
|
||||||
// Methods
|
// Methods
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -531,7 +531,7 @@ void OffsetTableContigSpace::print_on(outputStream* st) const {
|
|||||||
bottom(), top(), _offsets.threshold(), end());
|
bottom(), top(), _offsets.threshold(), end());
|
||||||
}
|
}
|
||||||
|
|
||||||
void ContiguousSpace::verify(bool allow_dirty) const {
|
void ContiguousSpace::verify() const {
|
||||||
HeapWord* p = bottom();
|
HeapWord* p = bottom();
|
||||||
HeapWord* t = top();
|
HeapWord* t = top();
|
||||||
HeapWord* prev_p = NULL;
|
HeapWord* prev_p = NULL;
|
||||||
@ -965,27 +965,12 @@ OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOff
|
|||||||
initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
|
initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class VerifyOldOopClosure : public OopClosure {
|
|
||||||
public:
|
|
||||||
oop _the_obj;
|
|
||||||
bool _allow_dirty;
|
|
||||||
void do_oop(oop* p) {
|
|
||||||
_the_obj->verify_old_oop(p, _allow_dirty);
|
|
||||||
}
|
|
||||||
void do_oop(narrowOop* p) {
|
|
||||||
_the_obj->verify_old_oop(p, _allow_dirty);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
#define OBJ_SAMPLE_INTERVAL 0
|
#define OBJ_SAMPLE_INTERVAL 0
|
||||||
#define BLOCK_SAMPLE_INTERVAL 100
|
#define BLOCK_SAMPLE_INTERVAL 100
|
||||||
|
|
||||||
void OffsetTableContigSpace::verify(bool allow_dirty) const {
|
void OffsetTableContigSpace::verify() const {
|
||||||
HeapWord* p = bottom();
|
HeapWord* p = bottom();
|
||||||
HeapWord* prev_p = NULL;
|
HeapWord* prev_p = NULL;
|
||||||
VerifyOldOopClosure blk; // Does this do anything?
|
|
||||||
blk._allow_dirty = allow_dirty;
|
|
||||||
int objs = 0;
|
int objs = 0;
|
||||||
int blocks = 0;
|
int blocks = 0;
|
||||||
|
|
||||||
@ -1007,8 +992,6 @@ void OffsetTableContigSpace::verify(bool allow_dirty) const {
|
|||||||
|
|
||||||
if (objs == OBJ_SAMPLE_INTERVAL) {
|
if (objs == OBJ_SAMPLE_INTERVAL) {
|
||||||
oop(p)->verify();
|
oop(p)->verify();
|
||||||
blk._the_obj = oop(p);
|
|
||||||
oop(p)->oop_iterate(&blk);
|
|
||||||
objs = 0;
|
objs = 0;
|
||||||
} else {
|
} else {
|
||||||
objs++;
|
objs++;
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -306,7 +306,7 @@ class Space: public CHeapObj {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Debugging
|
// Debugging
|
||||||
virtual void verify(bool allow_dirty) const = 0;
|
virtual void verify() const = 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
|
// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
|
||||||
@ -948,7 +948,7 @@ class ContiguousSpace: public CompactibleSpace {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Debugging
|
// Debugging
|
||||||
virtual void verify(bool allow_dirty) const;
|
virtual void verify() const;
|
||||||
|
|
||||||
// Used to increase collection frequency. "factor" of 0 means entire
|
// Used to increase collection frequency. "factor" of 0 means entire
|
||||||
// space.
|
// space.
|
||||||
@ -1100,7 +1100,7 @@ class OffsetTableContigSpace: public ContiguousSpace {
|
|||||||
virtual void print_on(outputStream* st) const;
|
virtual void print_on(outputStream* st) const;
|
||||||
|
|
||||||
// Debugging
|
// Debugging
|
||||||
void verify(bool allow_dirty) const;
|
void verify() const;
|
||||||
|
|
||||||
// Shared space support
|
// Shared space support
|
||||||
void serialize_block_offset_array_offsets(SerializeOopClosure* soc);
|
void serialize_block_offset_array_offsets(SerializeOopClosure* soc);
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -1326,7 +1326,7 @@ void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
|
|||||||
st->print_cr("}");
|
st->print_cr("}");
|
||||||
}
|
}
|
||||||
|
|
||||||
void Universe::verify(bool allow_dirty, bool silent, VerifyOption option) {
|
void Universe::verify(bool silent, VerifyOption option) {
|
||||||
if (SharedSkipVerify) {
|
if (SharedSkipVerify) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -1350,7 +1350,7 @@ void Universe::verify(bool allow_dirty, bool silent, VerifyOption option) {
|
|||||||
if (!silent) gclog_or_tty->print("[Verifying ");
|
if (!silent) gclog_or_tty->print("[Verifying ");
|
||||||
if (!silent) gclog_or_tty->print("threads ");
|
if (!silent) gclog_or_tty->print("threads ");
|
||||||
Threads::verify();
|
Threads::verify();
|
||||||
heap()->verify(allow_dirty, silent, option);
|
heap()->verify(silent, option);
|
||||||
|
|
||||||
if (!silent) gclog_or_tty->print("syms ");
|
if (!silent) gclog_or_tty->print("syms ");
|
||||||
SymbolTable::verify();
|
SymbolTable::verify();
|
||||||
|
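Note on the verify() hunks above: since allow_dirty is dropped from the whole verification chain, an external caller of the top-level entry point would now read roughly as follows (illustrative call, not part of the patch; the new default arguments are shown in the universe.hpp hunk below):

    Universe::verify(/* silent */ false, VerifyOption_Default);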
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -412,7 +412,7 @@ class Universe: AllStatic {
|
|||||||
|
|
||||||
// Debugging
|
// Debugging
|
||||||
static bool verify_in_progress() { return _verify_in_progress; }
|
static bool verify_in_progress() { return _verify_in_progress; }
|
||||||
static void verify(bool allow_dirty = true, bool silent = false,
|
static void verify(bool silent = false,
|
||||||
VerifyOption option = VerifyOption_Default );
|
VerifyOption option = VerifyOption_Default );
|
||||||
static int verify_count() { return _verify_count; }
|
static int verify_count() { return _verify_count; }
|
||||||
// The default behavior is to call print_on() on gclog_or_tty.
|
// The default behavior is to call print_on() on gclog_or_tty.
|
||||||
|
@ -567,8 +567,18 @@ void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle
|
|||||||
ol.notify_all(CHECK);
|
ol.notify_all(CHECK);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The embedded _implementor field can only record one implementor.
|
||||||
|
// When there is more than one implementor, the _implementor field
|
||||||
|
// is set to the interface klassOop itself. Following are the possible
|
||||||
|
// values for the _implementor field:
|
||||||
|
// NULL - no implementor
|
||||||
|
// implementor klassOop - one implementor
|
||||||
|
// self - more than one implementor
|
||||||
|
//
|
||||||
|
// The _implementor field only exists for interfaces.
|
||||||
void instanceKlass::add_implementor(klassOop k) {
|
void instanceKlass::add_implementor(klassOop k) {
|
||||||
assert(Compile_lock->owned_by_self(), "");
|
assert(Compile_lock->owned_by_self(), "");
|
||||||
|
assert(is_interface(), "not interface");
|
||||||
// Filter out my subinterfaces.
|
// Filter out my subinterfaces.
|
||||||
// (Note: Interfaces are never on the subklass list.)
|
// (Note: Interfaces are never on the subklass list.)
|
||||||
if (instanceKlass::cast(k)->is_interface()) return;
|
if (instanceKlass::cast(k)->is_interface()) return;
|
||||||
@ -583,17 +593,13 @@ void instanceKlass::add_implementor(klassOop k) {
|
|||||||
// Any supers of the super have the same (or fewer) transitive_interfaces.
|
// Any supers of the super have the same (or fewer) transitive_interfaces.
|
||||||
return;
|
return;
|
||||||
|
|
||||||
// Update number of implementors
|
klassOop ik = implementor();
|
||||||
int i = _nof_implementors++;
|
if (ik == NULL) {
|
||||||
|
set_implementor(k);
|
||||||
// Record this implementor, if there are not too many already
|
} else if (ik != this->as_klassOop()) {
|
||||||
if (i < implementors_limit) {
|
// There is already an implementor. Use itself as an indicator of
|
||||||
assert(_implementors[i] == NULL, "should be exactly one implementor");
|
// more than one implementor.
|
||||||
oop_store_without_check((oop*)&_implementors[i], k);
|
set_implementor(this->as_klassOop());
|
||||||
} else if (i == implementors_limit) {
|
|
||||||
// clear out the list on first overflow
|
|
||||||
for (int i2 = 0; i2 < implementors_limit; i2++)
|
|
||||||
oop_store_without_check((oop*)&_implementors[i2], NULL);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// The implementor also implements the transitive_interfaces
|
// The implementor also implements the transitive_interfaces
|
||||||
@ -603,9 +609,9 @@ void instanceKlass::add_implementor(klassOop k) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void instanceKlass::init_implementor() {
|
void instanceKlass::init_implementor() {
|
||||||
for (int i = 0; i < implementors_limit; i++)
|
if (is_interface()) {
|
||||||
oop_store_without_check((oop*)&_implementors[i], NULL);
|
set_implementor(NULL);
|
||||||
_nof_implementors = 0;
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -1849,24 +1855,22 @@ int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
|
|||||||
void instanceKlass::follow_weak_klass_links(
|
void instanceKlass::follow_weak_klass_links(
|
||||||
BoolObjectClosure* is_alive, OopClosure* keep_alive) {
|
BoolObjectClosure* is_alive, OopClosure* keep_alive) {
|
||||||
assert(is_alive->do_object_b(as_klassOop()), "this oop should be live");
|
assert(is_alive->do_object_b(as_klassOop()), "this oop should be live");
|
||||||
if (ClassUnloading) {
|
|
||||||
for (int i = 0; i < implementors_limit; i++) {
|
if (is_interface()) {
|
||||||
klassOop impl = _implementors[i];
|
if (ClassUnloading) {
|
||||||
if (impl == NULL) break; // no more in the list
|
klassOop impl = implementor();
|
||||||
if (!is_alive->do_object_b(impl)) {
|
if (impl != NULL) {
|
||||||
// remove this guy from the list by overwriting him with the tail
|
if (!is_alive->do_object_b(impl)) {
|
||||||
int lasti = --_nof_implementors;
|
// remove this guy
|
||||||
assert(lasti >= i && lasti < implementors_limit, "just checking");
|
*start_of_implementor() = NULL;
|
||||||
_implementors[i] = _implementors[lasti];
|
}
|
||||||
_implementors[lasti] = NULL;
|
|
||||||
--i; // rerun the loop at this index
|
|
||||||
}
|
}
|
||||||
}
|
} else {
|
||||||
} else {
|
assert(adr_implementor() != NULL, "just checking");
|
||||||
for (int i = 0; i < implementors_limit; i++) {
|
keep_alive->do_oop(adr_implementor());
|
||||||
keep_alive->do_oop(&adr_implementors()[i]);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Klass::follow_weak_klass_links(is_alive, keep_alive);
|
Klass::follow_weak_klass_links(is_alive, keep_alive);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -56,8 +56,6 @@
|
|||||||
// [methods ]
|
// [methods ]
|
||||||
// [local interfaces ]
|
// [local interfaces ]
|
||||||
// [transitive interfaces ]
|
// [transitive interfaces ]
|
||||||
// [number of implementors ]
|
|
||||||
// [implementors ] klassOop[2]
|
|
||||||
// [fields ]
|
// [fields ]
|
||||||
// [constants ]
|
// [constants ]
|
||||||
// [class loader ]
|
// [class loader ]
|
||||||
@ -77,9 +75,9 @@
|
|||||||
// [oop map cache (stack maps) ]
|
// [oop map cache (stack maps) ]
|
||||||
// [EMBEDDED Java vtable ] size in words = vtable_len
|
// [EMBEDDED Java vtable ] size in words = vtable_len
|
||||||
// [EMBEDDED nonstatic oop-map blocks] size in words = nonstatic_oop_map_size
|
// [EMBEDDED nonstatic oop-map blocks] size in words = nonstatic_oop_map_size
|
||||||
//
|
// The embedded nonstatic oop-map blocks are short pairs (offset, length)
|
||||||
// The embedded nonstatic oop-map blocks are short pairs (offset, length) indicating
|
// indicating where oops are located in instances of this klass.
|
||||||
// where oops are located in instances of this klass.
|
// [EMBEDDED implementor of the interface] only exists for interfaces
|
||||||
|
|
||||||
|
|
||||||
// forward declaration for class -- see below for definition
|
// forward declaration for class -- see below for definition
|
||||||
@ -153,10 +151,6 @@ class instanceKlass: public Klass {
|
|||||||
oop* oop_block_beg() const { return adr_array_klasses(); }
|
oop* oop_block_beg() const { return adr_array_klasses(); }
|
||||||
oop* oop_block_end() const { return adr_methods_default_annotations() + 1; }
|
oop* oop_block_end() const { return adr_methods_default_annotations() + 1; }
|
||||||
|
|
||||||
enum {
|
|
||||||
implementors_limit = 2 // how many implems can we track?
|
|
||||||
};
|
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
//
|
//
|
||||||
// The oop block. See comment in klass.hpp before making changes.
|
// The oop block. See comment in klass.hpp before making changes.
|
||||||
@ -200,8 +194,6 @@ class instanceKlass: public Klass {
|
|||||||
// and EnclosingMethod attributes the _inner_classes array length is
|
// and EnclosingMethod attributes the _inner_classes array length is
|
||||||
// number_of_inner_classes * 4 + enclosing_method_attribute_size.
|
// number_of_inner_classes * 4 + enclosing_method_attribute_size.
|
||||||
typeArrayOop _inner_classes;
|
typeArrayOop _inner_classes;
|
||||||
// Implementors of this interface (not valid if it overflows)
|
|
||||||
klassOop _implementors[implementors_limit];
|
|
||||||
// Annotations for this class, or null if none.
|
// Annotations for this class, or null if none.
|
||||||
typeArrayOop _class_annotations;
|
typeArrayOop _class_annotations;
|
||||||
// Annotation objects (byte arrays) for fields, or null if no annotations.
|
// Annotation objects (byte arrays) for fields, or null if no annotations.
|
||||||
@ -257,7 +249,6 @@ class instanceKlass: public Klass {
|
|||||||
nmethodBucket* _dependencies; // list of dependent nmethods
|
nmethodBucket* _dependencies; // list of dependent nmethods
|
||||||
nmethod* _osr_nmethods_head; // Head of list of on-stack replacement nmethods for this class
|
nmethod* _osr_nmethods_head; // Head of list of on-stack replacement nmethods for this class
|
||||||
BreakpointInfo* _breakpoints; // bpt lists, managed by methodOop
|
BreakpointInfo* _breakpoints; // bpt lists, managed by methodOop
|
||||||
int _nof_implementors; // No of implementors of this interface (zero if not an interface)
|
|
||||||
// Array of interesting part(s) of the previous version(s) of this
|
// Array of interesting part(s) of the previous version(s) of this
|
||||||
// instanceKlass. See PreviousVersionWalker below.
|
// instanceKlass. See PreviousVersionWalker below.
|
||||||
GrowableArray<PreviousVersionNode *>* _previous_versions;
|
GrowableArray<PreviousVersionNode *>* _previous_versions;
|
||||||
@ -278,6 +269,13 @@ class instanceKlass: public Klass {
|
|||||||
// embedded Java itables follows here
|
// embedded Java itables follows here
|
||||||
// embedded static fields follows here
|
// embedded static fields follows here
|
||||||
// embedded nonstatic oop-map blocks follows here
|
// embedded nonstatic oop-map blocks follows here
|
||||||
|
// embedded implementor of this interface follows here
|
||||||
|
// The embedded implementor only exists if the current klass is an
|
||||||
|
// interface. The possible values of the implementor fall into the following
|
||||||
|
// three cases:
|
||||||
|
// NULL: no implementor.
|
||||||
|
// A klassOop that's not itself: one implementor.
|
||||||
|
// Itself: more than one implementor.
|
||||||
|
|
||||||
friend class instanceKlassKlass;
|
friend class instanceKlassKlass;
|
||||||
friend class SystemDictionary;
|
friend class SystemDictionary;
|
||||||
@ -644,19 +642,40 @@ class instanceKlass: public Klass {
|
|||||||
|
|
||||||
// support for stub routines
|
// support for stub routines
|
||||||
static ByteSize init_state_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _init_state)); }
|
static ByteSize init_state_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _init_state)); }
|
||||||
|
TRACE_DEFINE_OFFSET;
|
||||||
static ByteSize init_thread_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _init_thread)); }
|
static ByteSize init_thread_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _init_thread)); }
|
||||||
|
|
||||||
// subclass/subinterface checks
|
// subclass/subinterface checks
|
||||||
bool implements_interface(klassOop k) const;
|
bool implements_interface(klassOop k) const;
|
||||||
|
|
||||||
// Access to implementors of an interface. We only store the count
|
// Access to the implementor of an interface.
|
||||||
// of implementors, and in case, there are only a few
|
klassOop implementor() const
|
||||||
// implementors, we store them in a short list.
|
{
|
||||||
// This accessor returns NULL if we walk off the end of the list.
|
klassOop* k = start_of_implementor();
|
||||||
klassOop implementor(int i) const {
|
if (k == NULL) {
|
||||||
return (i < implementors_limit)? _implementors[i]: (klassOop) NULL;
|
return NULL;
|
||||||
|
} else {
|
||||||
|
return *k;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
int nof_implementors() const { return _nof_implementors; }
|
|
||||||
|
void set_implementor(klassOop k) {
|
||||||
|
assert(is_interface(), "not interface");
|
||||||
|
oop* addr = (oop*)start_of_implementor();
|
||||||
|
oop_store_without_check(addr, k);
|
||||||
|
}
|
||||||
|
|
||||||
|
int nof_implementors() const {
|
||||||
|
klassOop k = implementor();
|
||||||
|
if (k == NULL) {
|
||||||
|
return 0;
|
||||||
|
} else if (k != this->as_klassOop()) {
|
||||||
|
return 1;
|
||||||
|
} else {
|
||||||
|
return 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
void add_implementor(klassOop k); // k is a new class that implements this interface
|
void add_implementor(klassOop k); // k is a new class that implements this interface
|
||||||
void init_implementor(); // initialize
|
void init_implementor(); // initialize
|
||||||
|
|
||||||
@ -693,7 +712,15 @@ class instanceKlass: public Klass {
|
|||||||
|
|
||||||
// Sizing (in words)
|
// Sizing (in words)
|
||||||
static int header_size() { return align_object_offset(oopDesc::header_size() + sizeof(instanceKlass)/HeapWordSize); }
|
static int header_size() { return align_object_offset(oopDesc::header_size() + sizeof(instanceKlass)/HeapWordSize); }
|
||||||
int object_size() const { return object_size(align_object_offset(vtable_length()) + align_object_offset(itable_length()) + nonstatic_oop_map_size()); }
|
|
||||||
|
int object_size() const
|
||||||
|
{
|
||||||
|
return object_size(align_object_offset(vtable_length()) +
|
||||||
|
align_object_offset(itable_length()) +
|
||||||
|
(is_interface() ?
|
||||||
|
(align_object_offset(nonstatic_oop_map_size()) + (int)sizeof(klassOop)/HeapWordSize) :
|
||||||
|
nonstatic_oop_map_size()));
|
||||||
|
}
|
||||||
static int vtable_start_offset() { return header_size(); }
|
static int vtable_start_offset() { return header_size(); }
|
||||||
static int vtable_length_offset() { return oopDesc::header_size() + offset_of(instanceKlass, _vtable_len) / HeapWordSize; }
|
static int vtable_length_offset() { return oopDesc::header_size() + offset_of(instanceKlass, _vtable_len) / HeapWordSize; }
|
||||||
static int object_size(int extra) { return align_object_size(header_size() + extra); }
|
static int object_size(int extra) { return align_object_size(header_size() + extra); }
|
||||||
@ -710,6 +737,15 @@ class instanceKlass: public Klass {
|
|||||||
return (OopMapBlock*)(start_of_itable() + align_object_offset(itable_length()));
|
return (OopMapBlock*)(start_of_itable() + align_object_offset(itable_length()));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
klassOop* start_of_implementor() const {
|
||||||
|
if (is_interface()) {
|
||||||
|
return (klassOop*)(start_of_nonstatic_oop_maps() +
|
||||||
|
nonstatic_oop_map_count());
|
||||||
|
} else {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
// Allocation profiling support
|
// Allocation profiling support
|
||||||
juint alloc_size() const { return _alloc_count * size_helper(); }
|
juint alloc_size() const { return _alloc_count * size_helper(); }
|
||||||
void set_alloc_size(juint n) {}
|
void set_alloc_size(juint n) {}
|
||||||
@ -819,7 +855,7 @@ private:
|
|||||||
oop* adr_host_klass() const { return (oop*)&this->_host_klass;}
|
oop* adr_host_klass() const { return (oop*)&this->_host_klass;}
|
||||||
oop* adr_signers() const { return (oop*)&this->_signers;}
|
oop* adr_signers() const { return (oop*)&this->_signers;}
|
||||||
oop* adr_inner_classes() const { return (oop*)&this->_inner_classes;}
|
oop* adr_inner_classes() const { return (oop*)&this->_inner_classes;}
|
||||||
oop* adr_implementors() const { return (oop*)&this->_implementors[0];}
|
oop* adr_implementor() const { return (oop*)start_of_implementor(); }
|
||||||
oop* adr_methods_jmethod_ids() const { return (oop*)&this->_methods_jmethod_ids;}
|
oop* adr_methods_jmethod_ids() const { return (oop*)&this->_methods_jmethod_ids;}
|
||||||
oop* adr_methods_cached_itable_indices() const { return (oop*)&this->_methods_cached_itable_indices;}
|
oop* adr_methods_cached_itable_indices() const { return (oop*)&this->_methods_cached_itable_indices;}
|
||||||
oop* adr_class_annotations() const { return (oop*)&this->_class_annotations;}
|
oop* adr_class_annotations() const { return (oop*)&this->_class_annotations;}
|
||||||
|
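Note on the instanceKlass.hpp hunks above: the fixed two-slot _implementors array is replaced by one embedded slot whose value encodes three states. Restated as a standalone helper, this is a sketch of the decoding that the new nof_implementors() performs, not code from the patch:

    // 'slot' is the value read from the embedded implementor word of interface 'iface'.
    static int implementor_count(klassOop iface, klassOop slot) {
      if (slot == NULL)  return 0;   // no implementor recorded
      if (slot != iface) return 1;   // exactly one implementor: 'slot' itself
      return 2;                      // slot points back at the interface: two or more
    }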
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -111,7 +111,7 @@ void instanceKlassKlass::oop_follow_contents(oop obj) {
|
|||||||
MarkSweep::mark_and_push(ik->adr_methods_parameter_annotations());
|
MarkSweep::mark_and_push(ik->adr_methods_parameter_annotations());
|
||||||
MarkSweep::mark_and_push(ik->adr_methods_default_annotations());
|
MarkSweep::mark_and_push(ik->adr_methods_default_annotations());
|
||||||
|
|
||||||
// We do not follow adr_implementors() here. It is followed later
|
// We do not follow adr_implementor() here. It is followed later
|
||||||
// in instanceKlass::follow_weak_klass_links()
|
// in instanceKlass::follow_weak_klass_links()
|
||||||
|
|
||||||
klassKlass::oop_follow_contents(obj);
|
klassKlass::oop_follow_contents(obj);
|
||||||
@ -180,8 +180,8 @@ int instanceKlassKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
|
|||||||
blk->do_oop(ik->adr_host_klass());
|
blk->do_oop(ik->adr_host_klass());
|
||||||
blk->do_oop(ik->adr_signers());
|
blk->do_oop(ik->adr_signers());
|
||||||
blk->do_oop(ik->adr_inner_classes());
|
blk->do_oop(ik->adr_inner_classes());
|
||||||
for (int i = 0; i < instanceKlass::implementors_limit; i++) {
|
if (ik->is_interface()) {
|
||||||
blk->do_oop(&ik->adr_implementors()[i]);
|
blk->do_oop(ik->adr_implementor());
|
||||||
}
|
}
|
||||||
blk->do_oop(ik->adr_class_annotations());
|
blk->do_oop(ik->adr_class_annotations());
|
||||||
blk->do_oop(ik->adr_fields_annotations());
|
blk->do_oop(ik->adr_fields_annotations());
|
||||||
@ -232,9 +232,9 @@ int instanceKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk,
|
|||||||
if (mr.contains(adr)) blk->do_oop(adr);
|
if (mr.contains(adr)) blk->do_oop(adr);
|
||||||
adr = ik->adr_inner_classes();
|
adr = ik->adr_inner_classes();
|
||||||
if (mr.contains(adr)) blk->do_oop(adr);
|
if (mr.contains(adr)) blk->do_oop(adr);
|
||||||
adr = ik->adr_implementors();
|
if (ik->is_interface()) {
|
||||||
for (int i = 0; i < instanceKlass::implementors_limit; i++) {
|
adr = ik->adr_implementor();
|
||||||
if (mr.contains(&adr[i])) blk->do_oop(&adr[i]);
|
if (mr.contains(adr)) blk->do_oop(adr);
|
||||||
}
|
}
|
||||||
adr = ik->adr_class_annotations();
|
adr = ik->adr_class_annotations();
|
||||||
if (mr.contains(adr)) blk->do_oop(adr);
|
if (mr.contains(adr)) blk->do_oop(adr);
|
||||||
@ -273,8 +273,8 @@ int instanceKlassKlass::oop_adjust_pointers(oop obj) {
|
|||||||
MarkSweep::adjust_pointer(ik->adr_host_klass());
|
MarkSweep::adjust_pointer(ik->adr_host_klass());
|
||||||
MarkSweep::adjust_pointer(ik->adr_signers());
|
MarkSweep::adjust_pointer(ik->adr_signers());
|
||||||
MarkSweep::adjust_pointer(ik->adr_inner_classes());
|
MarkSweep::adjust_pointer(ik->adr_inner_classes());
|
||||||
for (int i = 0; i < instanceKlass::implementors_limit; i++) {
|
if (ik->is_interface()) {
|
||||||
MarkSweep::adjust_pointer(&ik->adr_implementors()[i]);
|
MarkSweep::adjust_pointer(ik->adr_implementor());
|
||||||
}
|
}
|
||||||
MarkSweep::adjust_pointer(ik->adr_class_annotations());
|
MarkSweep::adjust_pointer(ik->adr_class_annotations());
|
||||||
MarkSweep::adjust_pointer(ik->adr_fields_annotations());
|
MarkSweep::adjust_pointer(ik->adr_fields_annotations());
|
||||||
@ -328,6 +328,9 @@ int instanceKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
|
|||||||
for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
|
for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
|
||||||
PSParallelCompact::adjust_pointer(cur_oop);
|
PSParallelCompact::adjust_pointer(cur_oop);
|
||||||
}
|
}
|
||||||
|
if (ik->is_interface()) {
|
||||||
|
PSParallelCompact::adjust_pointer(ik->adr_implementor());
|
||||||
|
}
|
||||||
|
|
||||||
OopClosure* closure = PSParallelCompact::adjust_root_pointer_closure();
|
OopClosure* closure = PSParallelCompact::adjust_root_pointer_closure();
|
||||||
iterate_c_heap_oops(ik, closure);
|
iterate_c_heap_oops(ik, closure);
|
||||||
@ -342,11 +345,18 @@ klassOop
|
|||||||
instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int itable_len,
|
instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int itable_len,
|
||||||
int static_field_size,
|
int static_field_size,
|
||||||
unsigned nonstatic_oop_map_count,
|
unsigned nonstatic_oop_map_count,
|
||||||
|
AccessFlags access_flags,
|
||||||
ReferenceType rt, TRAPS) {
|
ReferenceType rt, TRAPS) {
|
||||||
|
|
||||||
const int nonstatic_oop_map_size =
|
const int nonstatic_oop_map_size =
|
||||||
instanceKlass::nonstatic_oop_map_size(nonstatic_oop_map_count);
|
instanceKlass::nonstatic_oop_map_size(nonstatic_oop_map_count);
|
||||||
int size = instanceKlass::object_size(align_object_offset(vtable_len) + align_object_offset(itable_len) + nonstatic_oop_map_size);
|
int size = align_object_offset(vtable_len) + align_object_offset(itable_len);
|
||||||
|
if (access_flags.is_interface()) {
|
||||||
|
size += align_object_offset(nonstatic_oop_map_size) + (int)sizeof(klassOop)/HeapWordSize;
|
||||||
|
} else {
|
||||||
|
size += nonstatic_oop_map_size;
|
||||||
|
}
|
||||||
|
size = instanceKlass::object_size(size);
|
||||||
|
|
||||||
// Allocation
|
// Allocation
|
||||||
KlassHandle h_this_klass(THREAD, as_klassOop());
|
KlassHandle h_this_klass(THREAD, as_klassOop());
|
||||||
@ -378,6 +388,7 @@ instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int it
|
|||||||
ik->set_itable_length(itable_len);
|
ik->set_itable_length(itable_len);
|
||||||
ik->set_static_field_size(static_field_size);
|
ik->set_static_field_size(static_field_size);
|
||||||
ik->set_nonstatic_oop_map_size(nonstatic_oop_map_size);
|
ik->set_nonstatic_oop_map_size(nonstatic_oop_map_size);
|
||||||
|
ik->set_access_flags(access_flags);
|
||||||
assert(k()->size() == size, "wrong size for object");
|
assert(k()->size() == size, "wrong size for object");
|
||||||
|
|
||||||
ik->set_array_klasses(NULL);
|
ik->set_array_klasses(NULL);
|
||||||
@ -470,16 +481,12 @@ void instanceKlassKlass::oop_print_on(oop obj, outputStream* st) {
|
|||||||
|
|
||||||
if (ik->is_interface()) {
|
if (ik->is_interface()) {
|
||||||
st->print_cr(BULLET"nof implementors: %d", ik->nof_implementors());
|
st->print_cr(BULLET"nof implementors: %d", ik->nof_implementors());
|
||||||
int print_impl = 0;
|
if (ik->nof_implementors() == 1) {
|
||||||
for (int i = 0; i < instanceKlass::implementors_limit; i++) {
|
st->print_cr(BULLET"implementor: ");
|
||||||
if (ik->implementor(i) != NULL) {
|
st->print(" ");
|
||||||
if (++print_impl == 1)
|
ik->implementor()->print_value_on(st);
|
||||||
st->print_cr(BULLET"implementor: ");
|
st->cr();
|
||||||
st->print(" ");
|
|
||||||
ik->implementor(i)->print_value_on(st);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if (print_impl > 0) st->cr();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
st->print(BULLET"arrays: "); ik->array_klasses()->print_value_on(st); st->cr();
|
st->print(BULLET"arrays: "); ik->array_klasses()->print_value_on(st); st->cr();
|
||||||
@ -640,16 +647,12 @@ void instanceKlassKlass::oop_verify_on(oop obj, outputStream* st) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Verify implementor fields
|
// Verify implementor fields
|
||||||
bool saw_null_impl = false;
|
klassOop im = ik->implementor();
|
||||||
for (int i = 0; i < instanceKlass::implementors_limit; i++) {
|
if (im != NULL) {
|
||||||
klassOop im = ik->implementor(i);
|
|
||||||
if (im == NULL) { saw_null_impl = true; continue; }
|
|
||||||
guarantee(!saw_null_impl, "non-nulls must preceded all nulls");
|
|
||||||
guarantee(ik->is_interface(), "only interfaces should have implementor set");
|
guarantee(ik->is_interface(), "only interfaces should have implementor set");
|
||||||
guarantee(i < ik->nof_implementors(), "should only have one implementor");
|
|
||||||
guarantee(im->is_perm(), "should be in permspace");
|
guarantee(im->is_perm(), "should be in permspace");
|
||||||
guarantee(im->is_klass(), "should be klass");
|
guarantee(im->is_klass(), "should be klass");
|
||||||
guarantee(!Klass::cast(klassOop(im))->is_interface(), "implementors cannot be interfaces");
|
guarantee(!Klass::cast(klassOop(im))->is_interface() || im == ik->as_klassOop(), "implementors cannot be interfaces");
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify local interfaces
|
// Verify local interfaces
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -46,6 +46,7 @@ class instanceKlassKlass : public klassKlass {
|
|||||||
int itable_len,
|
int itable_len,
|
||||||
int static_field_size,
|
int static_field_size,
|
||||||
unsigned int nonstatic_oop_map_count,
|
unsigned int nonstatic_oop_map_count,
|
||||||
|
AccessFlags access_flags,
|
||||||
ReferenceType rt,
|
ReferenceType rt,
|
||||||
TRAPS);
|
TRAPS);
|
||||||
|
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -497,36 +497,12 @@ void instanceRefKlass::oop_verify_on(oop obj, outputStream* st) {
|
|||||||
|
|
||||||
if (referent != NULL) {
|
if (referent != NULL) {
|
||||||
guarantee(referent->is_oop(), "referent field heap failed");
|
guarantee(referent->is_oop(), "referent field heap failed");
|
||||||
if (gch != NULL && !gch->is_in_young(obj)) {
|
|
||||||
// We do a specific remembered set check here since the referent
|
|
||||||
// field is not part of the oop mask and therefore skipped by the
|
|
||||||
// regular verify code.
|
|
||||||
if (UseCompressedOops) {
|
|
||||||
narrowOop* referent_addr = (narrowOop*)java_lang_ref_Reference::referent_addr(obj);
|
|
||||||
obj->verify_old_oop(referent_addr, true);
|
|
||||||
} else {
|
|
||||||
oop* referent_addr = (oop*)java_lang_ref_Reference::referent_addr(obj);
|
|
||||||
obj->verify_old_oop(referent_addr, true);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
// Verify next field
|
// Verify next field
|
||||||
oop next = java_lang_ref_Reference::next(obj);
|
oop next = java_lang_ref_Reference::next(obj);
|
||||||
if (next != NULL) {
|
if (next != NULL) {
|
||||||
guarantee(next->is_oop(), "next field verify failed");
|
guarantee(next->is_oop(), "next field verify failed");
|
||||||
guarantee(next->is_instanceRef(), "next field verify failed");
|
guarantee(next->is_instanceRef(), "next field verify failed");
|
||||||
if (gch != NULL && !gch->is_in_young(obj)) {
|
|
||||||
// We do a specific remembered set check here since the next field is
|
|
||||||
// not part of the oop mask and therefore skipped by the regular
|
|
||||||
// verify code.
|
|
||||||
if (UseCompressedOops) {
|
|
||||||
narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
|
|
||||||
obj->verify_old_oop(next_addr, true);
|
|
||||||
} else {
|
|
||||||
oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
|
|
||||||
obj->verify_old_oop(next_addr, true);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -581,14 +581,6 @@ void Klass::oop_verify_on(oop obj, outputStream* st) {
|
|||||||
guarantee(obj->klass()->is_klass(), "klass field is not a klass");
|
guarantee(obj->klass()->is_klass(), "klass field is not a klass");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Klass::oop_verify_old_oop(oop obj, oop* p, bool allow_dirty) {
|
|
||||||
/* $$$ I think this functionality should be handled by verification of
|
|
||||||
RememberedSet::verify_old_oop(obj, p, allow_dirty, false);
|
|
||||||
the card table. */
|
|
||||||
}
|
|
||||||
void Klass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) { }
|
|
||||||
|
|
||||||
#ifndef PRODUCT
|
#ifndef PRODUCT
|
||||||
|
|
||||||
void Klass::verify_vtable_index(int i) {
|
void Klass::verify_vtable_index(int i) {
|
||||||
|
@ -805,8 +805,6 @@ class Klass : public Klass_vtbl {
|
|||||||
// Verification
|
// Verification
|
||||||
virtual const char* internal_name() const = 0;
|
virtual const char* internal_name() const = 0;
|
||||||
virtual void oop_verify_on(oop obj, outputStream* st);
|
virtual void oop_verify_on(oop obj, outputStream* st);
|
||||||
virtual void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
|
|
||||||
virtual void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
|
|
||||||
// tells whether obj is partially constructed (gc during class loading)
|
// tells whether obj is partially constructed (gc during class loading)
|
||||||
virtual bool oop_partially_loaded(oop obj) const { return false; }
|
virtual bool oop_partially_loaded(oop obj) const { return false; }
|
||||||
virtual void oop_set_partially_loaded(oop obj) {};
|
virtual void oop_set_partially_loaded(oop obj) {};
|
||||||
|
@ -545,10 +545,3 @@ void objArrayKlass::oop_verify_on(oop obj, outputStream* st) {
|
|||||||
guarantee(oa->obj_at(index)->is_oop_or_null(), "should be oop");
|
guarantee(oa->obj_at(index)->is_oop_or_null(), "should be oop");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void objArrayKlass::oop_verify_old_oop(oop obj, oop* p, bool allow_dirty) {
|
|
||||||
/* $$$ move into remembered set verification?
|
|
||||||
RememberedSet::verify_old_oop(obj, p, allow_dirty, true);
|
|
||||||
*/
|
|
||||||
}
|
|
||||||
void objArrayKlass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) {}
|
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -144,8 +144,6 @@ class objArrayKlass : public arrayKlass {
|
|||||||
// Verification
|
// Verification
|
||||||
const char* internal_name() const;
|
const char* internal_name() const;
|
||||||
void oop_verify_on(oop obj, outputStream* st);
|
void oop_verify_on(oop obj, outputStream* st);
|
||||||
void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
|
|
||||||
void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif // SHARE_VM_OOPS_OBJARRAYKLASS_HPP
|
#endif // SHARE_VM_OOPS_OBJARRAYKLASS_HPP
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -107,16 +107,6 @@ void oopDesc::verify() {
|
|||||||
verify_on(tty);
|
verify_on(tty);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// XXX verify_old_oop doesn't do anything (should we remove?)
|
|
||||||
void oopDesc::verify_old_oop(oop* p, bool allow_dirty) {
|
|
||||||
blueprint()->oop_verify_old_oop(this, p, allow_dirty);
|
|
||||||
}
|
|
||||||
|
|
||||||
void oopDesc::verify_old_oop(narrowOop* p, bool allow_dirty) {
|
|
||||||
blueprint()->oop_verify_old_oop(this, p, allow_dirty);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool oopDesc::partially_loaded() {
|
bool oopDesc::partially_loaded() {
|
||||||
return blueprint()->oop_partially_loaded(this);
|
return blueprint()->oop_partially_loaded(this);
|
||||||
}
|
}
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -293,8 +293,6 @@ class oopDesc {
|
|||||||
// verification operations
|
// verification operations
|
||||||
void verify_on(outputStream* st);
|
void verify_on(outputStream* st);
|
||||||
void verify();
|
void verify();
|
||||||
void verify_old_oop(oop* p, bool allow_dirty);
|
|
||||||
void verify_old_oop(narrowOop* p, bool allow_dirty);
|
|
||||||
|
|
||||||
// tells whether this oop is partially constructed (gc during class loading)
|
// tells whether this oop is partially constructed (gc during class loading)
|
||||||
bool partially_loaded();
|
bool partially_loaded();
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -175,7 +175,11 @@ class LibraryCallKit : public GraphKit {
|
|||||||
bool inline_unsafe_allocate();
|
bool inline_unsafe_allocate();
|
||||||
bool inline_unsafe_copyMemory();
|
bool inline_unsafe_copyMemory();
|
||||||
bool inline_native_currentThread();
|
bool inline_native_currentThread();
|
||||||
bool inline_native_time_funcs(bool isNano);
|
#ifdef TRACE_HAVE_INTRINSICS
|
||||||
|
bool inline_native_classID();
|
||||||
|
bool inline_native_threadID();
|
||||||
|
#endif
|
||||||
|
bool inline_native_time_funcs(address method, const char* funcName);
|
||||||
bool inline_native_isInterrupted();
|
bool inline_native_isInterrupted();
|
||||||
bool inline_native_Class_query(vmIntrinsics::ID id);
|
bool inline_native_Class_query(vmIntrinsics::ID id);
|
||||||
bool inline_native_subtype_check();
|
bool inline_native_subtype_check();
|
||||||
@ -638,10 +642,18 @@ bool LibraryCallKit::try_to_inline() {
|
|||||||
case vmIntrinsics::_isInterrupted:
|
case vmIntrinsics::_isInterrupted:
|
||||||
return inline_native_isInterrupted();
|
return inline_native_isInterrupted();
|
||||||
|
|
||||||
|
#ifdef TRACE_HAVE_INTRINSICS
|
||||||
|
case vmIntrinsics::_classID:
|
||||||
|
return inline_native_classID();
|
||||||
|
case vmIntrinsics::_threadID:
|
||||||
|
return inline_native_threadID();
|
||||||
|
case vmIntrinsics::_counterTime:
|
||||||
|
return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
|
||||||
|
#endif
|
||||||
case vmIntrinsics::_currentTimeMillis:
|
case vmIntrinsics::_currentTimeMillis:
|
||||||
return inline_native_time_funcs(false);
|
return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
|
||||||
case vmIntrinsics::_nanoTime:
|
case vmIntrinsics::_nanoTime:
|
||||||
return inline_native_time_funcs(true);
|
return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
|
||||||
case vmIntrinsics::_allocateInstance:
|
case vmIntrinsics::_allocateInstance:
|
||||||
return inline_unsafe_allocate();
|
return inline_unsafe_allocate();
|
||||||
case vmIntrinsics::_copyMemory:
|
case vmIntrinsics::_copyMemory:
|
||||||
@ -2840,14 +2852,63 @@ bool LibraryCallKit::inline_unsafe_allocate() {
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef TRACE_HAVE_INTRINSICS
|
||||||
|
/*
|
||||||
|
* oop -> myklass
|
||||||
|
* myklass->trace_id |= USED
|
||||||
|
* return myklass->trace_id & ~0x3
|
||||||
|
*/
|
||||||
|
bool LibraryCallKit::inline_native_classID() {
|
||||||
|
int nargs = 1 + 1;
|
||||||
|
null_check_receiver(callee()); // check then ignore argument(0)
|
||||||
|
_sp += nargs;
|
||||||
|
Node* cls = do_null_check(argument(1), T_OBJECT);
|
||||||
|
_sp -= nargs;
|
||||||
|
Node* kls = load_klass_from_mirror(cls, false, nargs, NULL, 0);
|
||||||
|
_sp += nargs;
|
||||||
|
kls = do_null_check(kls, T_OBJECT);
|
||||||
|
_sp -= nargs;
|
||||||
|
ByteSize offset = TRACE_ID_OFFSET;
|
||||||
|
Node* insp = basic_plus_adr(kls, in_bytes(offset));
|
||||||
|
Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG);
|
||||||
|
Node* bits = longcon(~0x03l); // ignore bit 0 & 1
|
||||||
|
Node* andl = _gvn.transform(new (C, 3) AndLNode(tvalue, bits));
|
||||||
|
Node* clsused = longcon(0x01l); // set the class bit
|
||||||
|
Node* orl = _gvn.transform(new (C, 3) OrLNode(tvalue, clsused));
|
||||||
|
|
||||||
|
const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
|
||||||
|
store_to_memory(control(), insp, orl, T_LONG, adr_type);
|
||||||
|
push_pair(andl);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool LibraryCallKit::inline_native_threadID() {
|
||||||
|
Node* tls_ptr = NULL;
|
||||||
|
Node* cur_thr = generate_current_thread(tls_ptr);
|
||||||
|
Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
|
||||||
|
Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS);
|
||||||
|
p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::thread_id_offset()));
|
||||||
|
|
||||||
|
Node* threadid = NULL;
|
||||||
|
size_t thread_id_size = OSThread::thread_id_size();
|
||||||
|
if (thread_id_size == (size_t) BytesPerLong) {
|
||||||
|
threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG));
|
||||||
|
push(threadid);
|
||||||
|
} else if (thread_id_size == (size_t) BytesPerInt) {
|
||||||
|
threadid = make_load(control(), p, TypeInt::INT, T_INT);
|
||||||
|
push(threadid);
|
||||||
|
} else {
|
||||||
|
ShouldNotReachHere();
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
//------------------------inline_native_time_funcs--------------
|
//------------------------inline_native_time_funcs--------------
|
||||||
// inline code for System.currentTimeMillis() and System.nanoTime()
|
// inline code for System.currentTimeMillis() and System.nanoTime()
|
||||||
// these have the same type and signature
|
// these have the same type and signature
|
||||||
bool LibraryCallKit::inline_native_time_funcs(bool isNano) {
|
bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
|
||||||
address funcAddr = isNano ? CAST_FROM_FN_PTR(address, os::javaTimeNanos) :
|
const TypeFunc *tf = OptoRuntime::void_long_Type();
|
||||||
CAST_FROM_FN_PTR(address, os::javaTimeMillis);
|
|
||||||
const char * funcName = isNano ? "nanoTime" : "currentTimeMillis";
|
|
||||||
const TypeFunc *tf = OptoRuntime::current_time_millis_Type();
|
|
||||||
const TypePtr* no_memory_effects = NULL;
|
const TypePtr* no_memory_effects = NULL;
|
||||||
Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
|
Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
|
||||||
Node* value = _gvn.transform(new (C, 1) ProjNode(time, TypeFunc::Parms+0));
|
Node* value = _gvn.transform(new (C, 1) ProjNode(time, TypeFunc::Parms+0));
|
||||||
|
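Note on the classID intrinsic above: in scalar terms, the ideal-graph nodes built by inline_native_classID() amount to the following read-modify-write on the klass's 64-bit trace_id field (a sketch of the semantics only; the accessors are hypothetical and not part of the patch):

    jlong tvalue = klass->trace_id();        // current value (hypothetical accessor)
    jlong result = tvalue & ~0x03l;          // value pushed: trace id with the two low flag bits cleared
    klass->set_trace_id(tvalue | 0x01l);     // value stored back: class marked as used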
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -709,9 +709,9 @@ const TypeFunc* OptoRuntime::Math_DD_D_Type() {
|
|||||||
return TypeFunc::make(domain, range);
|
return TypeFunc::make(domain, range);
|
||||||
}
|
}
|
||||||
|
|
||||||
//-------------- currentTimeMillis
|
//-------------- currentTimeMillis, currentTimeNanos, etc
|
||||||
|
|
||||||
const TypeFunc* OptoRuntime::current_time_millis_Type() {
|
const TypeFunc* OptoRuntime::void_long_Type() {
|
||||||
// create input type (domain)
|
// create input type (domain)
|
||||||
const Type **fields = TypeTuple::fields(0);
|
const Type **fields = TypeTuple::fields(0);
|
||||||
const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
|
const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
|
||||||
|
Some files were not shown because too many files have changed in this diff.