8015774: Add support for multiple code heaps

Support for segmentation of the code cache. Separate code heaps are created and used to store code of different types.
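In outline: with -XX:+SegmentedCodeCache (enabled by default when TieredCompilation is on and ReservedCodeCacheSize >= 240 MB, per the codeCache.hpp comment below), the code cache is split into a non-method heap, a profiled-method heap and a non-profiled-method heap, sized by the new NonMethodCodeHeapSize, ProfiledCodeHeapSize and NonProfiledCodeHeapSize flags. The following sketch is a simplified, self-contained restatement of the CodeBlobType enum and the compilation-level-to-heap mapping introduced in codeBlob.hpp and codeCache.hpp in this change; it is illustrative only and not the HotSpot code itself (the main() driver and the function name are invented for the example).

// Simplified restatement of the heap-selection logic added by this patch.
#include <cstdio>

enum CompLevel {
  CompLevel_none              = 0,  // interpreter / native wrappers
  CompLevel_simple            = 1,  // C1 without profiling
  CompLevel_limited_profile   = 2,  // C1 with light profiling
  CompLevel_full_profile      = 3,  // C1 with full profiling
  CompLevel_full_optimization = 4   // C2
};

// Mirrors struct CodeBlobType from codeBlob.hpp in this change
enum CodeBlobType {
  MethodNonProfiled = 0,  // level 1 and 4 (non-profiled) nmethods, incl. native nmethods
  MethodProfiled    = 1,  // level 2 and 3 (profiled) nmethods
  NonMethod         = 2,  // buffers, adapters and runtime stubs
  All               = 3   // single heap when segmentation is disabled
};

// Same mapping as CodeCache::get_code_blob_type(comp_level) in the patch
static CodeBlobType code_blob_type_for(CompLevel level) {
  if (level == CompLevel_none || level == CompLevel_simple ||
      level == CompLevel_full_optimization) {
    return MethodNonProfiled;
  }
  return MethodProfiled;  // CompLevel_limited_profile / CompLevel_full_profile
}

int main() {
  printf("level 4 -> code blob type %d\n", code_blob_type_for(CompLevel_full_optimization)); // 0
  printf("level 3 -> code blob type %d\n", code_blob_type_for(CompLevel_full_profile));      // 1
  return 0;
}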

Reviewed-by: kvn, iveresov, roland, anoll, egahlin, sla
Tobias Hartmann 2014-09-17 08:00:07 +02:00
parent b2620f89c3
commit cd01de591a
51 changed files with 1756 additions and 680 deletions

@ -32,12 +32,10 @@ import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
public class CodeCache {
private static AddressField heapField;
private static AddressField scavengeRootNMethodsField;
private static GrowableArray<CodeHeap> heapArray;
private static AddressField scavengeRootNMethodsField;
private static VirtualConstructor virtualConstructor;
private CodeHeap heap;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
@ -49,7 +47,10 @@ public class CodeCache {
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("CodeCache");
heapField = type.getAddressField("_heap");
// Get array of CodeHeaps
AddressField heapsField = type.getAddressField("_heaps");
heapArray = GrowableArray.create(heapsField.getValue(), new StaticBaseConstructor<CodeHeap>(CodeHeap.class));
scavengeRootNMethodsField = type.getAddressField("_scavenge_root_nmethods");
virtualConstructor = new VirtualConstructor(db);
@ -67,16 +68,17 @@ public class CodeCache {
}
}
public CodeCache() {
heap = (CodeHeap) VMObjectFactory.newObject(CodeHeap.class, heapField.getValue());
}
public NMethod scavengeRootMethods() {
return (NMethod) VMObjectFactory.newObject(NMethod.class, scavengeRootNMethodsField.getValue());
}
public boolean contains(Address p) {
return getHeap().contains(p);
for (int i = 0; i < heapArray.length(); ++i) {
if (heapArray.at(i).contains(p)) {
return true;
}
}
return false;
}
/** When VM.getVM().isDebugging() returns true, this behaves like
@ -97,14 +99,24 @@ public class CodeCache {
public CodeBlob findBlobUnsafe(Address start) {
CodeBlob result = null;
CodeHeap containing_heap = null;
for (int i = 0; i < heapArray.length(); ++i) {
if (heapArray.at(i).contains(start)) {
containing_heap = heapArray.at(i);
break;
}
}
if (containing_heap == null) {
return null;
}
try {
result = (CodeBlob) virtualConstructor.instantiateWrapperFor(getHeap().findStart(start));
result = (CodeBlob) virtualConstructor.instantiateWrapperFor(containing_heap.findStart(start));
}
catch (WrongTypeException wte) {
Address cbAddr = null;
try {
cbAddr = getHeap().findStart(start);
cbAddr = containing_heap.findStart(start);
}
catch (Exception findEx) {
findEx.printStackTrace();
@ -167,31 +179,32 @@ public class CodeCache {
}
public void iterate(CodeCacheVisitor visitor) {
CodeHeap heap = getHeap();
Address ptr = heap.begin();
Address end = heap.end();
visitor.prologue(ptr, end);
visitor.prologue(lowBound(), highBound());
CodeBlob lastBlob = null;
while (ptr != null && ptr.lessThan(end)) {
try {
// Use findStart to get a pointer inside blob; otherwise findBlob asserts
CodeBlob blob = findBlobUnsafe(heap.findStart(ptr));
if (blob != null) {
visitor.visit(blob);
if (blob == lastBlob) {
throw new InternalError("saw same blob twice");
for (int i = 0; i < heapArray.length(); ++i) {
CodeHeap current_heap = heapArray.at(i);
Address ptr = current_heap.begin();
while (ptr != null && ptr.lessThan(current_heap.end())) {
try {
// Use findStart to get a pointer inside blob; otherwise findBlob asserts
CodeBlob blob = findBlobUnsafe(current_heap.findStart(ptr));
if (blob != null) {
visitor.visit(blob);
if (blob == lastBlob) {
throw new InternalError("saw same blob twice");
}
lastBlob = blob;
}
lastBlob = blob;
} catch (RuntimeException e) {
e.printStackTrace();
}
} catch (RuntimeException e) {
e.printStackTrace();
Address next = current_heap.nextBlock(ptr);
if (next != null && next.lessThan(ptr)) {
throw new InternalError("pointer moved backwards");
}
ptr = next;
}
Address next = heap.nextBlock(ptr);
if (next != null && next.lessThan(ptr)) {
throw new InternalError("pointer moved backwards");
}
ptr = next;
}
visitor.epilogue();
}
@ -200,7 +213,23 @@ public class CodeCache {
// Internals only below this point
//
private CodeHeap getHeap() {
return heap;
private Address lowBound() {
Address low = heapArray.at(0).begin();
for (int i = 1; i < heapArray.length(); ++i) {
if (heapArray.at(i).begin().lessThan(low)) {
low = heapArray.at(i).begin();
}
}
return low;
}
private Address highBound() {
Address high = heapArray.at(0).end();
for (int i = 1; i < heapArray.length(); ++i) {
if (heapArray.at(i).end().greaterThan(high)) {
high = heapArray.at(i).end();
}
}
return high;
}
}

@ -29,7 +29,7 @@
SUNWprivate_1.1 {
global:
# Dtrace support
__1cJCodeCacheF_heap_;
__1cJCodeCacheG_heaps_;
__1cIUniverseO_collectedHeap_;
__1cGMethodG__vtbl_;
__1cHnmethodG__vtbl_;

@ -29,7 +29,7 @@
SUNWprivate_1.1 {
global:
# Dtrace support
__1cJCodeCacheF_heap_;
__1cJCodeCacheG_heaps_;
__1cIUniverseO_collectedHeap_;
__1cGMethodG__vtbl_;
__1cHnmethodG__vtbl_;

@ -29,7 +29,7 @@
SUNWprivate_1.1 {
global:
# Dtrace support
__1cJCodeCacheF_heap_;
__1cJCodeCacheG_heaps_;
__1cIUniverseO_collectedHeap_;
__1cGMethodG__vtbl_;
__1cHnmethodG__vtbl_;

@ -79,6 +79,9 @@ define_pd_global(bool, OptoScheduling, false);
define_pd_global(intx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, ReservedCodeCacheSize, 256*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 125*M);
define_pd_global(intx, ProfiledCodeHeapSize, 126*M);
define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags

@ -47,6 +47,9 @@ define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, FreqInlineSize, 325 );
define_pd_global(bool, ResizeTLAB, true );
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);

@ -74,6 +74,9 @@ define_pd_global(bool, OptoScheduling, true);
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(intx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
@ -82,6 +85,9 @@ define_pd_global(uint64_t,MaxRAM, 128ULL*G);
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(intx, InitialCodeCacheSize, 1536*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, ReservedCodeCacheSize, 32*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M);
define_pd_global(intx, ProfiledCodeHeapSize, 14*M);
define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
define_pd_global(uint64_t,MaxRAM, 4ULL*G);

@ -47,6 +47,9 @@ define_pd_global(intx, FreqInlineSize, 325 );
define_pd_global(intx, NewSizeThreadIncrease, 4*K );
define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx, CodeCacheMinBlockLength, 1);

@ -84,6 +84,9 @@ define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);

@ -53,6 +53,9 @@ define_pd_global(uintx, NewRatio, 12 );
define_pd_global(intx, NewSizeThreadIncrease, 4*K );
define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx, CodeCacheMinBlockLength, 1 );

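Note on the default sizes above: in every *_globals file touched, the three new code heap sizes sum to that platform's ReservedCodeCacheSize, e.g. 125M + 126M + 5M = 256M, 21M + 22M + 5M = 48M, and 13M + 14M + 5M = 32M. The non-method heap gets a fixed 5M and the remainder is split roughly evenly between the profiled and non-profiled method heaps.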
@ -67,7 +67,7 @@
* we link this program with -z nodefs .
*
* But for 'debug1' and 'fastdebug1' we still have to provide
* a particular workaround for the following symbols bellow.
* a particular workaround for the following symbols below.
* It will be good to find out a generic way in the future.
*/
@ -87,21 +87,24 @@ StubQueue* AbstractInterpreter::_code = NULL;
#endif /* ASSERT */
#endif /* COMPILER1 */
#define GEN_OFFS(Type,Name) \
#define GEN_OFFS_NAME(Type,Name,OutputType) \
switch(gen_variant) { \
case GEN_OFFSET: \
printf("#define OFFSET_%-33s %ld\n", \
#Type #Name, offset_of(Type, Name)); \
printf("#define OFFSET_%-33s %ld\n", \
#OutputType #Name, offset_of(Type, Name)); \
break; \
case GEN_INDEX: \
printf("#define IDX_OFFSET_%-33s %d\n", \
#Type #Name, index++); \
#OutputType #Name, index++); \
break; \
case GEN_TABLE: \
printf("\tOFFSET_%s,\n", #Type #Name); \
printf("\tOFFSET_%s,\n", #OutputType #Name); \
break; \
}
#define GEN_OFFS(Type,Name) \
GEN_OFFS_NAME(Type,Name,Type)
#define GEN_SIZE(Type) \
switch(gen_variant) { \
case GEN_OFFSET: \
@ -246,6 +249,11 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(VirtualSpace, _high);
printf("\n");
/* We need to use different names here because of the template parameter */
GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _data, GrowableArray_CodeHeap);
GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _len, GrowableArray_CodeHeap);
printf("\n");
GEN_OFFS(CodeBlob, _name);
GEN_OFFS(CodeBlob, _header_size);
GEN_OFFS(CodeBlob, _content_offset);

@ -43,7 +43,9 @@
extern pointer __JvmOffsets;
extern pointer __1cJCodeCacheF_heap_;
/* GrowableArray<CodeHeap*>* */
extern pointer __1cJCodeCacheG_heaps_;
extern pointer __1cIUniverseO_collectedHeap_;
extern pointer __1cHnmethodG__vtbl_;
@ -95,8 +97,8 @@ dtrace:helper:ustack:
/!init_done && !this->done/
{
MARK_LINE;
init_done = 1;
copyin_offset(POINTER_SIZE);
copyin_offset(COMPILER);
copyin_offset(OFFSET_CollectedHeap_reserved);
copyin_offset(OFFSET_MemRegion_start);
@ -122,6 +124,9 @@ dtrace:helper:ustack:
copyin_offset(OFFSET_CodeHeap_segmap);
copyin_offset(OFFSET_CodeHeap_log2_segment_size);
copyin_offset(OFFSET_GrowableArray_CodeHeap_data);
copyin_offset(OFFSET_GrowableArray_CodeHeap_len);
copyin_offset(OFFSET_VirtualSpace_low);
copyin_offset(OFFSET_VirtualSpace_high);
@ -152,26 +157,14 @@ dtrace:helper:ustack:
#error "Don't know architecture"
#endif
this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
/* Reading volatile values */
this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
this->CodeCache_high = copyin_ptr(this->CodeCache_heap_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
this->CodeCache_segmap_low = copyin_ptr(this->CodeCache_heap_address +
OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
this->CodeCache_segmap_high = copyin_ptr(this->CodeCache_heap_address +
OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_high);
this->CodeHeap_log2_segment_size = copyin_uint32(
this->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size);
this->Method_vtbl = (pointer) &``__1cNMethodG__vtbl_;
/* Read address of GrowableArray<CodeHeap*> */
this->code_heaps_address = copyin_ptr(&``__1cJCodeCacheG_heaps_);
/* Read address of _data array field in GrowableArray */
this->code_heaps_array_address = copyin_ptr(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_data);
this->number_of_heaps = copyin_uint32(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_len);
this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_;
/*
* Get Java heap bounds
*/
@ -187,21 +180,152 @@ dtrace:helper:ustack:
this->heap_end = this->heap_start + this->heap_size;
}
/*
* IMPORTANT: At the moment the ustack helper supports up to 5 code heaps in
* the code cache. If more code heaps are added the following probes have to
* be extended. This is done by simply adding a probe to get the heap bounds
* and another probe to set the code heap address of the newly created heap.
*/
/*
* ----- BEGIN: Get bounds of code heaps -----
*/
dtrace:helper:ustack:
/!this->done &&
this->CodeCache_low <= this->pc && this->pc < this->CodeCache_high/
/init_done < 1 && this->number_of_heaps >= 1 && !this->done/
{
MARK_LINE;
/* CodeHeap 1 */
init_done = 1;
this->code_heap1_address = copyin_ptr(this->code_heaps_array_address);
this->code_heap1_low = copyin_ptr(this->code_heap1_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
this->code_heap1_high = copyin_ptr(this->code_heap1_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
}
dtrace:helper:ustack:
/init_done < 2 && this->number_of_heaps >= 2 && !this->done/
{
MARK_LINE;
/* CodeHeap 2 */
init_done = 2;
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
this->code_heap2_address = copyin_ptr(this->code_heaps_array_address);
this->code_heap2_low = copyin_ptr(this->code_heap2_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
this->code_heap2_high = copyin_ptr(this->code_heap2_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
}
dtrace:helper:ustack:
/init_done < 3 && this->number_of_heaps >= 3 && !this->done/
{
/* CodeHeap 3 */
init_done = 3;
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
this->code_heap3_address = copyin_ptr(this->code_heaps_array_address);
this->code_heap3_low = copyin_ptr(this->code_heap3_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
this->code_heap3_high = copyin_ptr(this->code_heap3_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
}
dtrace:helper:ustack:
/init_done < 4 && this->number_of_heaps >= 4 && !this->done/
{
/* CodeHeap 4 */
init_done = 4;
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
this->code_heap4_address = copyin_ptr(this->code_heaps_array_address);
this->code_heap4_low = copyin_ptr(this->code_heap4_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
this->code_heap4_high = copyin_ptr(this->code_heap4_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
}
dtrace:helper:ustack:
/init_done < 5 && this->number_of_heaps >= 5 && !this->done/
{
/* CodeHeap 5 */
init_done = 5;
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
this->code_heap5_address = copyin_ptr(this->code_heaps_array_address);
this->code_heap5_low = copyin_ptr(this->code_heap5_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
this->code_heap5_high = copyin_ptr(this->code_heap5_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
}
/*
* ----- END: Get bounds of code heaps -----
*/
/*
* ----- BEGIN: Get address of the code heap pc points to -----
*/
dtrace:helper:ustack:
/!this->done && this->number_of_heaps >= 1 && this->code_heap1_low <= this->pc && this->pc < this->code_heap1_high/
{
MARK_LINE;
this->codecache = 1;
this->code_heap_address = this->code_heap1_address;
}
dtrace:helper:ustack:
/!this->done && this->number_of_heaps >= 2 && this->code_heap2_low <= this->pc && this->pc < this->code_heap2_high/
{
MARK_LINE;
this->codecache = 1;
this->code_heap_address = this->code_heap2_address;
}
dtrace:helper:ustack:
/!this->done && this->number_of_heaps >= 3 && this->code_heap3_low <= this->pc && this->pc < this->code_heap3_high/
{
MARK_LINE;
this->codecache = 1;
this->code_heap_address = this->code_heap3_address;
}
dtrace:helper:ustack:
/!this->done && this->number_of_heaps >= 4 && this->code_heap4_low <= this->pc && this->pc < this->code_heap4_high/
{
MARK_LINE;
this->codecache = 1;
this->code_heap_address = this->code_heap4_address;
}
dtrace:helper:ustack:
/!this->done && this->number_of_heaps >= 5 && this->code_heap5_low <= this->pc && this->pc < this->code_heap5_high/
{
MARK_LINE;
this->codecache = 1;
this->code_heap_address = this->code_heap5_address;
}
/*
* ----- END: Get address of the code heap pc points to -----
*/
dtrace:helper:ustack:
/!this->done && this->codecache/
{
MARK_LINE;
/*
* Get code heap configuration
*/
this->code_heap_low = copyin_ptr(this->code_heap_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
this->code_heap_segmap_low = copyin_ptr(this->code_heap_address +
OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
this->code_heap_log2_segment_size = copyin_uint32(
this->code_heap_address + OFFSET_CodeHeap_log2_segment_size);
/*
* Find start.
* Find start
*/
this->segment = (this->pc - this->CodeCache_low) >>
this->CodeHeap_log2_segment_size;
this->block = this->CodeCache_segmap_low;
this->segment = (this->pc - this->code_heap_low) >>
this->code_heap_log2_segment_size;
this->block = this->code_heap_segmap_low;
this->tag = copyin_uchar(this->block + this->segment);
"second";
}
dtrace:helper:ustack:
@ -256,8 +380,8 @@ dtrace:helper:ustack:
/!this->done && this->codecache/
{
MARK_LINE;
this->block = this->CodeCache_low +
(this->segment << this->CodeHeap_log2_segment_size);
this->block = this->code_heap_low +
(this->segment << this->code_heap_log2_segment_size);
this->used = copyin_uint32(this->block + OFFSET_HeapBlockHeader_used);
}

@ -150,16 +150,18 @@ struct jvm_agent {
uint64_t Use_Compressed_Oops_address;
uint64_t Universe_narrow_oop_base_address;
uint64_t Universe_narrow_oop_shift_address;
uint64_t CodeCache_heap_address;
uint64_t CodeCache_heaps_address;
/* Volatiles */
uint8_t Use_Compressed_Oops;
uint64_t Universe_narrow_oop_base;
uint32_t Universe_narrow_oop_shift;
uint64_t CodeCache_low;
uint64_t CodeCache_high;
uint64_t CodeCache_segmap_low;
uint64_t CodeCache_segmap_high;
// Code cache heaps
int32_t Number_of_heaps;
uint64_t* Heap_low;
uint64_t* Heap_high;
uint64_t* Heap_segmap_low;
uint64_t* Heap_segmap_high;
int32_t SIZE_CodeCache_log2_segment;
@ -278,8 +280,9 @@ static int parse_vmstructs(jvm_agent_t* J) {
}
if (vmp->typeName[0] == 'C' && strcmp("CodeCache", vmp->typeName) == 0) {
if (strcmp("_heap", vmp->fieldName) == 0) {
err = read_pointer(J, vmp->address, &J->CodeCache_heap_address);
/* Read _heaps field of type GrowableArray<CodeHeaps*>* */
if (strcmp("_heaps", vmp->fieldName) == 0) {
err = read_pointer(J, vmp->address, &J->CodeCache_heaps_address);
}
} else if (vmp->typeName[0] == 'U' && strcmp("Universe", vmp->typeName) == 0) {
if (strcmp("_narrow_oop._base", vmp->fieldName) == 0) {
@ -318,7 +321,9 @@ static int find_symbol(jvm_agent_t* J, const char *name, uint64_t* valuep) {
}
static int read_volatiles(jvm_agent_t* J) {
uint64_t ptr;
int i;
uint64_t array_data;
uint64_t code_heap_address;
int err;
err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address);
@ -334,20 +339,43 @@ static int read_volatiles(jvm_agent_t* J) {
err = ps_pread(J->P, J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t));
CHECK_FAIL(err);
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
OFFSET_VirtualSpace_low, &J->CodeCache_low);
CHECK_FAIL(err);
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
OFFSET_VirtualSpace_high, &J->CodeCache_high);
CHECK_FAIL(err);
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
OFFSET_VirtualSpace_low, &J->CodeCache_segmap_low);
CHECK_FAIL(err);
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
OFFSET_VirtualSpace_high, &J->CodeCache_segmap_high);
CHECK_FAIL(err);
/* CodeCache_heaps_address points to GrowableArray<CodeHeap*>, read _data field
pointing to the first entry of type CodeHeap* in the array */
err = read_pointer(J, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_data, &array_data);
/* Read _len field containing the number of code heaps */
err = ps_pread(J->P, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_len,
&J->Number_of_heaps, sizeof(J->Number_of_heaps));
err = ps_pread(J->P, J->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size,
/* Allocate memory for heap configurations */
J->Heap_low = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
J->Heap_high = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
J->Heap_segmap_low = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
J->Heap_segmap_high = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
/* Read code heap configurations */
for (i = 0; i < J->Number_of_heaps; ++i) {
/* Read address of heap */
err = read_pointer(J, array_data, &code_heap_address);
CHECK_FAIL(err);
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
OFFSET_VirtualSpace_low, &J->Heap_low[i]);
CHECK_FAIL(err);
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
OFFSET_VirtualSpace_high, &J->Heap_high[i]);
CHECK_FAIL(err);
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
OFFSET_VirtualSpace_low, &J->Heap_segmap_low[i]);
CHECK_FAIL(err);
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
OFFSET_VirtualSpace_high, &J->Heap_segmap_high[i]);
CHECK_FAIL(err);
/* Increment pointer to next entry */
array_data = array_data + POINTER_SIZE;
}
err = ps_pread(J->P, code_heap_address + OFFSET_CodeHeap_log2_segment_size,
&J->SIZE_CodeCache_log2_segment, sizeof(J->SIZE_CodeCache_log2_segment));
CHECK_FAIL(err);
@ -357,46 +385,57 @@ static int read_volatiles(jvm_agent_t* J) {
return err;
}
static int codeheap_contains(int heap_num, jvm_agent_t* J, uint64_t ptr) {
return (J->Heap_low[heap_num] <= ptr && ptr < J->Heap_high[heap_num]);
}
static int codecache_contains(jvm_agent_t* J, uint64_t ptr) {
/* make sure the code cache is up to date */
return (J->CodeCache_low <= ptr && ptr < J->CodeCache_high);
int i;
for (i = 0; i < J->Number_of_heaps; ++i) {
if (codeheap_contains(i, J, ptr)) {
return 1;
}
}
return 0;
}
static uint64_t segment_for(jvm_agent_t* J, uint64_t p) {
return (p - J->CodeCache_low) >> J->SIZE_CodeCache_log2_segment;
static uint64_t segment_for(int heap_num, jvm_agent_t* J, uint64_t p) {
return (p - J->Heap_low[heap_num]) >> J->SIZE_CodeCache_log2_segment;
}
static uint64_t block_at(jvm_agent_t* J, int i) {
return J->CodeCache_low + (i << J->SIZE_CodeCache_log2_segment);
static uint64_t block_at(int heap_num, jvm_agent_t* J, int i) {
return J->Heap_low[heap_num] + (i << J->SIZE_CodeCache_log2_segment);
}
static int find_start(jvm_agent_t* J, uint64_t ptr, uint64_t *startp) {
int err;
int i;
*startp = 0;
if (J->CodeCache_low <= ptr && ptr < J->CodeCache_high) {
int32_t used;
uint64_t segment = segment_for(J, ptr);
uint64_t block = J->CodeCache_segmap_low;
uint8_t tag;
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
CHECK_FAIL(err);
if (tag == 0xff)
return PS_OK;
while (tag > 0) {
for (i = 0; i < J->Number_of_heaps; ++i) {
*startp = 0;
if (codeheap_contains(i, J, ptr)) {
int32_t used;
uint64_t segment = segment_for(i, J, ptr);
uint64_t block = J->Heap_segmap_low[i];
uint8_t tag;
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
CHECK_FAIL(err);
segment -= tag;
}
block = block_at(J, segment);
err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
CHECK_FAIL(err);
if (used) {
*startp = block + SIZE_HeapBlockHeader;
if (tag == 0xff)
return PS_OK;
while (tag > 0) {
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
CHECK_FAIL(err);
segment -= tag;
}
block = block_at(i, J, segment);
err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
CHECK_FAIL(err);
if (used) {
*startp = block + SIZE_HeapBlockHeader;
}
}
return PS_OK;
}
return PS_OK;
fail:
return -1;

@ -82,21 +82,24 @@ StubQueue* AbstractInterpreter::_code = NULL;
#endif /* ASSERT */
#endif /* COMPILER1 */
#define GEN_OFFS(Type,Name) \
#define GEN_OFFS_NAME(Type,Name,OutputType) \
switch(gen_variant) { \
case GEN_OFFSET: \
printf("#define OFFSET_%-33s %d\n", \
#Type #Name, offset_of(Type, Name)); \
#OutputType #Name, offset_of(Type, Name)); \
break; \
case GEN_INDEX: \
printf("#define IDX_OFFSET_%-33s %d\n", \
#Type #Name, index++); \
#OutputType #Name, index++); \
break; \
case GEN_TABLE: \
printf("\tOFFSET_%s,\n", #Type #Name); \
printf("\tOFFSET_%s,\n", #OutputType #Name); \
break; \
}
#define GEN_OFFS(Type,Name) \
GEN_OFFS_NAME(Type,Name,Type)
#define GEN_SIZE(Type) \
switch(gen_variant) { \
case GEN_OFFSET: \
@ -241,6 +244,11 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(VirtualSpace, _high);
printf("\n");
/* We need to use different names here because of the template parameter */
GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _data, GrowableArray_CodeHeap);
GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _len, GrowableArray_CodeHeap);
printf("\n");
GEN_OFFS(CodeBlob, _name);
GEN_OFFS(CodeBlob, _header_size);
GEN_OFFS(CodeBlob, _content_offset);

@ -43,7 +43,9 @@
extern pointer __JvmOffsets;
extern pointer __1cJCodeCacheF_heap_;
/* GrowableArray<CodeHeap*>* */
extern pointer __1cJCodeCacheG_heaps_;
extern pointer __1cIUniverseO_collectedHeap_;
extern pointer __1cHnmethodG__vtbl_;
@ -95,8 +97,8 @@ dtrace:helper:ustack:
/!init_done && !this->done/
{
MARK_LINE;
init_done = 1;
copyin_offset(POINTER_SIZE);
copyin_offset(COMPILER);
copyin_offset(OFFSET_CollectedHeap_reserved);
copyin_offset(OFFSET_MemRegion_start);
@ -122,6 +124,9 @@ dtrace:helper:ustack:
copyin_offset(OFFSET_CodeHeap_segmap);
copyin_offset(OFFSET_CodeHeap_log2_segment_size);
copyin_offset(OFFSET_GrowableArray_CodeHeap_data);
copyin_offset(OFFSET_GrowableArray_CodeHeap_len);
copyin_offset(OFFSET_VirtualSpace_low);
copyin_offset(OFFSET_VirtualSpace_high);
@ -152,24 +157,13 @@ dtrace:helper:ustack:
#error "Don't know architecture"
#endif
this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
/* Read address of GrowableArray<CodeHeap*> */
this->code_heaps_address = copyin_ptr(&``__1cJCodeCacheG_heaps_);
/* Read address of _data array field in GrowableArray */
this->code_heaps_array_address = copyin_ptr(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_data);
this->number_of_heaps = copyin_uint32(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_len);
this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
this->CodeCache_high = copyin_ptr(this->CodeCache_heap_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
this->CodeCache_segmap_low = copyin_ptr(this->CodeCache_heap_address +
OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
this->CodeCache_segmap_high = copyin_ptr(this->CodeCache_heap_address +
OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_high);
this->CodeHeap_log2_segment_size = copyin_uint32(
this->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size);
this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_;
this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_;
/*
* Get Java heap bounds
@ -186,21 +180,152 @@ dtrace:helper:ustack:
this->heap_end = this->heap_start + this->heap_size;
}
/*
* IMPORTANT: At the moment the ustack helper supports up to 5 code heaps in
* the code cache. If more code heaps are added the following probes have to
* be extended. This is done by simply adding a probe to get the heap bounds
* and another probe to set the code heap address of the newly created heap.
*/
/*
* ----- BEGIN: Get bounds of code heaps -----
*/
dtrace:helper:ustack:
/!this->done &&
this->CodeCache_low <= this->pc && this->pc < this->CodeCache_high/
/init_done < 1 && this->number_of_heaps >= 1 && !this->done/
{
MARK_LINE;
/* CodeHeap 1 */
init_done = 1;
this->code_heap1_address = copyin_ptr(this->code_heaps_array_address);
this->code_heap1_low = copyin_ptr(this->code_heap1_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
this->code_heap1_high = copyin_ptr(this->code_heap1_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
}
dtrace:helper:ustack:
/init_done < 2 && this->number_of_heaps >= 2 && !this->done/
{
MARK_LINE;
/* CodeHeap 2 */
init_done = 2;
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
this->code_heap2_address = copyin_ptr(this->code_heaps_array_address);
this->code_heap2_low = copyin_ptr(this->code_heap2_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
this->code_heap2_high = copyin_ptr(this->code_heap2_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
}
dtrace:helper:ustack:
/init_done < 3 && this->number_of_heaps >= 3 && !this->done/
{
/* CodeHeap 3 */
init_done = 3;
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
this->code_heap3_address = copyin_ptr(this->code_heaps_array_address);
this->code_heap3_low = copyin_ptr(this->code_heap3_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
this->code_heap3_high = copyin_ptr(this->code_heap3_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
}
dtrace:helper:ustack:
/init_done < 4 && this->number_of_heaps >= 4 && !this->done/
{
/* CodeHeap 4 */
init_done = 4;
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
this->code_heap4_address = copyin_ptr(this->code_heaps_array_address);
this->code_heap4_low = copyin_ptr(this->code_heap4_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
this->code_heap4_high = copyin_ptr(this->code_heap4_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
}
dtrace:helper:ustack:
/init_done < 5 && this->number_of_heaps >= 5 && !this->done/
{
/* CodeHeap 5 */
init_done = 5;
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
this->code_heap5_address = copyin_ptr(this->code_heaps_array_address);
this->code_heap5_low = copyin_ptr(this->code_heap5_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
this->code_heap5_high = copyin_ptr(this->code_heap5_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
}
/*
* ----- END: Get bounds of code heaps -----
*/
/*
* ----- BEGIN: Get address of the code heap pc points to -----
*/
dtrace:helper:ustack:
/!this->done && this->number_of_heaps >= 1 && this->code_heap1_low <= this->pc && this->pc < this->code_heap1_high/
{
MARK_LINE;
this->codecache = 1;
this->code_heap_address = this->code_heap1_address;
}
dtrace:helper:ustack:
/!this->done && this->number_of_heaps >= 2 && this->code_heap2_low <= this->pc && this->pc < this->code_heap2_high/
{
MARK_LINE;
this->codecache = 1;
this->code_heap_address = this->code_heap2_address;
}
dtrace:helper:ustack:
/!this->done && this->number_of_heaps >= 3 && this->code_heap3_low <= this->pc && this->pc < this->code_heap3_high/
{
MARK_LINE;
this->codecache = 1;
this->code_heap_address = this->code_heap3_address;
}
dtrace:helper:ustack:
/!this->done && this->number_of_heaps >= 4 && this->code_heap4_low <= this->pc && this->pc < this->code_heap4_high/
{
MARK_LINE;
this->codecache = 1;
this->code_heap_address = this->code_heap4_address;
}
dtrace:helper:ustack:
/!this->done && this->number_of_heaps >= 5 && this->code_heap5_low <= this->pc && this->pc < this->code_heap5_high/
{
MARK_LINE;
this->codecache = 1;
this->code_heap_address = this->code_heap5_address;
}
/*
* ----- END: Get address of the code heap pc points to -----
*/
dtrace:helper:ustack:
/!this->done && this->codecache/
{
MARK_LINE;
/*
* Get code heap configuration
*/
this->code_heap_low = copyin_ptr(this->code_heap_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
this->code_heap_segmap_low = copyin_ptr(this->code_heap_address +
OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
this->code_heap_log2_segment_size = copyin_uint32(
this->code_heap_address + OFFSET_CodeHeap_log2_segment_size);
/*
* Find start.
* Find start
*/
this->segment = (this->pc - this->CodeCache_low) >>
this->CodeHeap_log2_segment_size;
this->block = this->CodeCache_segmap_low;
this->segment = (this->pc - this->code_heap_low) >>
this->code_heap_log2_segment_size;
this->block = this->code_heap_segmap_low;
this->tag = copyin_uchar(this->block + this->segment);
"second";
}
dtrace:helper:ustack:
@ -255,8 +380,8 @@ dtrace:helper:ustack:
/!this->done && this->codecache/
{
MARK_LINE;
this->block = this->CodeCache_low +
(this->segment << this->CodeHeap_log2_segment_size);
this->block = this->code_heap_low +
(this->segment << this->code_heap_log2_segment_size);
this->used = copyin_uint32(this->block + OFFSET_HeapBlockHeader_used);
}

@ -150,16 +150,18 @@ struct jvm_agent {
uint64_t Use_Compressed_Oops_address;
uint64_t Universe_narrow_oop_base_address;
uint64_t Universe_narrow_oop_shift_address;
uint64_t CodeCache_heap_address;
uint64_t CodeCache_heaps_address;
/* Volatiles */
uint8_t Use_Compressed_Oops;
uint64_t Universe_narrow_oop_base;
uint32_t Universe_narrow_oop_shift;
uint64_t CodeCache_low;
uint64_t CodeCache_high;
uint64_t CodeCache_segmap_low;
uint64_t CodeCache_segmap_high;
// Code cache heaps
int32_t Number_of_heaps;
uint64_t* Heap_low;
uint64_t* Heap_high;
uint64_t* Heap_segmap_low;
uint64_t* Heap_segmap_high;
int32_t SIZE_CodeCache_log2_segment;
@ -278,8 +280,9 @@ static int parse_vmstructs(jvm_agent_t* J) {
}
if (vmp->typeName[0] == 'C' && strcmp("CodeCache", vmp->typeName) == 0) {
if (strcmp("_heap", vmp->fieldName) == 0) {
err = read_pointer(J, vmp->address, &J->CodeCache_heap_address);
/* Read _heaps field of type GrowableArray<CodeHeaps*>* */
if (strcmp("_heaps", vmp->fieldName) == 0) {
err = read_pointer(J, vmp->address, &J->CodeCache_heaps_address);
}
} else if (vmp->typeName[0] == 'U' && strcmp("Universe", vmp->typeName) == 0) {
if (strcmp("_narrow_oop._base", vmp->fieldName) == 0) {
@ -318,7 +321,9 @@ static int find_symbol(jvm_agent_t* J, const char *name, uint64_t* valuep) {
}
static int read_volatiles(jvm_agent_t* J) {
uint64_t ptr;
int i;
uint64_t array_data;
uint64_t code_heap_address;
int err;
err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address);
@ -334,20 +339,43 @@ static int read_volatiles(jvm_agent_t* J) {
err = ps_pread(J->P, J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t));
CHECK_FAIL(err);
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
OFFSET_VirtualSpace_low, &J->CodeCache_low);
CHECK_FAIL(err);
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
OFFSET_VirtualSpace_high, &J->CodeCache_high);
CHECK_FAIL(err);
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
OFFSET_VirtualSpace_low, &J->CodeCache_segmap_low);
CHECK_FAIL(err);
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
OFFSET_VirtualSpace_high, &J->CodeCache_segmap_high);
CHECK_FAIL(err);
/* CodeCache_heaps_address points to GrowableArray<CodeHeap*>, read _data field
pointing to the first entry of type CodeHeap* in the array */
err = read_pointer(J, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_data, &array_data);
/* Read _len field containing the number of code heaps */
err = ps_pread(J->P, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_len,
&J->Number_of_heaps, sizeof(J->Number_of_heaps));
err = ps_pread(J->P, J->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size,
/* Allocate memory for heap configurations */
J->Heap_low = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
J->Heap_high = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
J->Heap_segmap_low = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
J->Heap_segmap_high = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
/* Read code heap configurations */
for (i = 0; i < J->Number_of_heaps; ++i) {
/* Read address of heap */
err = read_pointer(J, array_data, &code_heap_address);
CHECK_FAIL(err);
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
OFFSET_VirtualSpace_low, &J->Heap_low[i]);
CHECK_FAIL(err);
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
OFFSET_VirtualSpace_high, &J->Heap_high[i]);
CHECK_FAIL(err);
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
OFFSET_VirtualSpace_low, &J->Heap_segmap_low[i]);
CHECK_FAIL(err);
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
OFFSET_VirtualSpace_high, &J->Heap_segmap_high[i]);
CHECK_FAIL(err);
/* Increment pointer to next entry */
array_data = array_data + POINTER_SIZE;
}
err = ps_pread(J->P, code_heap_address + OFFSET_CodeHeap_log2_segment_size,
&J->SIZE_CodeCache_log2_segment, sizeof(J->SIZE_CodeCache_log2_segment));
CHECK_FAIL(err);
@ -357,46 +385,57 @@ static int read_volatiles(jvm_agent_t* J) {
return err;
}
static int codeheap_contains(int heap_num, jvm_agent_t* J, uint64_t ptr) {
return (J->Heap_low[heap_num] <= ptr && ptr < J->Heap_high[heap_num]);
}
static int codecache_contains(jvm_agent_t* J, uint64_t ptr) {
/* make sure the code cache is up to date */
return (J->CodeCache_low <= ptr && ptr < J->CodeCache_high);
int i;
for (i = 0; i < J->Number_of_heaps; ++i) {
if (codeheap_contains(i, J, ptr)) {
return 1;
}
}
return 0;
}
static uint64_t segment_for(jvm_agent_t* J, uint64_t p) {
return (p - J->CodeCache_low) >> J->SIZE_CodeCache_log2_segment;
static uint64_t segment_for(int heap_num, jvm_agent_t* J, uint64_t p) {
return (p - J->Heap_low[heap_num]) >> J->SIZE_CodeCache_log2_segment;
}
static uint64_t block_at(jvm_agent_t* J, int i) {
return J->CodeCache_low + (i << J->SIZE_CodeCache_log2_segment);
static uint64_t block_at(int heap_num, jvm_agent_t* J, int i) {
return J->Heap_low[heap_num] + (i << J->SIZE_CodeCache_log2_segment);
}
static int find_start(jvm_agent_t* J, uint64_t ptr, uint64_t *startp) {
int err;
int i;
*startp = 0;
if (J->CodeCache_low <= ptr && ptr < J->CodeCache_high) {
int32_t used;
uint64_t segment = segment_for(J, ptr);
uint64_t block = J->CodeCache_segmap_low;
uint8_t tag;
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
CHECK_FAIL(err);
if (tag == 0xff)
return PS_OK;
while (tag > 0) {
for (i = 0; i < J->Number_of_heaps; ++i) {
*startp = 0;
if (codeheap_contains(i, J, ptr)) {
int32_t used;
uint64_t segment = segment_for(i, J, ptr);
uint64_t block = J->Heap_segmap_low[i];
uint8_t tag;
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
CHECK_FAIL(err);
segment -= tag;
}
block = block_at(J, segment);
err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
CHECK_FAIL(err);
if (used) {
*startp = block + SIZE_HeapBlockHeader;
if (tag == 0xff)
return PS_OK;
while (tag > 0) {
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
CHECK_FAIL(err);
segment -= tag;
}
block = block_at(i, J, segment);
err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
CHECK_FAIL(err);
if (used) {
*startp = block + SIZE_HeapBlockHeader;
}
}
return PS_OK;
}
return PS_OK;
fail:
return -1;

@ -76,6 +76,11 @@ void Compiler::initialize() {
}
}
int Compiler::code_buffer_size() {
assert(SegmentedCodeCache, "Should be only used with a segmented code cache");
return Compilation::desired_max_code_buffer_size() + Compilation::desired_max_constant_size();
}
BufferBlob* Compiler::init_buffer_blob() {
// Allocate buffer blob once at startup since allocation for each
// compilation seems to be too expensive (at least on Intel win32).

@ -54,6 +54,9 @@ class Compiler: public AbstractCompiler {
// Print compilation timers and statistics
virtual void print_timers();
// Size of the code buffer
static int code_buffer_size();
};
#endif // SHARE_VM_C1_C1_COMPILER_HPP

@ -34,6 +34,7 @@
#include "ci/ciUtilities.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
@ -1085,7 +1086,7 @@ void ciEnv::register_method(ciMethod* target,
} else {
// The CodeCache is full. Print out warning and disable compilation.
record_failure("code cache is full");
CompileBroker::handle_full_code_cache();
CompileBroker::handle_full_code_cache(CodeCache::get_code_blob_type(comp_level));
}
}

@ -229,14 +229,11 @@ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
return blob;
}
void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
void* p = CodeCache::allocate(size, is_critical);
return p;
return CodeCache::allocate(size, CodeBlobType::NonMethod, is_critical);
}
void BufferBlob::free( BufferBlob *blob ) {
void BufferBlob::free(BufferBlob *blob) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
blob->flush();
{
@ -299,7 +296,6 @@ MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
return blob;
}
//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub
@ -340,14 +336,14 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
void* p = CodeCache::allocate(size, true);
void* p = CodeCache::allocate(size, CodeBlobType::NonMethod, true);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
}
// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
void* p = CodeCache::allocate(size, true);
void* p = CodeCache::allocate(size, CodeBlobType::NonMethod, true);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
}

@ -30,6 +30,18 @@
#include "runtime/frame.hpp"
#include "runtime/handles.hpp"
// CodeBlob Types
// Used in the CodeCache to assign CodeBlobs to different CodeHeaps
struct CodeBlobType {
enum {
MethodNonProfiled = 0, // Execution level 1 and 4 (non-profiled) nmethods (including native nmethods)
MethodProfiled = 1, // Execution level 2 and 3 (profiled) nmethods
NonMethod = 2, // Non-methods like Buffers, Adapters and Runtime Stubs
All = 3, // All types (No code cache segmentation)
NumTypes = 4 // Number of CodeBlobTypes
};
};
// CodeBlob - superclass for all entries in the CodeCache.
//
// Subtypes are:
@ -385,9 +397,6 @@ class DeoptimizationBlob: public SingletonBlob {
return (pc == unpack_pc || (pc + frame::pc_return_offset) == unpack_pc);
}
// GC for args
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }

File diff suppressed because it is too large

@ -26,105 +26,117 @@
#define SHARE_VM_CODE_CODECACHE_HPP
#include "code/codeBlob.hpp"
#include "code/nmethod.hpp"
#include "memory/allocation.hpp"
#include "memory/heap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/mutexLocker.hpp"
// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlob's.
// Implementation:
// - Each CodeBlob occupies one chunk of memory.
// - Like the offset table in oldspace the zone has a table for
// locating a method given an address of an instruction.
// -- Implementation --
// The CodeCache consists of one or more CodeHeaps, each of which contains
// CodeBlobs of a specific CodeBlobType. Currently heaps for the following
// types are available:
// - Non-methods: Non-methods like Buffers, Adapters and Runtime Stubs
// - Profiled nmethods: nmethods that are profiled, i.e., those
// executed at level 2 or 3
// - Non-Profiled nmethods: nmethods that are not profiled, i.e., those
// executed at level 1 or 4 and native methods
// - All: Used for code of all types if code cache segmentation is disabled.
//
// In the rare case of the non-method code heap getting full, non-method code
// will be stored in the non-profiled code heap as a fallback solution.
//
// Depending on the availability of compilers and TieredCompilation there
// may be fewer heaps. The size of the code heaps depends on the values of
// ReservedCodeCacheSize, NonProfiledCodeHeapSize and ProfiledCodeHeapSize
// (see CodeCache::heap_available(..) and CodeCache::initialize_heaps(..)
// for details).
//
// Code cache segmentation is controlled by the flag SegmentedCodeCache.
// If turned off, all code types are stored in a single code heap. By default
// code cache segmentation is turned on if TieredCompilation is enabled and
// ReservedCodeCacheSize >= 240 MB.
//
// All methods of the CodeCache accepting a CodeBlobType only apply to
// CodeBlobs of the given type. For example, iteration over the
// CodeBlobs of a specific type can be done by using CodeCache::first_blob(..)
// and CodeCache::next_blob(..) and providing the corresponding CodeBlobType.
//
// IMPORTANT: If you add new CodeHeaps to the code cache or change the
// existing ones, make sure to adapt the dtrace scripts (jhelper.d) for
// Solaris and BSD.
class OopClosure;
class DepChange;
class CodeCache : AllStatic {
friend class VMStructs;
friend class NMethodIterator;
private:
// CodeHeap is malloc()'ed at startup and never deleted during shutdown,
// so that the generated assembly code is always there when it's needed.
// This may cause memory leak, but is necessary, for now. See 4423824,
// 4422213 or 4436291 for details.
static CodeHeap * _heap;
static int _number_of_blobs;
static int _number_of_adapters;
static int _number_of_nmethods;
static int _number_of_nmethods_with_dependencies;
static bool _needs_cache_clean;
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
// CodeHeaps of the cache
static GrowableArray<CodeHeap*>* _heaps;
static address _low_bound; // Lower bound of CodeHeap addresses
static address _high_bound; // Upper bound of CodeHeap addresses
static int _number_of_blobs; // Total number of CodeBlobs in the cache
static int _number_of_adapters; // Total number of Adapters in the cache
static int _number_of_nmethods; // Total number of nmethods in the cache
static int _number_of_nmethods_with_dependencies; // Total number of nmethods with dependencies
static bool _needs_cache_clean; // True if inline caches of the nmethods need to be flushed
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
static int _codemem_full_count; // Number of times a CodeHeap in the cache was full
static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
static int _codemem_full_count;
static size_t bytes_allocated_in_freelist() { return _heap->allocated_in_freelist(); }
static int allocated_segments() { return _heap->allocated_segments(); }
static size_t freelist_length() { return _heap->freelist_length(); }
// CodeHeap management
static void initialize_heaps(); // Initializes the CodeHeaps
// Creates a new heap with the given name and size, containing CodeBlobs of the given type
static void add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type);
static CodeHeap* get_code_heap(CodeBlob* cb); // Returns the CodeHeap for the given CodeBlob
static CodeHeap* get_code_heap(int code_blob_type); // Returns the CodeHeap for the given CodeBlobType
static bool heap_available(int code_blob_type); // Returns true if a CodeHeap for the given CodeBlobType is available
static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one contiguous chunk of memory for the CodeHeaps
// Iteration
static CodeBlob* first_blob(CodeHeap* heap); // Returns the first CodeBlob on the given CodeHeap
static CodeBlob* first_blob(int code_blob_type); // Returns the first CodeBlob of the given type
static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb); // Returns the next CodeBlob on the given CodeHeap succeeding the given CodeBlob
static CodeBlob* next_blob(CodeBlob* cb); // Returns the next CodeBlob of the given type succeeding the given CodeBlob
static size_t bytes_allocated_in_freelists();
static int allocated_segments();
static size_t freelists_length();
public:
// Initialization
static void initialize();
static void report_codemem_full();
// Allocation/administration
static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
static int alignment_unit(); // guaranteed alignment of all CodeBlobs
static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
static void free(CodeBlob* cb); // frees a CodeBlob
static bool contains(void *p); // returns whether p is included
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
static CodeBlob* allocate(int size, int code_blob_type, bool is_critical = false); // allocates a new CodeBlob
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
static int alignment_unit(); // guaranteed alignment of all CodeBlobs
static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
static void free(CodeBlob* cb); // frees a CodeBlob
static bool contains(void *p); // returns whether p is included
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
// Lookup
static CodeBlob* find_blob(void* start);
static nmethod* find_nmethod(void* start);
static CodeBlob* find_blob(void* start); // Returns the CodeBlob containing the given address
static CodeBlob* find_blob_unsafe(void* start); // Same as find_blob but does not fail if looking up a zombie method
static nmethod* find_nmethod(void* start); // Returns the nmethod containing the given address
// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
static CodeBlob* find_blob_unsafe(void* start) {
// NMT can walk the stack before code cache is created
if (_heap == NULL) return NULL;
CodeBlob* result = (CodeBlob*)_heap->find_start(start);
// this assert is too strong because the heap code will return the
// heapblock containing start. That block can often be larger than
// the codeBlob itself. If you look up an address that is within
// the heapblock but not in the codeBlob you will assert.
//
// Most things will not lookup such bad addresses. However
// AsyncGetCallTrace can see intermediate frames and get that kind
// of invalid address and so can a developer using hsfind.
//
// The more correct answer is to return NULL if blob_contains() returns
// false.
// assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");
if (result != NULL && !result->blob_contains((address)start)) {
result = NULL;
}
return result;
}
// Iteration
static CodeBlob* first();
static CodeBlob* next (CodeBlob* cb);
static CodeBlob* alive(CodeBlob *cb);
static nmethod* alive_nmethod(CodeBlob *cb);
static nmethod* first_nmethod();
static nmethod* next_nmethod (CodeBlob* cb);
static int nof_blobs() { return _number_of_blobs; }
static int nof_adapters() { return _number_of_adapters; }
static int nof_nmethods() { return _number_of_nmethods; }
static int nof_blobs() { return _number_of_blobs; } // Returns the total number of CodeBlobs in the cache
static int nof_adapters() { return _number_of_adapters; } // Returns the total number of Adapters in the cache
static int nof_nmethods() { return _number_of_nmethods; } // Returns the total number of nmethods in the cache
// GC support
static void gc_epilogue();
@ -137,7 +149,7 @@ class CodeCache : AllStatic {
static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
static void scavenge_root_nmethods_do(CodeBlobClosure* f);
static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; }
static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; }
static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
static void add_scavenge_root_nmethod(nmethod* nm);
static void drop_scavenge_root_nmethod(nmethod* nm);
@ -151,27 +163,47 @@ class CodeCache : AllStatic {
static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
static void log_state(outputStream* st);
static const char* get_code_heap_name(int code_blob_type) { return (heap_available(code_blob_type) ? get_code_heap(code_blob_type)->name() : "Unused"); }
static void report_codemem_full(int code_blob_type, bool print);
// Dcmd (Diagnostic commands)
static void print_codelist(outputStream* st);
static void print_layout(outputStream* st);
// The full limits of the codeCache
static address low_bound() { return (address) _heap->low_boundary(); }
static address high_bound() { return (address) _heap->high_boundary(); }
static address high() { return (address) _heap->high(); }
static address low_bound() { return _low_bound; }
static address high_bound() { return _high_bound; }
// Profiling
static address first_address(); // first address used for CodeBlobs
static address last_address(); // last address used for CodeBlobs
static size_t capacity() { return _heap->capacity(); }
static size_t max_capacity() { return _heap->max_capacity(); }
static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
static double reverse_free_ratio();
static size_t capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->capacity() : 0; }
static size_t capacity();
static size_t unallocated_capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->unallocated_capacity() : 0; }
static size_t unallocated_capacity();
static size_t max_capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->max_capacity() : 0; }
static size_t max_capacity();
static bool needs_cache_clean() { return _needs_cache_clean; }
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
static void clear_inline_caches(); // clear all inline caches
static bool is_full(int* code_blob_type);
static double reverse_free_ratio(int code_blob_type);
static bool needs_cache_clean() { return _needs_cache_clean; }
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
static void clear_inline_caches(); // clear all inline caches
// Returns the CodeBlobType for nmethods of the given compilation level
static int get_code_blob_type(int comp_level) {
if (comp_level == CompLevel_none ||
comp_level == CompLevel_simple ||
comp_level == CompLevel_full_optimization) {
// Non profiled methods
return CodeBlobType::MethodNonProfiled;
} else if (comp_level == CompLevel_limited_profile ||
comp_level == CompLevel_full_profile) {
// Profiled methods
return CodeBlobType::MethodProfiled;
}
ShouldNotReachHere();
return 0;
}
static void verify_clean_inline_caches();
static void verify_icholder_relocations();
@ -187,10 +219,87 @@ class CodeCache : AllStatic {
static void make_marked_nmethods_zombies();
static void make_marked_nmethods_not_entrant();
// tells how many nmethods have dependencies
// tells how many nmethods have dependencies
static int number_of_nmethods_with_dependencies();
static int get_codemem_full_count() { return _codemem_full_count; }
};
// Iterator to iterate over nmethods in the CodeCache.
class NMethodIterator : public StackObj {
private:
CodeBlob* _code_blob; // Current CodeBlob
int _code_blob_type; // Refers to current CodeHeap
public:
NMethodIterator() {
initialize(NULL); // Set to NULL, initialized by first call to next()
}
NMethodIterator(nmethod* nm) {
initialize(nm);
}
// Advance iterator to next nmethod
bool next() {
assert_locked_or_safepoint(CodeCache_lock);
assert(_code_blob_type < CodeBlobType::NumTypes, "end reached");
bool result = next_nmethod();
while (!result && (_code_blob_type < CodeBlobType::MethodProfiled)) {
// Advance to next code heap if segmented code cache
_code_blob_type++;
result = next_nmethod();
}
return result;
}
// Advance iterator to next alive nmethod
bool next_alive() {
bool result = next();
while(result && !_code_blob->is_alive()) {
result = next();
}
return result;
}
bool end() const { return _code_blob == NULL; }
nmethod* method() const { return (nmethod*)_code_blob; }
private:
// Initialize iterator to given nmethod
void initialize(nmethod* nm) {
_code_blob = (CodeBlob*)nm;
if (!SegmentedCodeCache) {
// Iterate over all CodeBlobs
_code_blob_type = CodeBlobType::All;
} else if (nm != NULL) {
_code_blob_type = CodeCache::get_code_blob_type(nm->comp_level());
} else {
// Only iterate over method code heaps, starting with non-profiled
_code_blob_type = CodeBlobType::MethodNonProfiled;
}
}
// Advance iterator to the next nmethod in the current code heap
bool next_nmethod() {
// Get first method CodeBlob
if (_code_blob == NULL) {
_code_blob = CodeCache::first_blob(_code_blob_type);
if (_code_blob == NULL) {
return false;
} else if (_code_blob->is_nmethod()) {
return true;
}
}
// Search for next method CodeBlob
_code_blob = CodeCache::next_blob(_code_blob);
while (_code_blob != NULL && !_code_blob->is_nmethod()) {
_code_blob = CodeCache::next_blob(_code_blob);
}
return _code_blob != NULL;
}
};
#endif // SHARE_VM_CODE_CODECACHE_HPP
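To show the intended use of the iterator, here is a minimal sketch (illustrative only, not part of the changeset) of the loop pattern the converted call sites below follow; the lock is taken because NMethodIterator::next() asserts that CodeCache_lock is held or a safepoint is active:

    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      NMethodIterator iter;
      while (iter.next_alive()) {
        nmethod* nm = iter.method();
        // ... inspect or process the live nmethod ...
      }
    }

With a segmented code cache the iterator transparently advances from the non-profiled to the profiled method heap; without it, it walks the single heap as before.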

@ -500,7 +500,7 @@ nmethod* nmethod::new_native_nmethod(methodHandle method,
CodeOffsets offsets;
offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
compile_id, &offsets,
code_buffer, frame_size,
basic_lock_owner_sp_offset,
@ -538,7 +538,7 @@ nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
nm = new (nmethod_size) nmethod(method(), nmethod_size,
nm = new (nmethod_size, CompLevel_none) nmethod(method(), nmethod_size,
&offsets, code_buffer, frame_size);
NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));
@ -586,7 +586,7 @@ nmethod* nmethod::new_nmethod(methodHandle method,
+ round_to(nul_chk_table->size_in_bytes(), oopSize)
+ round_to(debug_info->data_size() , oopSize);
nm = new (nmethod_size)
nm = new (nmethod_size, comp_level)
nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
oop_maps,
@ -803,9 +803,11 @@ nmethod::nmethod(
}
#endif // def HAVE_DTRACE_H
void* nmethod::operator new(size_t size, int nmethod_size) throw() {
// Not critical, may return null if there is too little continuous memory
return CodeCache::allocate(nmethod_size);
void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
// With a SegmentedCodeCache, nmethods are allocated on separate heaps and therefore do not share memory
// with critical CodeBlobs. We define the allocation as critical to make sure all code heap memory is used.
bool is_critical = SegmentedCodeCache;
return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level), is_critical);
}
nmethod::nmethod(
@ -1530,7 +1532,7 @@ void nmethod::flush() {
Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
if (PrintMethodFlushing) {
tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(_comp_level))/1024);
}
// We need to deallocate any ExceptionCache data.
@ -1557,7 +1559,6 @@ void nmethod::flush() {
CodeCache::free(this);
}
//
// Notify all classes this nmethod is dependent on that it is no
// longer dependent. This should only be called in two situations.
@ -2418,15 +2419,18 @@ void nmethod::check_all_dependencies(DepChange& changes) {
// Turn off dependency tracing while actually testing dependencies.
NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
&DependencySignature::equals, 11027> DepTable;
typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
&DependencySignature::equals, 11027> DepTable;
DepTable* table = new DepTable();
DepTable* table = new DepTable();
// Iterate over live nmethods and check dependencies of all nmethods that are not
// marked for deoptimization. A particular dependency is only checked once.
for(nmethod* nm = CodeCache::alive_nmethod(CodeCache::first()); nm != NULL; nm = CodeCache::alive_nmethod(CodeCache::next(nm))) {
if (!nm->is_marked_for_deoptimization()) {
NMethodIterator iter;
while(iter.next()) {
nmethod* nm = iter.method();
// Only notify for live nmethods
if (nm->is_alive() && !nm->is_marked_for_deoptimization()) {
for (Dependencies::DepStream deps(nm); deps.next(); ) {
// Construct abstraction of a dependency.
DependencySignature* current_sig = new DependencySignature(deps);

@ -288,7 +288,7 @@ class nmethod : public CodeBlob {
int comp_level);
// helper methods
void* operator new(size_t size, int nmethod_size) throw();
void* operator new(size_t size, int nmethod_size, int comp_level) throw();
const char* reloc_string_for(u_char* begin, u_char* end);
// Returns true if this thread changed the state of the nmethod or

@ -63,7 +63,7 @@ void* VtableStub::operator new(size_t size, int code_size) throw() {
// If changing the name, update the other file accordingly.
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
if (blob == NULL) {
CompileBroker::handle_full_code_cache();
CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
return NULL;
}
_chunk = blob->content_begin();

@ -1747,9 +1747,11 @@ void CompileBroker::compiler_thread_loop() {
// We need this HandleMark to avoid leaking VM handles.
HandleMark hm(thread);
if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
// the code cache is really full
handle_full_code_cache();
// Check if the CodeCache is full
int code_blob_type = 0;
if (CodeCache::is_full(&code_blob_type)) {
// The CodeHeap for code_blob_type is really full
handle_full_code_cache(code_blob_type);
}
CompileTask* task = queue->get();
@ -2079,7 +2081,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
* The CodeCache is full. Print out warning and disable compilation
* or try code cache cleaning so compilation can continue later.
*/
void CompileBroker::handle_full_code_cache() {
void CompileBroker::handle_full_code_cache(int code_blob_type) {
UseInterpreter = true;
if (UseCompiler || AlwaysCompileLoopMethods ) {
if (xtty != NULL) {
@ -2096,8 +2098,6 @@ void CompileBroker::handle_full_code_cache() {
xtty->end_elem();
}
CodeCache::report_codemem_full();
#ifndef PRODUCT
if (CompileTheWorld || ExitOnFullCodeCache) {
codecache_print(/* detailed= */ true);
@ -2119,12 +2119,7 @@ void CompileBroker::handle_full_code_cache() {
disable_compilation_forever();
}
// Print warning only once
if (should_print_compiler_warning()) {
warning("CodeCache is full. Compiler has been disabled.");
warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
codecache_print(/* detailed= */ true);
}
CodeCache::report_codemem_full(code_blob_type, should_print_compiler_warning());
}
}

@ -434,7 +434,7 @@ class CompileBroker: AllStatic {
static bool is_compilation_disabled_forever() {
return _should_compile_new_jobs == shutdown_compilaton;
}
static void handle_full_code_cache();
static void handle_full_code_cache(int code_blob_type);
// Ensures that warning is only printed once.
static bool should_print_compiler_warning() {
jint old = Atomic::cmpxchg(1, &_print_compilation_warning, 0);

@ -5095,7 +5095,11 @@ private:
_num_entered_barrier(0)
{
nmethod::increase_unloading_clock();
_first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
// Get first alive nmethod
NMethodIterator iter = NMethodIterator();
if(iter.next_alive()) {
_first_nmethod = iter.method();
}
_claimed_nmethod = (volatile nmethod*)_first_nmethod;
}
@ -5138,27 +5142,26 @@ private:
void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
nmethod* first;
nmethod* last;
NMethodIterator last;
do {
*num_claimed_nmethods = 0;
first = last = (nmethod*)_claimed_nmethod;
first = (nmethod*)_claimed_nmethod;
last = NMethodIterator(first);
if (first != NULL) {
for (int i = 0; i < MaxClaimNmethods; i++) {
last = CodeCache::alive_nmethod(CodeCache::next(last));
if (last == NULL) {
for (int i = 0; i < MaxClaimNmethods; i++) {
if (!last.next_alive()) {
break;
}
claimed_nmethods[i] = last;
claimed_nmethods[i] = last.method();
(*num_claimed_nmethods)++;
}
}
} while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
} while ((nmethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
}
nmethod* claim_postponed_nmethod() {

@ -1077,7 +1077,7 @@ IRT_END
address SignatureHandlerLibrary::set_handler_blob() {
BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
if (handler_blob == NULL) {
CompileBroker::handle_full_code_cache();
CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
return NULL;
}
address handler = handler_blob->code_begin();

@ -35,7 +35,9 @@ size_t CodeHeap::header_size() {
// Implementation of Heap
CodeHeap::CodeHeap() {
CodeHeap::CodeHeap(const char* name, const int code_blob_type)
: _code_blob_type(code_blob_type) {
_name = name;
_number_of_committed_segments = 0;
_number_of_reserved_segments = 0;
_segment_size = 0;
@ -44,6 +46,8 @@ CodeHeap::CodeHeap() {
_freelist = NULL;
_freelist_segments = 0;
_freelist_length = 0;
_max_allocated_capacity = 0;
_was_full = false;
}
@ -88,9 +92,8 @@ void CodeHeap::on_code_mapping(char* base, size_t size) {
}
bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
size_t segment_size) {
assert(reserved_size >= committed_size, "reserved < committed");
bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
assert(rs.size() >= committed_size, "reserved < committed");
assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
@ -99,17 +102,12 @@ bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
// Reserve and initialize space for _memory.
const size_t page_size = os::can_execute_large_page_memory() ?
os::page_size_for_region(committed_size, reserved_size, 8) :
os::page_size_for_region(committed_size, rs.size(), 8) :
os::vm_page_size();
const size_t granularity = os::vm_allocation_granularity();
const size_t r_align = MAX2(page_size, granularity);
const size_t r_size = align_size_up(reserved_size, r_align);
const size_t c_size = align_size_up(committed_size, page_size);
const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
MAX2(page_size, granularity);
ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
rs.base(), rs.size());
if (!_memory.initialize(rs, c_size)) {
return false;
@ -182,6 +180,7 @@ void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
assert(!block->free(), "must be marked free");
DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
_max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
return block->allocated_space();
}
@ -203,6 +202,7 @@ void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
b->initialize(number_of_segments);
_next_segment += number_of_segments;
DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
_max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
return b->allocated_space();
} else {
return NULL;

@ -25,6 +25,7 @@
#ifndef SHARE_VM_MEMORY_HEAP_HPP
#define SHARE_VM_MEMORY_HEAP_HPP
#include "code/codeBlob.hpp"
#include "memory/allocation.hpp"
#include "runtime/virtualspace.hpp"
@ -93,6 +94,11 @@ class CodeHeap : public CHeapObj<mtCode> {
FreeBlock* _freelist;
size_t _freelist_segments; // No. of segments in freelist
int _freelist_length;
size_t _max_allocated_capacity; // Peak capacity that was allocated during lifetime of the heap
const char* _name; // Name of the CodeHeap
const int _code_blob_type; // CodeBlobType it contains
bool _was_full; // True if the code heap was full
enum { free_sentinel = 0xFF };
@ -127,10 +133,10 @@ class CodeHeap : public CHeapObj<mtCode> {
void clear(); // clears all heap contents
public:
CodeHeap();
CodeHeap(const char* name, const int code_blob_type);
// Heap extents
bool reserve(size_t reserved_size, size_t committed_size, size_t segment_size);
bool reserve(ReservedSpace rs, size_t committed_size, size_t segment_size);
bool expand_by(size_t size); // expands committed memory by size
// Memory allocation
@ -161,8 +167,18 @@ class CodeHeap : public CHeapObj<mtCode> {
size_t max_capacity() const;
int allocated_segments() const;
size_t allocated_capacity() const;
size_t max_allocated_capacity() const { return _max_allocated_capacity; }
size_t unallocated_capacity() const { return max_capacity() - allocated_capacity(); }
// Returns true if the CodeHeap contains CodeBlobs of the given type
bool accepts(int code_blob_type) const { return (_code_blob_type == code_blob_type); }
int code_blob_type() const { return _code_blob_type; }
// Debugging / Profiling
const char* name() const { return _name; }
bool was_full() { return _was_full; }
void report_full() { _was_full = true; }
private:
size_t heap_unallocated_capacity() const;
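As a brief aside on the new accepts() query, a sketch of how a heap for a given blob type can be selected from the heap list (assumed shape for illustration; the actual lookup lives in CodeCache and may differ):

    // Hypothetical helper: pick the CodeHeap that stores blobs of the given type.
    static CodeHeap* heap_for(GrowableArray<CodeHeap*>* heaps, int code_blob_type) {
      for (int i = 0; i < heaps->length(); ++i) {
        if (heaps->at(i)->accepts(code_blob_type)) {
          return heaps->at(i);
        }
      }
      return NULL;
    }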

@ -24,7 +24,9 @@
#include "precompiled.hpp"
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/optoreg.hpp"
#include "opto/output.hpp"
#include "opto/runtime.hpp"
// register information defined by ADLC
@ -147,3 +149,8 @@ void C2Compiler::compile_method(ciEnv* env, ciMethod* target, int entry_bci) {
void C2Compiler::print_timers() {
// do nothing
}
int C2Compiler::initial_code_buffer_size() {
assert(SegmentedCodeCache, "Should be only used with a segmented code cache");
return Compile::MAX_inst_size + Compile::MAX_locs_size + initial_const_capacity;
}

@ -50,6 +50,9 @@ public:
// Print compilation timers and statistics
void print_timers();
// Initial size of the code buffer (may be increased at runtime)
static int initial_code_buffer_size();
};
#endif // SHARE_VM_OPTO_C2COMPILER_HPP

@ -535,7 +535,7 @@ void Compile::init_scratch_buffer_blob(int const_size) {
if (scratch_buffer_blob() == NULL) {
// Let CompilerBroker disable further compilations.
record_failure("Not enough space for scratch buffer in CodeCache");
CompileBroker::handle_full_code_cache();
CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
return;
}
}

@ -1166,7 +1166,7 @@ CodeBuffer* Compile::init_buffer(uint* blk_starts) {
// Have we run out of code space?
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
CompileBroker::handle_full_code_cache();
CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
return NULL;
}
// Configure the code buffer.
@ -1491,7 +1491,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
CompileBroker::handle_full_code_cache();
CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
return;
}
@ -1648,7 +1648,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// One last check for failed CodeBuffer::expand:
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
CompileBroker::handle_full_code_cache();
CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
return;
}

@ -228,19 +228,17 @@ jvmtiError JvmtiCodeBlobEvents::generate_compiled_method_load_events(JvmtiEnv* e
// created nmethod will notify normally and nmethods which are freed
// can be safely skipped.
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
nmethod* current = CodeCache::first_nmethod();
while (current != NULL) {
// Only notify for live nmethods
if (current->is_alive()) {
// Lock the nmethod so it can't be freed
nmethodLocker nml(current);
// Iterate over non-profiled and profiled nmethods
NMethodIterator iter;
while(iter.next_alive()) {
nmethod* current = iter.method();
// Lock the nmethod so it can't be freed
nmethodLocker nml(current);
// Don't hold the lock over the notify or jmethodID creation
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
current->get_and_cache_jmethod_id();
JvmtiExport::post_compiled_method_load(current);
}
current = CodeCache::next_nmethod(current);
// Don't hold the lock over the notify or jmethodID creation
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
current->get_and_cache_jmethod_id();
JvmtiExport::post_compiled_method_load(current);
}
return JVMTI_ERROR_NONE;
}

@ -215,7 +215,7 @@ double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k)
// The main intention is to keep enough free space for C2 compiled code
// to achieve peak performance if the code cache is under stress.
if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) {
double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
}

@ -1143,7 +1143,27 @@ void Arguments::set_tiered_flags() {
}
// Increase the code cache size - tiered compiles a lot more.
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 5);
FLAG_SET_ERGO(uintx, ReservedCodeCacheSize, ReservedCodeCacheSize * 5);
}
// Enable SegmentedCodeCache if TieredCompilation is enabled and ReservedCodeCacheSize >= 240M
if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M) {
FLAG_SET_ERGO(bool, SegmentedCodeCache, true);
// Multiply sizes by 5 but fix NonMethodCodeHeapSize (distribute among non-profiled and profiled code heap)
if (FLAG_IS_DEFAULT(ProfiledCodeHeapSize)) {
FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, ProfiledCodeHeapSize * 5 + NonMethodCodeHeapSize * 2);
}
if (FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) {
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize * 5 + NonMethodCodeHeapSize * 2);
}
// Check consistency of code heap sizes
if ((NonMethodCodeHeapSize + NonProfiledCodeHeapSize + ProfiledCodeHeapSize) != ReservedCodeCacheSize) {
jio_fprintf(defaultStream::error_stream(),
"Invalid code heap sizes: NonMethodCodeHeapSize(%dK) + ProfiledCodeHeapSize(%dK) + NonProfiledCodeHeapSize(%dK) = %dK. Must be equal to ReservedCodeCacheSize = %uK.\n",
NonMethodCodeHeapSize/K, ProfiledCodeHeapSize/K, NonProfiledCodeHeapSize/K,
(NonMethodCodeHeapSize + ProfiledCodeHeapSize + NonProfiledCodeHeapSize)/K, ReservedCodeCacheSize/K);
vm_exit(1);
}
}
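A quick consistency check on the sizes above (assuming the platform defaults satisfy NonMethodCodeHeapSize + ProfiledCodeHeapSize + NonProfiledCodeHeapSize == ReservedCodeCacheSize before scaling): the reserved size is multiplied by 5, NonMethodCodeHeapSize stays fixed, and each method heap picks up 2 * NonMethodCodeHeapSize, so

    NonMethod + (5 * Profiled + 2 * NonMethod) + (5 * NonProfiled + 2 * NonMethod)
      = 5 * (NonMethod + Profiled + NonProfiled)
      = 5 * ReservedCodeCacheSize (the new reserved size)

and the size check just above passes for the default values.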
if (!UseInterpreter) { // -Xcomp
Tier3InvokeNotifyFreqLog = 0;
@ -2442,6 +2462,18 @@ bool Arguments::check_vm_args_consistency() {
"Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M,
(2*G)/M);
status = false;
} else if (NonMethodCodeHeapSize < min_code_cache_size){
jio_fprintf(defaultStream::error_stream(),
"Invalid NonMethodCodeHeapSize=%dK. Must be at least %uK.\n", NonMethodCodeHeapSize/K,
min_code_cache_size/K);
status = false;
} else if ((!FLAG_IS_DEFAULT(NonMethodCodeHeapSize) || !FLAG_IS_DEFAULT(ProfiledCodeHeapSize) || !FLAG_IS_DEFAULT(NonProfiledCodeHeapSize))
&& (NonMethodCodeHeapSize + NonProfiledCodeHeapSize + ProfiledCodeHeapSize) != ReservedCodeCacheSize) {
jio_fprintf(defaultStream::error_stream(),
"Invalid code heap sizes: NonMethodCodeHeapSize(%dK) + ProfiledCodeHeapSize(%dK) + NonProfiledCodeHeapSize(%dK) = %dK. Must be equal to ReservedCodeCacheSize = %uK.\n",
NonMethodCodeHeapSize/K, ProfiledCodeHeapSize/K, NonProfiledCodeHeapSize/K,
(NonMethodCodeHeapSize + ProfiledCodeHeapSize + NonProfiledCodeHeapSize)/K, ReservedCodeCacheSize/K);
status = false;
}
status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction");
@ -2868,8 +2900,41 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
return JNI_EINVAL;
}
FLAG_SET_CMDLINE(uintx, ReservedCodeCacheSize, (uintx)long_ReservedCodeCacheSize);
// -XX:NonMethodCodeHeapSize=
} else if (match_option(option, "-XX:NonMethodCodeHeapSize=", &tail)) {
julong long_NonMethodCodeHeapSize = 0;
ArgsRange errcode = parse_memory_size(tail, &long_NonMethodCodeHeapSize, 1);
if (errcode != arg_in_range) {
jio_fprintf(defaultStream::error_stream(),
"Invalid maximum non-method code heap size: %s.\n", option->optionString);
return JNI_EINVAL;
}
FLAG_SET_CMDLINE(uintx, NonMethodCodeHeapSize, (uintx)long_NonMethodCodeHeapSize);
// -XX:ProfiledCodeHeapSize=
} else if (match_option(option, "-XX:ProfiledCodeHeapSize=", &tail)) {
julong long_ProfiledCodeHeapSize = 0;
ArgsRange errcode = parse_memory_size(tail, &long_ProfiledCodeHeapSize, 1);
if (errcode != arg_in_range) {
jio_fprintf(defaultStream::error_stream(),
"Invalid maximum profiled code heap size: %s.\n", option->optionString);
return JNI_EINVAL;
}
FLAG_SET_CMDLINE(uintx, ProfiledCodeHeapSize, (uintx)long_ProfiledCodeHeapSize);
// -XX:NonProfiledCodeHeapSize=
} else if (match_option(option, "-XX:NonProfiledCodeHeapSize=", &tail)) {
julong long_NonProfiledCodeHeapSize = 0;
ArgsRange errcode = parse_memory_size(tail, &long_NonProfiledCodeHeapSize, 1);
if (errcode != arg_in_range) {
jio_fprintf(defaultStream::error_stream(),
"Invalid maximum non-profiled code heap size: %s.\n", option->optionString);
return JNI_EINVAL;
}
FLAG_SET_CMDLINE(uintx, NonProfiledCodeHeapSize, (uintx)long_NonProfiledCodeHeapSize);
//-XX:IncreaseFirstTierCompileThresholdAt=
} else if (match_option(option, "-XX:IncreaseFirstTierCompileThresholdAt=", &tail)) {
} else if (match_option(option, "-XX:IncreaseFirstTierCompileThresholdAt=", &tail)) {
uintx uint_IncreaseFirstTierCompileThresholdAt = 0;
if (!parse_uintx(tail, &uint_IncreaseFirstTierCompileThresholdAt, 0) || uint_IncreaseFirstTierCompileThresholdAt > 99) {
jio_fprintf(defaultStream::error_stream(),

@ -165,7 +165,7 @@ void PCRecorder::init() {
for (int index = 0; index < s; index++) {
counters[index] = 0;
}
base = CodeCache::first_address();
base = CodeCache::low_bound();
}
void PCRecorder::record(address pc) {

@ -186,6 +186,10 @@ define_pd_global(intx, InlineClassNatives, true);
define_pd_global(intx, InlineUnsafeOps, true);
define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 0);
define_pd_global(intx, ProfiledCodeHeapSize, 0);
define_pd_global(intx, NonMethodCodeHeapSize, 32*M);
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
define_pd_global(intx, CodeCacheMinBlockLength, 1);
define_pd_global(intx, CodeCacheMinimumUseSpace, 200*K);
@ -3354,9 +3358,21 @@ class CommandLineFlags {
develop_pd(uintx, CodeCacheMinimumUseSpace, \
"Minimum code cache size (in bytes) required to start VM.") \
\
product(bool, SegmentedCodeCache, false, \
"Use a segmented code cache") \
\
product_pd(uintx, ReservedCodeCacheSize, \
"Reserved code cache size (in bytes) - maximum code cache size") \
\
product_pd(uintx, NonProfiledCodeHeapSize, \
"Size of code heap with non-profiled methods (in bytes)") \
\
product_pd(uintx, ProfiledCodeHeapSize, \
"Size of code heap with profiled methods (in bytes)") \
\
product_pd(uintx, NonMethodCodeHeapSize, \
"Size of code heap with non-methods (in bytes)") \
\
product(uintx, CodeCacheMinimumFreeSpace, 500*K, \
"When less than X space left, we stop compiling") \
\

@ -49,6 +49,7 @@ void perfMemory_init();
void management_init();
void bytecodes_init();
void classLoader_init();
void compilationPolicy_init();
void codeCache_init();
void VM_Version_init();
void os_init_globals(); // depends on VM_Version_init, before universe_init
@ -68,7 +69,6 @@ void vmStructs_init();
void vtableStubs_init();
void InlineCacheBuffer_init();
void compilerOracle_init();
void compilationPolicy_init();
void compileBroker_init();
// Initialization after compiler initialization
@ -97,6 +97,7 @@ jint init_globals() {
management_init();
bytecodes_init();
classLoader_init();
compilationPolicy_init();
codeCache_init();
VM_Version_init();
os_init_globals();
@ -123,7 +124,6 @@ jint init_globals() {
vtableStubs_init();
InlineCacheBuffer_init();
compilerOracle_init();
compilationPolicy_init();
compileBroker_init();
VMRegImpl::set_regName();

@ -2422,7 +2422,7 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
// Ought to log this but compile log is only per compile thread
// and we're some non descript Java thread.
MutexUnlocker mu(AdapterHandlerLibrary_lock);
CompileBroker::handle_full_code_cache();
CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
return NULL; // Out of CodeCache space
}
entry->relocate(new_adapter->content_begin());
@ -2596,7 +2596,7 @@ void AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
nm->post_compiled_method_load_event();
} else {
// CodeCache is full, disable compilation
CompileBroker::handle_full_code_cache();
CompileBroker::handle_full_code_cache(CodeBlobType::MethodNonProfiled);
}
}

@ -131,7 +131,7 @@ void NMethodSweeper::record_sweep(nmethod* nm, int line) {
#define SWEEP(nm)
#endif
nmethod* NMethodSweeper::_current = NULL; // Current nmethod
NMethodIterator NMethodSweeper::_current; // Current nmethod
long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID.
long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache
long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper
@ -150,26 +150,24 @@ volatile int NMethodSweeper::_bytes_changed = 0; // Counts the tot
// 3) zombie -> marked_for_reclamation
int NMethodSweeper::_hotness_counter_reset_val = 0;
long NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed
long NMethodSweeper::_total_nof_c2_methods_reclaimed = 0; // Accumulated nof methods flushed
size_t NMethodSweeper::_total_flushed_size = 0; // Total number of bytes flushed from the code cache
Tickspan NMethodSweeper::_total_time_sweeping; // Accumulated time sweeping
Tickspan NMethodSweeper::_total_time_this_sweep; // Total time this sweep
Tickspan NMethodSweeper::_peak_sweep_time; // Peak time for a full sweep
Tickspan NMethodSweeper::_peak_sweep_fraction_time; // Peak time sweeping one fraction
long NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed
long NMethodSweeper::_total_nof_c2_methods_reclaimed = 0; // Accumulated nof methods flushed
size_t NMethodSweeper::_total_flushed_size = 0; // Total number of bytes flushed from the code cache
Tickspan NMethodSweeper::_total_time_sweeping; // Accumulated time sweeping
Tickspan NMethodSweeper::_total_time_this_sweep; // Total time this sweep
Tickspan NMethodSweeper::_peak_sweep_time; // Peak time for a full sweep
Tickspan NMethodSweeper::_peak_sweep_fraction_time; // Peak time sweeping one fraction
class MarkActivationClosure: public CodeBlobClosure {
public:
virtual void do_code_blob(CodeBlob* cb) {
if (cb->is_nmethod()) {
nmethod* nm = (nmethod*)cb;
nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
// If we see an activation belonging to a non_entrant nmethod, we mark it.
if (nm->is_not_entrant()) {
nm->mark_as_seen_on_stack();
}
assert(cb->is_nmethod(), "CodeBlob should be nmethod");
nmethod* nm = (nmethod*)cb;
nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
// If we see an activation belonging to a non_entrant nmethod, we mark it.
if (nm->is_not_entrant()) {
nm->mark_as_seen_on_stack();
}
}
};
@ -178,10 +176,9 @@ static MarkActivationClosure mark_activation_closure;
class SetHotnessClosure: public CodeBlobClosure {
public:
virtual void do_code_blob(CodeBlob* cb) {
if (cb->is_nmethod()) {
nmethod* nm = (nmethod*)cb;
nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
}
assert(cb->is_nmethod(), "CodeBlob should be nmethod");
nmethod* nm = (nmethod*)cb;
nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
}
};
static SetHotnessClosure set_hotness_closure;
@ -194,7 +191,7 @@ int NMethodSweeper::hotness_counter_reset_val() {
return _hotness_counter_reset_val;
}
bool NMethodSweeper::sweep_in_progress() {
return (_current != NULL);
return !_current.end();
}
// Scans the stacks of all Java threads and marks activations of not-entrant methods.
@ -212,11 +209,13 @@ void NMethodSweeper::mark_active_nmethods() {
_time_counter++;
// Check for restart
assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
if (!sweep_in_progress()) {
_seen = 0;
_sweep_fractions_left = NmethodSweepFraction;
_current = CodeCache::first_nmethod();
_current = NMethodIterator();
// Initialize to first nmethod
_current.next();
_traversals += 1;
_total_time_this_sweep = Tickspan();
@ -271,7 +270,9 @@ void NMethodSweeper::possibly_sweep() {
// an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
// value) that disables the intended periodic sweeps.
const int max_wait_time = ReservedCodeCacheSize / (16 * M);
double wait_until_next_sweep = max_wait_time - time_since_last_sweep - CodeCache::reverse_free_ratio();
double wait_until_next_sweep = max_wait_time - time_since_last_sweep -
MAX2(CodeCache::reverse_free_ratio(CodeBlobType::MethodProfiled),
CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled));
assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");
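To make the new interval formula concrete, a hypothetical example (the ratio value is arbitrary, chosen only for illustration): with -XX:ReservedCodeCacheSize=256m, max_wait_time is 256M / (16 * M) = 16. If the last sweep happened 5 time units ago and the fuller of the two method heaps reports a reverse free ratio of 6.0, then wait_until_next_sweep = 16 - 5 - 6 = 5 and no sweep is forced yet; as either method heap fills up, its reverse_free_ratio() grows and the wait shrinks toward zero, at which point the condition below fires.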
if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
@ -353,7 +354,7 @@ void NMethodSweeper::sweep_code_cache() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// The last invocation iterates until there are no more nmethods
for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) {
while ((swept_count < todo || _sweep_fractions_left == 1) && !_current.end()) {
swept_count++;
if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
if (PrintMethodFlushing && Verbose) {
@ -369,19 +370,19 @@ void NMethodSweeper::sweep_code_cache() {
// Since we will give up the CodeCache_lock, always skip ahead
// to the next nmethod. Other blobs can be deleted by other
// threads but nmethods are only reclaimed by the sweeper.
nmethod* next = CodeCache::next_nmethod(_current);
nmethod* nm = _current.method();
_current.next();
// Now ready to process nmethod and give up CodeCache_lock
{
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
freed_memory += process_nmethod(_current);
freed_memory += process_nmethod(nm);
}
_seen++;
_current = next;
}
}
assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache");
assert(_sweep_fractions_left > 1 || _current.end(), "must have scanned the whole cache");
const Ticks sweep_end_counter = Ticks::now();
const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
@ -594,7 +595,8 @@ void NMethodSweeper::possibly_flush(nmethod* nm) {
// ReservedCodeCacheSize
int reset_val = hotness_counter_reset_val();
int time_since_reset = reset_val - nm->hotness_counter();
double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
int code_blob_type = (CodeCache::get_code_blob_type(nm->comp_level()));
double threshold = -reset_val + (CodeCache::reverse_free_ratio(code_blob_type) * NmethodSweepActivity);
// The less free space in the code cache we have - the bigger reverse_free_ratio() is.
// I.e., 'threshold' increases with lower available space in the code cache and a higher
// NmethodSweepActivity. If the current hotness counter - which decreases from its initial

@ -54,33 +54,33 @@
// is full.
class NMethodSweeper : public AllStatic {
static long _traversals; // Stack scan count, also sweep ID.
static long _total_nof_code_cache_sweeps; // Total number of full sweeps of the code cache
static long _time_counter; // Virtual time used to periodically invoke sweeper
static long _last_sweep; // Value of _time_counter when the last sweep happened
static nmethod* _current; // Current nmethod
static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache
static int _flushed_count; // Nof. nmethods flushed in current sweep
static int _zombified_count; // Nof. nmethods made zombie in current sweep
static int _marked_for_reclamation_count; // Nof. nmethods marked for reclaim in current sweep
static long _traversals; // Stack scan count, also sweep ID.
static long _total_nof_code_cache_sweeps; // Total number of full sweeps of the code cache
static long _time_counter; // Virtual time used to periodically invoke sweeper
static long _last_sweep; // Value of _time_counter when the last sweep happened
static NMethodIterator _current; // Current nmethod
static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache
static int _flushed_count; // Nof. nmethods flushed in current sweep
static int _zombified_count; // Nof. nmethods made zombie in current sweep
static int _marked_for_reclamation_count; // Nof. nmethods marked for reclaim in current sweep
static volatile int _sweep_fractions_left; // Nof. invocations left until we are completed with this pass
static volatile int _sweep_started; // Flag to control conc sweeper
static volatile bool _should_sweep; // Indicates if we should invoke the sweeper
static volatile int _bytes_changed; // Counts the total nmethod size if the nmethod changed from:
// 1) alive -> not_entrant
// 2) not_entrant -> zombie
// 3) zombie -> marked_for_reclamation
static volatile int _sweep_fractions_left; // Nof. invocations left until we are completed with this pass
static volatile int _sweep_started; // Flag to control conc sweeper
static volatile bool _should_sweep; // Indicates if we should invoke the sweeper
static volatile int _bytes_changed; // Counts the total nmethod size if the nmethod changed from:
// 1) alive -> not_entrant
// 2) not_entrant -> zombie
// 3) zombie -> marked_for_reclamation
// Stat counters
static long _total_nof_methods_reclaimed; // Accumulated nof methods flushed
static long _total_nof_c2_methods_reclaimed; // Accumulated nof C2-compiled methods flushed
static size_t _total_flushed_size; // Total size of flushed methods
static int _hotness_counter_reset_val;
static Tickspan _total_time_sweeping; // Accumulated time sweeping
static Tickspan _total_time_this_sweep; // Total time this sweep
static Tickspan _peak_sweep_time; // Peak time for a full sweep
static Tickspan _peak_sweep_fraction_time; // Peak time sweeping one fraction
static Tickspan _total_time_sweeping; // Accumulated time sweeping
static Tickspan _total_time_this_sweep; // Total time this sweep
static Tickspan _peak_sweep_time; // Peak time for a full sweep
static Tickspan _peak_sweep_fraction_time; // Peak time sweeping one fraction
static int process_nmethod(nmethod *nm);
static void release_nmethod(nmethod* nm);
@ -98,7 +98,7 @@ class NMethodSweeper : public AllStatic {
#ifdef ASSERT
static bool is_sweeping(nmethod* which) { return _current == which; }
static bool is_sweeping(nmethod* which) { return _current.method() == which; }
// Keep track of sweeper activity in the ring buffer
static void record_sweep(nmethod* nm, int line);
static void report_events(int id, address entry);

@ -765,8 +765,8 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
/* CodeCache (NOTE: incomplete) */ \
/********************************/ \
\
static_field(CodeCache, _heap, CodeHeap*) \
static_field(CodeCache, _scavenge_root_nmethods, nmethod*) \
static_field(CodeCache, _heaps, GrowableArray<CodeHeap*>*) \
static_field(CodeCache, _scavenge_root_nmethods, nmethod*) \
\
/*******************************/ \
/* CodeHeap (NOTE: incomplete) */ \

@ -63,7 +63,9 @@ GrowableArray<MemoryManager*>* MemoryService::_managers_list =
GCMemoryManager* MemoryService::_minor_gc_manager = NULL;
GCMemoryManager* MemoryService::_major_gc_manager = NULL;
MemoryPool* MemoryService::_code_heap_pool = NULL;
MemoryManager* MemoryService::_code_cache_manager = NULL;
GrowableArray<MemoryPool*>* MemoryService::_code_heap_pools =
new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryPool*>(init_code_heap_pools_size, true);
MemoryPool* MemoryService::_metaspace_pool = NULL;
MemoryPool* MemoryService::_compressed_class_pool = NULL;
@ -388,15 +390,21 @@ void MemoryService::add_g1OldGen_memory_pool(G1CollectedHeap* g1h,
}
#endif // INCLUDE_ALL_GCS
void MemoryService::add_code_heap_memory_pool(CodeHeap* heap) {
_code_heap_pool = new CodeHeapPool(heap,
"Code Cache",
true /* support_usage_threshold */);
MemoryManager* mgr = MemoryManager::get_code_cache_memory_manager();
mgr->add_pool(_code_heap_pool);
void MemoryService::add_code_heap_memory_pool(CodeHeap* heap, const char* name) {
// Create new memory pool for this heap
MemoryPool* code_heap_pool = new CodeHeapPool(heap, name, true /* support_usage_threshold */);
_pools_list->append(_code_heap_pool);
_managers_list->append(mgr);
// Append to lists
_code_heap_pools->append(code_heap_pool);
_pools_list->append(code_heap_pool);
if (_code_cache_manager == NULL) {
// Create CodeCache memory manager
_code_cache_manager = MemoryManager::get_code_cache_memory_manager();
_managers_list->append(_code_cache_manager);
}
_code_cache_manager->add_pool(code_heap_pool);
}
void MemoryService::add_metaspace_memory_pools() {

@ -53,7 +53,8 @@ class MemoryService : public AllStatic {
private:
enum {
init_pools_list_size = 10,
init_managers_list_size = 5
init_managers_list_size = 5,
init_code_heap_pools_size = 9
};
// index for minor and major generations
@ -70,8 +71,9 @@ private:
static GCMemoryManager* _major_gc_manager;
static GCMemoryManager* _minor_gc_manager;
// Code heap memory pool
static MemoryPool* _code_heap_pool;
// memory manager and code heap pools for the CodeCache
static MemoryManager* _code_cache_manager;
static GrowableArray<MemoryPool*>* _code_heap_pools;
static MemoryPool* _metaspace_pool;
static MemoryPool* _compressed_class_pool;
@ -123,7 +125,7 @@ private:
public:
static void set_universe_heap(CollectedHeap* heap);
static void add_code_heap_memory_pool(CodeHeap* heap);
static void add_code_heap_memory_pool(CodeHeap* heap, const char* name);
static void add_metaspace_memory_pools();
static MemoryPool* get_memory_pool(instanceHandle pool);
@ -146,7 +148,10 @@ public:
static void track_memory_usage();
static void track_code_cache_memory_usage() {
track_memory_pool_usage(_code_heap_pool);
// Track memory pool usage of all CodeCache memory pools
for (int i = 0; i < _code_heap_pools->length(); ++i) {
track_memory_pool_usage(_code_heap_pools->at(i));
}
}
static void track_metaspace_memory_usage() {
track_memory_pool_usage(_metaspace_pool);

@ -394,6 +394,7 @@ Declares a structure type that can be used in other events.
<event id="CodeCacheFull" path="vm/code_cache/full" label="Code Cache Full"
has_thread="true" is_requestable="false" is_constant="false" is_instant="true">
<value type="CODEBLOBTYPE" field="codeBlobType" label="Code Heap"/>
<value type="ADDRESS" field="startAddress" label="Start Address"/>
<value type="ADDRESS" field="commitedTopAddress" label="Commited Top"/>
<value type="ADDRESS" field="reservedTopAddress" label="Reserved Top"/>

@ -170,6 +170,11 @@ Now we can use the content + data type in declaring event fields.
type="U1" jvm_type="FLAGVALUEORIGIN">
<value type="UTF8" field="origin" label="origin" />
</content_type>
<content_type id="CodeBlobType" hr_name="Code Blob Type"
type="U1" jvm_type="CODEBLOBTYPE">
<value type="UTF8" field="type" label="type" />
</content_type>
</content_types>
@ -371,6 +376,10 @@ Now we can use the content + data type in declaring event fields.
<!-- FLAGVALUEORIGIN -->
<primary_type symbol="FLAGVALUEORIGIN" datatype="U1"
contenttype="FLAGVALUEORIGIN" type="u1" sizeop="sizeof(u1)" />
<!-- CODEBLOBTYPE -->
<primary_type symbol="CODEBLOBTYPE" datatype="U1"
contenttype="CODEBLOBTYPE" type="u1" sizeop="sizeof(u1)" />
</primary_types>
</types>

@ -0,0 +1,132 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import com.oracle.java.testlibrary.*;
/*
* @test CheckSegmentedCodeCache
* @bug 8015774
* @summary "Checks VM options related to the segmented code cache"
* @library /testlibrary
* @run main/othervm CheckSegmentedCodeCache
*/
public class CheckSegmentedCodeCache {
// Code heap names
private static final String NON_METHOD = "CodeHeap 'non-methods'";
private static final String PROFILED = "CodeHeap 'profiled nmethods'";
private static final String NON_PROFILED = "CodeHeap 'non-profiled nmethods'";
private static void verifySegmentedCodeCache(ProcessBuilder pb, boolean enabled) throws Exception {
OutputAnalyzer out = new OutputAnalyzer(pb.start());
if (enabled) {
try {
// Non-method code heap should always be available with the segmented code cache
out.shouldContain(NON_METHOD);
} catch (RuntimeException e) {
// TieredCompilation is disabled in a client VM
out.shouldContain("TieredCompilation is disabled in this release.");
}
} else {
out.shouldNotContain(NON_METHOD);
}
out.shouldHaveExitValue(0);
}
private static void verifyCodeHeapNotExists(ProcessBuilder pb, String... heapNames) throws Exception {
OutputAnalyzer out = new OutputAnalyzer(pb.start());
for (String name : heapNames) {
out.shouldNotContain(name);
}
}
private static void failsWith(ProcessBuilder pb, String message) throws Exception {
OutputAnalyzer out = new OutputAnalyzer(pb.start());
out.shouldContain(message);
out.shouldHaveExitValue(1);
}
/**
* Check the result of segmented code cache related VM options.
*/
public static void main(String[] args) throws Exception {
ProcessBuilder pb;
// Disabled with ReservedCodeCacheSize < 240MB
pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=239m",
"-XX:+PrintCodeCache", "-version");
verifySegmentedCodeCache(pb, false);
// Disabled without TieredCompilation
pb = ProcessTools.createJavaProcessBuilder("-XX:-TieredCompilation",
"-XX:+PrintCodeCache", "-version");
verifySegmentedCodeCache(pb, false);
// Enabled with TieredCompilation and ReservedCodeCacheSize >= 240MB
pb = ProcessTools.createJavaProcessBuilder("-XX:+TieredCompilation",
"-XX:ReservedCodeCacheSize=240m",
"-XX:+PrintCodeCache", "-version");
verifySegmentedCodeCache(pb, true);
// Always enabled if SegmentedCodeCache is set
pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
"-XX:-TieredCompilation",
"-XX:ReservedCodeCacheSize=239m",
"-XX:+PrintCodeCache", "-version");
verifySegmentedCodeCache(pb, true);
// The profiled and non-profiled code heaps should not be available in
// interpreter-only mode
pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
"-Xint",
"-XX:+PrintCodeCache", "-version");
verifyCodeHeapNotExists(pb, PROFILED, NON_PROFILED);
pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
"-XX:TieredStopAtLevel=0",
"-XX:+PrintCodeCache", "-version");
verifyCodeHeapNotExists(pb, PROFILED, NON_PROFILED);
// The profiled code heap is not needed if we stop compilation at CompLevel_simple
pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
"-XX:TieredStopAtLevel=1",
"-XX:+PrintCodeCache", "-version");
verifyCodeHeapNotExists(pb, PROFILED);
// Fails with too small non-method code heap size
pb = ProcessTools.createJavaProcessBuilder("-XX:NonMethodCodeHeapSize=100K");
failsWith(pb, "Invalid NonMethodCodeHeapSize");
// Fails if code heap sizes do not add up
pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
"-XX:ReservedCodeCacheSize=10M",
"-XX:NonMethodCodeHeapSize=5M",
"-XX:ProfiledCodeHeapSize=5M",
"-XX:NonProfiledCodeHeapSize=5M");
failsWith(pb, "Invalid code heap sizes");
// Fails if not enough space for VM internal code
pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
"-XX:ReservedCodeCacheSize=1700K",
"-XX:InitialCodeCacheSize=100K");
failsWith(pb, "Not enough space in non-method code heap to run VM");
}
}