commit 8937a201e0 (Merge)
@@ -314,7 +314,7 @@ static void * pathmap_dlopen(const char * name, int mode) {
     handle = dlopen(name, mode);
   }
   if (_libsaproc_debug) {
-    printf("libsaproc DEBUG: pathmap_dlopen %s return 0x%x\n", name, handle);
+    printf("libsaproc DEBUG: pathmap_dlopen %s return 0x%lx\n", name, (unsigned long) handle);
  }
  return handle;
 }
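The format-string fix in the hunk above is needed because a dlopen() handle is pointer-sized, so %x (which expects an unsigned int) truncates it on LP64 platforms. A minimal standalone C++ illustration of the same idea, not taken from the patched file:

  #include <cstdio>

  int main() {
    void* handle = reinterpret_cast<void*>(0x7f0012345678);  // stand-in for a dlopen() result
    // Cast to a wide-enough integer type, as the patch does ...
    std::printf("handle = 0x%lx\n", (unsigned long) handle);
    // ... or use the dedicated pointer conversion instead.
    std::printf("handle = %p\n", handle);
    return 0;
  }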
@@ -119,8 +119,8 @@ ifeq ($(INCLUDE_NMT), false)
   CFLAGS += -DINCLUDE_NMT=0

   Src_Files_EXCLUDE += \
-    memBaseline.cpp memPtr.cpp memRecorder.cpp memReporter.cpp memSnapshot.cpp memTrackWorker.cpp \
-    memTracker.cpp nmtDCmd.cpp
+    memBaseline.cpp memReporter.cpp mallocTracker.cpp virtualMemoryTracker.cpp nmtCommon.cpp \
+    memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp
 endif

 -include $(HS_ALT_MAKE)/excludeSrc.make
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,8 +26,9 @@
 #ifndef CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
 #define CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP

-  address generate_normal_entry(void);
-  address generate_native_entry(void);
+  address generate_normal_entry(bool synchronized);
+  address generate_native_entry(bool synchronized);
+  address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }

   void lock_method(void);
   void unlock_method(void);
@@ -938,8 +938,9 @@ void CppInterpreterGenerator::generate_counter_incr(Label& overflow) {
 // Interpreter stub for calling a native method. (C++ interpreter)
 // This sets up a somewhat different looking stack for calling the native method
 // than the typical interpreter frame setup.
+// The synchronized parameter is ignored.
 //
-address CppInterpreterGenerator::generate_native_entry(void) {
+address CppInterpreterGenerator::generate_native_entry(bool synchronized) {
   if (native_entry != NULL) return native_entry;

   address entry = __ pc();
@@ -1729,7 +1730,8 @@ void CppInterpreterGenerator::generate_more_monitors() {
   __ std(R0, BasicObjectLock::obj_offset_in_bytes(), stack_base); // Mark lock as unused
 }

-address CppInterpreterGenerator::generate_normal_entry(void) {
+// The synchronized parameter is ignored
+address CppInterpreterGenerator::generate_normal_entry(bool synchronized) {
   if (interpreter_frame_manager != NULL) return interpreter_frame_manager;

   address entry = __ pc();
@@ -2789,38 +2791,6 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
   return interpreter_frame_manager;
 }

-// Generate code for various sorts of method entries
-//
-address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
-  address entry_point = NULL;
-
-  switch (kind) {
-    case Interpreter::zerolocals                  : break;
-    case Interpreter::zerolocals_synchronized     : break;
-    case Interpreter::native                      : // Fall thru
-    case Interpreter::native_synchronized         : entry_point = ((CppInterpreterGenerator*)this)->generate_native_entry(); break;
-    case Interpreter::empty                       : break;
-    case Interpreter::accessor                    : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
-    case Interpreter::abstract                    : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
-    // These are special interpreter intrinsics which we don't support so far.
-    case Interpreter::java_lang_math_sin          : break;
-    case Interpreter::java_lang_math_cos          : break;
-    case Interpreter::java_lang_math_tan          : break;
-    case Interpreter::java_lang_math_abs          : break;
-    case Interpreter::java_lang_math_log          : break;
-    case Interpreter::java_lang_math_log10        : break;
-    case Interpreter::java_lang_math_sqrt         : break;
-    case Interpreter::java_lang_math_pow          : break;
-    case Interpreter::java_lang_math_exp          : break;
-    case Interpreter::java_lang_ref_reference_get : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
-    default                                       : ShouldNotReachHere(); break;
-  }
-
-  if (entry_point) {
-    return entry_point;
-  }
-  return ((InterpreterGenerator*)this)->generate_normal_entry();
-}
-
 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : CppInterpreterGenerator(code) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -31,7 +31,12 @@
  private:

   address generate_abstract_entry(void);
-  address generate_accessor_entry(void);
+  address generate_jump_to_normal_entry(void);
+  address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
+  address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
   address generate_Reference_get_entry(void);

+  // Not supported
+  address generate_CRC32_update_entry() { return NULL; }
+  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
 #endif // CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
@@ -428,6 +428,19 @@ address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type
   return entry;
 }

+
+// Call an accessor method (assuming it is resolved, otherwise drop into
+// vanilla (slow path) entry.
+address InterpreterGenerator::generate_jump_to_normal_entry(void) {
+  address entry = __ pc();
+  address normal_entry = Interpreter::entry_for_kind(Interpreter::zerolocals);
+  assert(normal_entry != NULL, "should already be generated.");
+  __ branch_to_entry(normal_entry, R11_scratch1);
+  __ flush();
+
+  return entry;
+}
+
 // Abstract method entry.
 //
 address InterpreterGenerator::generate_abstract_entry(void) {
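The generate_jump_to_normal_entry() stub added above lets the accessor and empty method-entry kinds reuse the already generated zerolocals (normal) entry instead of keeping their own hand-written fast paths. A rough C++ sketch of that fallback pattern, with invented names and a plain table standing in for the generated stubs (this is not HotSpot code):

  #include <cassert>
  #include <cstddef>

  typedef const void* address;            // stand-in for an address of generated code

  enum MethodKind { kZerolocals, kAccessor, kEmpty, kNumKinds };

  static address entry_table[kNumKinds];  // hypothetical per-kind entry table

  // Specialized kinds no longer get their own stub; they fall back to the generic entry.
  static address jump_to_normal_entry() {
    assert(entry_table[kZerolocals] != NULL && "generic entry must be generated first");
    return entry_table[kZerolocals];      // a real trampoline would emit a branch instead
  }

  static void generate_entries(address generic_entry) {
    entry_table[kZerolocals] = generic_entry;
    entry_table[kAccessor]   = jump_to_normal_entry();
    entry_table[kEmpty]      = jump_to_normal_entry();
  }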
@@ -485,203 +498,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {
   return entry;
 }

-// Call an accessor method (assuming it is resolved, otherwise drop into
-// vanilla (slow path) entry.
-address InterpreterGenerator::generate_accessor_entry(void) {
-  if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods))) {
-    return NULL;
-  }
-
-  Label Lslow_path, Lacquire;
-
-  const Register
-         Rclass_or_obj = R3_ARG1,
-         Rconst_method = R4_ARG2,
-         Rcodes        = Rconst_method,
-         Rcpool_cache  = R5_ARG3,
-         Rscratch      = R11_scratch1,
-         Rjvmti_mode   = Rscratch,
-         Roffset       = R12_scratch2,
-         Rflags        = R6_ARG4,
-         Rbtable       = R7_ARG5;
-
-  static address branch_table[number_of_states];
-
-  address entry = __ pc();
-
-  // Check for safepoint:
-  // Ditch this, real man don't need safepoint checks.
-
-  // Also check for JVMTI mode
-  // Check for null obj, take slow path if so.
-  __ ld(Rclass_or_obj, Interpreter::stackElementSize, CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
-  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
-  __ cmpdi(CCR1, Rclass_or_obj, 0);
-  __ cmpwi(CCR0, Rjvmti_mode, 0);
-  __ crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2);
-  __ beq(CCR0, Lslow_path); // this==null or jvmti_mode!=0
-
-  // Do 2 things in parallel:
-  // 1. Load the index out of the first instruction word, which looks like this:
-  //    <0x2a><0xb4><index (2 byte, native endianess)>.
-  // 2. Load constant pool cache base.
-  __ ld(Rconst_method, in_bytes(Method::const_offset()), R19_method);
-  __ ld(Rcpool_cache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
-
-  __ lhz(Rcodes, in_bytes(ConstMethod::codes_offset()) + 2, Rconst_method); // Lower half of 32 bit field.
-  __ ld(Rcpool_cache, ConstantPool::cache_offset_in_bytes(), Rcpool_cache);
-
-  // Get the const pool entry by means of <index>.
-  const int codes_shift = exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord);
-  __ slwi(Rscratch, Rcodes, codes_shift); // (codes&0xFFFF)<<codes_shift
-  __ add(Rcpool_cache, Rscratch, Rcpool_cache);
-
-  // Check if cpool cache entry is resolved.
-  // We are resolved if the indices offset contains the current bytecode.
-  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
-  // Big Endian:
-  __ lbz(Rscratch, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::indices_offset()) + 7 - 2, Rcpool_cache);
-  __ cmpwi(CCR0, Rscratch, Bytecodes::_getfield);
-  __ bne(CCR0, Lslow_path);
-  __ isync(); // Order succeeding loads wrt. load of _indices field from cpool_cache.
-
-  // Finally, start loading the value: Get cp cache entry into regs.
-  __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcpool_cache);
-  __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcpool_cache);
-
-  // Following code is from templateTable::getfield_or_static
-  // Load pointer to branch table
-  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
-
-  // Get volatile flag
-  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // extract volatile bit
-  // note: sync is needed before volatile load on PPC64
-
-  // Check field type
-  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
-
-#ifdef ASSERT
-  Label LFlagInvalid;
-  __ cmpldi(CCR0, Rflags, number_of_states);
-  __ bge(CCR0, LFlagInvalid);
-
-  __ ld(R9_ARG7, 0, R1_SP);
-  __ ld(R10_ARG8, 0, R21_sender_SP);
-  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
-  __ asm_assert_eq("backlink", 0x543);
-#endif // ASSERT
-  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-
-  // Load from branch table and dispatch (volatile case: one instruction ahead)
-  __ sldi(Rflags, Rflags, LogBytesPerWord);
-  __ cmpwi(CCR6, Rscratch, 1); // volatile?
-  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-    __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
-  }
-  __ ldx(Rbtable, Rbtable, Rflags);
-
-  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-    __ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
-  }
-  __ mtctr(Rbtable);
-  __ bctr();
-
-#ifdef ASSERT
-  __ bind(LFlagInvalid);
-  __ stop("got invalid flag", 0x6541);
-
-  bool all_uninitialized = true,
-       all_initialized   = true;
-  for (int i = 0; i<number_of_states; ++i) {
-    all_uninitialized = all_uninitialized && (branch_table[i] == NULL);
-    all_initialized   = all_initialized   && (branch_table[i] != NULL);
-  }
-  assert(all_uninitialized != all_initialized, "consistency"); // either or
-
-  __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-  if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point
-  if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point
-  if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point
-  __ stop("unexpected type", 0x6551);
-#endif
-
-  if (branch_table[itos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[itos] = __ pc(); // non-volatile_entry point
-    __ lwax(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[ltos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[ltos] = __ pc(); // non-volatile_entry point
-    __ ldx(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[btos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[btos] = __ pc(); // non-volatile_entry point
-    __ lbzx(R3_RET, Rclass_or_obj, Roffset);
-    __ extsb(R3_RET, R3_RET);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[ctos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[ctos] = __ pc(); // non-volatile_entry point
-    __ lhzx(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[stos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[stos] = __ pc(); // non-volatile_entry point
-    __ lhax(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[atos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[atos] = __ pc(); // non-volatile_entry point
-    __ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj);
-    __ verify_oop(R3_RET);
-    //__ dcbt(R3_RET); // prefetch
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  __ align(32, 12);
-  __ bind(Lacquire);
-  __ twi_0(R3_RET);
-  __ isync(); // acquire
-  __ blr();
-
-#ifdef ASSERT
-  for (int i = 0; i<number_of_states; ++i) {
-    assert(branch_table[i], "accessor_entry initialization");
-    //tty->print_cr("accessor_entry: branch_table[%d] = 0x%llx (opcode 0x%llx)", i, branch_table[i], *((unsigned int*)branch_table[i]));
-  }
-#endif
-
-  __ bind(Lslow_path);
-  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), Rscratch);
-  __ flush();
-
-  return entry;
-}
-
 // Interpreter intrinsic for WeakReference.get().
 // 1. Don't push a full blown frame and go on dispatching, but fetch the value
 // into R8 and return quickly
@@ -713,7 +529,6 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
   // and so we don't need to call the G1 pre-barrier. Thus we can use the
   // regular method entry code to generate the NPE.
   //
-  // This code is based on generate_accessor_enty.

   address entry = __ pc();

@@ -768,7 +583,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

     return entry;
   } else {
-    return generate_accessor_entry();
+    return generate_jump_to_normal_entry();
   }
 }

@@ -30,7 +30,6 @@
   address generate_normal_entry(bool synchronized);
   address generate_native_entry(bool synchronized);
   address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_empty_entry(void);

   void lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded=false);
   void unlock_method(bool check_exceptions = true);
@@ -602,48 +602,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist

 // End of helpers

-// ============================================================================
-// Various method entries
-//
-
-// Empty method, generate a very fast return. We must skip this entry if
-// someone's debugging, indicated by the flag
-// "interp_mode" in the Thread obj.
-// Note: empty methods are generated mostly methods that do assertions, which are
-// disabled in the "java opt build".
-address TemplateInterpreterGenerator::generate_empty_entry(void) {
-  if (!UseFastEmptyMethods) {
-    NOT_PRODUCT(__ should_not_reach_here();)
-    return Interpreter::entry_for_kind(Interpreter::zerolocals);
-  }
-
-  Label Lslow_path;
-  const Register Rjvmti_mode = R11_scratch1;
-  address entry = __ pc();
-
-  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
-  __ cmpwi(CCR0, Rjvmti_mode, 0);
-  __ bne(CCR0, Lslow_path); // jvmti_mode!=0
-
-  // Noone's debuggin: Simply return.
-  // Pop c2i arguments (if any) off when we return.
-#ifdef ASSERT
-  __ ld(R9_ARG7, 0, R1_SP);
-  __ ld(R10_ARG8, 0, R21_sender_SP);
-  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
-  __ asm_assert_eq("backlink", 0x545);
-#endif // ASSERT
-  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-
-  // And we're done.
-  __ blr();
-
-  __ bind(Lslow_path);
-  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
-  __ flush();
-
-  return entry;
-}
-
 // Support abs and sqrt like in compiler.
 // For others we can use a normal (native) entry.
@@ -1289,45 +1247,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
   return entry;
 }

-// =============================================================================
-// Entry points
-
-address AbstractInterpreterGenerator::generate_method_entry(
-                                        AbstractInterpreter::MethodKind kind) {
-  // Determine code generation flags.
-  bool synchronized = false;
-  address entry_point = NULL;
-
-  switch (kind) {
-  case Interpreter::zerolocals             : break;
-  case Interpreter::zerolocals_synchronized: synchronized = true; break;
-  case Interpreter::native                 : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
-  case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true);  break;
-  case Interpreter::empty                  : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();       break;
-  case Interpreter::accessor               : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();    break;
-  case Interpreter::abstract               : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();    break;
-
-  case Interpreter::java_lang_math_sin     : // fall thru
-  case Interpreter::java_lang_math_cos     : // fall thru
-  case Interpreter::java_lang_math_tan     : // fall thru
-  case Interpreter::java_lang_math_abs     : // fall thru
-  case Interpreter::java_lang_math_log     : // fall thru
-  case Interpreter::java_lang_math_log10   : // fall thru
-  case Interpreter::java_lang_math_sqrt    : // fall thru
-  case Interpreter::java_lang_math_pow     : // fall thru
-  case Interpreter::java_lang_math_exp     : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);    break;
-  case Interpreter::java_lang_ref_reference_get
-                                           : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
-  default                                  : ShouldNotReachHere();                                                       break;
-  }
-
-  if (entry_point) {
-    return entry_point;
-  }
-
-  return ((InterpreterGenerator*) this)->generate_normal_entry(synchronized);
-}
-
 // These should never be compiled since the interpreter will prefer
 // the compiled version to the intrinsic version.
 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
@@ -1355,7 +1274,7 @@ int AbstractInterpreter::size_activation(int max_stack,
                                          int callee_locals,
                                          bool is_top_frame) {
   // Note: This calculation must exactly parallel the frame setup
-  // in AbstractInterpreterGenerator::generate_method_entry.
+  // in InterpreterGenerator::generate_fixed_frame.
   assert(Interpreter::stackElementWords == 1, "sanity");
   const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
   const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
@@ -29,6 +29,7 @@
 #include "compiler/disassembler.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/java.hpp"
+#include "runtime/os.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "utilities/defaultStream.hpp"
 #include "vm_version_ppc.hpp"
@@ -108,7 +109,7 @@ void VM_Version::initialize() {
                (has_vand() ? " vand" : "")
                // Make sure number of %s matches num_features!
               );
-  _features_str = strdup(buf);
+  _features_str = os::strdup(buf);
   NOT_PRODUCT(if (Verbose) print_features(););

   // PPC64 supports 8-byte compare-exchange operations (see
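The strdup-to-os::strdup switch above routes the string duplication through the VM's own allocation layer instead of raw libc, presumably so the copy is made with a project-controlled allocator and can be accounted for. A hedged, illustrative-only C++ sketch of that kind of wrapper (this is not the actual os::strdup implementation):

  #include <cstring>
  #include <cstdlib>

  // Illustrative only: duplicate a C string through a wrapper so the allocation
  // goes through one place and can be tracked or replaced centrally.
  static char* tracked_strdup(const char* s) {
    size_t len = std::strlen(s) + 1;
    char* copy = static_cast<char*>(std::malloc(len));  // real code would call the VM allocator here
    if (copy != NULL) {
      std::memcpy(copy, s, len);
    }
    return copy;
  }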
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/methodData.hpp"
 #include "oops/method.hpp"
@@ -68,9 +69,7 @@ bool CppInterpreter::contains(address pc) {
 #define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
 #define __ _masm->

-Label frame_manager_entry;
-Label fast_accessor_slow_entry_path;  // fast accessor methods need to be able to jmp to unsynchronized
-                                      // c++ interpreter entry point this holds that entry point label.
+Label frame_manager_entry;  // c++ interpreter entry point this holds that entry point label.

 static address unctrap_frame_manager_entry = NULL;

@@ -452,110 +451,6 @@ address InterpreterGenerator::generate_empty_entry(void) {
   return NULL;
 }

-// Call an accessor method (assuming it is resolved, otherwise drop into
-// vanilla (slow path) entry
-
-// Generates code to elide accessor methods
-// Uses G3_scratch and G1_scratch as scratch
-address InterpreterGenerator::generate_accessor_entry(void) {
-
-  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
-  // parameter size = 1
-  // Note: We can only use this code if the getfield has been resolved
-  //       and if we don't have a null-pointer exception => check for
-  //       these conditions first and use slow path if necessary.
-  address entry = __ pc();
-  Label slow_path;
-
-  if ( UseFastAccessorMethods) {
-    // Check if we need to reach a safepoint and generate full interpreter
-    // frame if so.
-    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
-    __ load_contents(sync_state, G3_scratch);
-    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
-    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
-
-    // Check if local 0 != NULL
-    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
-    __ tst(Otos_i);  // check if local 0 == NULL and go the slow path
-    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
-
-
-    // read first instruction word and extract bytecode @ 1 and index @ 2
-    // get first 4 bytes of the bytecodes (big endian!)
-    __ ld_ptr(Address(G5_method, in_bytes(Method::const_offset())), G1_scratch);
-    __ ld(Address(G1_scratch, in_bytes(ConstMethod::codes_offset())), G1_scratch);
-
-    // move index @ 2 far left then to the right most two bytes.
-    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
-    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
-                      ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
-
-    // get constant pool cache
-    __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G3_scratch);
-    __ ld_ptr(G3_scratch, in_bytes(ConstMethod::constants_offset()), G3_scratch);
-    __ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);
-
-    // get specific constant pool cache entry
-    __ add(G3_scratch, G1_scratch, G3_scratch);
-
-    // Check the constant Pool cache entry to see if it has been resolved.
-    // If not, need the slow path.
-    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
-    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch);
-    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
-    __ and3(G1_scratch, 0xFF, G1_scratch);
-    __ cmp(G1_scratch, Bytecodes::_getfield);
-    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
-
-    // Get the type and return field offset from the constant pool cache
-    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch);
-    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch);
-
-    Label xreturn_path;
-    // Need to differentiate between igetfield, agetfield, bgetfield etc.
-    // because they are different sizes.
-    // Get the type from the constant pool cache
-    __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
-    // Make sure we don't need to mask G1_scratch after the above shift
-    ConstantPoolCacheEntry::verify_tos_state_shift();
-    __ cmp(G1_scratch, atos );
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
-    __ cmp(G1_scratch, itos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
-    __ cmp(G1_scratch, stos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
-    __ cmp(G1_scratch, ctos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
-#ifdef ASSERT
-    __ cmp(G1_scratch, btos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
-    __ should_not_reach_here();
-#endif
-    __ ldsb(Otos_i, G3_scratch, Otos_i);
-    __ bind(xreturn_path);
-
-    // _ireturn/_areturn
-    __ retl();                     // return from leaf routine
-    __ delayed()->mov(O5_savedSP, SP);
-
-    // Generate regular method entry
-    __ bind(slow_path);
-    __ ba(fast_accessor_slow_entry_path);
-    __ delayed()->nop();
-    return entry;
-  }
-  return NULL;
-}
-
 address InterpreterGenerator::generate_Reference_get_entry(void) {
 #if INCLUDE_ALL_GCS
   if (UseG1GC) {
@@ -573,7 +468,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
-  return generate_accessor_entry();
+  return generate_jump_to_normal_entry();
 }

 //
@@ -1870,23 +1765,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
   __ ba(call_interpreter_2);
   __ delayed()->st_ptr(O1, STATE(_stack));

-
-  // Fast accessor methods share this entry point.
-  // This works because frame manager is in the same codelet
-  // This can either be an entry via call_stub/c1/c2 or a recursive interpreter call
-  // we need to do a little register fixup here once we distinguish the two of them
-  if (UseFastAccessorMethods && !synchronized) {
-    // Call stub_return address still in O7
-    __ bind(fast_accessor_slow_entry_path);
-    __ set((intptr_t)return_from_native_method - 8, Gtmp1);
-    __ cmp(Gtmp1, O7); // returning to interpreter?
-    __ brx(Assembler::equal, true, Assembler::pt, re_dispatch); // yep
-    __ delayed()->nop();
-    __ ba(re_dispatch);
-    __ delayed()->mov(G0, prevState); // initial entry
-
-  }
-
 // interpreter returning to native code (call_stub/c1/c2)
 // convert result and unwind initial activation
 // L2_scratch - scaled result type index
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,11 @@
   address generate_normal_entry(bool synchronized);
   address generate_native_entry(bool synchronized);
   address generate_abstract_entry(void);
-  address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_empty_entry(void);
-  address generate_accessor_entry(void);
+  // there are no math intrinsics on sparc
+  address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
+  address generate_jump_to_normal_entry(void);
+  address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
+  address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
   address generate_Reference_get_entry(void);
   void lock_method(void);
   void save_native_result(void);
@@ -43,4 +45,7 @@
   void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
   void generate_counter_overflow(Label& Lcontinue);

+  // Not supported
+  address generate_CRC32_update_entry() { return NULL; }
+  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
 #endif // CPU_SPARC_VM_INTERPRETERGENERATOR_SPARC_HPP
@@ -241,6 +241,15 @@ void InterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {

 // Various method entries

+address InterpreterGenerator::generate_jump_to_normal_entry(void) {
+  address entry = __ pc();
+  assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
+  AddressLiteral al(Interpreter::entry_for_kind(Interpreter::zerolocals));
+  __ jump_to(al, G3_scratch);
+  __ delayed()->nop();
+  return entry;
+}
+
 // Abstract method entry
 // Attempt to execute abstract method. Throw exception
 //
@@ -255,159 +264,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {

 }

-
-//----------------------------------------------------------------------------------------------------
-// Entry points & stack frame layout
-//
-// Here we generate the various kind of entries into the interpreter.
-// The two main entry type are generic bytecode methods and native call method.
-// These both come in synchronized and non-synchronized versions but the
-// frame layout they create is very similar. The other method entry
-// types are really just special purpose entries that are really entry
-// and interpretation all in one. These are for trivial methods like
-// accessor, empty, or special math methods.
-//
-// When control flow reaches any of the entry types for the interpreter
-// the following holds ->
-//
-// C2 Calling Conventions:
-//
-// The entry code below assumes that the following registers are set
-// when coming in:
-//    G5_method: holds the Method* of the method to call
-//    Lesp:      points to the TOS of the callers expression stack
-//               after having pushed all the parameters
-//
-// The entry code does the following to setup an interpreter frame
-//   pop parameters from the callers stack by adjusting Lesp
-//   set O0 to Lesp
-//   compute X = (max_locals - num_parameters)
-//   bump SP up by X to accomadate the extra locals
-//   compute X = max_expression_stack
-//               + vm_local_words
-//               + 16 words of register save area
-//   save frame doing a save sp, -X, sp growing towards lower addresses
-//   set Lbcp, Lmethod, LcpoolCache
-//   set Llocals to i0
-//   set Lmonitors to FP - rounded_vm_local_words
-//   set Lesp to Lmonitors - 4
-//
-// The frame has now been setup to do the rest of the entry code
-
-// Try this optimization:  Most method entries could live in a
-// "one size fits all" stack frame without all the dynamic size
-// calculations.  It might be profitable to do all this calculation
-// statically and approximately for "small enough" methods.
-
-//-----------------------------------------------------------------------------------------------
-
-// C1 Calling conventions
-//
-// Upon method entry, the following registers are setup:
-//
-// g2 G2_thread: current thread
-// g5 G5_method: method to activate
-// g4 Gargs  : pointer to last argument
-//
-//
-// Stack:
-//
-//   +---------------+ <--- sp
-//   |               |
-//   : reg save area :
-//   |               |
-//   +---------------+ <--- sp + 0x40
-//   |               |
-//   : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-//   |               |
-//   +---------------+ <--- sp + 0x5c
-//   |               |
-//   :     free      :
-//   |               |
-//   +---------------+ <--- Gargs
-//   |               |
-//   :   arguments   :
-//   |               |
-//   +---------------+
-//   |               |
-//
-//
-//
-// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
-//
-//   +---------------+ <--- sp
-//   |               |
-//   : reg save area :
-//   |               |
-//   +---------------+ <--- sp + 0x40
-//   |               |
-//   : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-//   |               |
-//   +---------------+ <--- sp + 0x5c
-//   |               |
-//   :               :
-//   |               | <--- Lesp
-//   +---------------+ <--- Lmonitors (fp - 0x18)
-//   |   VM locals   |
-//   +---------------+ <--- fp
-//   |               |
-//   : reg save area :
-//   |               |
-//   +---------------+ <--- fp + 0x40
-//   |               |
-//   : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-//   |               |
-//   +---------------+ <--- fp + 0x5c
-//   |               |
-//   :     free      :
-//   |               |
-//   +---------------+
-//   |               |
-//   : nonarg locals :
-//   |               |
-//   +---------------+
-//   |               |
-//   :   arguments   :
-//   |               | <--- Llocals
-//   +---------------+ <--- Gargs
-//   |               |
-
-address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
-  // determine code generation flags
-  bool synchronized = false;
-  address entry_point = NULL;
-
-  switch (kind) {
-  case Interpreter::zerolocals             : break;
-  case Interpreter::zerolocals_synchronized: synchronized = true; break;
-  case Interpreter::native                 : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
-  case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true);  break;
-  case Interpreter::empty                  : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();       break;
-  case Interpreter::accessor               : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();    break;
-  case Interpreter::abstract               : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();    break;
-
-  case Interpreter::java_lang_math_sin     : break;
-  case Interpreter::java_lang_math_cos     : break;
-  case Interpreter::java_lang_math_tan     : break;
-  case Interpreter::java_lang_math_sqrt    : break;
-  case Interpreter::java_lang_math_abs     : break;
-  case Interpreter::java_lang_math_log     : break;
-  case Interpreter::java_lang_math_log10   : break;
-  case Interpreter::java_lang_math_pow     : break;
-  case Interpreter::java_lang_math_exp     : break;
-  case Interpreter::java_lang_ref_reference_get
-                                           : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
-  default:
-    fatal(err_msg("unexpected method kind: %d", kind));
-    break;
-  }
-
-  if (entry_point) return entry_point;
-
-  return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
-}
-

 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
   // No special entry points that preclude compilation
   return true;
@@ -456,6 +456,115 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
 // Generate a fixed interpreter frame. This is identical setup for interpreted
 // methods and for native methods hence the shared code.

+
+//----------------------------------------------------------------------------------------------------
+// Stack frame layout
+//
+// When control flow reaches any of the entry types for the interpreter
+// the following holds ->
+//
+// C2 Calling Conventions:
+//
+// The entry code below assumes that the following registers are set
+// when coming in:
+//    G5_method: holds the Method* of the method to call
+//    Lesp:      points to the TOS of the callers expression stack
+//               after having pushed all the parameters
+//
+// The entry code does the following to setup an interpreter frame
+//   pop parameters from the callers stack by adjusting Lesp
+//   set O0 to Lesp
+//   compute X = (max_locals - num_parameters)
+//   bump SP up by X to accomadate the extra locals
+//   compute X = max_expression_stack
+//               + vm_local_words
+//               + 16 words of register save area
+//   save frame doing a save sp, -X, sp growing towards lower addresses
+//   set Lbcp, Lmethod, LcpoolCache
+//   set Llocals to i0
+//   set Lmonitors to FP - rounded_vm_local_words
+//   set Lesp to Lmonitors - 4
+//
+// The frame has now been setup to do the rest of the entry code
+
+// Try this optimization:  Most method entries could live in a
+// "one size fits all" stack frame without all the dynamic size
+// calculations.  It might be profitable to do all this calculation
+// statically and approximately for "small enough" methods.
+
+//-----------------------------------------------------------------------------------------------
+
+// C1 Calling conventions
+//
+// Upon method entry, the following registers are setup:
+//
+// g2 G2_thread: current thread
+// g5 G5_method: method to activate
+// g4 Gargs  : pointer to last argument
+//
+//
+// Stack:
+//
+//   +---------------+ <--- sp
+//   |               |
+//   : reg save area :
+//   |               |
+//   +---------------+ <--- sp + 0x40
+//   |               |
+//   : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
+//   |               |
+//   +---------------+ <--- sp + 0x5c
+//   |               |
+//   :     free      :
+//   |               |
+//   +---------------+ <--- Gargs
+//   |               |
+//   :   arguments   :
+//   |               |
+//   +---------------+
+//   |               |
+//
+//
+//
+// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
+//
+//   +---------------+ <--- sp
+//   |               |
+//   : reg save area :
+//   |               |
+//   +---------------+ <--- sp + 0x40
+//   |               |
+//   : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
+//   |               |
+//   +---------------+ <--- sp + 0x5c
+//   |               |
+//   :               :
+//   |               | <--- Lesp
+//   +---------------+ <--- Lmonitors (fp - 0x18)
+//   |   VM locals   |
+//   +---------------+ <--- fp
+//   |               |
+//   : reg save area :
+//   |               |
+//   +---------------+ <--- fp + 0x40
+//   |               |
+//   : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
+//   |               |
+//   +---------------+ <--- fp + 0x5c
+//   |               |
+//   :     free      :
+//   |               |
+//   +---------------+
+//   |               |
+//   : nonarg locals :
+//   |               |
+//   +---------------+
+//   |               |
+//   :   arguments   :
+//   |               | <--- Llocals
+//   +---------------+ <--- Gargs
+//   |               |
+
 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
 //
 //
@ -599,136 +708,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Empty method, generate a very fast return.
|
|
||||||
|
|
||||||
address InterpreterGenerator::generate_empty_entry(void) {
|
|
||||||
|
|
||||||
// A method that does nother but return...
|
|
||||||
|
|
||||||
address entry = __ pc();
|
|
||||||
Label slow_path;
|
|
||||||
|
|
||||||
// do nothing for empty methods (do not even increment invocation counter)
|
|
||||||
if ( UseFastEmptyMethods) {
|
|
||||||
// If we need a safepoint check, generate full interpreter entry.
|
|
||||||
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
|
|
||||||
__ set(sync_state, G3_scratch);
|
|
||||||
__ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
|
|
||||||
|
|
||||||
// Code: _return
|
|
||||||
__ retl();
|
|
||||||
__ delayed()->mov(O5_savedSP, SP);
|
|
||||||
|
|
||||||
__ bind(slow_path);
|
|
||||||
(void) generate_normal_entry(false);
|
|
||||||
|
|
||||||
return entry;
|
|
||||||
}
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Call an accessor method (assuming it is resolved, otherwise drop into
|
|
||||||
// vanilla (slow path) entry
|
|
||||||
|
|
||||||
// Generates code to elide accessor methods
|
|
||||||
// Uses G3_scratch and G1_scratch as scratch
|
|
||||||
address InterpreterGenerator::generate_accessor_entry(void) {
|
|
||||||
|
|
||||||
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
|
|
||||||
// parameter size = 1
|
|
||||||
// Note: We can only use this code if the getfield has been resolved
|
|
||||||
// and if we don't have a null-pointer exception => check for
|
|
||||||
// these conditions first and use slow path if necessary.
|
|
||||||
address entry = __ pc();
|
|
||||||
Label slow_path;
|
|
||||||
|
|
||||||
|
|
||||||
// XXX: for compressed oops pointer loading and decoding doesn't fit in
|
|
||||||
// delay slot and damages G1
|
|
||||||
- if ( UseFastAccessorMethods && !UseCompressedOops ) {
- // Check if we need to reach a safepoint and generate full interpreter
- // frame if so.
- AddressLiteral sync_state(SafepointSynchronize::address_of_state());
- __ load_contents(sync_state, G3_scratch);
- __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
- __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
-
- // Check if local 0 != NULL
- __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
- // check if local 0 == NULL and go the slow path
- __ br_null_short(Otos_i, Assembler::pn, slow_path);
-
- // read first instruction word and extract bytecode @ 1 and index @ 2
- // get first 4 bytes of the bytecodes (big endian!)
- __ ld_ptr(G5_method, Method::const_offset(), G1_scratch);
- __ ld(G1_scratch, ConstMethod::codes_offset(), G1_scratch);
-
- // move index @ 2 far left then to the right most two bytes.
- __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
- __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
- ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
-
- // get constant pool cache
- __ ld_ptr(G5_method, Method::const_offset(), G3_scratch);
- __ ld_ptr(G3_scratch, ConstMethod::constants_offset(), G3_scratch);
- __ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);
-
- // get specific constant pool cache entry
- __ add(G3_scratch, G1_scratch, G3_scratch);
-
- // Check the constant Pool cache entry to see if it has been resolved.
- // If not, need the slow path.
- ByteSize cp_base_offset = ConstantPoolCache::base_offset();
- __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
- __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
- __ and3(G1_scratch, 0xFF, G1_scratch);
- __ cmp_and_br_short(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, Assembler::pn, slow_path);
-
- // Get the type and return field offset from the constant pool cache
- __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
- __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);
-
- Label xreturn_path;
- // Need to differentiate between igetfield, agetfield, bgetfield etc.
- // because they are different sizes.
- // Get the type from the constant pool cache
- __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
- // Make sure we don't need to mask G1_scratch after the above shift
- ConstantPoolCacheEntry::verify_tos_state_shift();
- __ cmp(G1_scratch, atos );
- __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
- __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
- __ cmp(G1_scratch, itos);
- __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
- __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
- __ cmp(G1_scratch, stos);
- __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
- __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
- __ cmp(G1_scratch, ctos);
- __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
- __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
- #ifdef ASSERT
- __ cmp(G1_scratch, btos);
- __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
- __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
- __ should_not_reach_here();
- #endif
- __ ldsb(Otos_i, G3_scratch, Otos_i);
- __ bind(xreturn_path);
-
- // _ireturn/_areturn
- __ retl(); // return from leaf routine
- __ delayed()->mov(O5_savedSP, SP);
-
- // Generate regular method entry
- __ bind(slow_path);
- (void) generate_normal_entry(false);
- return entry;
- }
- return NULL;
- }
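The block above is the SPARC fast-accessor path that this change deletes. As a rough standalone illustration of the control flow it implemented (this is not HotSpot code; every type and name below is hypothetical and only models the decision structure), the fast path applies only when no safepoint is pending, the receiver is non-NULL, and the getfield is already resolved; otherwise it falls back to the normal entry:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the resolved constant-pool-cache entry and the
// receiver object; they are not HotSpot's real data structures.
struct CacheEntry { uint8_t bytecode; int field_offset; };
struct Receiver   { int32_t value; int32_t field_at(int) const { return value; } };

enum class Path { Fast, Slow };

// Mirrors the removed accessor entry: take the slow (normal) entry unless
// every precondition for the one-load fast path holds.
Path accessor_entry(bool safepoint_pending, const Receiver* local0,
                    const CacheEntry& e, int32_t* result) {
  if (safepoint_pending) return Path::Slow;                 // need a full interpreter frame
  if (local0 == nullptr) return Path::Slow;                 // let the slow path raise the NPE
  if (e.bytecode != 0xb4 /* getfield */) return Path::Slow; // bytecode not resolved yet
  *result = local0->field_at(e.field_offset);               // load the field and leaf-return
  return Path::Fast;
}

int main() {
  Receiver r{42}; CacheEntry e{0xb4, 8}; int32_t v = 0;
  std::printf("%s v=%d\n", accessor_entry(false, &r, e, &v) == Path::Fast ? "fast" : "slow", v);
}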

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
@@ -806,7 +785,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
- return generate_accessor_entry();
+ return generate_jump_to_normal_entry();
}

//
@@ -1242,8 +1221,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {

// Generic method entry to (asm) interpreter
- //------------------------------------------------------------------------------------------------------------------------
- //
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
address entry = __ pc();

@@ -1410,123 +1387,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry;
}

- //----------------------------------------------------------------------------------------------------
- // Entry points & stack frame layout
- //
- // Here we generate the various kind of entries into the interpreter.
- // The two main entry type are generic bytecode methods and native call method.
- // These both come in synchronized and non-synchronized versions but the
- // frame layout they create is very similar. The other method entry
- // types are really just special purpose entries that are really entry
- // and interpretation all in one. These are for trivial methods like
- // accessor, empty, or special math methods.
- //
- // When control flow reaches any of the entry types for the interpreter
- // the following holds ->
- //
- // C2 Calling Conventions:
- //
- // The entry code below assumes that the following registers are set
- // when coming in:
- // G5_method: holds the Method* of the method to call
- // Lesp: points to the TOS of the callers expression stack
- //       after having pushed all the parameters
- //
- // The entry code does the following to setup an interpreter frame
- //   pop parameters from the callers stack by adjusting Lesp
- //   set O0 to Lesp
- //   compute X = (max_locals - num_parameters)
- //   bump SP up by X to accomadate the extra locals
- //   compute X = max_expression_stack
- //   + vm_local_words
- //   + 16 words of register save area
- //   save frame doing a save sp, -X, sp growing towards lower addresses
- //   set Lbcp, Lmethod, LcpoolCache
- //   set Llocals to i0
- //   set Lmonitors to FP - rounded_vm_local_words
- //   set Lesp to Lmonitors - 4
- //
- // The frame has now been setup to do the rest of the entry code
-
- // Try this optimization: Most method entries could live in a
- // "one size fits all" stack frame without all the dynamic size
- // calculations. It might be profitable to do all this calculation
- // statically and approximately for "small enough" methods.
-
- //-----------------------------------------------------------------------------------------------
-
- // C1 Calling conventions
- //
- // Upon method entry, the following registers are setup:
- //
- // g2 G2_thread: current thread
- // g5 G5_method: method to activate
- // g4 Gargs  : pointer to last argument
- //
- //
- // Stack:
- //
- // +---------------+ <--- sp
- // |               |
- // : reg save area :
- // |               |
- // +---------------+ <--- sp + 0x40
- // |               |
- // : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
- // |               |
- // +---------------+ <--- sp + 0x5c
- // |               |
- // :     free      :
- // |               |
- // +---------------+ <--- Gargs
- // |               |
- // :   arguments   :
- // |               |
- // +---------------+
- // |               |
- //
- //
- //
- // AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
- //
- // +---------------+ <--- sp
- // |               |
- // : reg save area :
- // |               |
- // +---------------+ <--- sp + 0x40
- // |               |
- // : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
- // |               |
- // +---------------+ <--- sp + 0x5c
- // |               |
- // :               :
- // |               | <--- Lesp
- // +---------------+ <--- Lmonitors (fp - 0x18)
- // |  VM locals    |
- // +---------------+ <--- fp
- // |               |
- // : reg save area :
- // |               |
- // +---------------+ <--- fp + 0x40
- // |               |
- // : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
- // |               |
- // +---------------+ <--- fp + 0x5c
- // |               |
- // :     free      :
- // |               |
- // +---------------+
- // |               |
- // : nonarg locals :
- // |               |
- // +---------------+
- // |               |
- // :  arguments    :
- // |               | <--- Llocals
- // +---------------+ <--- Gargs
- // |               |
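The removed comment walks through the frame-size arithmetic (extra locals beyond the parameters, the expression stack, VM-local words, and a 16-word register save area). A minimal sketch of that arithmetic, using made-up constants purely for illustration (the real sizing lives in size_activation_helper() below and uses HotSpot's own constants):

// Illustrative only; constants are hypothetical placeholders.
int frame_size_in_words(int max_locals, int num_parameters,
                        int max_expression_stack, int monitor_words) {
  const int vm_local_words      = 6;   // assumed value for the sketch
  const int register_save_words = 16;  // "16 words of register save area"
  int extra_locals = max_locals - num_parameters;  // locals beyond the incoming args
  return extra_locals + max_expression_stack + monitor_words +
         vm_local_words + register_save_words;
}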
static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {

// Figure out the size of an interpreter frame (in words) given that we have a fully allocated

@@ -26,6 +26,7 @@
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
+ #include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_sparc.hpp"

@@ -249,7 +250,7 @@ void VM_Version::initialize() {
(!has_hardware_fsmuld() ? ", no-fsmuld" : ""));

// buf is started with ", " or is empty
- _features_str = strdup(strlen(buf) > 2 ? buf + 2 : buf);
+ _features_str = os::strdup(strlen(buf) > 2 ? buf + 2 : buf);

// There are three 64-bit SPARC families that do not overlap, e.g.,
// both is_ultra3() and is_sparc64() cannot be true at the same time.

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,21 +27,6 @@

protected:

- #if 0
- address generate_asm_interpreter_entry(bool synchronized);
- address generate_native_entry(bool synchronized);
- address generate_abstract_entry(void);
- address generate_math_entry(AbstractInterpreter::MethodKind kind);
- address generate_empty_entry(void);
- address generate_accessor_entry(void);
- address generate_Reference_get_entry(void);
- void lock_method(void);
- void generate_stack_overflow_check(void);
-
- void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
- void generate_counter_overflow(Label* do_continue);
- #endif

void generate_more_monitors();
void generate_deopt_handling();
address generate_interpreter_frame_manager(bool synchronized); // C++ interpreter only

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -66,9 +66,6 @@ extern "C" void RecursiveInterpreterActivation(interpreterState istate )
#define __ _masm->
#define STATE(field_name) (Address(state, byte_offset_of(BytecodeInterpreter, field_name)))

- Label fast_accessor_slow_entry_path;  // fast accessor methods need to be able to jmp to unsynchronized
- // c++ interpreter entry point this holds that entry point label.

// default registers for state and sender_sp
// state and sender_sp are the same on 32bit because we have no choice.
// state could be rsi on 64bit but it is an arg reg and not callee save
@@ -660,7 +657,6 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// generate_method_entry) so the guard should work for them too.
//

- // monitor entry size: see picture of stack set (generate_method_entry) and frame_i486.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

// total overhead size: entry_size + (saved rbp, thru expr stack bottom).
@@ -794,156 +790,6 @@ void InterpreterGenerator::lock_method(void) {
__ lock_object(monitor);
}

- // Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry
-
- address InterpreterGenerator::generate_accessor_entry(void) {
-
- // rbx: Method*
-
- // rsi/r13: senderSP must preserved for slow path, set SP to it on fast path
-
- Label xreturn_path;
-
- // do fastpath for resolved accessor methods
- if (UseFastAccessorMethods) {
-
- address entry_point = __ pc();
-
- Label slow_path;
- // If we need a safepoint check, generate full interpreter entry.
- ExternalAddress state(SafepointSynchronize::address_of_state());
- __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
- SafepointSynchronize::_not_synchronized);
-
- __ jcc(Assembler::notEqual, slow_path);
- // ASM/C++ Interpreter
- // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
- // Note: We can only use this code if the getfield has been resolved
- // and if we don't have a null-pointer exception => check for
- // these conditions first and use slow path if necessary.
- // rbx,: method
- // rcx: receiver
- __ movptr(rax, Address(rsp, wordSize));
-
- // check if local 0 != NULL and read field
- __ testptr(rax, rax);
- __ jcc(Assembler::zero, slow_path);
-
- // read first instruction word and extract bytecode @ 1 and index @ 2
- __ movptr(rdx, Address(rbx, Method::const_offset()));
- __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
- __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
- // Shift codes right to get the index on the right.
- // The bytecode fetched looks like <index><0xb4><0x2a>
- __ shrl(rdx, 2*BitsPerByte);
- __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
- __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
-
- // rax,: local 0
- // rbx,: method
- // rcx: receiver - do not destroy since it is needed for slow path!
- // rcx: scratch
- // rdx: constant pool cache index
- // rdi: constant pool cache
- // rsi/r13: sender sp
-
- // check if getfield has been resolved and read constant pool cache entry
- // check the validity of the cache entry by testing whether _indices field
- // contains Bytecode::_getfield in b1 byte.
- assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
- __ movl(rcx,
- Address(rdi,
- rdx,
- Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
- __ shrl(rcx, 2*BitsPerByte);
- __ andl(rcx, 0xFF);
- __ cmpl(rcx, Bytecodes::_getfield);
- __ jcc(Assembler::notEqual, slow_path);
-
- // Note: constant pool entry is not valid before bytecode is resolved
- __ movptr(rcx,
- Address(rdi,
- rdx,
- Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
- __ movl(rdx,
- Address(rdi,
- rdx,
- Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-
- Label notByte, notShort, notChar;
- const Address field_address (rax, rcx, Address::times_1);
-
- // Need to differentiate between igetfield, agetfield, bgetfield etc.
- // because they are different sizes.
- // Use the type from the constant pool cache
- __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
- // Make sure we don't need to mask rdx after the above shift
- ConstantPoolCacheEntry::verify_tos_state_shift();
- #ifdef _LP64
- Label notObj;
- __ cmpl(rdx, atos);
- __ jcc(Assembler::notEqual, notObj);
- // atos
- __ movptr(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notObj);
- #endif // _LP64
- __ cmpl(rdx, btos);
- __ jcc(Assembler::notEqual, notByte);
- __ load_signed_byte(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notByte);
- __ cmpl(rdx, stos);
- __ jcc(Assembler::notEqual, notShort);
- __ load_signed_short(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notShort);
- __ cmpl(rdx, ctos);
- __ jcc(Assembler::notEqual, notChar);
- __ load_unsigned_short(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notChar);
- #ifdef ASSERT
- Label okay;
- #ifndef _LP64
- __ cmpl(rdx, atos);
- __ jcc(Assembler::equal, okay);
- #endif // _LP64
- __ cmpl(rdx, itos);
- __ jcc(Assembler::equal, okay);
- __ stop("what type is this?");
- __ bind(okay);
- #endif // ASSERT
- // All the rest are a 32 bit wordsize
- __ movl(rax, field_address);
-
- __ bind(xreturn_path);
-
- // _ireturn/_areturn
- __ pop(rdi); // get return address
- __ mov(rsp, sender_sp_on_entry); // set sp to sender sp
- __ jmp(rdi);
-
- // generate a vanilla interpreter entry as the slow path
- __ bind(slow_path);
- // We will enter c++ interpreter looking like it was
- // called by the call_stub this will cause it to return
- // a tosca result to the invoker which might have been
- // the c++ interpreter itself.
-
- __ jmp(fast_accessor_slow_entry_path);
- return entry_point;
-
- } else {
- return NULL;
- }
-
- }

address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
if (UseG1GC) {
@@ -961,7 +807,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
- return generate_accessor_entry();
+ return generate_jump_to_normal_entry();
}

//
@@ -1670,10 +1516,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {

address entry_point = __ pc();

- // Fast accessor methods share this entry point.
- // This works because frame manager is in the same codelet
- if (UseFastAccessorMethods && !synchronized) __ bind(fast_accessor_slow_entry_path);

Label dispatch_entry_2;
__ movptr(rcx, sender_sp_on_entry);
__ movptr(state, (int32_t)NULL_WORD); // no current activation
@@ -2212,40 +2054,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}

- address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
- // determine code generation flags
- bool synchronized = false;
- address entry_point = NULL;
-
- switch (kind) {
- case Interpreter::zerolocals : break;
- case Interpreter::zerolocals_synchronized: synchronized = true; break;
- case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
- case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
- case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
- case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
- case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
-
- case Interpreter::java_lang_math_sin : // fall thru
- case Interpreter::java_lang_math_cos : // fall thru
- case Interpreter::java_lang_math_tan : // fall thru
- case Interpreter::java_lang_math_abs : // fall thru
- case Interpreter::java_lang_math_log : // fall thru
- case Interpreter::java_lang_math_log10 : // fall thru
- case Interpreter::java_lang_math_sqrt : // fall thru
- case Interpreter::java_lang_math_pow : // fall thru
- case Interpreter::java_lang_math_exp : // fall thru
- entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
- case Interpreter::java_lang_ref_reference_get
- : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
- default : ShouldNotReachHere(); break;
- }
-
- if (entry_point) return entry_point;
-
- return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
-
- }

InterpreterGenerator::InterpreterGenerator(StubQueue* code)
: CppInterpreterGenerator(code) {

hotspot/src/cpu/x86/vm/interpreterGenerator_x86.cpp (new file, 66 lines)
@@ -0,0 +1,66 @@
+ /*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+ #include "precompiled.hpp"
+ #include "asm/macroAssembler.hpp"
+ #include "interpreter/interpreter.hpp"
+ #include "interpreter/interpreterGenerator.hpp"
+ #include "interpreter/interpreterRuntime.hpp"
+ #include "interpreter/interp_masm.hpp"
+
+ #define __ _masm->
+
+ // Jump into normal path for accessor and empty entry to jump to normal entry
+ // The "fast" optimization don't update compilation count therefore can disable inlining
+ // for these functions that should be inlined.
+ address InterpreterGenerator::generate_jump_to_normal_entry(void) {
+ address entry_point = __ pc();
+
+ assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
+ __ jump(RuntimeAddress(Interpreter::entry_for_kind(Interpreter::zerolocals)));
+ return entry_point;
+ }
+
+ // Abstract method entry
+ // Attempt to execute abstract method. Throw exception
+ address InterpreterGenerator::generate_abstract_entry(void) {
+
+ address entry_point = __ pc();
+
+ // abstract method entry
+
+ #ifndef CC_INTERP
+ //  pop return address, reset last_sp to NULL
+ __ empty_expression_stack();
+ __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
+ __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
+ #endif
+
+ // throw exception
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
+ // the call_VM checks for exception, so we should never return here.
+ __ should_not_reach_here();
+
+ return entry_point;
+ }
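The new generate_jump_to_normal_entry() above simply emits a jump to the already generated zerolocals entry, so accessor and empty methods reuse the normal interpreter entry instead of a bespoke fast path. A standalone sketch of that trampoline idea (the table, kinds, and functions here are hypothetical stand-ins, not HotSpot's Interpreter::entry_for_kind()):

#include <cassert>
#include <cstdio>

using entry_t = void(*)();
enum Kind { zerolocals, accessor, empty, kind_count };
static entry_t entry_table[kind_count];   // models the interpreter's entry table

static void normal_entry() { std::puts("normal (zerolocals) entry"); }

// Equivalent of generate_jump_to_normal_entry(): hand back the normal entry
// so trivial method kinds share it rather than owning a separate stub.
static entry_t jump_to_normal_entry() {
  assert(entry_table[zerolocals] != nullptr && "should already be generated");
  return entry_table[zerolocals];
}

int main() {
  entry_table[zerolocals] = normal_entry;
  entry_table[accessor]   = jump_to_normal_entry();
  entry_table[empty]      = jump_to_normal_entry();
  entry_table[accessor]();   // dispatches to the shared normal entry
}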
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,8 +36,9 @@
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
- address generate_empty_entry(void);
+ address generate_jump_to_normal_entry(void);
- address generate_accessor_entry(void);
+ address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
+ address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_Reference_get_entry();
address generate_CRC32_update_entry();
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
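After this header change, the accessor and empty entries are thin inline aliases for the trampoline. A compressed sketch of the delegation pattern, with a hypothetical class name and a stubbed-out body rather than the real generator:

// Hypothetical model of the header pattern above.
class EntryGenerator {
 public:
  using address = unsigned char*;
  // In HotSpot this emits the jump stub; here it is stubbed out.
  address generate_jump_to_normal_entry() { return nullptr; }
  address generate_accessor_entry() { return generate_jump_to_normal_entry(); }
  address generate_empty_entry()    { return generate_jump_to_normal_entry(); }
};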
@@ -67,45 +67,6 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
}

- //
- // Various method entries (that c++ and asm interpreter agree upon)
- //------------------------------------------------------------------------------------------------------------------------
- //
- //
-
- // Empty method, generate a very fast return.
-
- address InterpreterGenerator::generate_empty_entry(void) {
-
- // rbx,: Method*
- // rcx: receiver (unused)
- // rsi: previous interpreter state (C++ interpreter) must preserve
- // rsi: sender sp must set sp to this value on return
-
- if (!UseFastEmptyMethods) return NULL;
-
- address entry_point = __ pc();
-
- // If we need a safepoint check, generate full interpreter entry.
- Label slow_path;
- ExternalAddress state(SafepointSynchronize::address_of_state());
- __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
- SafepointSynchronize::_not_synchronized);
- __ jcc(Assembler::notEqual, slow_path);
-
- // do nothing for empty methods (do not even increment invocation counter)
- // Code: _return
- // _return
- // return w/o popping parameters
- __ pop(rax);
- __ mov(rsp, rsi);
- __ jmp(rax);
-
- __ bind(slow_path);
- (void) generate_normal_entry(false);
- return entry_point;
- }

address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {

// rbx,: Method*
@@ -216,36 +177,6 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
}

- // Abstract method entry
- // Attempt to execute abstract method. Throw exception
- address InterpreterGenerator::generate_abstract_entry(void) {
-
- // rbx,: Method*
- // rcx: receiver (unused)
- // rsi: previous interpreter state (C++ interpreter) must preserve
-
- // rsi: sender SP
-
- address entry_point = __ pc();
-
- // abstract method entry
-
- #ifndef CC_INTERP
- //  pop return address, reset last_sp to NULL
- __ empty_expression_stack();
- __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
- __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
- #endif
-
- // throw exception
- __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
- // the call_VM checks for exception, so we should never return here.
- __ should_not_reach_here();
-
- return entry_point;
- }

void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {

// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
@@ -301,66 +301,6 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
return entry_point;
}

- // Abstract method entry
- // Attempt to execute abstract method. Throw exception
- address InterpreterGenerator::generate_abstract_entry(void) {
- // rbx: Method*
- // r13: sender SP
-
- address entry_point = __ pc();
-
- // abstract method entry
-
- #ifndef CC_INTERP
- //  pop return address, reset last_sp to NULL
- __ empty_expression_stack();
- __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
- __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
- #endif
-
- // throw exception
- __ call_VM(noreg, CAST_FROM_FN_PTR(address,
- InterpreterRuntime::throw_AbstractMethodError));
- // the call_VM checks for exception, so we should never return here.
- __ should_not_reach_here();
-
- return entry_point;
- }
-
- // Empty method, generate a very fast return.
-
- address InterpreterGenerator::generate_empty_entry(void) {
- // rbx: Method*
- // r13: sender sp must set sp to this value on return
-
- if (!UseFastEmptyMethods) {
- return NULL;
- }
-
- address entry_point = __ pc();
-
- // If we need a safepoint check, generate full interpreter entry.
- Label slow_path;
- __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
- SafepointSynchronize::_not_synchronized);
- __ jcc(Assembler::notEqual, slow_path);
-
- // do nothing for empty methods (do not even increment invocation counter)
- // Code: _return
- // _return
- // return w/o popping parameters
- __ pop(rax);
- __ mov(rsp, r13);
- __ jmp(rax);
-
- __ bind(slow_path);
- (void) generate_normal_entry(false);
- return entry_point;
-
- }

void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {

// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
@@ -38,7 +38,7 @@ int AbstractInterpreter::size_activation(int max_stack,
int callee_locals,
bool is_top_frame) {
// Note: This calculation must exactly parallel the frame setup
- // in AbstractInterpreterGenerator::generate_method_entry.
+ // in InterpreterGenerator::generate_fixed_frame.

// fixed size of an interpreter frame:
int overhead = frame::sender_sp_offset -
@@ -468,10 +468,10 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// rax,

// NOTE: since the additional locals are also always pushed (wasn't obvious in
- // generate_method_entry) so the guard should work for them too.
+ // generate_fixed_frame) so the guard should work for them too.
//

- // monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp
+ // monitor entry size: see picture of stack in frame_x86.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

// total overhead size: entry_size + (saved rbp, thru expr stack bottom).
@@ -633,145 +633,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

- // End of helpers
-
- //
- // Various method entries
- //------------------------------------------------------------------------------------------------------------------------
- //
- //
-
- // Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry
-
- address InterpreterGenerator::generate_accessor_entry(void) {
-
- // rbx,: Method*
- // rcx: receiver (preserve for slow entry into asm interpreter)
-
- // rsi: senderSP must preserved for slow path, set SP to it on fast path
-
- address entry_point = __ pc();
- Label xreturn_path;
-
- // do fastpath for resolved accessor methods
- if (UseFastAccessorMethods) {
- Label slow_path;
- // If we need a safepoint check, generate full interpreter entry.
- ExternalAddress state(SafepointSynchronize::address_of_state());
- __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
- SafepointSynchronize::_not_synchronized);
-
- __ jcc(Assembler::notEqual, slow_path);
- // ASM/C++ Interpreter
- // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
- // Note: We can only use this code if the getfield has been resolved
- // and if we don't have a null-pointer exception => check for
- // these conditions first and use slow path if necessary.
- // rbx,: method
- // rcx: receiver
- __ movptr(rax, Address(rsp, wordSize));
-
- // check if local 0 != NULL and read field
- __ testptr(rax, rax);
- __ jcc(Assembler::zero, slow_path);
-
- // read first instruction word and extract bytecode @ 1 and index @ 2
- __ movptr(rdx, Address(rbx, Method::const_offset()));
- __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
- __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
- // Shift codes right to get the index on the right.
- // The bytecode fetched looks like <index><0xb4><0x2a>
- __ shrl(rdx, 2*BitsPerByte);
- __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
- __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
-
- // rax,: local 0
- // rbx,: method
- // rcx: receiver - do not destroy since it is needed for slow path!
- // rcx: scratch
- // rdx: constant pool cache index
- // rdi: constant pool cache
- // rsi: sender sp
-
- // check if getfield has been resolved and read constant pool cache entry
- // check the validity of the cache entry by testing whether _indices field
- // contains Bytecode::_getfield in b1 byte.
- assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
- __ movl(rcx,
- Address(rdi,
- rdx,
- Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
- __ shrl(rcx, 2*BitsPerByte);
- __ andl(rcx, 0xFF);
- __ cmpl(rcx, Bytecodes::_getfield);
- __ jcc(Assembler::notEqual, slow_path);
-
- // Note: constant pool entry is not valid before bytecode is resolved
- __ movptr(rcx,
- Address(rdi,
- rdx,
- Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
- __ movl(rdx,
- Address(rdi,
- rdx,
- Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-
- Label notByte, notShort, notChar;
- const Address field_address (rax, rcx, Address::times_1);
-
- // Need to differentiate between igetfield, agetfield, bgetfield etc.
- // because they are different sizes.
- // Use the type from the constant pool cache
- __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
- // Make sure we don't need to mask rdx after the above shift
- ConstantPoolCacheEntry::verify_tos_state_shift();
- __ cmpl(rdx, btos);
- __ jcc(Assembler::notEqual, notByte);
- __ load_signed_byte(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notByte);
- __ cmpl(rdx, stos);
- __ jcc(Assembler::notEqual, notShort);
- __ load_signed_short(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notShort);
- __ cmpl(rdx, ctos);
- __ jcc(Assembler::notEqual, notChar);
- __ load_unsigned_short(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notChar);
- #ifdef ASSERT
- Label okay;
- __ cmpl(rdx, atos);
- __ jcc(Assembler::equal, okay);
- __ cmpl(rdx, itos);
- __ jcc(Assembler::equal, okay);
- __ stop("what type is this?");
- __ bind(okay);
- #endif // ASSERT
- // All the rest are a 32 bit wordsize
- // This is ok for now. Since fast accessors should be going away
- __ movptr(rax, field_address);
-
- __ bind(xreturn_path);
-
- // _ireturn/_areturn
- __ pop(rdi); // get return address
- __ mov(rsp, rsi); // set sp to sender sp
- __ jmp(rdi);
-
- // generate a vanilla interpreter entry as the slow path
- __ bind(slow_path);
-
- (void) generate_normal_entry(false);
- return entry_point;
- }
- return NULL;
-
- }

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
@@ -862,7 +723,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
- return generate_accessor_entry();
+ return generate_jump_to_normal_entry();
}

/**
@@ -1557,100 +1418,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}

- //------------------------------------------------------------------------------------------------------------------------
- // Entry points
- //
- // Here we generate the various kind of entries into the interpreter.
- // The two main entry type are generic bytecode methods and native call method.
- // These both come in synchronized and non-synchronized versions but the
- // frame layout they create is very similar. The other method entry
- // types are really just special purpose entries that are really entry
- // and interpretation all in one. These are for trivial methods like
- // accessor, empty, or special math methods.
- //
- // When control flow reaches any of the entry types for the interpreter
- // the following holds ->
- //
- // Arguments:
- //
- // rbx,: Method*
- // rcx: receiver
- //
- //
- // Stack layout immediately at entry
- //
- // [ return address ] <--- rsp
- // [ parameter n ]
- // ...
- // [ parameter 1 ]
- // [ expression stack ] (caller's java expression stack)
-
- // Assuming that we don't go to one of the trivial specialized
- // entries the stack will look like below when we are ready to execute
- // the first bytecode (or call the native routine). The register usage
- // will be as the template based interpreter expects (see interpreter_x86.hpp).
- //
- // local variables follow incoming parameters immediately; i.e.
- // the return address is moved to the end of the locals).
- //
- // [ monitor entry ] <--- rsp
- // ...
- // [ monitor entry ]
- // [ expr. stack bottom ]
- // [ saved rsi ]
- // [ current rdi ]
- // [ Method* ]
- // [ saved rbp, ] <--- rbp,
- // [ return address ]
- // [ local variable m ]
- // ...
- // [ local variable 1 ]
- // [ parameter n ]
- // ...
- // [ parameter 1 ] <--- rdi
-
- address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
- // determine code generation flags
- bool synchronized = false;
- address entry_point = NULL;
- InterpreterGenerator* ig_this = (InterpreterGenerator*)this;
-
- switch (kind) {
- case Interpreter::zerolocals : break;
- case Interpreter::zerolocals_synchronized: synchronized = true; break;
- case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break;
- case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break;
- case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break;
- case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break;
- case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break;
-
- case Interpreter::java_lang_math_sin : // fall thru
- case Interpreter::java_lang_math_cos : // fall thru
- case Interpreter::java_lang_math_tan : // fall thru
- case Interpreter::java_lang_math_abs : // fall thru
- case Interpreter::java_lang_math_log : // fall thru
- case Interpreter::java_lang_math_log10 : // fall thru
- case Interpreter::java_lang_math_sqrt : // fall thru
- case Interpreter::java_lang_math_pow : // fall thru
- case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break;
- case Interpreter::java_lang_ref_reference_get
- : entry_point = ig_this->generate_Reference_get_entry(); break;
- case Interpreter::java_util_zip_CRC32_update
- : entry_point = ig_this->generate_CRC32_update_entry(); break;
- case Interpreter::java_util_zip_CRC32_updateBytes
- : // fall thru
- case Interpreter::java_util_zip_CRC32_updateByteBuffer
- : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
- default:
- fatal(err_msg("unexpected method kind: %d", kind));
- break;
- }
-
- if (entry_point) return entry_point;
-
- return ig_this->generate_normal_entry(synchronized);
-
- }
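The deleted generate_method_entry() above picked an entry stub per method kind and fell back to the normal entry when no specialized stub applied. A compact standalone model of that dispatch shape (the enum, functions, and return values are hypothetical, chosen only to show the pattern):

#include <cstdio>

enum MethodKind { zerolocals, zerolocals_synchronized, native_call,
                  native_synchronized, empty_method, accessor, abstract_method };

using address = const char*;
static address normal_entry(bool sync)   { return sync ? "normal(sync)" : "normal"; }
static address native_entry(bool sync)   { return sync ? "native(sync)" : "native"; }
static address abstract_entry()          { return "abstract"; }

// Models the removed switch: specialized kinds get their own entry,
// everything else (including accessor/empty now) uses the normal entry.
static address method_entry(MethodKind kind) {
  bool synchronized = false;
  address entry = nullptr;
  switch (kind) {
    case zerolocals:              break;
    case zerolocals_synchronized: synchronized = true; break;
    case native_call:             entry = native_entry(false); break;
    case native_synchronized:     entry = native_entry(true);  break;
    case empty_method:            // fall through: no fast path any more
    case accessor:                break;
    case abstract_method:         entry = abstract_entry(); break;
  }
  return entry != nullptr ? entry : normal_entry(synchronized);
}

int main() { std::printf("%s\n", method_entry(accessor)); }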
|
|
||||||
|
|
||||||
// These should never be compiled since the interpreter will prefer
|
// These should never be compiled since the interpreter will prefer
|
||||||
// the compiled version to the intrinsic version.
|
// the compiled version to the intrinsic version.
|
||||||
|
@ -400,7 +400,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
|
|||||||
// page mechanism will work for that.
|
// page mechanism will work for that.
|
||||||
//
|
//
|
||||||
// NOTE: Since the additional locals are also always pushed (wasn't
|
// NOTE: Since the additional locals are also always pushed (wasn't
|
||||||
// obvious in generate_method_entry) so the guard should work for them
|
// obvious in generate_fixed_frame) so the guard should work for them
|
||||||
// too.
|
// too.
|
||||||
//
|
//
|
||||||
// Args:
|
// Args:
|
||||||
@ -411,8 +411,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
|
|||||||
// rax
|
// rax
|
||||||
void InterpreterGenerator::generate_stack_overflow_check(void) {
|
void InterpreterGenerator::generate_stack_overflow_check(void) {
|
||||||
|
|
||||||
// monitor entry size: see picture of stack set
|
// monitor entry size: see picture of stack in frame_x86.hpp
|
||||||
// (generate_method_entry) and frame_amd64.hpp
|
|
||||||
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
|
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
|
||||||
|
|
||||||
// total overhead size: entry_size + (saved rbp through expr stack
|
// total overhead size: entry_size + (saved rbp through expr stack
|
||||||
@ -600,153 +599,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
|
|||||||
|
|
||||||
// End of helpers
|
// End of helpers
|
||||||
|
|
||||||
// Various method entries
|
|
||||||
//------------------------------------------------------------------------------------------------------------------------
|
|
||||||
//
|
|
||||||
//
|
|
||||||
|
|
||||||
// Call an accessor method (assuming it is resolved, otherwise drop
|
|
||||||
// into vanilla (slow path) entry
|
|
||||||
address InterpreterGenerator::generate_accessor_entry(void) {
|
|
||||||
// rbx: Method*
|
|
||||||
|
|
||||||
// r13: senderSP must preserver for slow path, set SP to it on fast path
|
|
||||||
|
|
||||||
address entry_point = __ pc();
|
|
||||||
Label xreturn_path;
|
|
||||||
|
|
||||||
// do fastpath for resolved accessor methods
|
|
||||||
if (UseFastAccessorMethods) {
|
|
||||||
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites
|
|
||||||
// thereof; parameter size = 1
|
|
||||||
// Note: We can only use this code if the getfield has been resolved
|
|
||||||
// and if we don't have a null-pointer exception => check for
|
|
||||||
// these conditions first and use slow path if necessary.
|
|
||||||
Label slow_path;
|
|
||||||
// If we need a safepoint check, generate full interpreter entry.
|
|
||||||
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
|
|
||||||
SafepointSynchronize::_not_synchronized);
|
|
||||||
|
|
||||||
__ jcc(Assembler::notEqual, slow_path);
|
|
||||||
// rbx: method
|
|
||||||
__ movptr(rax, Address(rsp, wordSize));
|
|
||||||
|
|
||||||
// check if local 0 != NULL and read field
|
|
||||||
__ testptr(rax, rax);
|
|
||||||
__ jcc(Assembler::zero, slow_path);
|
|
||||||
|
|
||||||
// read first instruction word and extract bytecode @ 1 and index @ 2
|
|
||||||
__ movptr(rdx, Address(rbx, Method::const_offset()));
|
|
||||||
__ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
|
|
||||||
__ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
|
|
||||||
// Shift codes right to get the index on the right.
|
|
||||||
// The bytecode fetched looks like <index><0xb4><0x2a>
|
|
||||||
__ shrl(rdx, 2 * BitsPerByte);
|
|
||||||
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
|
|
||||||
__ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
|
|
||||||
|
|
||||||
// rax: local 0
|
|
||||||
// rbx: method
|
|
||||||
// rdx: constant pool cache index
|
|
||||||
// rdi: constant pool cache
|
|
||||||
|
|
||||||
// check if getfield has been resolved and read constant pool cache entry
|
|
||||||
// check the validity of the cache entry by testing whether _indices field
|
|
||||||
// contains Bytecode::_getfield in b1 byte.
|
|
||||||
assert(in_words(ConstantPoolCacheEntry::size()) == 4,
|
|
||||||
"adjust shift below");
|
|
||||||
__ movl(rcx,
|
|
||||||
Address(rdi,
|
|
||||||
rdx,
|
|
||||||
Address::times_8,
|
|
||||||
ConstantPoolCache::base_offset() +
|
|
||||||
ConstantPoolCacheEntry::indices_offset()));
|
|
||||||
__ shrl(rcx, 2 * BitsPerByte);
|
|
||||||
__ andl(rcx, 0xFF);
|
|
||||||
__ cmpl(rcx, Bytecodes::_getfield);
|
|
||||||
__ jcc(Assembler::notEqual, slow_path);
|
|
||||||
|
|
||||||
// Note: constant pool entry is not valid before bytecode is resolved
|
|
||||||
__ movptr(rcx,
|
|
||||||
Address(rdi,
|
|
||||||
rdx,
|
|
||||||
Address::times_8,
|
|
||||||
ConstantPoolCache::base_offset() +
|
|
||||||
ConstantPoolCacheEntry::f2_offset()));
|
|
||||||
// edx: flags
|
|
||||||
__ movl(rdx,
|
|
||||||
Address(rdi,
|
|
||||||
rdx,
|
|
||||||
Address::times_8,
|
|
||||||
ConstantPoolCache::base_offset() +
|
|
||||||
ConstantPoolCacheEntry::flags_offset()));
|
|
||||||
|
|
||||||
Label notObj, notInt, notByte, notShort;
|
|
||||||
const Address field_address(rax, rcx, Address::times_1);
|
|
||||||
|
|
||||||
// Need to differentiate between igetfield, agetfield, bgetfield etc.
|
|
||||||
// because they are different sizes.
|
|
||||||
// Use the type from the constant pool cache
|
|
||||||
__ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
|
|
||||||
// Make sure we don't need to mask edx after the above shift
|
|
||||||
ConstantPoolCacheEntry::verify_tos_state_shift();
|
|
||||||
|
|
||||||
__ cmpl(rdx, atos);
|
|
||||||
__ jcc(Assembler::notEqual, notObj);
|
|
||||||
// atos
|
|
||||||
__ load_heap_oop(rax, field_address);
|
|
||||||
__ jmp(xreturn_path);
|
|
||||||
|
|
||||||
__ bind(notObj);
|
|
||||||
__ cmpl(rdx, itos);
|
|
||||||
__ jcc(Assembler::notEqual, notInt);
|
|
||||||
// itos
|
|
||||||
__ movl(rax, field_address);
|
|
||||||
__ jmp(xreturn_path);
|
|
||||||
|
|
||||||
__ bind(notInt);
|
|
||||||
__ cmpl(rdx, btos);
|
|
||||||
__ jcc(Assembler::notEqual, notByte);
|
|
||||||
// btos
|
|
||||||
__ load_signed_byte(rax, field_address);
|
|
||||||
__ jmp(xreturn_path);
|
|
||||||
|
|
||||||
__ bind(notByte);
|
|
||||||
__ cmpl(rdx, stos);
|
|
||||||
__ jcc(Assembler::notEqual, notShort);
|
|
||||||
// stos
|
|
||||||
__ load_signed_short(rax, field_address);
|
|
||||||
__ jmp(xreturn_path);
|
|
||||||
|
|
||||||
__ bind(notShort);
|
|
||||||
#ifdef ASSERT
|
|
||||||
Label okay;
|
|
||||||
__ cmpl(rdx, ctos);
|
|
||||||
__ jcc(Assembler::equal, okay);
|
|
||||||
__ stop("what type is this?");
|
|
||||||
__ bind(okay);
|
|
||||||
#endif
|
|
||||||
// ctos
|
|
||||||
__ load_unsigned_short(rax, field_address);
|
|
||||||
|
|
||||||
__ bind(xreturn_path);
|
|
||||||
|
|
||||||
// _ireturn/_areturn
|
|
||||||
__ pop(rdi);
|
|
||||||
__ mov(rsp, r13);
|
|
||||||
__ jmp(rdi);
|
|
||||||
__ ret(0);
|
|
||||||
|
|
||||||
// generate a vanilla interpreter entry as the slow path
|
|
||||||
__ bind(slow_path);
|
|
||||||
(void) generate_normal_entry(false);
|
|
||||||
} else {
|
|
||||||
(void) generate_normal_entry(false);
|
|
||||||
}
|
|
||||||
|
|
||||||
return entry_point;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Method entry for java.lang.ref.Reference.get.
|
// Method entry for java.lang.ref.Reference.get.
|
||||||
address InterpreterGenerator::generate_Reference_get_entry(void) {
|
address InterpreterGenerator::generate_Reference_get_entry(void) {
|
||||||
#if INCLUDE_ALL_GCS
|
#if INCLUDE_ALL_GCS
|
||||||
@ -773,8 +625,6 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
|
|||||||
// and so we don't need to call the G1 pre-barrier. Thus we can use the
|
// and so we don't need to call the G1 pre-barrier. Thus we can use the
|
||||||
// regular method entry code to generate the NPE.
|
// regular method entry code to generate the NPE.
|
||||||
//
|
//
|
||||||
// This code is based on generate_accessor_enty.
|
|
||||||
//
|
|
||||||
// rbx: Method*
|
// rbx: Method*
|
||||||
|
|
||||||
// r13: senderSP must preserve for slow path, set SP to it on fast path
|
// r13: senderSP must preserve for slow path, set SP to it on fast path
|
||||||
@ -832,7 +682,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
|
|||||||
|
|
||||||
// If G1 is not enabled then attempt to go through the accessor entry point
|
// If G1 is not enabled then attempt to go through the accessor entry point
|
||||||
// Reference.get is an accessor
|
// Reference.get is an accessor
|
||||||
return generate_accessor_entry();
|
return generate_jump_to_normal_entry();
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -1566,100 +1416,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
|
|||||||
return entry_point;
|
return entry_point;
|
||||||
}
|
}
|
||||||
|
|
||||||
- // Entry points
- //
- // Here we generate the various kind of entries into the interpreter.
- // The two main entry type are generic bytecode methods and native
- // call method. These both come in synchronized and non-synchronized
- // versions but the frame layout they create is very similar. The
- // other method entry types are really just special purpose entries
- // that are really entry and interpretation all in one. These are for
- // trivial methods like accessor, empty, or special math methods.
- //
- // When control flow reaches any of the entry types for the interpreter
- // the following holds ->
- //
- // Arguments:
- //
- // rbx: Method*
- //
- // Stack layout immediately at entry
- //
- // [ return address     ] <--- rsp
- // [ parameter n        ]
- //   ...
- // [ parameter 1        ]
- // [ expression stack   ] (caller's java expression stack)
-
- // Assuming that we don't go to one of the trivial specialized entries
- // the stack will look like below when we are ready to execute the
- // first bytecode (or call the native routine). The register usage
- // will be as the template based interpreter expects (see
- // interpreter_amd64.hpp).
- //
- // local variables follow incoming parameters immediately; i.e.
- // the return address is moved to the end of the locals).
- //
- // [ monitor entry      ] <--- rsp
- //   ...
- // [ monitor entry      ]
- // [ expr. stack bottom ]
- // [ saved r13          ]
- // [ current r14        ]
- // [ Method*            ]
- // [ saved ebp          ] <--- rbp
- // [ return address     ]
- // [ local variable m   ]
- //   ...
- // [ local variable 1   ]
- // [ parameter n        ]
- //   ...
- // [ parameter 1        ] <--- r14
-
- address AbstractInterpreterGenerator::generate_method_entry(
-     AbstractInterpreter::MethodKind kind) {
-   // determine code generation flags
-   bool synchronized = false;
-   address entry_point = NULL;
-   InterpreterGenerator* ig_this = (InterpreterGenerator*)this;
-
-   switch (kind) {
-   case Interpreter::zerolocals : break;
-   case Interpreter::zerolocals_synchronized: synchronized = true; break;
-   case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break;
-   case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break;
-   case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break;
-   case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break;
-   case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break;
-
-   case Interpreter::java_lang_math_sin : // fall thru
-   case Interpreter::java_lang_math_cos : // fall thru
-   case Interpreter::java_lang_math_tan : // fall thru
-   case Interpreter::java_lang_math_abs : // fall thru
-   case Interpreter::java_lang_math_log : // fall thru
-   case Interpreter::java_lang_math_log10 : // fall thru
-   case Interpreter::java_lang_math_sqrt : // fall thru
-   case Interpreter::java_lang_math_pow : // fall thru
-   case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break;
-   case Interpreter::java_lang_ref_reference_get
-     : entry_point = ig_this->generate_Reference_get_entry(); break;
-   case Interpreter::java_util_zip_CRC32_update
-     : entry_point = ig_this->generate_CRC32_update_entry(); break;
-   case Interpreter::java_util_zip_CRC32_updateBytes
-     : // fall thru
-   case Interpreter::java_util_zip_CRC32_updateByteBuffer
-     : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
-   default:
-     fatal(err_msg("unexpected method kind: %d", kind));
-     break;
-   }
-
-   if (entry_point) {
-     return entry_point;
-   }
-
-   return ig_this->generate_normal_entry(synchronized);
- }

  // These should never be compiled since the interpreter will prefer
  // the compiled version to the intrinsic version.
@@ -27,6 +27,7 @@
  #include "asm/macroAssembler.inline.hpp"
  #include "memory/resourceArea.hpp"
  #include "runtime/java.hpp"
+ #include "runtime/os.hpp"
  #include "runtime/stubCodeGenerator.hpp"
  #include "vm_version_x86.hpp"

@@ -514,7 +515,7 @@ void VM_Version::get_processor_features() {
               (supports_tscinv() ? ", tscinv": ""),
               (supports_bmi1() ? ", bmi1" : ""),
               (supports_bmi2() ? ", bmi2" : ""));
-   _features_str = strdup(buf);
+   _features_str = os::strdup(buf);

    // UseSSE is set to the smaller of what hardware supports and what
    // the command line requires. I.e., you cannot set UseSSE to 2 on
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -831,60 +831,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  return generate_entry((address) CppInterpreter::normal_entry);
  }

- address AbstractInterpreterGenerator::generate_method_entry(
-     AbstractInterpreter::MethodKind kind) {
-   address entry_point = NULL;
-
-   switch (kind) {
-   case Interpreter::zerolocals:
-   case Interpreter::zerolocals_synchronized:
-     break;
-
-   case Interpreter::native:
-     entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false);
-     break;
-
-   case Interpreter::native_synchronized:
-     entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false);
-     break;
-
-   case Interpreter::empty:
-     entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();
-     break;
-
-   case Interpreter::accessor:
-     entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();
-     break;
-
-   case Interpreter::abstract:
-     entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();
-     break;
-
-   case Interpreter::java_lang_math_sin:
-   case Interpreter::java_lang_math_cos:
-   case Interpreter::java_lang_math_tan:
-   case Interpreter::java_lang_math_abs:
-   case Interpreter::java_lang_math_log:
-   case Interpreter::java_lang_math_log10:
-   case Interpreter::java_lang_math_sqrt:
-   case Interpreter::java_lang_math_pow:
-   case Interpreter::java_lang_math_exp:
-     entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);
-     break;
-
-   case Interpreter::java_lang_ref_reference_get:
-     entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry();
-     break;
-
-   default:
-     ShouldNotReachHere();
-   }
-
-   if (entry_point == NULL)
-     entry_point = ((InterpreterGenerator*) this)->generate_normal_entry(false);
-
-   return entry_point;
- }
-
  InterpreterGenerator::InterpreterGenerator(StubQueue* code)
      : CppInterpreterGenerator(code) {
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -61,6 +61,12 @@ define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS

  define_pd_global(uintx, TypeProfileLevel, 0);

- #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
+ #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
+   product(bool, UseFastEmptyMethods, true, \
+           "Use fast method entry code for empty methods") \
+   \
+   product(bool, UseFastAccessorMethods, true, \
+           "Use fast method entry code for accessor methods") \
+   \

  #endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -39,4 +39,7 @@
  address generate_accessor_entry();
  address generate_Reference_get_entry();

+ // Not supported
+ address generate_CRC32_update_entry() { return NULL; }
+ address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
  #endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP
|
@ -58,6 +58,7 @@
|
|||||||
#include "runtime/mutexLocker.hpp"
|
#include "runtime/mutexLocker.hpp"
|
||||||
#include "runtime/objectMonitor.hpp"
|
#include "runtime/objectMonitor.hpp"
|
||||||
#include "runtime/orderAccess.inline.hpp"
|
#include "runtime/orderAccess.inline.hpp"
|
||||||
|
#include "runtime/os.hpp"
|
||||||
#include "runtime/osThread.hpp"
|
#include "runtime/osThread.hpp"
|
||||||
#include "runtime/perfMemory.hpp"
|
#include "runtime/perfMemory.hpp"
|
||||||
#include "runtime/sharedRuntime.hpp"
|
#include "runtime/sharedRuntime.hpp"
|
||||||
@ -378,10 +379,10 @@ void os::Aix::query_multipage_support() {
|
|||||||
// default should be 4K.
|
// default should be 4K.
|
||||||
size_t data_page_size = SIZE_4K;
|
size_t data_page_size = SIZE_4K;
|
||||||
{
|
{
|
||||||
void* p = ::malloc(SIZE_16M);
|
void* p = os::malloc(SIZE_16M, mtInternal);
|
||||||
guarantee(p != NULL, "malloc failed");
|
guarantee(p != NULL, "malloc failed");
|
||||||
data_page_size = os::Aix::query_pagesize(p);
|
data_page_size = os::Aix::query_pagesize(p);
|
||||||
::free(p);
|
os::free(p);
|
||||||
}
|
}
|
||||||
|
|
||||||
// query default shm page size (LDR_CNTRL SHMPSIZE)
|
// query default shm page size (LDR_CNTRL SHMPSIZE)
|
||||||
|
@ -24,6 +24,8 @@
|
|||||||
|
|
||||||
#include "asm/assembler.hpp"
|
#include "asm/assembler.hpp"
|
||||||
#include "memory/allocation.hpp"
|
#include "memory/allocation.hpp"
|
||||||
|
#include "memory/allocation.inline.hpp"
|
||||||
|
#include "runtime/os.hpp"
|
||||||
#include "loadlib_aix.hpp"
|
#include "loadlib_aix.hpp"
|
||||||
#include "porting_aix.hpp"
|
#include "porting_aix.hpp"
|
||||||
#include "utilities/debug.hpp"
|
#include "utilities/debug.hpp"
|
||||||
@ -83,7 +85,7 @@ class fixed_strings {
|
|||||||
while (n) {
|
while (n) {
|
||||||
node* p = n;
|
node* p = n;
|
||||||
n = n->next;
|
n = n->next;
|
||||||
free(p->v);
|
os::free(p->v);
|
||||||
delete p;
|
delete p;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -95,7 +97,7 @@ class fixed_strings {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
node* p = new node;
|
node* p = new node;
|
||||||
p->v = strdup(s);
|
p->v = os::strdup_check_oom(s);
|
||||||
p->next = first;
|
p->next = first;
|
||||||
first = p;
|
first = p;
|
||||||
return p->v;
|
return p->v;
|
||||||
|
@ -2439,23 +2439,25 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// The memory is committed
|
// The memory is committed
|
||||||
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
|
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
|
||||||
|
|
||||||
return addr;
|
return addr;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool os::release_memory_special(char* base, size_t bytes) {
|
bool os::release_memory_special(char* base, size_t bytes) {
|
||||||
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
|
if (MemTracker::tracking_level() > NMT_minimal) {
|
||||||
// detaching the SHM segment will also delete it, see reserve_memory_special()
|
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
|
||||||
int rslt = shmdt(base);
|
// detaching the SHM segment will also delete it, see reserve_memory_special()
|
||||||
if (rslt == 0) {
|
int rslt = shmdt(base);
|
||||||
tkr.record((address)base, bytes);
|
if (rslt == 0) {
|
||||||
return true;
|
tkr.record((address)base, bytes);
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
tkr.discard();
|
return shmdt(base) == 0;
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t os::large_page_size() {
|
size_t os::large_page_size() {
|
||||||
|
@ -753,7 +753,7 @@ static char* mmap_create_shared(size_t size) {
|
|||||||
(void)::memset((void*) mapAddress, 0, size);
|
(void)::memset((void*) mapAddress, 0, size);
|
||||||
|
|
||||||
// it does not go through os api, the operation has to record from here
|
// it does not go through os api, the operation has to record from here
|
||||||
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
|
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
|
||||||
|
|
||||||
return mapAddress;
|
return mapAddress;
|
||||||
}
|
}
|
||||||
@ -918,7 +918,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
|
|||||||
}
|
}
|
||||||
|
|
||||||
// it does not go through os api, the operation has to record from here
|
// it does not go through os api, the operation has to record from here
|
||||||
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
|
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
|
||||||
|
|
||||||
*addr = mapAddress;
|
*addr = mapAddress;
|
||||||
*sizep = size;
|
*sizep = size;
|
||||||
|
@ -3504,9 +3504,12 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al
|
|||||||
|
|
||||||
assert(is_ptr_aligned(start, alignment), "Must be");
|
assert(is_ptr_aligned(start, alignment), "Must be");
|
||||||
|
|
||||||
// os::reserve_memory_special will record this memory area.
|
if (MemTracker::tracking_level() > NMT_minimal) {
|
||||||
// Need to release it here to prevent overlapping reservations.
|
// os::reserve_memory_special will record this memory area.
|
||||||
MemTracker::record_virtual_memory_release((address)start, bytes);
|
// Need to release it here to prevent overlapping reservations.
|
||||||
|
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
|
||||||
|
tkr.record((address)start, bytes);
|
||||||
|
}
|
||||||
|
|
||||||
char* end = start + bytes;
|
char* end = start + bytes;
|
||||||
|
|
||||||
@ -3601,7 +3604,7 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// The memory is committed
|
// The memory is committed
|
||||||
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
|
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
|
||||||
}
|
}
|
||||||
|
|
||||||
return addr;
|
return addr;
|
||||||
@ -3617,24 +3620,30 @@ bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool os::release_memory_special(char* base, size_t bytes) {
|
bool os::release_memory_special(char* base, size_t bytes) {
|
||||||
assert(UseLargePages, "only for large pages");
|
|
||||||
|
|
||||||
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
|
|
||||||
|
|
||||||
bool res;
|
bool res;
|
||||||
|
if (MemTracker::tracking_level() > NMT_minimal) {
|
||||||
|
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
|
||||||
|
res = os::Linux::release_memory_special_impl(base, bytes);
|
||||||
|
if (res) {
|
||||||
|
tkr.record((address)base, bytes);
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
res = os::Linux::release_memory_special_impl(base, bytes);
|
||||||
|
}
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool os::Linux::release_memory_special_impl(char* base, size_t bytes) {
|
||||||
|
assert(UseLargePages, "only for large pages");
|
||||||
|
bool res;
|
||||||
|
|
||||||
if (UseSHM) {
|
if (UseSHM) {
|
||||||
res = os::Linux::release_memory_special_shm(base, bytes);
|
res = os::Linux::release_memory_special_shm(base, bytes);
|
||||||
} else {
|
} else {
|
||||||
assert(UseHugeTLBFS, "must be");
|
assert(UseHugeTLBFS, "must be");
|
||||||
res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
|
res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (res) {
|
|
||||||
tkr.record((address)base, bytes);
|
|
||||||
} else {
|
|
||||||
tkr.discard();
|
|
||||||
}
|
|
||||||
|
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -108,6 +108,7 @@ class Linux {
|
|||||||
static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
|
static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
|
||||||
static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
|
static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
|
||||||
|
|
||||||
|
static bool release_memory_special_impl(char* base, size_t bytes);
|
||||||
static bool release_memory_special_shm(char* base, size_t bytes);
|
static bool release_memory_special_shm(char* base, size_t bytes);
|
||||||
static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
|
static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
|
||||||
|
|
||||||
|
@ -753,7 +753,7 @@ static char* mmap_create_shared(size_t size) {
|
|||||||
(void)::memset((void*) mapAddress, 0, size);
|
(void)::memset((void*) mapAddress, 0, size);
|
||||||
|
|
||||||
// it does not go through os api, the operation has to record from here
|
// it does not go through os api, the operation has to record from here
|
||||||
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
|
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
|
||||||
|
|
||||||
return mapAddress;
|
return mapAddress;
|
||||||
}
|
}
|
||||||
@ -924,7 +924,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
|
|||||||
}
|
}
|
||||||
|
|
||||||
// it does not go through os api, the operation has to record from here
|
// it does not go through os api, the operation has to record from here
|
||||||
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
|
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
|
||||||
|
|
||||||
*addr = mapAddress;
|
*addr = mapAddress;
|
||||||
*sizep = size;
|
*sizep = size;
|
||||||
|
@ -75,21 +75,41 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
|
|||||||
  VMError::report_coredump_status(buffer, success);
  }

- address os::get_caller_pc(int n) {
+ int os::get_native_stack(address* stack, int frames, int toSkip) {
  #ifdef _NMT_NOINLINE_
-   n ++;
+   toSkip++;
  #endif

+   int frame_idx = 0;
+   int num_of_frames;  // number of frames captured
    frame fr = os::current_frame();
-   while (n > 0 && fr.pc() &&
-       !os::is_first_C_frame(&fr) && fr.sender_pc()) {
-     fr = os::get_sender_for_C_frame(&fr);
-     n --;
+   while (fr.pc() && frame_idx < frames) {
+     if (toSkip > 0) {
+       toSkip --;
+     } else {
+       stack[frame_idx ++] = fr.pc();
+     }
+     if (fr.fp() == NULL || os::is_first_C_frame(&fr)
+         ||fr.sender_pc() == NULL || fr.cb() != NULL) break;
+
+     if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
+       fr = os::get_sender_for_C_frame(&fr);
+     } else {
+       break;
+     }
    }
-   if (n == 0) {
-     return fr.pc();
-   } else {
-     return NULL;
-   }
+   num_of_frames = frame_idx;
+   for (; frame_idx < frames; frame_idx ++) {
+     stack[frame_idx] = NULL;
+   }
+
+   return num_of_frames;
+ }
+
+
+ bool os::unsetenv(const char* name) {
+   assert(name != NULL, "Null pointer");
+   return (::unsetenv(name) == 0);
  }

  int os::get_last_error() {
|
@ -770,7 +770,8 @@ static char* mmap_create_shared(size_t size) {
|
|||||||
(void)::memset((void*) mapAddress, 0, size);
|
(void)::memset((void*) mapAddress, 0, size);
|
||||||
|
|
||||||
// it does not go through os api, the operation has to record from here
|
// it does not go through os api, the operation has to record from here
|
||||||
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
|
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
|
||||||
|
size, CURRENT_PC, mtInternal);
|
||||||
|
|
||||||
return mapAddress;
|
return mapAddress;
|
||||||
}
|
}
|
||||||
@ -941,7 +942,8 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
|
|||||||
}
|
}
|
||||||
|
|
||||||
// it does not go through os api, the operation has to record from here
|
// it does not go through os api, the operation has to record from here
|
||||||
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
|
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
|
||||||
|
size, CURRENT_PC, mtInternal);
|
||||||
|
|
||||||
*addr = mapAddress;
|
*addr = mapAddress;
|
||||||
*sizep = size;
|
*sizep = size;
|
||||||
|
@ -138,9 +138,8 @@ BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
|
|||||||
// Workaround for issue when a custom launcher doesn't call
|
// Workaround for issue when a custom launcher doesn't call
|
||||||
// DestroyJavaVM and NMT is trying to track memory when free is
|
// DestroyJavaVM and NMT is trying to track memory when free is
|
||||||
// called from a static destructor
|
// called from a static destructor
|
||||||
if (MemTracker::is_on()) {
|
MemTracker::shutdown();
|
||||||
MemTracker::shutdown(MemTracker::NMT_normal);
|
|
||||||
}
|
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
@ -163,6 +162,10 @@ bool os::getenv(const char* name, char* buffer, int len) {
|
|||||||
return result > 0 && result < len;
|
return result > 0 && result < len;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool os::unsetenv(const char* name) {
|
||||||
|
assert(name != NULL, "Null pointer");
|
||||||
|
return (SetEnvironmentVariable(name, NULL) == TRUE);
|
||||||
|
}
|
||||||
|
|
||||||
// No setuid programs under Windows.
|
// No setuid programs under Windows.
|
||||||
bool os::have_special_privileges() {
|
bool os::have_special_privileges() {
|
||||||
@ -319,15 +322,16 @@ extern "C" void breakpoint() {
|
|||||||
  * So far, this method is only used by Native Memory Tracking, which is
  * only supported on Windows XP or later.
  */
- address os::get_caller_pc(int n) {
+ int os::get_native_stack(address* stack, int frames, int toSkip) {
  #ifdef _NMT_NOINLINE_
-   n++;
+   toSkip ++;
  #endif
-   address pc;
-   if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) {
-     return pc;
+   int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
+     (PVOID*)stack, NULL);
+   for (int index = captured; index < frames; index ++) {
+     stack[index] = NULL;
    }
-   return NULL;
+   return captured;
  }

|
|
||||||
@ -2901,7 +2905,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
|
|||||||
PAGE_READWRITE);
|
PAGE_READWRITE);
|
||||||
// If reservation failed, return NULL
|
// If reservation failed, return NULL
|
||||||
if (p_buf == NULL) return NULL;
|
if (p_buf == NULL) return NULL;
|
||||||
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
|
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
|
||||||
os::release_memory(p_buf, bytes + chunk_size);
|
os::release_memory(p_buf, bytes + chunk_size);
|
||||||
|
|
||||||
// we still need to round up to a page boundary (in case we are using large pages)
|
// we still need to round up to a page boundary (in case we are using large pages)
|
||||||
@ -2967,7 +2971,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
|
|||||||
// need to create a dummy 'reserve' record to match
|
// need to create a dummy 'reserve' record to match
|
||||||
// the release.
|
// the release.
|
||||||
MemTracker::record_virtual_memory_reserve((address)p_buf,
|
MemTracker::record_virtual_memory_reserve((address)p_buf,
|
||||||
bytes_to_release, mtNone, CALLER_PC);
|
bytes_to_release, CALLER_PC);
|
||||||
os::release_memory(p_buf, bytes_to_release);
|
os::release_memory(p_buf, bytes_to_release);
|
||||||
}
|
}
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
@ -2986,11 +2990,10 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
|
|||||||
}
|
}
|
||||||
// Although the memory is allocated individually, it is returned as one.
|
// Although the memory is allocated individually, it is returned as one.
|
||||||
// NMT records it as one block.
|
// NMT records it as one block.
|
||||||
address pc = CALLER_PC;
|
|
||||||
if ((flags & MEM_COMMIT) != 0) {
|
if ((flags & MEM_COMMIT) != 0) {
|
||||||
MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
|
MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
|
||||||
} else {
|
} else {
|
||||||
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
|
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
|
||||||
}
|
}
|
||||||
|
|
||||||
// made it this far, success
|
// made it this far, success
|
||||||
@ -3188,8 +3191,7 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, boo
|
|||||||
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
|
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
|
||||||
char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
|
char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
|
||||||
if (res != NULL) {
|
if (res != NULL) {
|
||||||
address pc = CALLER_PC;
|
MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
|
||||||
MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return res;
|
return res;
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -29,6 +29,7 @@
|
|||||||
#include "oops/oop.inline.hpp"
|
#include "oops/oop.inline.hpp"
|
||||||
#include "os_windows.inline.hpp"
|
#include "os_windows.inline.hpp"
|
||||||
#include "runtime/handles.inline.hpp"
|
#include "runtime/handles.inline.hpp"
|
||||||
|
#include "runtime/os.hpp"
|
||||||
#include "runtime/perfMemory.hpp"
|
#include "runtime/perfMemory.hpp"
|
||||||
#include "services/memTracker.hpp"
|
#include "services/memTracker.hpp"
|
||||||
#include "utilities/exceptions.hpp"
|
#include "utilities/exceptions.hpp"
|
||||||
@ -1388,7 +1389,7 @@ static HANDLE create_sharedmem_resources(const char* dirname, const char* filena
|
|||||||
// the file has been successfully created and the file mapping
|
// the file has been successfully created and the file mapping
|
||||||
// object has been created.
|
// object has been created.
|
||||||
sharedmem_fileHandle = fh;
|
sharedmem_fileHandle = fh;
|
||||||
sharedmem_fileName = strdup(filename);
|
sharedmem_fileName = os::strdup(filename);
|
||||||
|
|
||||||
return fmh;
|
return fmh;
|
||||||
}
|
}
|
||||||
@ -1498,7 +1499,8 @@ static char* mapping_create_shared(size_t size) {
|
|||||||
(void)memset(mapAddress, '\0', size);
|
(void)memset(mapAddress, '\0', size);
|
||||||
|
|
||||||
// it does not go through os api, the operation has to record from here
|
// it does not go through os api, the operation has to record from here
|
||||||
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
|
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
|
||||||
|
size, CURRENT_PC, mtInternal);
|
||||||
|
|
||||||
return (char*) mapAddress;
|
return (char*) mapAddress;
|
||||||
}
|
}
|
||||||
@ -1680,7 +1682,8 @@ static void open_file_mapping(const char* user, int vmid,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// it does not go through os api, the operation has to record from here
|
// it does not go through os api, the operation has to record from here
|
||||||
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
|
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size,
|
||||||
|
CURRENT_PC, mtInternal);
|
||||||
|
|
||||||
|
|
||||||
*addrp = (char*)mapAddress;
|
*addrp = (char*)mapAddress;
|
||||||
@ -1834,10 +1837,14 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
|
if (MemTracker::tracking_level() > NMT_minimal) {
|
||||||
remove_file_mapping(addr);
|
// it does not go through os api, the operation has to record from here
|
||||||
// it does not go through os api, the operation has to record from here
|
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
|
||||||
tkr.record((address)addr, bytes);
|
remove_file_mapping(addr);
|
||||||
|
tkr.record((address)addr, bytes);
|
||||||
|
} else {
|
||||||
|
remove_file_mapping(addr);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
char* PerfMemory::backing_store_filename() {
|
char* PerfMemory::backing_store_filename() {
|
||||||
|
@ -23,6 +23,8 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#include "precompiled.hpp"
|
#include "precompiled.hpp"
|
||||||
|
#include "memory/allocation.hpp"
|
||||||
|
#include "memory/allocation.inline.hpp"
|
||||||
#include "runtime/os.hpp"
|
#include "runtime/os.hpp"
|
||||||
#include "vm_version_sparc.hpp"
|
#include "vm_version_sparc.hpp"
|
||||||
|
|
||||||
@ -48,7 +50,7 @@ static void do_sysinfo(int si, const char* string, int* features, int mask) {
|
|||||||
// All SI defines used below must be supported.
|
// All SI defines used below must be supported.
|
||||||
guarantee(bufsize != -1, "must be supported");
|
guarantee(bufsize != -1, "must be supported");
|
||||||
|
|
||||||
char* buf = (char*) malloc(bufsize);
|
char* buf = (char*) os::malloc(bufsize, mtInternal);
|
||||||
|
|
||||||
if (buf == NULL)
|
if (buf == NULL)
|
||||||
return;
|
return;
|
||||||
@ -60,7 +62,7 @@ static void do_sysinfo(int si, const char* string, int* features, int mask) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
free(buf);
|
os::free(buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
int VM_Version::platform_features(int features) {
|
int VM_Version::platform_features(int features) {
|
||||||
@ -161,7 +163,7 @@ int VM_Version::platform_features(int features) {
|
|||||||
|
|
||||||
char tmp;
|
char tmp;
|
||||||
size_t bufsize = sysinfo(SI_ISALIST, &tmp, 1);
|
size_t bufsize = sysinfo(SI_ISALIST, &tmp, 1);
|
||||||
char* buf = (char*) malloc(bufsize);
|
char* buf = (char*) os::malloc(bufsize, mtInternal);
|
||||||
|
|
||||||
if (buf != NULL) {
|
if (buf != NULL) {
|
||||||
if (sysinfo(SI_ISALIST, buf, bufsize) == bufsize) {
|
if (sysinfo(SI_ISALIST, buf, bufsize) == bufsize) {
|
||||||
@ -184,7 +186,7 @@ int VM_Version::platform_features(int features) {
|
|||||||
if (vis[3] == '2') features |= vis2_instructions_m;
|
if (vis[3] == '2') features |= vis2_instructions_m;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
free(buf);
|
os::free(buf);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -228,7 +230,7 @@ int VM_Version::platform_features(int features) {
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
// Convert to UPPER case before compare.
|
// Convert to UPPER case before compare.
|
||||||
char* impl = strdup(implementation);
|
char* impl = os::strdup_check_oom(implementation);
|
||||||
|
|
||||||
for (int i = 0; impl[i] != 0; i++)
|
for (int i = 0; impl[i] != 0; i++)
|
||||||
impl[i] = (char)toupper((uint)impl[i]);
|
impl[i] = (char)toupper((uint)impl[i]);
|
||||||
@ -252,7 +254,7 @@ int VM_Version::platform_features(int features) {
|
|||||||
implementation = "SPARC";
|
implementation = "SPARC";
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
free((void*)impl);
|
os::free((void*)impl);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
} // for(
|
} // for(
|
||||||
|
@ -269,7 +269,7 @@ address CodeBuffer::decode_begin() {
|
|||||||
|
|
||||||
GrowableArray<int>* CodeBuffer::create_patch_overflow() {
|
GrowableArray<int>* CodeBuffer::create_patch_overflow() {
|
||||||
if (_overflow_arena == NULL) {
|
if (_overflow_arena == NULL) {
|
||||||
_overflow_arena = new (mtCode) Arena();
|
_overflow_arena = new (mtCode) Arena(mtCode);
|
||||||
}
|
}
|
||||||
return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
|
return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
|
||||||
}
|
}
|
||||||
|
@ -48,7 +48,7 @@ Compiler::Compiler() : AbstractCompiler(c1) {
|
|||||||
|
|
||||||
void Compiler::init_c1_runtime() {
|
void Compiler::init_c1_runtime() {
|
||||||
BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
|
BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
|
||||||
Arena* arena = new (mtCompiler) Arena();
|
Arena* arena = new (mtCompiler) Arena(mtCompiler);
|
||||||
Runtime1::initialize(buffer_blob);
|
Runtime1::initialize(buffer_blob);
|
||||||
FrameMap::initialize();
|
FrameMap::initialize();
|
||||||
// initialize data structures
|
// initialize data structures
|
||||||
|
@ -30,6 +30,7 @@
|
|||||||
#include "c1/c1_MacroAssembler.hpp"
|
#include "c1/c1_MacroAssembler.hpp"
|
||||||
#include "c1/c1_ValueStack.hpp"
|
#include "c1/c1_ValueStack.hpp"
|
||||||
#include "ci/ciInstance.hpp"
|
#include "ci/ciInstance.hpp"
|
||||||
|
#include "runtime/os.hpp"
|
||||||
|
|
||||||
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
|
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
|
||||||
// we must have enough patching space so that call can be inserted
|
// we must have enough patching space so that call can be inserted
|
||||||
@ -848,7 +849,7 @@ void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
|
|||||||
stringStream st;
|
stringStream st;
|
||||||
st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
|
st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
|
||||||
#ifdef SPARC
|
#ifdef SPARC
|
||||||
_masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
|
_masm->_verify_oop(r->as_Register(), os::strdup(st.as_string(), mtCompiler), __FILE__, __LINE__);
|
||||||
#else
|
#else
|
||||||
_masm->verify_oop(r->as_Register());
|
_masm->verify_oop(r->as_Register());
|
||||||
#endif
|
#endif
|
||||||
|
@ -86,7 +86,8 @@ static bool firstEnv = true;
|
|||||||
|
|
||||||
// ------------------------------------------------------------------
|
// ------------------------------------------------------------------
|
||||||
// ciEnv::ciEnv
|
// ciEnv::ciEnv
|
||||||
ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
|
ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter)
|
||||||
|
: _ciEnv_arena(mtCompiler) {
|
||||||
VM_ENTRY_MARK;
|
VM_ENTRY_MARK;
|
||||||
|
|
||||||
// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
|
// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
|
||||||
@ -144,7 +145,7 @@ ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
|
|||||||
_jvmti_can_pop_frame = false;
|
_jvmti_can_pop_frame = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
ciEnv::ciEnv(Arena* arena) {
|
ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
|
||||||
ASSERT_IN_VM;
|
ASSERT_IN_VM;
|
||||||
|
|
||||||
// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
|
// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -112,7 +112,7 @@ void ciObjectFactory::initialize() {
|
|||||||
// This Arena is long lived and exists in the resource mark of the
|
// This Arena is long lived and exists in the resource mark of the
|
||||||
// compiler thread that initializes the initial ciObjectFactory which
|
// compiler thread that initializes the initial ciObjectFactory which
|
||||||
// creates the shared ciObjects that all later ciObjectFactories use.
|
// creates the shared ciObjects that all later ciObjectFactories use.
|
||||||
Arena* arena = new (mtCompiler) Arena();
|
Arena* arena = new (mtCompiler) Arena(mtCompiler);
|
||||||
ciEnv initial(arena);
|
ciEnv initial(arena);
|
||||||
ciEnv* env = ciEnv::current();
|
ciEnv* env = ciEnv::current();
|
||||||
env->_factory->init_shared_objects();
|
env->_factory->init_shared_objects();
|
||||||
|
@@ -273,13 +273,17 @@ void ClassPathZipEntry::contents_do(void f(const char* name, void* context), voi
  }

  LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() {
-   _path = strdup(path);
+   _path = os::strdup_check_oom(path);
    _st = *st;
    _meta_index = NULL;
    _resolved_entry = NULL;
    _has_error = false;
  }

+ LazyClassPathEntry::~LazyClassPathEntry() {
+   os::free(_path);
+ }
+
  bool LazyClassPathEntry::is_jar_file() {
    return ((_st.st_mode & S_IFREG) == S_IFREG);
  }
@ -416,7 +420,7 @@ void ClassLoader::setup_meta_index() {
|
|||||||
default:
|
default:
|
||||||
{
|
{
|
||||||
if (!skipCurrentJar && cur_entry != NULL) {
|
if (!skipCurrentJar && cur_entry != NULL) {
|
||||||
char* new_name = strdup(package_name);
|
char* new_name = os::strdup_check_oom(package_name);
|
||||||
boot_class_path_packages.append(new_name);
|
boot_class_path_packages.append(new_name);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -438,7 +442,7 @@ void ClassLoader::setup_meta_index() {
|
|||||||
|
|
||||||
void ClassLoader::setup_bootstrap_search_path() {
|
void ClassLoader::setup_bootstrap_search_path() {
|
||||||
assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
|
assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
|
||||||
char* sys_class_path = os::strdup(Arguments::get_sysclasspath());
|
char* sys_class_path = os::strdup_check_oom(Arguments::get_sysclasspath());
|
||||||
if (TraceClassLoading && Verbose) {
|
if (TraceClassLoading && Verbose) {
|
||||||
tty->print_cr("[Bootstrap loader class path=%s]", sys_class_path);
|
tty->print_cr("[Bootstrap loader class path=%s]", sys_class_path);
|
||||||
}
|
}
|
||||||
@ -460,6 +464,7 @@ void ClassLoader::setup_bootstrap_search_path() {
|
|||||||
end++;
|
end++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
os::free(sys_class_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {
|
ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {
|
||||||
|
@ -128,6 +128,8 @@ class LazyClassPathEntry: public ClassPathEntry {
|
|||||||
bool is_jar_file();
|
bool is_jar_file();
|
||||||
const char* name() { return _path; }
|
const char* name() { return _path; }
|
||||||
LazyClassPathEntry(char* path, const struct stat* st);
|
LazyClassPathEntry(char* path, const struct stat* st);
|
||||||
|
virtual ~LazyClassPathEntry();
|
||||||
|
|
||||||
ClassFileStream* open_stream(const char* name, TRAPS);
|
ClassFileStream* open_stream(const char* name, TRAPS);
|
||||||
void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
|
void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
|
||||||
virtual bool is_lazy();
|
virtual bool is_lazy();
|
||||||
|
@ -70,9 +70,9 @@ Symbol* SymbolTable::allocate_symbol(const u1* name, int len, bool c_heap, TRAPS
|
|||||||
void SymbolTable::initialize_symbols(int arena_alloc_size) {
|
void SymbolTable::initialize_symbols(int arena_alloc_size) {
|
||||||
// Initialize the arena for global symbols, size passed in depends on CDS.
|
// Initialize the arena for global symbols, size passed in depends on CDS.
|
||||||
if (arena_alloc_size == 0) {
|
if (arena_alloc_size == 0) {
|
||||||
_arena = new (mtSymbol) Arena();
|
_arena = new (mtSymbol) Arena(mtSymbol);
|
||||||
} else {
|
} else {
|
||||||
_arena = new (mtSymbol) Arena(arena_alloc_size);
|
_arena = new (mtSymbol) Arena(mtSymbol, arena_alloc_size);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2217,6 +2217,181 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Look at the method's handlers. If the bci is in the handler's try block
|
||||||
|
// then check if the handler_pc is already on the stack. If not, push it.
|
||||||
|
void ClassVerifier::push_handlers(ExceptionTable* exhandlers,
|
||||||
|
GrowableArray<u4>* handler_stack,
|
||||||
|
u4 bci) {
|
||||||
|
int exlength = exhandlers->length();
|
||||||
|
for(int x = 0; x < exlength; x++) {
|
||||||
|
if (bci >= exhandlers->start_pc(x) && bci < exhandlers->end_pc(x)) {
|
||||||
|
handler_stack->append_if_missing(exhandlers->handler_pc(x));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return TRUE if all code paths starting with start_bc_offset end in
|
||||||
|
// bytecode athrow or loop.
|
||||||
|
bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
|
||||||
|
ResourceMark rm;
|
||||||
|
// Create bytecode stream.
|
||||||
|
RawBytecodeStream bcs(method());
|
||||||
|
u4 code_length = method()->code_size();
|
||||||
|
bcs.set_start(start_bc_offset);
|
||||||
|
u4 target;
|
||||||
|
// Create stack for storing bytecode start offsets for if* and *switch.
|
||||||
|
GrowableArray<u4>* bci_stack = new GrowableArray<u4>(30);
|
||||||
|
// Create stack for handlers for try blocks containing this handler.
|
||||||
|
GrowableArray<u4>* handler_stack = new GrowableArray<u4>(30);
|
||||||
|
// Create list of visited branch opcodes (goto* and if*).
|
||||||
|
GrowableArray<u4>* visited_branches = new GrowableArray<u4>(30);
|
||||||
|
ExceptionTable exhandlers(_method());
|
||||||
|
|
||||||
|
while (true) {
|
||||||
|
if (bcs.is_last_bytecode()) {
|
||||||
|
// if no more starting offsets to parse or if at the end of the
|
||||||
|
// method then return false.
|
||||||
|
if ((bci_stack->is_empty()) || ((u4)bcs.end_bci() == code_length))
|
||||||
|
return false;
|
||||||
|
// Pop a bytecode starting offset and scan from there.
|
||||||
|
bcs.set_start(bci_stack->pop());
|
||||||
|
}
|
||||||
|
Bytecodes::Code opcode = bcs.raw_next();
|
||||||
|
u4 bci = bcs.bci();
|
||||||
|
|
||||||
|
// If the bytecode is in a TRY block, push its handlers so they
|
||||||
|
// will get parsed.
|
||||||
|
push_handlers(&exhandlers, handler_stack, bci);
|
||||||
|
|
||||||
|
switch (opcode) {
|
||||||
|
case Bytecodes::_if_icmpeq:
|
||||||
|
case Bytecodes::_if_icmpne:
|
||||||
|
case Bytecodes::_if_icmplt:
|
||||||
|
case Bytecodes::_if_icmpge:
|
||||||
|
case Bytecodes::_if_icmpgt:
|
||||||
|
case Bytecodes::_if_icmple:
|
||||||
|
case Bytecodes::_ifeq:
|
||||||
|
case Bytecodes::_ifne:
|
||||||
|
case Bytecodes::_iflt:
|
||||||
|
case Bytecodes::_ifge:
|
||||||
|
case Bytecodes::_ifgt:
|
||||||
|
case Bytecodes::_ifle:
|
||||||
|
case Bytecodes::_if_acmpeq:
|
||||||
|
case Bytecodes::_if_acmpne:
|
||||||
|
case Bytecodes::_ifnull:
|
||||||
|
case Bytecodes::_ifnonnull:
|
||||||
|
target = bcs.dest();
|
||||||
|
if (visited_branches->contains(bci)) {
|
||||||
|
if (bci_stack->is_empty()) return true;
|
||||||
|
// Pop a bytecode starting offset and scan from there.
|
||||||
|
bcs.set_start(bci_stack->pop());
|
||||||
|
} else {
|
||||||
|
if (target > bci) { // forward branch
|
||||||
|
if (target >= code_length) return false;
|
||||||
|
// Push the branch target onto the stack.
|
||||||
|
bci_stack->push(target);
|
||||||
|
// then, scan bytecodes starting with next.
|
||||||
|
bcs.set_start(bcs.next_bci());
|
||||||
|
} else { // backward branch
|
||||||
|
// Push bytecode offset following backward branch onto the stack.
|
||||||
|
bci_stack->push(bcs.next_bci());
|
||||||
|
// Check bytecodes starting with branch target.
|
||||||
|
bcs.set_start(target);
|
||||||
|
}
|
||||||
|
// Record target so we don't branch here again.
|
||||||
|
visited_branches->append(bci);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
|
||||||
|
case Bytecodes::_goto:
|
||||||
|
case Bytecodes::_goto_w:
|
||||||
|
target = (opcode == Bytecodes::_goto ? bcs.dest() : bcs.dest_w());
|
||||||
|
if (visited_branches->contains(bci)) {
|
||||||
|
if (bci_stack->is_empty()) return true;
|
||||||
|
// Been here before, pop new starting offset from stack.
|
||||||
|
bcs.set_start(bci_stack->pop());
|
||||||
|
} else {
|
||||||
|
if (target >= code_length) return false;
|
||||||
|
// Continue scanning from the target onward.
|
||||||
|
bcs.set_start(target);
|
||||||
|
// Record target so we don't branch here again.
|
||||||
|
visited_branches->append(bci);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
|
||||||
|
// Check that all switch alternatives end in 'athrow' bytecodes. Since it
|
||||||
|
// is difficult to determine where each switch alternative ends, parse
|
||||||
|
// each switch alternative until either hit a 'return', 'athrow', or reach
|
||||||
|
// the end of the method's bytecodes. This is gross but should be okay
|
||||||
|
// because:
|
||||||
|
// 1. tableswitch and lookupswitch byte codes in handlers for ctor explicit
|
||||||
|
// constructor invocations should be rare.
|
||||||
|
// 2. if each switch alternative ends in an athrow then the parsing should be
|
||||||
|
// short. If there is no athrow then it is bogus code, anyway.
|
||||||
|
case Bytecodes::_lookupswitch:
|
||||||
|
case Bytecodes::_tableswitch:
|
||||||
|
{
|
||||||
|
address aligned_bcp = (address) round_to((intptr_t)(bcs.bcp() + 1), jintSize);
|
||||||
|
u4 default_offset = Bytes::get_Java_u4(aligned_bcp) + bci;
|
||||||
|
int keys, delta;
|
||||||
|
if (opcode == Bytecodes::_tableswitch) {
|
||||||
|
jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
|
||||||
|
jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
|
||||||
|
// This is invalid, but let the regular bytecode verifier
|
||||||
|
// report this because the user will get a better error message.
|
||||||
|
if (low > high) return true;
|
||||||
|
keys = high - low + 1;
|
||||||
|
delta = 1;
|
||||||
|
} else {
|
||||||
|
keys = (int)Bytes::get_Java_u4(aligned_bcp + jintSize);
|
||||||
|
delta = 2;
|
||||||
|
}
|
||||||
|
// Invalid, let the regular bytecode verifier deal with it.
|
||||||
|
if (keys < 0) return true;
|
||||||
|
|
||||||
|
// Push the offset of the next bytecode onto the stack.
|
||||||
|
bci_stack->push(bcs.next_bci());
|
||||||
|
|
||||||
|
// Push the switch alternatives onto the stack.
|
||||||
|
for (int i = 0; i < keys; i++) {
|
||||||
|
u4 target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
|
||||||
|
if (target > code_length) return false;
|
||||||
|
bci_stack->push(target);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start bytecode parsing for the switch at the default alternative.
|
||||||
|
if (default_offset > code_length) return false;
|
||||||
|
bcs.set_start(default_offset);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case Bytecodes::_return:
|
||||||
|
return false;
|
||||||
|
|
||||||
|
case Bytecodes::_athrow:
|
||||||
|
{
|
||||||
|
if (bci_stack->is_empty()) {
|
||||||
|
if (handler_stack->is_empty()) {
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
// Parse the catch handlers for try blocks containing athrow.
|
||||||
|
bcs.set_start(handler_stack->pop());
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Pop a bytecode offset and starting scanning from there.
|
||||||
|
bcs.set_start(bci_stack->pop());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
|
||||||
|
default:
|
||||||
|
;
|
||||||
|
} // end switch
|
||||||
|
} // end while loop
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
  void ClassVerifier::verify_invoke_init(
      RawBytecodeStream* bcs, u2 ref_class_index, VerificationType ref_class_type,
      StackMapFrame* current_frame, u4 code_length, bool *this_uninit,
@@ -2236,18 +2411,26 @@ void ClassVerifier::verify_invoke_init(
      return;
    }

-   // Make sure that this call is not done from within a TRY block because
-   // that can result in returning an incomplete object. Simply checking
-   // (bci >= start_pc) also ensures that this call is not done after a TRY
-   // block. That is also illegal because this call must be the first Java
-   // statement in the constructor.
+   // Check if this call is done from inside of a TRY block. If so, make
+   // sure that all catch clause paths end in a throw. Otherwise, this
+   // can result in returning an incomplete object.
    ExceptionTable exhandlers(_method());
    int exlength = exhandlers.length();
    for(int i = 0; i < exlength; i++) {
-     if (bci >= exhandlers.start_pc(i)) {
-       verify_error(ErrorContext::bad_code(bci),
-           "Bad <init> method call from after the start of a try block");
-       return;
+     u2 start_pc = exhandlers.start_pc(i);
+     u2 end_pc = exhandlers.end_pc(i);
+
+     if (bci >= start_pc && bci < end_pc) {
+       if (!ends_in_athrow(exhandlers.handler_pc(i))) {
+         verify_error(ErrorContext::bad_code(bci),
+             "Bad <init> method call from after the start of a try block");
+         return;
+       } else if (VerboseVerification) {
+         ResourceMark rm;
+         tty->print_cr(
+             "Survived call to ends_in_athrow(): %s",
+             current_class()->name()->as_C_string());
+       }
      }
    }

|
@ -30,6 +30,7 @@
|
|||||||
#include "oops/klass.hpp"
|
#include "oops/klass.hpp"
|
||||||
#include "oops/method.hpp"
|
#include "oops/method.hpp"
|
||||||
#include "runtime/handles.hpp"
|
#include "runtime/handles.hpp"
|
||||||
|
#include "utilities/growableArray.hpp"
|
||||||
#include "utilities/exceptions.hpp"
|
#include "utilities/exceptions.hpp"
|
||||||
|
|
||||||
// The verifier class
|
// The verifier class
|
||||||
@ -303,6 +304,16 @@ class ClassVerifier : public StackObj {
|
|||||||
StackMapFrame* current_frame, u4 code_length, bool* this_uninit,
|
StackMapFrame* current_frame, u4 code_length, bool* this_uninit,
|
||||||
constantPoolHandle cp, TRAPS);
|
constantPoolHandle cp, TRAPS);
|
||||||
|
|
||||||
|
// Used by ends_in_athrow() to push all handlers that contain bci onto
|
||||||
|
// the handler_stack, if the handler is not already on the stack.
|
||||||
|
void push_handlers(ExceptionTable* exhandlers,
|
||||||
|
GrowableArray<u4>* handler_stack,
|
||||||
|
u4 bci);
|
||||||
|
|
||||||
|
// Returns true if all paths starting with start_bc_offset end in athrow
|
||||||
|
// bytecode or loop.
|
||||||
|
bool ends_in_athrow(u4 start_bc_offset);
|
||||||
|
|
||||||
void verify_invoke_instructions(
|
void verify_invoke_instructions(
|
||||||
RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
|
RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
|
||||||
bool* this_uninit, VerificationType return_type,
|
bool* this_uninit, VerificationType return_type,
|
||||||
|
@@ -33,6 +33,7 @@
 #include "oops/symbol.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.hpp"
+#include "runtime/os.hpp"

 class MethodMatcher : public CHeapObj<mtCompiler> {
  public:
@@ -175,7 +176,11 @@ class MethodOptionMatcher: public MethodMatcher {
                       Symbol* method_name, Mode method_mode,
                       Symbol* signature, const char * opt, MethodMatcher* next):
     MethodMatcher(class_name, class_mode, method_name, method_mode, signature, next) {
-    option = opt;
+    option = os::strdup_check_oom(opt);
+  }
+
+  virtual ~MethodOptionMatcher() {
+    os::free((void*)option);
   }

   bool match(methodHandle method, const char* opt) {
@@ -498,7 +503,7 @@ void CompilerOracle::parse_from_line(char* line) {
         tty->print("CompilerOracle: %s ", command_names[command]);
         match->print();
       }
-      match = add_option_string(c_name, c_match, m_name, m_match, signature, strdup(option));
+      match = add_option_string(c_name, c_match, m_name, m_match, signature, option);
       line += bytes_read;
     }
   } else {
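Note: the MethodOptionMatcher hunk above moves ownership of the option string into the matcher itself: the constructor takes its own copy via os::strdup_check_oom() and the new destructor frees it, so parse_from_line() no longer needs its leaking strdup(). A stand-alone sketch of the ownership pattern, illustrative only (plain C runtime calls used in place of the HotSpot os:: wrappers):

  #include <stdlib.h>
  #include <string.h>

  // Hypothetical illustration of "duplicate on construction, free on
  // destruction", the pattern the matcher now follows.
  class OwnedOption {
   public:
    explicit OwnedOption(const char* opt) : _option(strdup(opt)) {}
    ~OwnedOption() { free((void*)_option); }   // owner releases its copy
    const char* option() const { return _option; }
   private:
    const char* _option;                       // heap copy owned by this object
  };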
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,8 @@ void ConcurrentMarkSweepPolicy::initialize_alignments() {
 }

 void ConcurrentMarkSweepPolicy::initialize_generations() {
-  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
+  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC,
+    CURRENT_PC, AllocFailStrategy::RETURN_NULL);
   if (_generations == NULL)
     vm_exit_during_initialization("Unable to allocate gen spec");

@@ -289,7 +289,7 @@ OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
   }

   _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
-                        mtGC, 0, AllocFailStrategy::RETURN_NULL);
+                        mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);

   if (_fine_grain_regions == NULL) {
     vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
@@ -280,9 +280,6 @@ class AbstractInterpreterGenerator: public StackObj {
   address generate_result_handler_for(BasicType type);
   address generate_slow_signature_handler();

-  // entry point generator
-  address generate_method_entry(AbstractInterpreter::MethodKind kind);
-
   void bang_stack_shadow_pages(bool native_call);

   void generate_all();
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -108,7 +108,7 @@ void CppInterpreterGenerator::generate_all() {
 }


-#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind)
+#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = ((InterpreterGenerator*)this)->generate_method_entry(Interpreter::kind)

 { CodeletMark cm(_masm, "(kind = frame_manager)");
   // all non-native method kinds
@@ -29,6 +29,7 @@
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/bytecodeInterpreter.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "interpreter/templateTable.hpp"
@@ -261,7 +262,7 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
   // Special intrinsic method?
   // Note: This test must come _after_ the test for native methods,
   // otherwise we will run into problems with JDK 1.2, see also
-  // AbstractInterpreterGenerator::generate_method_entry() for
+  // InterpreterGenerator::generate_method_entry() for
   // for details.
   switch (m->intrinsic_id()) {
     case vmIntrinsics::_dsin  : return java_lang_math_sin  ;
@@ -521,3 +522,50 @@ void AbstractInterpreterGenerator::initialize_method_handle_entries() {
     Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
   }
 }
+
+// Generate method entries
+address InterpreterGenerator::generate_method_entry(
+                                        AbstractInterpreter::MethodKind kind) {
+  // determine code generation flags
+  bool synchronized = false;
+  address entry_point = NULL;
+
+  switch (kind) {
+  case Interpreter::zerolocals             :                                                break;
+  case Interpreter::zerolocals_synchronized: synchronized = true;                          break;
+  case Interpreter::native                 : entry_point = generate_native_entry(false);   break;
+  case Interpreter::native_synchronized    : entry_point = generate_native_entry(true);    break;
+  case Interpreter::empty                  : entry_point = generate_empty_entry();         break;
+  case Interpreter::accessor               : entry_point = generate_accessor_entry();      break;
+  case Interpreter::abstract               : entry_point = generate_abstract_entry();      break;
+
+  case Interpreter::java_lang_math_sin     : // fall thru
+  case Interpreter::java_lang_math_cos     : // fall thru
+  case Interpreter::java_lang_math_tan     : // fall thru
+  case Interpreter::java_lang_math_abs     : // fall thru
+  case Interpreter::java_lang_math_log     : // fall thru
+  case Interpreter::java_lang_math_log10   : // fall thru
+  case Interpreter::java_lang_math_sqrt    : // fall thru
+  case Interpreter::java_lang_math_pow     : // fall thru
+  case Interpreter::java_lang_math_exp     : entry_point = generate_math_entry(kind);      break;
+  case Interpreter::java_lang_ref_reference_get
+                                           : entry_point = generate_Reference_get_entry(); break;
+#ifndef CC_INTERP
+  case Interpreter::java_util_zip_CRC32_update
+                                           : entry_point = generate_CRC32_update_entry();  break;
+  case Interpreter::java_util_zip_CRC32_updateBytes
+                                           : // fall thru
+  case Interpreter::java_util_zip_CRC32_updateByteBuffer
+                                           : entry_point = generate_CRC32_updateBytes_entry(kind); break;
+#endif // CC_INTERP
+  default:
+    fatal(err_msg("unexpected method kind: %d", kind));
+    break;
+  }
+
+  if (entry_point) {
+    return entry_point;
+  }
+
+  return generate_normal_entry(synchronized);
+}
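Note: the new InterpreterGenerator::generate_method_entry() above centralizes the per-kind dispatch that the C++ and template interpreters previously kept separately; the method_entry macros now cast this to InterpreterGenerator and call it once per kind. A minimal sketch of how the filled table is consulted afterwards, illustrative only (the real lookup goes through AbstractInterpreter, not a free function):

  // Hypothetical illustration: once generate_all() has expanded method_entry
  // for every kind, selecting an interpreter entry is a single table lookup.
  address entry_for_kind(AbstractInterpreter::MethodKind kind) {
    address entry = Interpreter::_entry_table[kind];  // filled by generate_method_entry()
    assert(entry != NULL, "entry must have been generated");
    return entry;
  }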
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,9 +37,11 @@
 class InterpreterGenerator: public CC_INTERP_ONLY(CppInterpreterGenerator)
                                    NOT_CC_INTERP(TemplateInterpreterGenerator) {

  public:

   InterpreterGenerator(StubQueue* _code);
+  // entry point generator
+  address generate_method_entry(AbstractInterpreter::MethodKind kind);

 #ifdef TARGET_ARCH_x86
 # include "interpreterGenerator_x86.hpp"
@@ -364,7 +364,7 @@ void TemplateInterpreterGenerator::generate_all() {

 #define method_entry(kind)                                              \
   { CodeletMark cm(_masm, "method entry point (kind = " #kind ")");     \
-    Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind); \
+    Interpreter::_entry_table[Interpreter::kind] = ((InterpreterGenerator*)this)->generate_method_entry(Interpreter::kind); \
   }

   // all non-native method kinds
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,9 +59,6 @@ class TemplateInterpreterGenerator: public AbstractInterpreterGenerator {
   address generate_safept_entry_for(TosState state, address runtime_entry);
   void generate_throw_exception();

-  // entry point generator
-  // address generate_method_entry(AbstractInterpreter::MethodKind kind);
-
   // Instruction generation
   void generate_and_dispatch (Template* t, TosState tos_out = ilgl);
   void set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep);
@@ -422,26 +422,23 @@ void Chunk::start_chunk_pool_cleaner_task() {
 }

 //------------------------------Arena------------------------------------------
-NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)

-Arena::Arena(size_t init_size) {
+Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0) {
   size_t round_size = (sizeof (char *)) - 1;
   init_size = (init_size+round_size) & ~round_size;
   _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
-  _size_in_bytes = 0;
+  MemTracker::record_new_arena(flag);
   set_size_in_bytes(init_size);
-  NOT_PRODUCT(Atomic::inc(&_instance_count);)
 }

-Arena::Arena() {
+Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
   _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
-  _size_in_bytes = 0;
+  MemTracker::record_new_arena(flag);
   set_size_in_bytes(Chunk::init_size);
-  NOT_PRODUCT(Atomic::inc(&_instance_count);)
 }

 Arena *Arena::move_contents(Arena *copy) {
@@ -463,7 +460,7 @@ Arena *Arena::move_contents(Arena *copy) {

 Arena::~Arena() {
   destruct_contents();
-  NOT_PRODUCT(Atomic::dec(&_instance_count);)
+  MemTracker::record_arena_free(_flags);
 }

 void* Arena::operator new(size_t size) throw() {
@@ -479,21 +476,21 @@ void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant)
 // dynamic memory type binding
 void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
 #ifdef ASSERT
-  void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
+  void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
   if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
   return p;
 #else
-  return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
+  return (void *) AllocateHeap(size, flags, CALLER_PC);
 #endif
 }

 void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
 #ifdef ASSERT
-  void* p = os::malloc(size, flags|otArena, CALLER_PC);
+  void* p = os::malloc(size, flags, CALLER_PC);
   if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
   return p;
 #else
-  return os::malloc(size, flags|otArena, CALLER_PC);
+  return os::malloc(size, flags, CALLER_PC);
 #endif
 }

@@ -518,8 +515,9 @@ void Arena::destruct_contents() {
 // change the size
 void Arena::set_size_in_bytes(size_t size) {
   if (_size_in_bytes != size) {
+    long delta = (long)(size - size_in_bytes());
     _size_in_bytes = size;
-    MemTracker::record_arena_size((address)this, size);
+    MemTracker::record_arena_size_change(delta, _flags);
   }
 }
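Note: with the constructors above, every Arena carries the NMT memory type it accounts against, and arena growth is reported as a size delta per flag rather than per arena address. A minimal usage sketch (the mtCompiler example mirrors the Type::Initialize_shared() change later in this commit):

  // Illustrative only: an arena whose chunks are accounted to the compiler
  // category; both the placement-new flag and the constructor flag are
  // MEMFLAGS values.
  Arena* compiler_arena = new (mtCompiler) Arena(mtCompiler);
  void* scratch = compiler_arena->Amalloc(64);   // accounted as mtCompiler
  delete compiler_arena;                         // ~Arena() tells MemTracker the arena is gone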
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -133,51 +133,34 @@ class AllocatedObj {


 /*
- * MemoryType bitmap layout:
- * | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 |
- * |      memory type        |   object    | reserved    |
- * |                         |     type    |             |
+ * Memory types
  */
 enum MemoryType {
   // Memory type by sub systems. It occupies lower byte.
-  mtNone              = 0x0000,  // undefined
-  mtClass             = 0x0100,  // memory class for Java classes
-  mtThread            = 0x0200,  // memory for thread objects
-  mtThreadStack       = 0x0300,
-  mtCode              = 0x0400,  // memory for generated code
-  mtGC                = 0x0500,  // memory for GC
-  mtCompiler          = 0x0600,  // memory for compiler
-  mtInternal          = 0x0700,  // memory used by VM, but does not belong to
+  mtJavaHeap          = 0x00,  // Java heap
+  mtClass             = 0x01,  // memory class for Java classes
+  mtThread            = 0x02,  // memory for thread objects
+  mtThreadStack       = 0x03,
+  mtCode              = 0x04,  // memory for generated code
+  mtGC                = 0x05,  // memory for GC
+  mtCompiler          = 0x06,  // memory for compiler
+  mtInternal          = 0x07,  // memory used by VM, but does not belong to
                                  // any of above categories, and not used for
                                  // native memory tracking
-  mtOther             = 0x0800,  // memory not used by VM
-  mtSymbol            = 0x0900,  // symbol
-  mtNMT               = 0x0A00,  // memory used by native memory tracking
-  mtChunk             = 0x0B00,  // chunk that holds content of arenas
-  mtJavaHeap          = 0x0C00,  // Java heap
-  mtClassShared       = 0x0D00,  // class data sharing
-  mtTest              = 0x0E00,  // Test type for verifying NMT
-  mtTracing           = 0x0F00,  // memory used for Tracing
-  mt_number_of_types  = 0x000F,  // number of memory types (mtDontTrack
+  mtOther             = 0x08,  // memory not used by VM
+  mtSymbol            = 0x09,  // symbol
+  mtNMT               = 0x0A,  // memory used by native memory tracking
+  mtClassShared       = 0x0B,  // class data sharing
+  mtChunk             = 0x0C,  // chunk that holds content of arenas
+  mtTest              = 0x0D,  // Test type for verifying NMT
+  mtTracing           = 0x0E,  // memory used for Tracing
+  mtNone              = 0x0F,  // undefined
+  mt_number_of_types  = 0x10   // number of memory types (mtDontTrack
                                  // is not included as validate type)
-  mtDontTrack         = 0x0F00,  // memory we do not or cannot track
-  mt_masks            = 0x7F00,
-
-  // object type mask
-  otArena             = 0x0010,  // an arena object
-  otNMTRecorder       = 0x0020,  // memory recorder object
-  ot_masks            = 0x00F0
 };

-#define IS_MEMORY_TYPE(flags, type) ((flags & mt_masks) == type)
-#define HAS_VALID_MEMORY_TYPE(flags)((flags & mt_masks) != mtNone)
-#define FLAGS_TO_MEMORY_TYPE(flags) (flags & mt_masks)
-
-#define IS_ARENA_OBJ(flags)         ((flags & ot_masks) == otArena)
-#define IS_NMT_RECORDER(flags)      ((flags & ot_masks) == otNMTRecorder)
-#define NMT_CAN_TRACK(flags)        (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack)))
-
-typedef unsigned short MEMFLAGS;
+typedef MemoryType MEMFLAGS;

 #if INCLUDE_NMT

@@ -189,27 +172,23 @@ const bool NMT_track_callsite = false;

 #endif // INCLUDE_NMT

-// debug build does not inline
-#if defined(_NMT_NOINLINE_)
-  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(1) : 0)
-  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(2) : 0)
-  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0)
-#else
-  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(0) : 0)
-  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(1) : 0)
-  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
-#endif
-
-
+class NativeCallStack;

 template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
-  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0) throw();
+  _NOINLINE_ void* operator new(size_t size, const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new(size_t size) throw();
   _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant,
-                               address caller_pc = 0) throw();
-  _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0) throw();
+                               const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant)
+                               throw();
+  _NOINLINE_ void* operator new [](size_t size, const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new [](size_t size) throw();
   _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
-                               address caller_pc = 0) throw();
+                               const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant)
+                               throw();
   void  operator delete(void* p);
   void  operator delete [] (void* p);
 };
@@ -384,13 +363,15 @@ class Chunk: CHeapObj<mtChunk> {

 //------------------------------Arena------------------------------------------
 // Fast allocation of memory
-class Arena : public CHeapObj<mtNone|otArena> {
+class Arena : public CHeapObj<mtNone> {
 protected:
   friend class ResourceMark;
   friend class HandleMark;
   friend class NoHandleMark;
   friend class VMStructs;

+  MEMFLAGS    _flags;           // Memory tracking flags
+
   Chunk *_first;                // First chunk
   Chunk *_chunk;                // current chunk
   char *_hwm, *_max;            // High water mark and max in current chunk
@@ -418,8 +399,8 @@ protected:
   }

  public:
-  Arena();
-  Arena(size_t init_size);
+  Arena(MEMFLAGS memflag);
+  Arena(MEMFLAGS memflag, size_t init_size);
   ~Arena();
   void  destruct_contents();
   char* hwm() const             { return _hwm; }
@@ -518,8 +499,6 @@ protected:
   static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2)  PRODUCT_RETURN;
   static void free_all(char** start, char** end)                                     PRODUCT_RETURN;

-  // how many arena instances
-  NOT_PRODUCT(static volatile jint _instance_count;)
 private:
   // Reset this Arena to empty, access will trigger grow if necessary
   void   reset(void) {
@@ -681,7 +660,7 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
   NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)

 #define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
-  NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL)
+  NEW_C_HEAP_ARRAY3(type, (size), memflags, CURRENT_PC, AllocFailStrategy::RETURN_NULL)

 #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
   (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))
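Note: after this hunk MEMFLAGS is simply the MemoryType enum, so the old bit-packed "memory type plus object type" encoding and its helper macros are gone; call sites pass a plain category and, where needed, a caller PC or native call stack. A minimal allocation sketch, illustrative only (it uses NEW_C_HEAP_ARRAY_RETURN_NULL exactly as redefined above and assumes the companion FREE_C_HEAP_ARRAY macro keeps its three-argument form):

  // Illustrative only: a C-heap array accounted to the GC category.
  // CURRENT_PC is supplied by the macro itself after this commit.
  size_t* counters = NEW_C_HEAP_ARRAY_RETURN_NULL(size_t, 16, mtGC);
  if (counters == NULL) {
    vm_exit_during_initialization("Unable to allocate counters");
  }
  FREE_C_HEAP_ARRAY(size_t, counters, mtGC);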
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@

 #include "runtime/atomic.inline.hpp"
 #include "runtime/os.hpp"
+#include "services/memTracker.hpp"

 // Explicit C-heap memory management

@@ -49,12 +50,10 @@ inline void inc_stat_counter(volatile julong* dest, julong add_value) {
 #endif

 // allocate using malloc; will fail if no memory available
-inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
+inline char* AllocateHeap(size_t size, MEMFLAGS flags,
+    const NativeCallStack& stack,
     AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
-  if (pc == 0) {
-    pc = CURRENT_PC;
-  }
-  char* p = (char*) os::malloc(size, flags, pc);
+  char* p = (char*) os::malloc(size, flags, stack);
 #ifdef ASSERT
   if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
 #endif
@@ -63,10 +62,14 @@ inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
   }
   return p;
 }
+inline char* AllocateHeap(size_t size, MEMFLAGS flags,
+    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
+  return AllocateHeap(size, flags, CURRENT_PC, alloc_failmode);
+}

-inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flags,
+inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
     AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
-  char* p = (char*) os::realloc(old, size, flags, CURRENT_PC);
+  char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
 #ifdef ASSERT
   if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
 #endif
@@ -85,8 +88,22 @@ inline void FreeHeap(void* p, MEMFLAGS memflags = mtInternal) {


 template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
-      address caller_pc) throw() {
-  void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
+      const NativeCallStack& stack) throw() {
+  void* p = (void*)AllocateHeap(size, F, stack);
+#ifdef ASSERT
+  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
+#endif
+  return p;
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size) throw() {
+  return CHeapObj<F>::operator new(size, CALLER_PC);
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
+  const std::nothrow_t&  nothrow_constant, const NativeCallStack& stack) throw() {
+  void* p = (void*)AllocateHeap(size, F, stack,
+      AllocFailStrategy::RETURN_NULL);
 #ifdef ASSERT
   if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
 #endif
@@ -94,23 +111,28 @@ template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
 }

 template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
-  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
-  void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
-      AllocFailStrategy::RETURN_NULL);
-#ifdef ASSERT
-  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
-#endif
-  return p;
+  const std::nothrow_t&  nothrow_constant) throw() {
+  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
 }

 template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
-      address caller_pc) throw() {
-  return CHeapObj<F>::operator new(size, caller_pc);
+      const NativeCallStack& stack) throw() {
+  return CHeapObj<F>::operator new(size, stack);
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size)
+  throw() {
+  return CHeapObj<F>::operator new(size, CALLER_PC);
 }

 template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
-  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
-  return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
+  const std::nothrow_t&  nothrow_constant, const NativeCallStack& stack) throw() {
+  return CHeapObj<F>::operator new(size, nothrow_constant, stack);
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
+  const std::nothrow_t&  nothrow_constant) throw() {
+  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
 }

 template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){
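Note: the inline helpers above now funnel every C-heap allocation through a NativeCallStack (or through CALLER_PC, which expands to one) instead of a single caller address. A minimal call-site sketch, illustrative only (MyEntry is a hypothetical CHeapObj subclass, not part of this commit):

  // Hypothetical CHeapObj subclass accounted to mtInternal.
  class MyEntry : public CHeapObj<mtInternal> {
   public:
    int _value;
  };

  // Default path: operator new(size_t) captures CALLER_PC internally.
  MyEntry* a = new MyEntry();

  // Explicit path: hand in a pre-built stack, as the WhiteBox NMT tests do.
  address pc = (address)0x1234;          // synthetic frame for illustration
  NativeCallStack stack(&pc, 1);
  MyEntry* b = new (stack) MyEntry();

  delete a;
  delete b;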
@@ -56,7 +56,7 @@ CardTableRS::CardTableRS(MemRegion whole_heap,
 #endif
   set_bs(_ct_bs);
   _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
-                         mtGC, 0, AllocFailStrategy::RETURN_NULL);
+                         mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
   if (_last_cur_val_in_gen == NULL) {
     vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
   }
@@ -905,7 +905,8 @@ void MarkSweepPolicy::initialize_alignments() {
 }

 void MarkSweepPolicy::initialize_generations() {
-  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
+  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC,
+    AllocFailStrategy::RETURN_NULL);
   if (_generations == NULL) {
     vm_exit_during_initialization("Unable to allocate gen spec");
   }
@@ -135,7 +135,7 @@ KlassInfoTable::KlassInfoTable(bool need_class_stats) {
   _ref = (HeapWord*) Universe::boolArrayKlassObj();
   _buckets =
     (KlassInfoBucket*)  AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets,
-       mtInternal, 0, AllocFailStrategy::RETURN_NULL);
+       mtInternal, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
   if (_buckets != NULL) {
     _size = _num_buckets;
     for (int index = 0; index < _size; index++) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -103,11 +103,13 @@ MemRegion MemRegion::minus(const MemRegion mr2) const {
 }

 void* MemRegion::operator new(size_t size) throw() {
-  return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
+  return (address)AllocateHeap(size, mtGC, CURRENT_PC,
+    AllocFailStrategy::RETURN_NULL);
 }

 void* MemRegion::operator new [](size_t size) throw() {
-  return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
+  return (address)AllocateHeap(size, mtGC, CURRENT_PC,
+    AllocFailStrategy::RETURN_NULL);
 }
 void  MemRegion::operator delete(void* p) {
   FreeHeap(p, mtGC);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,11 +49,11 @@ class ResourceArea: public Arena {
   debug_only(static int _warned;)       // to suppress multiple warnings

 public:
-  ResourceArea() {
+  ResourceArea() : Arena(mtThread) {
     debug_only(_nesting = 0;)
   }

-  ResourceArea(size_t init_size) : Arena(init_size) {
+  ResourceArea(size_t init_size) : Arena(mtThread, init_size) {
     debug_only(_nesting = 0;);
   }

@@ -64,7 +64,7 @@ public:
     if (UseMallocOnly) {
       // use malloc, but save pointer in res. area for later freeing
       char** save = (char**)internal_malloc_4(sizeof(char*));
-      return (*save = (char*)os::malloc(size, mtThread));
+      return (*save = (char*)os::malloc(size, mtThread, CURRENT_PC));
     }
 #endif
     return (char*)Amalloc(size, alloc_failmode);
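Note: with the two constructor changes above, resource areas are accounted to mtThread like the rest of per-thread memory. A minimal usage sketch, illustrative only (the ResourceMark pattern itself is unchanged by this commit):

  // Illustrative only: resource allocation is still scoped by ResourceMark;
  // the arena behind it is now tagged mtThread for NMT.
  {
    ResourceMark rm;
    char* buf = NEW_RESOURCE_ARRAY(char, 256);  // released when rm goes out of scope
    memset(buf, 0, 256);
  }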
@@ -647,6 +647,10 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
                   _printer(IdealGraphPrinter::printer()),
 #endif
                   _congraph(NULL),
+                  _comp_arena(mtCompiler),
+                  _node_arena(mtCompiler),
+                  _old_arena(mtCompiler),
+                  _Compile_types(mtCompiler),
                   _replay_inline_data(NULL),
                   _late_inlines(comp_arena(), 2, 0, NULL),
                   _string_late_inlines(comp_arena(), 2, 0, NULL),
@@ -954,6 +958,10 @@ Compile::Compile( ciEnv* ci_env,
     _in_dump_cnt(0),
     _printer(NULL),
 #endif
+    _comp_arena(mtCompiler),
+    _node_arena(mtCompiler),
+    _old_arena(mtCompiler),
+    _Compile_types(mtCompiler),
     _dead_node_list(comp_arena()),
     _dead_node_count(0),
     _congraph(NULL),
@@ -1381,11 +1381,11 @@ NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCount
   }
   NamedCounter* c;
   if (tag == NamedCounter::BiasedLockingCounter) {
-    c = new BiasedLockingNamedCounter(strdup(st.as_string()));
+    c = new BiasedLockingNamedCounter(st.as_string());
   } else if (tag == NamedCounter::RTMLockingCounter) {
-    c = new RTMLockingNamedCounter(strdup(st.as_string()));
+    c = new RTMLockingNamedCounter(st.as_string());
   } else {
-    c = new NamedCounter(strdup(st.as_string()), tag);
+    c = new NamedCounter(st.as_string(), tag);
   }

   // atomically add the new counter to the head of the list.  We only
@@ -75,11 +75,17 @@ private:

  public:
   NamedCounter(const char *n, CounterTag tag = NoTag):
-    _name(n),
+    _name(n == NULL ? NULL : os::strdup(n)),
     _count(0),
     _next(NULL),
     _tag(tag) {}

+  ~NamedCounter() {
+    if (_name != NULL) {
+      os::free((void*)_name);
+    }
+  }
+
   const char * name() const     { return _name; }
   int count() const             { return _count; }
   address addr()                { return (address)&_count; }
@@ -265,7 +265,7 @@ void Type::Initialize_shared(Compile* current) {
   // locking.

   Arena* save = current->type_arena();
-  Arena* shared_type_arena = new (mtCompiler)Arena();
+  Arena* shared_type_arena = new (mtCompiler)Arena(mtCompiler);

   current->set_type_arena(shared_type_arena);
   _shared_type_dict =
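Note: NamedCounter now owns its name: the constructor takes a private os::strdup() copy and the new destructor frees it, which is why new_named_counter() above can pass the resource-allocated st.as_string() directly without leaking a raw strdup(). A minimal call-site sketch, illustrative only:

  // Illustrative only: the stringStream text lives in the resource area, and
  // NamedCounter keeps its own C-heap copy, so no leak and no dangling name.
  stringStream st;
  st.print("%s_lock", "demo");
  NamedCounter* c = new NamedCounter(st.as_string(), NamedCounter::NoTag);
  delete c;   // releases the strdup'ed name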
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -222,10 +222,17 @@
 # include "runtime/vmThread.hpp"
 # include "runtime/vm_operations.hpp"
 # include "runtime/vm_version.hpp"
+# include "services/allocationSite.hpp"
 # include "services/lowMemoryDetector.hpp"
+# include "services/mallocTracker.hpp"
+# include "services/memBaseline.hpp"
 # include "services/memoryPool.hpp"
 # include "services/memoryService.hpp"
 # include "services/memoryUsage.hpp"
+# include "services/memReporter.hpp"
+# include "services/memTracker.hpp"
+# include "services/nmtCommon.hpp"
+# include "services/virtualMemoryTracker.hpp"
 # include "utilities/accessFlags.hpp"
 # include "utilities/array.hpp"
 # include "utilities/bitMap.hpp"
@@ -240,6 +247,7 @@
 # include "utilities/hashtable.hpp"
 # include "utilities/histogram.hpp"
 # include "utilities/macros.hpp"
+# include "utilities/nativeCallStack.hpp"
 # include "utilities/numberSeq.hpp"
 # include "utilities/ostream.hpp"
 # include "utilities/preserveException.hpp"
@@ -74,6 +74,7 @@
 #include "runtime/signature.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vm_operations.hpp"
+#include "services/memTracker.hpp"
 #include "services/runtimeService.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/defaultStream.hpp"
@@ -2697,6 +2698,7 @@ static char* get_bad_address() {
     if (bad_address != NULL) {
       os::protect_memory(bad_address, size, os::MEM_PROT_READ,
                          /*is_committed*/false);
+      MemTracker::record_virtual_memory_type((void*)bad_address, mtInternal);
     }
   }
   return bad_address;
@@ -3857,6 +3859,7 @@ void TestOldSize_test();
 void TestKlass_test();
 void TestBitMap_test();
 void TestAsUtf8();
+void Test_linked_list();
 #if INCLUDE_ALL_GCS
 void TestOldFreeSpaceCalculation_test();
 void TestG1BiasedArray_test();
@@ -3887,6 +3890,7 @@ void execute_internal_vm_tests() {
     run_unit_test(TestBitMap_test());
     run_unit_test(TestAsUtf8());
     run_unit_test(ObjectMonitor::sanity_checks());
+    run_unit_test(Test_linked_list());
 #if INCLUDE_VM_STRUCTS
     run_unit_test(VMStructs::test());
 #endif
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -316,6 +316,7 @@ void JvmtiManageCapabilities::update() {
     avail.can_generate_frame_pop_events ||
     avail.can_generate_method_entry_events ||
     avail.can_generate_method_exit_events;
+#ifdef ZERO
   bool enter_all_methods =
     interp_events ||
     avail.can_generate_breakpoint_events;
@@ -324,6 +325,7 @@ void JvmtiManageCapabilities::update() {
     UseFastEmptyMethods = false;
     UseFastAccessorMethods = false;
   }
+#endif // ZERO

   if (avail.can_generate_breakpoint_events) {
     RewriteFrequentPairs = false;
@@ -52,8 +52,10 @@
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #endif // INCLUDE_ALL_GCS

-#ifdef INCLUDE_NMT
+#if INCLUDE_NMT
+#include "services/mallocSiteTable.hpp"
 #include "services/memTracker.hpp"
+#include "utilities/nativeCallStack.hpp"
 #endif // INCLUDE_NMT

 #include "compiler/compileBroker.hpp"
@@ -255,14 +257,18 @@ WB_END
 // NMT picks it up correctly
 WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
   jlong addr = 0;

-  if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
   addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
-  }

   return addr;
 WB_END

+// Alloc memory with pseudo call stack. The test can create psudo malloc
+// allocation site to stress the malloc tracking.
+WB_ENTRY(jlong, WB_NMTMallocWithPseudoStack(JNIEnv* env, jobject o, jlong size, jint pseudo_stack))
+  address pc = (address)(size_t)pseudo_stack;
+  NativeCallStack stack(&pc, 1);
+  return (jlong)os::malloc(size, mtTest, stack);
+WB_END
+
 // Free the memory allocated by NMTAllocTest
 WB_ENTRY(void, WB_NMTFree(JNIEnv* env, jobject o, jlong mem))
   os::free((void*)(uintptr_t)mem, mtTest);
@@ -271,10 +277,8 @@ WB_END
 WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
   jlong addr = 0;

-  if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
   addr = (jlong)(uintptr_t)os::reserve_memory(size);
   MemTracker::record_virtual_memory_type((address)addr, mtTest);
-  }

   return addr;
 WB_END
@@ -293,20 +297,20 @@ WB_ENTRY(void, WB_NMTReleaseMemory(JNIEnv* env, jobject o, jlong addr, jlong siz
   os::release_memory((char *)(uintptr_t)addr, size);
 WB_END

-// Block until the current generation of NMT data to be merged, used to reliably test the NMT feature
-WB_ENTRY(jboolean, WB_NMTWaitForDataMerge(JNIEnv* env))
-
-  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
-    return false;
-  }
-
-  return MemTracker::wbtest_wait_for_data_merge();
-WB_END
-
 WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
-  return MemTracker::tracking_level() == MemTracker::NMT_detail;
+  return MemTracker::tracking_level() == NMT_detail;
 WB_END

+WB_ENTRY(void, WB_NMTOverflowHashBucket(JNIEnv* env, jobject o, jlong num))
+  address pc = (address)1;
+  for (jlong index = 0; index < num; index ++) {
+    NativeCallStack stack(&pc, 1);
+    os::malloc(0, mtTest, stack);
+    pc += MallocSiteTable::hash_buckets();
+  }
+WB_END
+
+
 #endif // INCLUDE_NMT

 static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
@@ -843,12 +847,13 @@ static JNINativeMethod methods[] = {
 #endif // INCLUDE_ALL_GCS
 #if INCLUDE_NMT
   {CC"NMTMalloc",           CC"(J)J",                 (void*)&WB_NMTMalloc          },
+  {CC"NMTMallocWithPseudoStack", CC"(JI)J",           (void*)&WB_NMTMallocWithPseudoStack},
   {CC"NMTFree",             CC"(J)V",                 (void*)&WB_NMTFree            },
   {CC"NMTReserveMemory",    CC"(J)J",                 (void*)&WB_NMTReserveMemory   },
   {CC"NMTCommitMemory",     CC"(JJ)V",                (void*)&WB_NMTCommitMemory    },
   {CC"NMTUncommitMemory",   CC"(JJ)V",                (void*)&WB_NMTUncommitMemory  },
   {CC"NMTReleaseMemory",    CC"(JJ)V",                (void*)&WB_NMTReleaseMemory   },
-  {CC"NMTWaitForDataMerge", CC"()Z",                  (void*)&WB_NMTWaitForDataMerge},
+  {CC"NMTOverflowHashBucket", CC"(J)V",               (void*)&WB_NMTOverflowHashBucket},
   {CC"NMTIsDetailSupported",CC"()Z",                  (void*)&WB_NMTIsDetailSupported},
 #endif // INCLUDE_NMT
   {CC"deoptimizeAll",       CC"()V",                  (void*)&WB_DeoptimizeAll      },
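Note: WB_NMTMallocWithPseudoStack above is how the whitebox tests fabricate distinct malloc call sites: each jint pseudo_stack value becomes a one-frame NativeCallStack, and WB_NMTOverflowHashBucket steps the fake pc by MallocSiteTable::hash_buckets() to force collisions in one bucket chain. The same idea outside the WB_ENTRY wrappers, as an illustrative sketch only:

  // Illustrative only: fabricate a one-frame call stack per fake site so each
  // tracked allocation lands in a different MallocSiteTable bucket chain.
  void stress_malloc_sites(jlong count) {
    address pc = (address)1;
    for (jlong i = 0; i < count; i++) {
      NativeCallStack stack(&pc, 1);          // single synthetic frame
      os::malloc(0, mtTest, stack);           // zero-byte tracked allocation
      pc += MallocSiteTable::hash_buckets();  // next bucket chain
    }
  }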
@ -300,6 +300,11 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
|
|||||||
{ "UseNewReflection", JDK_Version::jdk(9), JDK_Version::jdk(10) },
|
{ "UseNewReflection", JDK_Version::jdk(9), JDK_Version::jdk(10) },
|
||||||
{ "ReflectionWrapResolutionErrors",JDK_Version::jdk(9), JDK_Version::jdk(10) },
|
{ "ReflectionWrapResolutionErrors",JDK_Version::jdk(9), JDK_Version::jdk(10) },
|
||||||
{ "VerifyReflectionBytecodes", JDK_Version::jdk(9), JDK_Version::jdk(10) },
|
{ "VerifyReflectionBytecodes", JDK_Version::jdk(9), JDK_Version::jdk(10) },
|
||||||
|
{ "AutoShutdownNMT", JDK_Version::jdk(9), JDK_Version::jdk(10) },
|
||||||
|
#ifndef ZERO
|
||||||
|
{ "UseFastAccessorMethods", JDK_Version::jdk(9), JDK_Version::jdk(10) },
|
||||||
|
{ "UseFastEmptyMethods", JDK_Version::jdk(9), JDK_Version::jdk(10) },
|
||||||
|
#endif // ZERO
|
||||||
{ NULL, JDK_Version(0), JDK_Version(0) }
|
   { NULL, JDK_Version(0), JDK_Version(0) }
 };
 
@@ -799,7 +804,7 @@ void Arguments::add_string(char*** bldarray, int* count, const char* arg) {
   } else {
     *bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, new_count, mtInternal);
   }
-  (*bldarray)[*count] = strdup(arg);
+  (*bldarray)[*count] = os::strdup_check_oom(arg);
   *count = new_count;
 }
 
@@ -1070,16 +1075,6 @@ void Arguments::set_mode_flags(Mode mode) {
   UseCompiler = true;
   UseLoopCounter = true;
 
-#ifndef ZERO
-  // Turn these off for mixed and comp. Leave them on for Zero.
-  if (FLAG_IS_DEFAULT(UseFastAccessorMethods)) {
-    UseFastAccessorMethods = (mode == _int);
-  }
-  if (FLAG_IS_DEFAULT(UseFastEmptyMethods)) {
-    UseFastEmptyMethods = (mode == _int);
-  }
-#endif
-
   // Default values may be platform/compiler dependent -
   // use the saved values
   ClipInlining = Arguments::_ClipInlining;
@@ -1885,7 +1880,7 @@ void Arguments::process_java_compiler_argument(char* arg) {
 }
 
 void Arguments::process_java_launcher_argument(const char* launcher, void* extra_info) {
-  _sun_java_launcher = strdup(launcher);
+  _sun_java_launcher = os::strdup_check_oom(launcher);
 }
 
 bool Arguments::created_by_java_launcher() {
@@ -2388,7 +2383,7 @@ bool Arguments::check_vm_args_consistency() {
 
   if (PrintNMTStatistics) {
 #if INCLUDE_NMT
-    if (MemTracker::tracking_level() == MemTracker::NMT_off) {
+    if (MemTracker::tracking_level() == NMT_off) {
 #endif // INCLUDE_NMT
       warning("PrintNMTStatistics is disabled, because native memory tracking is not enabled");
       PrintNMTStatistics = false;
@@ -2995,7 +2990,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
       // Redirect GC output to the file. -Xloggc:<filename>
       // ostream_init_log(), when called will use this filename
       // to initialize a fileStream.
-      _gc_log_filename = strdup(tail);
+      _gc_log_filename = os::strdup_check_oom(tail);
       if (!is_filename_valid(_gc_log_filename)) {
         jio_fprintf(defaultStream::output_stream(),
           "Invalid file name for use with -Xloggc: Filename can only contain the "
@@ -3598,15 +3593,24 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
     CommandLineFlags::printFlags(tty, false);
     vm_exit(0);
   }
-  if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
 #if INCLUDE_NMT
-    MemTracker::init_tracking_options(tail);
-#else
-    jio_fprintf(defaultStream::error_stream(),
-      "Native Memory Tracking is not supported in this VM\n");
-    return JNI_ERR;
-#endif
+  if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
+    // The launcher did not setup nmt environment variable properly.
+//    if (!MemTracker::check_launcher_nmt_support(tail)) {
+//      warning("Native Memory Tracking did not setup properly, using wrong launcher?");
+//    }
+
+    // Verify if nmt option is valid.
+    if (MemTracker::verify_nmt_option()) {
+      // Late initialization, still in single-threaded mode.
+      if (MemTracker::tracking_level() >= NMT_summary) {
+        MemTracker::init();
+      }
+    } else {
+      vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
+    }
   }
+#endif
 
 
 #ifndef PRODUCT
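For context, the rewritten block above expects the option tail to name one of three tracking levels (off, summary, detail) and exits on anything else. A minimal sketch of the kind of string-to-level mapping MemTracker::verify_nmt_option() relies on; the helper and enum values below are illustrative, not the code added by this commit:

#include <cstring>

// Hypothetical sketch: map the "-XX:NativeMemoryTracking" tail to a tracking level.
enum NMT_TrackingLevelSketch { NMT_unknown_s, NMT_off_s, NMT_summary_s, NMT_detail_s };

static NMT_TrackingLevelSketch parse_nmt_level(const char* tail) {
  if (tail == NULL)                  return NMT_unknown_s;  // syntax error; caller exits
  if (strcmp(tail, "=off") == 0)     return NMT_off_s;
  if (strcmp(tail, "=summary") == 0) return NMT_summary_s;
  if (strcmp(tail, "=detail") == 0)  return NMT_detail_s;
  return NMT_unknown_s;              // anything else is rejected with the syntax error above
}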
@@ -629,10 +629,16 @@ class vmNode : public ProfilerNode {
   }
 
   vmNode(const char* name, const TickPosition where) : ProfilerNode() {
-    _name = name;
+    _name = os::strdup(name);
     update(where);
   }
 
+  ~vmNode() {
+    if (_name != NULL) {
+      os::free((void*)_name);
+    }
+  }
+
   const char *name() const { return _name; }
   bool is_compiled() const { return true; }
 
@@ -784,7 +790,7 @@ void ThreadProfiler::vm_update(const char* name, TickPosition where) {
   assert(index >= 0, "Must be positive");
   // Note that we call strdup below since the symbol may be resource allocated
   if (!table[index]) {
-    table[index] = new (this) vmNode(os::strdup(name), where);
+    table[index] = new (this) vmNode(name, where);
   } else {
     ProfilerNode* prev = table[index];
     for(ProfilerNode* node = prev; node; node = node->next()) {
@@ -794,7 +800,7 @@ void ThreadProfiler::vm_update(const char* name, TickPosition where) {
       }
       prev = node;
     }
-    prev->set_next(new (this) vmNode(os::strdup(name), where));
+    prev->set_next(new (this) vmNode(name, where));
   }
 }
 
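The vmNode change above moves ownership of the name into the node: the constructor now duplicates it with os::strdup() and the new destructor frees it, so ThreadProfiler::vm_update() no longer duplicates up front. A minimal sketch of that duplicate-in-constructor, free-in-destructor pattern outside HotSpot types (illustrative only):

#include <cstdlib>
#include <cstring>

// Illustrative owner mirroring what vmNode now does with os::strdup()/os::free().
class OwnedName {
 public:
  explicit OwnedName(const char* name) : _name(name != NULL ? strdup(name) : NULL) { }
  ~OwnedName() { free((void*)_name); }              // free(NULL) is safe
  const char* name() const { return _name; }
 private:
  const char* _name;
};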
@@ -945,11 +945,6 @@ class CommandLineFlags {
   diagnostic(bool, PrintNMTStatistics, false, \
           "Print native memory tracking summary data if it is on") \
   \
-  diagnostic(bool, AutoShutdownNMT, true, \
-          "Automatically shutdown native memory tracking under stress " \
-          "situations. When set to false, native memory tracking tries to " \
-          "stay alive at the expense of JVM performance") \
-  \
   diagnostic(bool, LogCompilation, false, \
           "Log compilation activity in detail to LogFile") \
   \
@@ -2789,12 +2784,6 @@ class CommandLineFlags {
   product(bool, UseLoopCounter, true, \
           "Increment invocation counter on backward branch") \
   \
-  product(bool, UseFastEmptyMethods, true, \
-          "Use fast method entry code for empty methods") \
-  \
-  product(bool, UseFastAccessorMethods, true, \
-          "Use fast method entry code for accessor methods") \
-  \
   product_pd(bool, UseOnStackReplacement, \
           "Use on stack replacement, calls runtime if invoc. counter " \
           "overflows in loop") \
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -227,7 +227,7 @@ class HandleArea: public Arena {
   HandleArea* _prev; // link to outer (older) area
  public:
   // Constructor
-  HandleArea(HandleArea* prev) : Arena(Chunk::tiny_size) {
+  HandleArea(HandleArea* prev) : Arena(mtThread, Chunk::tiny_size) {
     debug_only(_handle_mark_nesting = 0);
     debug_only(_no_handle_mark_nesting = 0);
     _prev = prev;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,8 +34,10 @@
 #include "runtime/init.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/macros.hpp"
 
 
 // Initialization done by VM thread in vm_init_globals()
 void check_ThreadShadow();
 void eventlog_init();
@@ -131,6 +133,12 @@ jint init_globals() {
   javaClasses_init();   // must happen after vtable initialization
   stubRoutines_init2(); // note: StubRoutines need 2-phase init
 
+#if INCLUDE_NMT
+  // Solaris stack is walkable only after stubRoutines are set up.
+  // On Other platforms, the stack is always walkable.
+  NMT_stack_walkable = true;
+#endif // INCLUDE_NMT
+
   // All the flags that get adjusted by VM_Version_init and os::init_2
   // have been set so dump the flags now.
   if (PrintFlagsFinal) {
@@ -57,7 +57,6 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
 #include "runtime/vm_operations.hpp"
-#include "services/memReporter.hpp"
 #include "services/memTracker.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/dtrace.hpp"
@@ -349,12 +348,7 @@ void print_statistics() {
 #endif // ENABLE_ZAP_DEAD_LOCALS
   // Native memory tracking data
   if (PrintNMTStatistics) {
-    if (MemTracker::is_on()) {
-      BaselineTTYOutputer outputer(tty);
-      MemTracker::print_memory_usage(outputer, K, false);
-    } else {
-      tty->print_cr("%s", MemTracker::reason());
-    }
+    MemTracker::final_report(tty);
   }
 }
 
@@ -390,12 +384,7 @@ void print_statistics() {
 
   // Native memory tracking data
   if (PrintNMTStatistics) {
-    if (MemTracker::is_on()) {
-      BaselineTTYOutputer outputer(tty);
-      MemTracker::print_memory_usage(outputer, K, false);
-    } else {
-      tty->print_cr("%s", MemTracker::reason());
-    }
+    MemTracker::final_report(tty);
   }
 }
 
@@ -544,10 +533,6 @@ void before_exit(JavaThread * thread) {
     BeforeExit_lock->notify_all();
   }
 
-  // Shutdown NMT before exit. Otherwise,
-  // it will run into trouble when system destroys static variables.
-  MemTracker::shutdown(MemTracker::NMT_normal);
-
   if (VerifyStringTableAtExit) {
     int fail_cnt = 0;
     {
@@ -52,6 +52,7 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/vm_version.hpp"
 #include "services/attachListener.hpp"
+#include "services/nmtCommon.hpp"
 #include "services/memTracker.hpp"
 #include "services/threadService.hpp"
 #include "utilities/defaultStream.hpp"
@@ -516,6 +517,14 @@ char *os::strdup(const char *str, MEMFLAGS flags) {
   return dup_str;
 }
 
+char* os::strdup_check_oom(const char* str, MEMFLAGS flags) {
+  char* p = os::strdup(str, flags);
+  if (p == NULL) {
+    vm_exit_out_of_memory(strlen(str) + 1, OOM_MALLOC_ERROR, "os::strdup_check_oom");
+  }
+  return p;
+}
+
 
 #define paranoid 0 /* only set to 1 if you suspect checking code has bug */
 
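The new os::strdup_check_oom() is meant for callers where a failed duplication should terminate the VM rather than propagate NULL; an illustrative call, with the surrounding names hypothetical:

// Illustrative use: no NULL check needed, the helper exits the VM on allocation failure.
const char* copy = os::strdup_check_oom(option_string, mtInternal);  // option_string: hypothetical input
process_option(copy);                                                // hypothetical consumer
os::free((void*)copy);                                               // still released with os::free()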
@ -553,7 +562,11 @@ static u_char* testMalloc(size_t alloc_size) {
|
|||||||
return ptr;
|
return ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
|
void* os::malloc(size_t size, MEMFLAGS flags) {
|
||||||
|
return os::malloc(size, flags, CALLER_PC);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
|
||||||
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
|
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
|
||||||
NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
|
NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
|
||||||
|
|
||||||
@ -579,11 +592,15 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
|
|||||||
size = 1;
|
size = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NMT support
|
||||||
|
NMT_TrackingLevel level = MemTracker::tracking_level();
|
||||||
|
size_t nmt_header_size = MemTracker::malloc_header_size(level);
|
||||||
|
|
||||||
#ifndef ASSERT
|
#ifndef ASSERT
|
||||||
const size_t alloc_size = size;
|
const size_t alloc_size = size + nmt_header_size;
|
||||||
#else
|
#else
|
||||||
const size_t alloc_size = GuardedMemory::get_total_size(size);
|
const size_t alloc_size = GuardedMemory::get_total_size(size + nmt_header_size);
|
||||||
if (size > alloc_size) { // Check for rollover.
|
if (size + nmt_header_size > alloc_size) { // Check for rollover.
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
@ -602,7 +619,7 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
// Wrap memory with guard
|
// Wrap memory with guard
|
||||||
GuardedMemory guarded(ptr, size);
|
GuardedMemory guarded(ptr, size + nmt_header_size);
|
||||||
ptr = guarded.get_user_ptr();
|
ptr = guarded.get_user_ptr();
|
||||||
#endif
|
#endif
|
||||||
if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
|
if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
|
||||||
@ -615,48 +632,50 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// we do not track guard memory
|
// we do not track guard memory
|
||||||
MemTracker::record_malloc((address)ptr, size, memflags, caller == 0 ? CALLER_PC : caller);
|
return MemTracker::record_malloc((address)ptr, size, memflags, stack, level);
|
||||||
|
|
||||||
return ptr;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) {
|
||||||
|
return os::realloc(memblock, size, flags, CALLER_PC);
|
||||||
|
}
|
||||||
|
|
||||||
void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller) {
|
void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
|
||||||
#ifndef ASSERT
|
#ifndef ASSERT
|
||||||
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
|
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
|
||||||
NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
|
NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
|
||||||
MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
|
// NMT support
|
||||||
void* ptr = ::realloc(memblock, size);
|
void* membase = MemTracker::record_free(memblock);
|
||||||
if (ptr != NULL) {
|
NMT_TrackingLevel level = MemTracker::tracking_level();
|
||||||
tkr.record((address)memblock, (address)ptr, size, memflags,
|
size_t nmt_header_size = MemTracker::malloc_header_size(level);
|
||||||
caller == 0 ? CALLER_PC : caller);
|
void* ptr = ::realloc(membase, size + nmt_header_size);
|
||||||
} else {
|
return MemTracker::record_malloc(ptr, size, memflags, stack, level);
|
||||||
tkr.discard();
|
|
||||||
}
|
|
||||||
return ptr;
|
|
||||||
#else
|
#else
|
||||||
if (memblock == NULL) {
|
if (memblock == NULL) {
|
||||||
return os::malloc(size, memflags, (caller == 0 ? CALLER_PC : caller));
|
return os::malloc(size, memflags, stack);
|
||||||
}
|
}
|
||||||
if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
|
if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
|
||||||
tty->print_cr("os::realloc caught " PTR_FORMAT, memblock);
|
tty->print_cr("os::realloc caught " PTR_FORMAT, memblock);
|
||||||
breakpoint();
|
breakpoint();
|
||||||
}
|
}
|
||||||
verify_memory(memblock);
|
// NMT support
|
||||||
|
void* membase = MemTracker::malloc_base(memblock);
|
||||||
|
verify_memory(membase);
|
||||||
NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
|
NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
|
||||||
if (size == 0) {
|
if (size == 0) {
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
// always move the block
|
// always move the block
|
||||||
void* ptr = os::malloc(size, memflags, caller == 0 ? CALLER_PC : caller);
|
void* ptr = os::malloc(size, memflags, stack);
|
||||||
if (PrintMalloc) {
|
if (PrintMalloc) {
|
||||||
tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
|
tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
|
||||||
}
|
}
|
||||||
// Copy to new memory if malloc didn't fail
|
// Copy to new memory if malloc didn't fail
|
||||||
if ( ptr != NULL ) {
|
if ( ptr != NULL ) {
|
||||||
GuardedMemory guarded(memblock);
|
GuardedMemory guarded(MemTracker::malloc_base(memblock));
|
||||||
memcpy(ptr, memblock, MIN2(size, guarded.get_user_size()));
|
// Guard's user data contains NMT header
|
||||||
if (paranoid) verify_memory(ptr);
|
size_t memblock_size = guarded.get_user_size() - MemTracker::malloc_header_size(memblock);
|
||||||
|
memcpy(ptr, memblock, MIN2(size, memblock_size));
|
||||||
|
if (paranoid) verify_memory(MemTracker::malloc_base(ptr));
|
||||||
if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
|
if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
|
||||||
tty->print_cr("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
|
tty->print_cr("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
|
||||||
breakpoint();
|
breakpoint();
|
||||||
@ -669,7 +688,6 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller
|
|||||||
|
|
||||||
|
|
||||||
void os::free(void *memblock, MEMFLAGS memflags) {
|
void os::free(void *memblock, MEMFLAGS memflags) {
|
||||||
address trackp = (address) memblock;
|
|
||||||
NOT_PRODUCT(inc_stat_counter(&num_frees, 1));
|
NOT_PRODUCT(inc_stat_counter(&num_frees, 1));
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
if (memblock == NULL) return;
|
if (memblock == NULL) return;
|
||||||
@ -677,20 +695,22 @@ void os::free(void *memblock, MEMFLAGS memflags) {
|
|||||||
if (tty != NULL) tty->print_cr("os::free caught " PTR_FORMAT, memblock);
|
if (tty != NULL) tty->print_cr("os::free caught " PTR_FORMAT, memblock);
|
||||||
breakpoint();
|
breakpoint();
|
||||||
}
|
}
|
||||||
verify_memory(memblock);
|
void* membase = MemTracker::record_free(memblock);
|
||||||
|
verify_memory(membase);
|
||||||
NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
|
NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
|
||||||
|
|
||||||
GuardedMemory guarded(memblock);
|
GuardedMemory guarded(membase);
|
||||||
size_t size = guarded.get_user_size();
|
size_t size = guarded.get_user_size();
|
||||||
inc_stat_counter(&free_bytes, size);
|
inc_stat_counter(&free_bytes, size);
|
||||||
memblock = guarded.release_for_freeing();
|
membase = guarded.release_for_freeing();
|
||||||
if (PrintMalloc && tty != NULL) {
|
if (PrintMalloc && tty != NULL) {
|
||||||
fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)memblock);
|
fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)membase);
|
||||||
}
|
}
|
||||||
|
::free(membase);
|
||||||
|
#else
|
||||||
|
void* membase = MemTracker::record_free(memblock);
|
||||||
|
::free(membase);
|
||||||
#endif
|
#endif
|
||||||
MemTracker::record_free(trackp, memflags);
|
|
||||||
|
|
||||||
::free(memblock);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void os::init_random(long initval) {
|
void os::init_random(long initval) {
|
||||||
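The malloc/realloc/free changes above all follow one layout rule: when tracking is enabled, a small NMT header sits in front of the block returned by ::malloc, and record_malloc()/malloc_base()/record_free() translate between the user pointer and the real allocation base. A simplified sketch of that pointer arithmetic; the header layout shown is illustrative, not the actual MallocTracker format:

#include <cstddef>
#include <cstdint>

// Hypothetical fixed-size header recorded in front of every tracked block.
struct MallocHeaderSketch {
  uint32_t size;    // user-visible size
  uint32_t flags;   // memory type / bookkeeping
};

// base -> user pointer: skip the header (roughly what record_malloc returns).
inline void* user_ptr_sketch(void* base, size_t header_size) {
  return (char*)base + header_size;
}

// user pointer -> base: step back over the header (what malloc_base/record_free
// must do before the block can be handed to ::realloc or ::free).
inline void* base_ptr_sketch(void* user, size_t header_size) {
  return (char*)user - header_size;
}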
@ -1404,7 +1424,7 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) {
|
|||||||
char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
|
char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
|
||||||
char* result = pd_reserve_memory(bytes, addr, alignment_hint);
|
char* result = pd_reserve_memory(bytes, addr, alignment_hint);
|
||||||
if (result != NULL) {
|
if (result != NULL) {
|
||||||
MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
|
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
|
||||||
}
|
}
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
@ -1414,7 +1434,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
|
|||||||
MEMFLAGS flags) {
|
MEMFLAGS flags) {
|
||||||
char* result = pd_reserve_memory(bytes, addr, alignment_hint);
|
char* result = pd_reserve_memory(bytes, addr, alignment_hint);
|
||||||
if (result != NULL) {
|
if (result != NULL) {
|
||||||
MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
|
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
|
||||||
MemTracker::record_virtual_memory_type((address)result, flags);
|
MemTracker::record_virtual_memory_type((address)result, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1424,7 +1444,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
|
|||||||
char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
|
char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
|
||||||
char* result = pd_attempt_reserve_memory_at(bytes, addr);
|
char* result = pd_attempt_reserve_memory_at(bytes, addr);
|
||||||
if (result != NULL) {
|
if (result != NULL) {
|
||||||
MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
|
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
|
||||||
}
|
}
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
@ -1464,23 +1484,29 @@ void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool os::uncommit_memory(char* addr, size_t bytes) {
|
bool os::uncommit_memory(char* addr, size_t bytes) {
|
||||||
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
|
bool res;
|
||||||
bool res = pd_uncommit_memory(addr, bytes);
|
if (MemTracker::tracking_level() > NMT_minimal) {
|
||||||
if (res) {
|
Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
|
||||||
tkr.record((address)addr, bytes);
|
res = pd_uncommit_memory(addr, bytes);
|
||||||
|
if (res) {
|
||||||
|
tkr.record((address)addr, bytes);
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
tkr.discard();
|
res = pd_uncommit_memory(addr, bytes);
|
||||||
}
|
}
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool os::release_memory(char* addr, size_t bytes) {
|
bool os::release_memory(char* addr, size_t bytes) {
|
||||||
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
|
bool res;
|
||||||
bool res = pd_release_memory(addr, bytes);
|
if (MemTracker::tracking_level() > NMT_minimal) {
|
||||||
if (res) {
|
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
|
||||||
tkr.record((address)addr, bytes);
|
res = pd_release_memory(addr, bytes);
|
||||||
|
if (res) {
|
||||||
|
tkr.record((address)addr, bytes);
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
tkr.discard();
|
res = pd_release_memory(addr, bytes);
|
||||||
}
|
}
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
@ -1491,7 +1517,7 @@ char* os::map_memory(int fd, const char* file_name, size_t file_offset,
|
|||||||
bool allow_exec) {
|
bool allow_exec) {
|
||||||
char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
|
char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
|
||||||
if (result != NULL) {
|
if (result != NULL) {
|
||||||
MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, mtNone, CALLER_PC);
|
MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
|
||||||
}
|
}
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
@ -1504,12 +1530,15 @@ char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool os::unmap_memory(char *addr, size_t bytes) {
|
bool os::unmap_memory(char *addr, size_t bytes) {
|
||||||
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
|
bool result;
|
||||||
bool result = pd_unmap_memory(addr, bytes);
|
if (MemTracker::tracking_level() > NMT_minimal) {
|
||||||
if (result) {
|
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
|
||||||
tkr.record((address)addr, bytes);
|
result = pd_unmap_memory(addr, bytes);
|
||||||
|
if (result) {
|
||||||
|
tkr.record((address)addr, bytes);
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
tkr.discard();
|
result = pd_unmap_memory(addr, bytes);
|
||||||
}
|
}
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
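All three paths above (uncommit, release, unmap) now share the same shape; condensed into one sketch for reference, using the release case:

// Sketch of the common shape after this change: take the Tracker only when
// tracking is above NMT_minimal, and record only when the platform call succeeds.
bool tracked_release_sketch(char* addr, size_t bytes) {
  bool res;
  if (MemTracker::tracking_level() > NMT_minimal) {
    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    res = pd_release_memory(addr, bytes);
    if (res) {
      tkr.record((address)addr, bytes);
    }
  } else {
    res = pd_release_memory(addr, bytes);   // tracking off or minimal: no bookkeeping
  }
  return res;
}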
@@ -65,6 +65,8 @@ class JavaThread;
 class Event;
 class DLL;
 class FileHandle;
+class NativeCallStack;
 
 template<class E> class GrowableArray;
 
 // %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose
@@ -96,9 +98,11 @@ const bool ExecMem = true;
 // Typedef for structured exception handling support
 typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
 
+class MallocTracker;
+
 class os: AllStatic {
   friend class VMStructs;
+  friend class MallocTracker;
  public:
   enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
 
@@ -160,7 +164,10 @@ class os: AllStatic {
   // Override me as needed
   static int file_name_strcmp(const char* s1, const char* s2);
 
+  // get/unset environment variable
   static bool getenv(const char* name, char* buffer, int len);
+  static bool unsetenv(const char* name);
+
   static bool have_special_privileges();
 
   static jlong javaTimeMillis();
@@ -207,8 +214,13 @@ class os: AllStatic {
 
   // Interface for detecting multiprocessor system
   static inline bool is_MP() {
+#if !INCLUDE_NMT
     assert(_processor_count > 0, "invalid processor count");
     return _processor_count > 1 || AssumeMP;
+#else
+    // NMT needs atomic operations before this initialization.
+    return true;
+#endif
   }
   static julong available_memory();
   static julong physical_memory();
@@ -635,15 +647,25 @@ class os: AllStatic {
   static void* thread_local_storage_at(int index);
   static void free_thread_local_storage(int index);
 
-  // Stack walk
-  static address get_caller_pc(int n = 0);
+  // Retrieve native stack frames.
+  // Parameter:
+  //   stack:  an array to storage stack pointers.
+  //   frames: size of above array.
+  //   toSkip: number of stack frames to skip at the beginning.
+  // Return: number of stack frames captured.
+  static int get_native_stack(address* stack, int size, int toSkip = 0);
 
   // General allocation (must be MT-safe)
-  static void* malloc  (size_t size, MEMFLAGS flags, address caller_pc = 0);
-  static void* realloc (void *memblock, size_t size, MEMFLAGS flags, address caller_pc = 0);
+  static void* malloc  (size_t size, MEMFLAGS flags, const NativeCallStack& stack);
+  static void* malloc  (size_t size, MEMFLAGS flags);
+  static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack);
+  static void* realloc (void *memblock, size_t size, MEMFLAGS flag);
+
   static void  free    (void *memblock, MEMFLAGS flags = mtNone);
   static bool  check_heap(bool force = false); // verify C heap integrity
   static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup
+  // Like strdup, but exit VM when strdup() returns NULL
+  static char* strdup_check_oom(const char*, MEMFLAGS flags = mtInternal);
 
 #ifndef PRODUCT
   static julong num_mallocs; // # of calls to malloc/realloc
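A short illustration of how the new os::get_native_stack() declaration is intended to be called; buffer size and skip count here are arbitrary:

// Capture up to 4 return addresses, skipping the current frame itself.
address frames[4] = { NULL, NULL, NULL, NULL };
int captured = os::get_native_stack(frames, 4, 1 /* toSkip */);
for (int i = 0; i < captured; i++) {
  // frames[i] holds a raw pc that NMT can later attribute to an allocation site.
}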
@@ -52,7 +52,6 @@
 #include "runtime/sweeper.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/thread.inline.hpp"
-#include "services/memTracker.hpp"
 #include "services/runtimeService.hpp"
 #include "utilities/events.hpp"
 #include "utilities/macros.hpp"
@@ -527,10 +526,6 @@ void SafepointSynchronize::do_cleanup_tasks() {
     TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);
     ClassLoaderDataGraph::purge_if_needed();
   }
-
-  if (MemTracker::is_on()) {
-    MemTracker::sync();
-  }
 }
 
 
@@ -297,8 +297,7 @@ void Thread::record_stack_base_and_size() {
 #if INCLUDE_NMT
   // record thread's native stack, stack grows downward
   address stack_low_addr = stack_base() - stack_size();
-  MemTracker::record_thread_stack(stack_low_addr, stack_size(), this,
-    CURRENT_PC);
+  MemTracker::record_thread_stack(stack_low_addr, stack_size());
 #endif // INCLUDE_NMT
 }
 
@@ -316,7 +315,7 @@ Thread::~Thread() {
 #if INCLUDE_NMT
   if (_stack_base != NULL) {
     address low_stack_addr = stack_base() - stack_size();
-    MemTracker::release_thread_stack(low_stack_addr, stack_size(), this);
+    MemTracker::release_thread_stack(low_stack_addr, stack_size());
 #ifdef ASSERT
     set_stack_base(NULL);
 #endif
@@ -1425,9 +1424,6 @@ void JavaThread::initialize() {
   set_monitor_chunks(NULL);
   set_next(NULL);
   set_thread_state(_thread_new);
-#if INCLUDE_NMT
-  set_recorder(NULL);
-#endif
   _terminated = _not_terminated;
   _privileged_stack_top = NULL;
   _array_for_gc = NULL;
@@ -1503,7 +1499,6 @@ JavaThread::JavaThread(bool is_attaching_via_jni) :
     _jni_attach_state = _not_attaching_via_jni;
   }
   assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
-  _safepoint_visible = false;
 }
 
 bool JavaThread::reguard_stack(address cur_sp) {
@@ -1566,7 +1561,6 @@ JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
   thr_type = entry_point == &compiler_thread_entry ? os::compiler_thread :
                                                      os::java_thread;
   os::create_thread(this, thr_type, stack_sz);
-  _safepoint_visible = false;
   // The _osthread may be NULL here because we ran out of memory (too many threads active).
   // We need to throw and OutOfMemoryError - however we cannot do this here because the caller
   // may hold a lock and all locks must be unlocked before throwing the exception (throwing
@@ -1584,13 +1578,6 @@ JavaThread::~JavaThread() {
     tty->print_cr("terminate thread %p", this);
   }
 
-  // By now, this thread should already be invisible to safepoint,
-  // and its per-thread recorder also collected.
-  assert(!is_safepoint_visible(), "wrong state");
-#if INCLUDE_NMT
-  assert(get_recorder() == NULL, "Already collected");
-#endif // INCLUDE_NMT
-
   // JSR166 -- return the parker to the free list
   Parker::Release(_parker);
   _parker = NULL;
@@ -3359,11 +3346,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
   // initialize TLS
   ThreadLocalStorage::init();
 
-  // Bootstrap native memory tracking, so it can start recording memory
-  // activities before worker thread is started. This is the first phase
-  // of bootstrapping, VM is currently running in single-thread mode.
-  MemTracker::bootstrap_single_thread();
-
   // Initialize output stream logging
   ostream_init_log();
 
@@ -3414,9 +3396,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
   // Initialize Java-Level synchronization subsystem
   ObjectMonitor::Initialize();
 
-  // Second phase of bootstrapping, VM is about entering multi-thread mode
-  MemTracker::bootstrap_multi_thread();
-
   // Initialize global modules
   jint status = init_globals();
   if (status != JNI_OK) {
@@ -3438,9 +3417,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
   // real raw monitor. VM is setup enough here for raw monitor enter.
   JvmtiExport::transition_pending_onload_raw_monitors();
 
-  // Fully start NMT
-  MemTracker::start();
-
   // Create the VMThread
   { TraceTime timer("Start VMThread", TraceStartupTime);
     VMThread::create();
@@ -3995,8 +3971,6 @@ void Threads::add(JavaThread* p, bool force_daemon) {
     daemon = false;
   }
 
-  p->set_safepoint_visible(true);
-
   ThreadService::add_thread(p, daemon);
 
   // Possible GC point.
@@ -4042,13 +4016,6 @@ void Threads::remove(JavaThread* p) {
     // to do callbacks into the safepoint code. However, the safepoint code is not aware
     // of this thread since it is removed from the queue.
     p->set_terminated_value();
-
-    // Now, this thread is not visible to safepoint
-    p->set_safepoint_visible(false);
-    // once the thread becomes safepoint invisible, we can not use its per-thread
-    // recorder. And Threads::do_threads() no longer walks this thread, so we have
-    // to release its per-thread recorder here.
-    MemTracker::thread_exiting(p);
   } // unlock Threads_lock
 
   // Since Events::log uses a lock, we grab it outside the Threads_lock
@@ -43,10 +43,6 @@
 #include "runtime/unhandledOops.hpp"
 #include "utilities/macros.hpp"
 
-#if INCLUDE_NMT
-#include "services/memRecorder.hpp"
-#endif // INCLUDE_NMT
-
 #include "trace/traceBackend.hpp"
 #include "trace/traceMacros.hpp"
 #include "utilities/exceptions.hpp"
@@ -1036,16 +1032,6 @@ class JavaThread: public Thread {
   bool do_not_unlock_if_synchronized() { return _do_not_unlock_if_synchronized; }
   void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
 
-#if INCLUDE_NMT
-  // native memory tracking
-  inline MemRecorder* get_recorder() const { return (MemRecorder*)_recorder; }
-  inline void set_recorder(MemRecorder* rc) { _recorder = rc; }
-
- private:
-  // per-thread memory recorder
-  MemRecorder* volatile _recorder;
-#endif // INCLUDE_NMT
-
   // Suspend/resume support for JavaThread
  private:
   inline void set_ext_suspended();
@@ -1485,19 +1471,6 @@ public:
     return result;
   }
 
-  // NMT (Native memory tracking) support.
-  // This flag helps NMT to determine if this JavaThread will be blocked
-  // at safepoint. If not, ThreadCritical is needed for writing memory records.
-  // JavaThread is only safepoint visible when it is in Threads' thread list,
-  // it is not visible until it is added to the list and becomes invisible
-  // once it is removed from the list.
- public:
-  bool is_safepoint_visible() const { return _safepoint_visible; }
-  void set_safepoint_visible(bool visible) { _safepoint_visible = visible; }
- private:
-  bool _safepoint_visible;
-
-  // Static operations
  public:
   // Returns the running thread as a JavaThread
   static inline JavaThread* current();
@@ -52,6 +52,7 @@
 #include "interpreter/bytecodes.hpp"
 #include "interpreter/interpreter.hpp"
 #include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
 #include "memory/cardTableRS.hpp"
 #include "memory/defNewGeneration.hpp"
 #include "memory/freeBlockDictionary.hpp"
@@ -93,6 +94,7 @@
 #include "runtime/globals.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/os.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/serviceThread.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -3296,14 +3298,14 @@ static int recursiveFindType(VMTypeEntry* origtypes, const char* typeName, bool
     }
   }
   if (strstr(typeName, " const") == typeName + len - 6) {
-    char * s = strdup(typeName);
+    char * s = os::strdup_check_oom(typeName);
     s[len - 6] = '\0';
     // tty->print_cr("checking \"%s\" for \"%s\"", s, typeName);
     if (recursiveFindType(origtypes, s, true) == 1) {
-      free(s);
+      os::free(s);
       return 1;
     }
-    free(s);
+    os::free(s);
   }
   if (!isRecurse) {
     tty->print_cr("type \"%s\" not found", typeName);
hotspot/src/share/vm/services/allocationSite.hpp (new file, 57 lines)
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
+#define SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/nativeCallStack.hpp"
+
+// Allocation site represents a code path that makes a memory
+// allocation
+template <class E> class AllocationSite VALUE_OBJ_CLASS_SPEC {
+ private:
+  NativeCallStack _call_stack;
+  E e;
+ public:
+  AllocationSite(const NativeCallStack& stack) : _call_stack(stack) { }
+  int hash() const { return _call_stack.hash(); }
+  bool equals(const NativeCallStack& stack) const {
+    return _call_stack.equals(stack);
+  }
+
+  bool equals(const AllocationSite<E>& other) const {
+    return other.equals(_call_stack);
+  }
+
+  const NativeCallStack* call_stack() const {
+    return &_call_stack;
+  }
+
+  // Information regarding this allocation
+  E* data() { return &e; }
+  const E* peek() const { return &e; }
+};
+
+#endif // SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
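AllocationSite<E> simply pairs a captured NativeCallStack with an arbitrary payload E. A minimal illustration with a made-up payload type (not part of the commit):

// Hypothetical payload: a bare byte counter.
struct BytesCounterSketch {
  size_t bytes;
  BytesCounterSketch() : bytes(0) { }
};

void record_example(const NativeCallStack& stack, size_t size) {
  AllocationSite<BytesCounterSketch> site(stack);  // keyed by the captured stack
  site.data()->bytes += size;                      // payload is mutable through data()
  // site.hash() and site.equals(...) are what a table such as MallocSiteTable uses.
}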
261
hotspot/src/share/vm/services/mallocSiteTable.cpp
Normal file
261
hotspot/src/share/vm/services/mallocSiteTable.cpp
Normal file
@ -0,0 +1,261 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
#include "precompiled.hpp"
|
||||||
|
|
||||||
|
|
||||||
|
#include "memory/allocation.inline.hpp"
|
||||||
|
#include "runtime/atomic.hpp"
|
||||||
|
#include "services/mallocSiteTable.hpp"
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Early os::malloc() calls come from initializations of static variables, long before entering any
|
||||||
|
* VM code. Upon the arrival of the first os::malloc() call, malloc site hashtable has to be
|
||||||
|
* initialized, along with the allocation site for the hashtable entries.
|
||||||
|
* To ensure that malloc site hashtable can be initialized without triggering any additional os::malloc()
|
||||||
|
* call, the hashtable bucket array and hashtable entry allocation site have to be static.
|
||||||
|
* It is not a problem for hashtable bucket, since it is an array of pointer type, C runtime just
|
||||||
|
* allocates a block memory and zero the memory for it.
|
||||||
|
* But for hashtable entry allocation site object, things get tricky. C runtime not only allocates
|
||||||
|
* memory for it, but also calls its constructor at some later time. If we initialize the allocation site
|
||||||
|
* at the first os::malloc() call, the object will be reinitialized when its constructor is called
|
||||||
|
* by C runtime.
|
||||||
|
* To workaround above issue, we declare a static size_t array with the size of the CallsiteHashtableEntry,
|
||||||
|
* the memory is used to instantiate CallsiteHashtableEntry for the hashtable entry allocation site.
|
||||||
|
* Given it is a primitive type array, C runtime will do nothing other than assign the memory block for the variable,
|
||||||
|
* which is exactly what we want.
|
||||||
|
* The same trick is also applied to create NativeCallStack object for CallsiteHashtableEntry memory allocation.
|
||||||
|
*
|
||||||
|
* Note: C++ object usually aligns to particular alignment, depends on compiler implementation, we declare
|
||||||
|
* the memory as size_t arrays, to ensure the memory is aligned to native machine word alignment.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Reserve enough memory for NativeCallStack and MallocSiteHashtableEntry objects
|
||||||
|
size_t MallocSiteTable::_hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
|
||||||
|
size_t MallocSiteTable::_hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
|
||||||
|
|
||||||
|
// Malloc site hashtable buckets
|
||||||
|
MallocSiteHashtableEntry* MallocSiteTable::_table[MallocSiteTable::table_size];
|
||||||
|
|
||||||
|
// concurrent access counter
|
||||||
|
volatile int MallocSiteTable::_access_count = 0;
|
||||||
|
|
||||||
|
// Tracking hashtable contention
|
||||||
|
NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;)
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Initialize malloc site table.
|
||||||
|
* Hashtable entry is malloc'd, so it can cause infinite recursion.
|
||||||
|
* To avoid above problem, we pre-initialize a hash entry for
|
||||||
|
* this allocation site.
|
||||||
|
* The method is called during C runtime static variable initialization
|
||||||
|
* time, it is in single-threaded mode from JVM perspective.
|
||||||
|
*/
|
||||||
|
bool MallocSiteTable::initialize() {
|
||||||
|
assert(sizeof(_hash_entry_allocation_stack) >= sizeof(NativeCallStack), "Sanity Check");
|
||||||
|
assert(sizeof(_hash_entry_allocation_site) >= sizeof(MallocSiteHashtableEntry),
|
||||||
|
"Sanity Check");
|
||||||
|
assert((size_t)table_size <= MAX_MALLOCSITE_TABLE_SIZE, "Hashtable overflow");
|
||||||
|
|
||||||
|
// Fake the call stack for hashtable entry allocation
|
||||||
|
assert(NMT_TrackingStackDepth > 1, "At least one tracking stack");
|
||||||
|
|
||||||
|
// Create pseudo call stack for hashtable entry allocation
|
||||||
|
address pc[3];
|
||||||
|
if (NMT_TrackingStackDepth >= 3) {
|
||||||
|
pc[2] = (address)MallocSiteTable::allocation_at;
|
||||||
|
}
|
||||||
|
if (NMT_TrackingStackDepth >= 2) {
|
||||||
|
pc[1] = (address)MallocSiteTable::lookup_or_add;
|
||||||
|
}
|
||||||
|
pc[0] = (address)MallocSiteTable::new_entry;
|
||||||
|
|
||||||
|
// Instantiate NativeCallStack object, have to use placement new operator. (see comments above)
|
||||||
|
NativeCallStack* stack = ::new ((void*)_hash_entry_allocation_stack)
|
||||||
|
NativeCallStack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth)));
|
||||||
|
|
||||||
|
// Instantiate hash entry for hashtable entry allocation callsite
|
||||||
|
MallocSiteHashtableEntry* entry = ::new ((void*)_hash_entry_allocation_site)
|
||||||
|
MallocSiteHashtableEntry(*stack);
|
||||||
|
|
||||||
|
// Add the allocation site to hashtable.
|
||||||
|
int index = hash_to_index(stack->hash());
|
||||||
|
_table[index] = entry;
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Walks entries in the hashtable.
|
||||||
|
// It stops walk if the walker returns false.
|
||||||
|
bool MallocSiteTable::walk(MallocSiteWalker* walker) {
|
||||||
|
MallocSiteHashtableEntry* head;
|
||||||
|
for (int index = 0; index < table_size; index ++) {
|
||||||
|
head = _table[index];
|
||||||
|
while (head != NULL) {
|
||||||
|
if (!walker->do_malloc_site(head->peek())) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
head = (MallocSiteHashtableEntry*)head->next();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The hashtable does not have deletion policy on individual entry,
|
||||||
|
* and each linked list node is inserted via compare-and-swap,
|
||||||
|
* so each linked list is stable, the contention only happens
|
||||||
|
* at the end of linked list.
|
||||||
|
* This method should not return NULL under normal circumstance.
|
||||||
|
* If NULL is returned, it indicates:
|
||||||
|
* 1. Out of memory, it cannot allocate new hash entry.
|
||||||
|
* 2. Overflow hash bucket.
|
||||||
|
* Under any of above circumstances, caller should handle the situation.
|
||||||
|
*/
|
||||||
|
MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
|
||||||
|
size_t* pos_idx) {
|
||||||
|
int index = hash_to_index(key.hash());
|
||||||
|
assert(index >= 0, "Negative index");
|
||||||
|
*bucket_idx = (size_t)index;
|
||||||
|
*pos_idx = 0;
|
||||||
|
|
||||||
|
// First entry for this hash bucket
|
||||||
|
if (_table[index] == NULL) {
|
||||||
|
MallocSiteHashtableEntry* entry = new_entry(key);
|
||||||
|
// OOM check
|
||||||
|
if (entry == NULL) return NULL;
|
||||||
|
|
||||||
|
// swap in the head
|
||||||
|
if (Atomic::cmpxchg_ptr((void*)entry, (volatile void *)&_table[index], NULL) == NULL) {
|
||||||
|
return entry->data();
|
||||||
|
}
|
||||||
|
|
||||||
|
delete entry;
|
||||||
|
}
|
||||||
|
|
||||||
|
MallocSiteHashtableEntry* head = _table[index];
|
||||||
|
  while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) {
    MallocSite* site = head->data();
    if (site->equals(key)) {
      // found matched entry
      return head->data();
    }

    if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) {
      MallocSiteHashtableEntry* entry = new_entry(key);
      // OOM check
      if (entry == NULL) return NULL;
      if (head->atomic_insert(entry)) {
        (*pos_idx) ++;
        return entry->data();
      }
      // contended, other thread won
      delete entry;
    }
    head = (MallocSiteHashtableEntry*)head->next();
    (*pos_idx) ++;
  }
  return NULL;
}

// Access malloc site
MallocSite* MallocSiteTable::malloc_site(size_t bucket_idx, size_t pos_idx) {
  assert(bucket_idx < table_size, "Invalid bucket index");
  MallocSiteHashtableEntry* head = _table[bucket_idx];
  for (size_t index = 0; index < pos_idx && head != NULL;
       index ++, head = (MallocSiteHashtableEntry*)head->next());
  assert(head != NULL, "Invalid position index");
  return head->data();
}

// Allocates a MallocSiteHashtableEntry object. The special, pre-installed
// allocation site (hash_entry_allocation_stack) has to be used to avoid
// infinite recursion.
MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key) {
  void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT,
    *hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL);
  return ::new (p) MallocSiteHashtableEntry(key);
}

void MallocSiteTable::reset() {
  for (int index = 0; index < table_size; index ++) {
    MallocSiteHashtableEntry* head = _table[index];
    _table[index] = NULL;
    delete_linked_list(head);
  }
}

void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) {
  MallocSiteHashtableEntry* p;
  while (head != NULL) {
    p = head;
    head = (MallocSiteHashtableEntry*)head->next();
    // The pre-installed hash entry allocation site lives in static storage
    // and must not be deleted.
    if (p != (MallocSiteHashtableEntry*)_hash_entry_allocation_site) {
      delete p;
    }
  }
}

void MallocSiteTable::shutdown() {
  AccessLock locker(&_access_count);
  locker.exclusiveLock();
  reset();
}

bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) {
  assert(walker != NULL, "NULL walker");
  AccessLock locker(&_access_count);
  if (locker.sharedLock()) {
    NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
    return walk(walker);
  }
  return false;
}


void MallocSiteTable::AccessLock::exclusiveLock() {
  jint target;
  jint val;

  assert(_lock_state != ExclusiveLock, "Can only call once");
  assert(*_lock >= 0, "Exclusive lock can not be contended");

  // make the counter negative to block out further shared locks
  do {
    val = *_lock;
    target = _MAGIC_ + val;
  } while (Atomic::cmpxchg(target, _lock, val) != val);

  // wait for all readers to exit
  while (*_lock != _MAGIC_) {
#ifdef _WINDOWS
    os::naked_short_sleep(1);
#else
    os::naked_yield();
#endif
  }
  _lock_state = ExclusiveLock;
}
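The exclusiveLock() above is the writer side of a counter-based reader/writer scheme. A minimal standalone sketch of the same idea, written against std::atomic instead of the HotSpot Atomic:: wrappers; all names here are hypothetical and for illustration only, not part of this changeset:

#include <atomic>
#include <climits>
#include <thread>

class CounterLock {
  std::atomic<int> _count{0};           // >= 0: number of active readers
  static const int MAGIC = INT_MIN;     // large negative offset, added once by the writer
public:
  bool shared_lock() {
    if (_count.fetch_add(1) < 0) {      // writer already requested exclusive access
      _count.fetch_add(-1);
      return false;                     // shared access rejected forever
    }
    return true;
  }
  void shared_unlock() { _count.fetch_add(-1); }

  void exclusive_lock() {               // called at most once, like AccessLock::exclusiveLock()
    int val = _count.load();
    while (!_count.compare_exchange_weak(val, val + MAGIC)) {
      // val is reloaded by compare_exchange_weak on failure; retry
    }
    while (_count.load() != MAGIC) {    // wait for the remaining readers to drain
      std::this_thread::yield();
    }
  }
};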
hotspot/src/share/vm/services/mallocSiteTable.hpp (new file, 268 lines)
@ -0,0 +1,268 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
|
||||||
|
#define SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
|
||||||
|
|
||||||
|
#if INCLUDE_NMT
|
||||||
|
|
||||||
|
#include "memory/allocation.hpp"
|
||||||
|
#include "runtime/atomic.hpp"
|
||||||
|
#include "services/allocationSite.hpp"
|
||||||
|
#include "services/mallocTracker.hpp"
|
||||||
|
#include "services/nmtCommon.hpp"
|
||||||
|
|
||||||
|
// MallocSite represents a code path that eventually calls
|
||||||
|
// os::malloc() to allocate memory
|
||||||
|
class MallocSite : public AllocationSite<MemoryCounter> {
|
||||||
|
public:
|
||||||
|
MallocSite() :
|
||||||
|
AllocationSite<MemoryCounter>(emptyStack) { }
|
||||||
|
|
||||||
|
MallocSite(const NativeCallStack& stack) :
|
||||||
|
AllocationSite<MemoryCounter>(stack) { }
|
||||||
|
|
||||||
|
void allocate(size_t size) { data()->allocate(size); }
|
||||||
|
void deallocate(size_t size) { data()->deallocate(size); }
|
||||||
|
|
||||||
|
// Memory allocated from this code path
|
||||||
|
size_t size() const { return peek()->size(); }
|
||||||
|
  // The number of calls that were made from this code path
|
||||||
|
size_t count() const { return peek()->count(); }
|
||||||
|
};
|
||||||
|
|
||||||
|
// Malloc site hashtable entry
|
||||||
|
class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
|
||||||
|
private:
|
||||||
|
MallocSite _malloc_site;
|
||||||
|
MallocSiteHashtableEntry* _next;
|
||||||
|
|
||||||
|
public:
|
||||||
|
MallocSiteHashtableEntry() : _next(NULL) { }
|
||||||
|
|
||||||
|
MallocSiteHashtableEntry(NativeCallStack stack):
|
||||||
|
_malloc_site(stack), _next(NULL) { }
|
||||||
|
|
||||||
|
inline const MallocSiteHashtableEntry* next() const {
|
||||||
|
return _next;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Insert an entry atomically.
|
||||||
|
// Return true if the entry is inserted successfully.
|
||||||
|
  // The operation may fail due to contention from another thread.
|
||||||
|
bool atomic_insert(const MallocSiteHashtableEntry* entry) {
|
||||||
|
return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next,
|
||||||
|
NULL) == NULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
void set_callsite(const MallocSite& site) {
|
||||||
|
_malloc_site = site;
|
||||||
|
}
|
||||||
|
|
||||||
|
inline const MallocSite* peek() const { return &_malloc_site; }
|
||||||
|
inline MallocSite* data() { return &_malloc_site; }
|
||||||
|
|
||||||
|
inline long hash() const { return _malloc_site.hash(); }
|
||||||
|
inline bool equals(const NativeCallStack& stack) const {
|
||||||
|
return _malloc_site.equals(stack);
|
||||||
|
}
|
||||||
|
// Allocation/deallocation on this allocation site
|
||||||
|
inline void allocate(size_t size) { _malloc_site.allocate(size); }
|
||||||
|
inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
|
||||||
|
// Memory counters
|
||||||
|
inline size_t size() const { return _malloc_site.size(); }
|
||||||
|
inline size_t count() const { return _malloc_site.count(); }
|
||||||
|
};
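atomic_insert() above is an append-if-still-tail operation: the new entry is published only if this entry's next pointer is still NULL, and the loser of the race deletes its candidate, exactly as the lookup_or_add() loop in mallocSiteTable.cpp does. A minimal self-contained sketch of that pattern (hypothetical Node type, std::atomic in place of Atomic::cmpxchg_ptr, illustration only):

#include <atomic>

struct Node {
  int value;
  std::atomic<Node*> next;
  explicit Node(int v) : value(v), next(nullptr) { }
};

// Returns the node holding 'v', appending a new tail node if necessary.
Node* find_or_append(Node* head, int v) {
  Node* cur = head;
  for (;;) {
    if (cur->value == v) return cur;
    Node* next = cur->next.load();
    if (next == nullptr) {
      Node* candidate = new Node(v);
      Node* expected = nullptr;
      if (cur->next.compare_exchange_strong(expected, candidate)) {
        return candidate;               // this thread won the race
      }
      delete candidate;                 // another thread appended first
      next = cur->next.load();          // re-read the new tail and continue
    }
    cur = next;
  }
}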
|
||||||
|
|
||||||
|
// The walker walks every entry on MallocSiteTable
|
||||||
|
class MallocSiteWalker : public StackObj {
|
||||||
|
public:
|
||||||
|
virtual bool do_malloc_site(const MallocSite* e) { return false; }
|
||||||
|
};
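MallocSiteWalker is the visitor handed to walk_malloc_site(). A hedged sketch of a custom walker (hypothetical name, illustration only) that sums the outstanding malloc size across all recorded sites:

class TotalSizeWalker : public MallocSiteWalker {
 private:
  size_t _total;
 public:
  TotalSizeWalker() : _total(0) { }
  virtual bool do_malloc_site(const MallocSite* site) {
    _total += site->size();   // outstanding bytes recorded at this call site
    return true;              // keep walking
  }
  size_t total() const { return _total; }
};
// Usage: TotalSizeWalker w; MallocSiteTable::walk_malloc_site(&w);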
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Native memory tracking call site table.
|
||||||
|
* The table is only needed when detail tracking is enabled.
|
||||||
|
*/
|
||||||
|
class MallocSiteTable : AllStatic {
|
||||||
|
private:
|
||||||
|
  // The number of hash buckets in this hashtable. The number should
  // be tuned if malloc activity changes significantly.
  // The statistics can be obtained via jcmd:
  //   jcmd <pid> VM.native_memory statistics

  // Currently, the (number of buckets / number of entries) ratio is
  // about 1 / 6
|
||||||
|
enum {
|
||||||
|
table_base_size = 128, // The base size is calculated from statistics to give
|
||||||
|
// table ratio around 1:6
|
||||||
|
table_size = (table_base_size * NMT_TrackingStackDepth - 1)
|
||||||
|
};
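  // For example, assuming NMT_TrackingStackDepth is 4 (defined in nmtCommon.hpp, not shown in this hunk),
  // table_size works out to 128 * 4 - 1 = 511 buckets.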
|
||||||
|
|
||||||
|
|
||||||
|
// This is a very special lock, that allows multiple shared accesses (sharedLock), but
|
||||||
|
// once exclusive access (exclusiveLock) is requested, all shared accesses are
|
||||||
|
// rejected forever.
|
||||||
|
class AccessLock : public StackObj {
|
||||||
|
enum LockState {
|
||||||
|
NoLock,
|
||||||
|
SharedLock,
|
||||||
|
ExclusiveLock
|
||||||
|
};
|
||||||
|
|
||||||
|
private:
|
||||||
|
    // A very large negative number. The only way to "overflow"
    // this number is to have more than -min_jint threads in
    // this process, which is not going to happen in the foreseeable future.
|
||||||
|
const static int _MAGIC_ = min_jint;
|
||||||
|
|
||||||
|
LockState _lock_state;
|
||||||
|
volatile int* _lock;
|
||||||
|
public:
|
||||||
|
AccessLock(volatile int* lock) :
|
||||||
|
_lock(lock), _lock_state(NoLock) {
|
||||||
|
}
|
||||||
|
|
||||||
|
~AccessLock() {
|
||||||
|
if (_lock_state == SharedLock) {
|
||||||
|
Atomic::dec((volatile jint*)_lock);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Acquire shared lock.
|
||||||
|
// Return true if shared access is granted.
|
||||||
|
inline bool sharedLock() {
|
||||||
|
jint res = Atomic::add(1, _lock);
|
||||||
|
if (res < 0) {
|
||||||
|
Atomic::add(-1, _lock);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
_lock_state = SharedLock;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
// Acquire exclusive lock
|
||||||
|
void exclusiveLock();
|
||||||
|
};
|
||||||
|
|
||||||
|
public:
|
||||||
|
static bool initialize();
|
||||||
|
static void shutdown();
|
||||||
|
|
||||||
|
NOT_PRODUCT(static int access_peak_count() { return _peak_count; })
|
||||||
|
|
||||||
|
// Number of hash buckets
|
||||||
|
static inline int hash_buckets() { return (int)table_size; }
|
||||||
|
|
||||||
|
// Access and copy a call stack from this table. Shared lock should be
|
||||||
|
  // acquired before accessing the entry.
|
||||||
|
static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
|
||||||
|
size_t pos_idx) {
|
||||||
|
AccessLock locker(&_access_count);
|
||||||
|
if (locker.sharedLock()) {
|
||||||
|
NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
|
||||||
|
MallocSite* site = malloc_site(bucket_idx, pos_idx);
|
||||||
|
if (site != NULL) {
|
||||||
|
stack = *site->call_stack();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record a new allocation from specified call path.
|
||||||
|
// Return true if the allocation is recorded successfully, bucket_idx
|
||||||
|
// and pos_idx are also updated to indicate the entry where the allocation
|
||||||
|
// information was recorded.
|
||||||
|
  // A return value of false only occurs under rare scenarios:
|
||||||
|
// 1. out of memory
|
||||||
|
// 2. overflow hash bucket
|
||||||
|
static inline bool allocation_at(const NativeCallStack& stack, size_t size,
|
||||||
|
size_t* bucket_idx, size_t* pos_idx) {
|
||||||
|
AccessLock locker(&_access_count);
|
||||||
|
if (locker.sharedLock()) {
|
||||||
|
NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
|
||||||
|
MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx);
|
||||||
|
if (site != NULL) site->allocate(size);
|
||||||
|
return site != NULL;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record memory deallocation. bucket_idx and pos_idx indicate where the allocation
|
||||||
|
// information was recorded.
|
||||||
|
static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
|
||||||
|
AccessLock locker(&_access_count);
|
||||||
|
if (locker.sharedLock()) {
|
||||||
|
NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
|
||||||
|
MallocSite* site = malloc_site(bucket_idx, pos_idx);
|
||||||
|
if (site != NULL) {
|
||||||
|
site->deallocate(size);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Walk this table.
|
||||||
|
static bool walk_malloc_site(MallocSiteWalker* walker);
|
||||||
|
|
||||||
|
private:
|
||||||
|
static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key);
|
||||||
|
static void reset();
|
||||||
|
|
||||||
|
// Delete a bucket linked list
|
||||||
|
static void delete_linked_list(MallocSiteHashtableEntry* head);
|
||||||
|
|
||||||
|
static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx);
|
||||||
|
static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
|
||||||
|
static bool walk(MallocSiteWalker* walker);
|
||||||
|
|
||||||
|
static inline int hash_to_index(int hash) {
|
||||||
|
hash = (hash > 0) ? hash : (-hash);
|
||||||
|
return (hash % table_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline const NativeCallStack* hash_entry_allocation_stack() {
|
||||||
|
return (NativeCallStack*)_hash_entry_allocation_stack;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
// Counter for counting concurrent access
|
||||||
|
static volatile int _access_count;
|
||||||
|
|
||||||
|
// The callsite hashtable. It has to be a static table,
|
||||||
|
  // since malloc calls can come from the C runtime linker.
|
||||||
|
static MallocSiteHashtableEntry* _table[table_size];
|
||||||
|
|
||||||
|
|
||||||
|
// Reserve enough memory for placing the objects
|
||||||
|
|
||||||
|
// The memory for hashtable entry allocation stack object
|
||||||
|
static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
|
||||||
|
// The memory for hashtable entry allocation callsite object
|
||||||
|
static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
|
||||||
|
NOT_PRODUCT(static int _peak_count;)
|
||||||
|
};
|
||||||
|
|
||||||
|
#endif // INCLUDE_NMT
|
||||||
|
#endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
hotspot/src/share/vm/services/mallocTracker.cpp (new file, 200 lines)
@ -0,0 +1,200 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
#include "precompiled.hpp"
|
||||||
|
|
||||||
|
#include "runtime/atomic.hpp"
|
||||||
|
#include "runtime/atomic.inline.hpp"
|
||||||
|
#include "services/mallocSiteTable.hpp"
|
||||||
|
#include "services/mallocTracker.hpp"
|
||||||
|
#include "services/mallocTracker.inline.hpp"
|
||||||
|
#include "services/memTracker.hpp"
|
||||||
|
|
||||||
|
size_t MallocMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
|
||||||
|
|
||||||
|
// Total malloc'd memory amount
|
||||||
|
size_t MallocMemorySnapshot::total() const {
|
||||||
|
size_t amount = 0;
|
||||||
|
for (int index = 0; index < mt_number_of_types; index ++) {
|
||||||
|
amount += _malloc[index].malloc_size();
|
||||||
|
}
|
||||||
|
amount += _tracking_header.size() + total_arena();
|
||||||
|
return amount;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Total malloc'd memory used by arenas
|
||||||
|
size_t MallocMemorySnapshot::total_arena() const {
|
||||||
|
size_t amount = 0;
|
||||||
|
for (int index = 0; index < mt_number_of_types; index ++) {
|
||||||
|
amount += _malloc[index].arena_size();
|
||||||
|
}
|
||||||
|
return amount;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void MallocMemorySnapshot::reset() {
|
||||||
|
_tracking_header.reset();
|
||||||
|
for (int index = 0; index < mt_number_of_types; index ++) {
|
||||||
|
_malloc[index].reset();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make adjustment by subtracting chunks used by arenas
|
||||||
|
// from total chunks to get total free chunk size
|
||||||
|
void MallocMemorySnapshot::make_adjustment() {
|
||||||
|
size_t arena_size = total_arena();
|
||||||
|
int chunk_idx = NMTUtil::flag_to_index(mtChunk);
|
||||||
|
_malloc[chunk_idx].record_free(arena_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void MallocMemorySummary::initialize() {
|
||||||
|
assert(sizeof(_snapshot) >= sizeof(MallocMemorySnapshot), "Sanity Check");
|
||||||
|
// Uses placement new operator to initialize static area.
|
||||||
|
::new ((void*)_snapshot)MallocMemorySnapshot();
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocHeader::release() const {
|
||||||
|
// Tracking already shutdown, no housekeeping is needed anymore
|
||||||
|
if (MemTracker::tracking_level() <= NMT_minimal) return;
|
||||||
|
|
||||||
|
MallocMemorySummary::record_free(size(), flags());
|
||||||
|
MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
|
||||||
|
if (tracking_level() == NMT_detail) {
|
||||||
|
MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool MallocHeader::record_malloc_site(const NativeCallStack& stack, size_t size,
|
||||||
|
size_t* bucket_idx, size_t* pos_idx) const {
|
||||||
|
bool ret = MallocSiteTable::allocation_at(stack, size, bucket_idx, pos_idx);
|
||||||
|
|
||||||
|
// Something went wrong, could be OOM or overflow malloc site table.
|
||||||
|
// We want to keep tracking data under OOM circumstance, so transition to
|
||||||
|
// summary tracking.
|
||||||
|
if (!ret) {
|
||||||
|
MemTracker::transition_to(NMT_summary);
|
||||||
|
}
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool MallocHeader::get_stack(NativeCallStack& stack) const {
|
||||||
|
return MallocSiteTable::access_stack(stack, _bucket_idx, _pos_idx);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool MallocTracker::initialize(NMT_TrackingLevel level) {
|
||||||
|
if (level >= NMT_summary) {
|
||||||
|
MallocMemorySummary::initialize();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (level == NMT_detail) {
|
||||||
|
return MallocSiteTable::initialize();
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool MallocTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
|
||||||
|
assert(from != NMT_off, "Can not transition from off state");
|
||||||
|
assert(to != NMT_off, "Can not transition to off state");
|
||||||
|
if (from == NMT_minimal) {
|
||||||
|
MallocMemorySummary::reset();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (to == NMT_detail) {
|
||||||
|
assert(from == NMT_minimal || from == NMT_summary, "Just check");
|
||||||
|
return MallocSiteTable::initialize();
|
||||||
|
} else if (from == NMT_detail) {
|
||||||
|
assert(to == NMT_minimal || to == NMT_summary, "Just check");
|
||||||
|
MallocSiteTable::shutdown();
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record a malloc memory allocation
|
||||||
|
void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
|
||||||
|
const NativeCallStack& stack, NMT_TrackingLevel level) {
|
||||||
|
void* memblock; // the address for user data
|
||||||
|
MallocHeader* header = NULL;
|
||||||
|
|
||||||
|
if (malloc_base == NULL) {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
  // Check malloc size: size has to be <= MAX_MALLOC_SIZE. Exceeding it is only possible on 32-bit
  // systems, when a malloc size >= 1GB is requested, but it is safe to assume that won't happen.
|
||||||
|
if (size > MAX_MALLOC_SIZE) {
|
||||||
|
fatal("Should not use malloc for big memory block, use virtual memory instead");
|
||||||
|
}
|
||||||
|
// Uses placement global new operator to initialize malloc header
|
||||||
|
switch(level) {
|
||||||
|
case NMT_off:
|
||||||
|
return malloc_base;
|
||||||
|
case NMT_minimal: {
|
||||||
|
MallocHeader* hdr = ::new (malloc_base) MallocHeader();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case NMT_summary: {
|
||||||
|
header = ::new (malloc_base) MallocHeader(size, flags);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case NMT_detail: {
|
||||||
|
header = ::new (malloc_base) MallocHeader(size, flags, stack);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
ShouldNotReachHere();
|
||||||
|
}
|
||||||
|
memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));
|
||||||
|
|
||||||
|
// The alignment check: 8 bytes alignment for 32 bit systems.
|
||||||
|
// 16 bytes alignment for 64-bit systems.
|
||||||
|
assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check");
|
||||||
|
|
||||||
|
// Sanity check
|
||||||
|
assert(get_memory_tracking_level(memblock) == level,
|
||||||
|
"Wrong tracking level");
|
||||||
|
|
||||||
|
#ifdef ASSERT
|
||||||
|
if (level > NMT_minimal) {
|
||||||
|
// Read back
|
||||||
|
assert(get_size(memblock) == size, "Wrong size");
|
||||||
|
assert(get_flags(memblock) == flags, "Wrong flags");
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
return memblock;
|
||||||
|
}
|
||||||
|
|
||||||
|
void* MallocTracker::record_free(void* memblock) {
|
||||||
|
// Never turned on
|
||||||
|
if (MemTracker::tracking_level() == NMT_off ||
|
||||||
|
memblock == NULL) {
|
||||||
|
return memblock;
|
||||||
|
}
|
||||||
|
MallocHeader* header = malloc_header(memblock);
|
||||||
|
header->release();
|
||||||
|
|
||||||
|
return (void*)header;
|
||||||
|
}
|
||||||
|
|
||||||
|
hotspot/src/share/vm/services/mallocTracker.hpp (new file, 424 lines)
@ -0,0 +1,424 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
|
||||||
|
#define SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
|
||||||
|
|
||||||
|
#if INCLUDE_NMT
|
||||||
|
|
||||||
|
#include "memory/allocation.hpp"
|
||||||
|
#include "runtime/atomic.hpp"
|
||||||
|
#include "services/nmtCommon.hpp"
|
||||||
|
#include "utilities/nativeCallStack.hpp"
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This counter class counts memory allocation and deallocation,
|
||||||
|
* records total memory allocation size and number of allocations.
|
||||||
|
* The counters are updated atomically.
|
||||||
|
*/
|
||||||
|
class MemoryCounter VALUE_OBJ_CLASS_SPEC {
|
||||||
|
private:
|
||||||
|
size_t _count;
|
||||||
|
size_t _size;
|
||||||
|
|
||||||
|
DEBUG_ONLY(size_t _peak_count;)
|
||||||
|
DEBUG_ONLY(size_t _peak_size; )
|
||||||
|
|
||||||
|
public:
|
||||||
|
MemoryCounter() : _count(0), _size(0) {
|
||||||
|
DEBUG_ONLY(_peak_count = 0;)
|
||||||
|
DEBUG_ONLY(_peak_size = 0;)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset counters
|
||||||
|
void reset() {
|
||||||
|
_size = 0;
|
||||||
|
_count = 0;
|
||||||
|
DEBUG_ONLY(_peak_size = 0;)
|
||||||
|
DEBUG_ONLY(_peak_count = 0;)
|
||||||
|
}
|
||||||
|
|
||||||
|
inline void allocate(size_t sz) {
|
||||||
|
Atomic::add(1, (volatile MemoryCounterType*)&_count);
|
||||||
|
if (sz > 0) {
|
||||||
|
Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
|
||||||
|
DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
|
||||||
|
}
|
||||||
|
DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
|
||||||
|
}
|
||||||
|
|
||||||
|
inline void deallocate(size_t sz) {
|
||||||
|
assert(_count > 0, "Negative counter");
|
||||||
|
assert(_size >= sz, "Negative size");
|
||||||
|
Atomic::add(-1, (volatile MemoryCounterType*)&_count);
|
||||||
|
if (sz > 0) {
|
||||||
|
Atomic::add(-(MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
inline void resize(long sz) {
|
||||||
|
if (sz != 0) {
|
||||||
|
Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
|
||||||
|
DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
inline size_t count() const { return _count; }
|
||||||
|
inline size_t size() const { return _size; }
|
||||||
|
DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
|
||||||
|
DEBUG_ONLY(inline size_t peak_size() const { return _peak_size; })
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Malloc memory used by a particular subsystem.
|
||||||
|
* It includes the memory acquired through os::malloc()
|
||||||
|
* call and arena's backing memory.
|
||||||
|
*/
|
||||||
|
class MallocMemory VALUE_OBJ_CLASS_SPEC {
|
||||||
|
private:
|
||||||
|
MemoryCounter _malloc;
|
||||||
|
MemoryCounter _arena;
|
||||||
|
|
||||||
|
public:
|
||||||
|
MallocMemory() { }
|
||||||
|
|
||||||
|
inline void record_malloc(size_t sz) {
|
||||||
|
_malloc.allocate(sz);
|
||||||
|
}
|
||||||
|
|
||||||
|
inline void record_free(size_t sz) {
|
||||||
|
_malloc.deallocate(sz);
|
||||||
|
}
|
||||||
|
|
||||||
|
inline void record_new_arena() {
|
||||||
|
_arena.allocate(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
inline void record_arena_free() {
|
||||||
|
_arena.deallocate(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
inline void record_arena_size_change(long sz) {
|
||||||
|
_arena.resize(sz);
|
||||||
|
}
|
||||||
|
|
||||||
|
void reset() {
|
||||||
|
_malloc.reset();
|
||||||
|
_arena.reset();
|
||||||
|
}
|
||||||
|
|
||||||
|
inline size_t malloc_size() const { return _malloc.size(); }
|
||||||
|
inline size_t malloc_count() const { return _malloc.count();}
|
||||||
|
inline size_t arena_size() const { return _arena.size(); }
|
||||||
|
inline size_t arena_count() const { return _arena.count(); }
|
||||||
|
|
||||||
|
DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
|
||||||
|
DEBUG_ONLY(inline const MemoryCounter& arena_counter() const { return _arena; })
|
||||||
|
};
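A brief, hypothetical call sequence (not part of this changeset) illustrating how the arena bookkeeping above is meant to be driven: creation and destruction only move the counter, while the running footprint is carried by record_arena_size_change():

void arena_lifecycle_example(MallocMemory& mm) {
  mm.record_new_arena();               // arena_count() becomes 1, arena_size() stays 0
  mm.record_arena_size_change(4096);   // arena grows by one 4K chunk
  mm.record_arena_size_change(-4096);  // chunk returned to the free list
  mm.record_arena_free();              // arena_count() back to 0
}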
|
||||||
|
|
||||||
|
class MallocMemorySummary;
|
||||||
|
|
||||||
|
// A snapshot of malloc'd memory, includes malloc memory
|
||||||
|
// usage by types and memory used by tracking itself.
|
||||||
|
class MallocMemorySnapshot : public ResourceObj {
|
||||||
|
friend class MallocMemorySummary;
|
||||||
|
|
||||||
|
private:
|
||||||
|
MallocMemory _malloc[mt_number_of_types];
|
||||||
|
MemoryCounter _tracking_header;
|
||||||
|
|
||||||
|
|
||||||
|
public:
|
||||||
|
inline MallocMemory* by_type(MEMFLAGS flags) {
|
||||||
|
int index = NMTUtil::flag_to_index(flags);
|
||||||
|
return &_malloc[index];
|
||||||
|
}
|
||||||
|
|
||||||
|
inline MallocMemory* by_index(int index) {
|
||||||
|
assert(index >= 0, "Index out of bound");
|
||||||
|
assert(index < mt_number_of_types, "Index out of bound");
|
||||||
|
return &_malloc[index];
|
||||||
|
}
|
||||||
|
|
||||||
|
inline MemoryCounter* malloc_overhead() {
|
||||||
|
return &_tracking_header;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Total malloc'd memory amount
|
||||||
|
size_t total() const;
|
||||||
|
// Total malloc'd memory used by arenas
|
||||||
|
size_t total_arena() const;
|
||||||
|
|
||||||
|
inline size_t thread_count() {
|
||||||
|
return by_type(mtThreadStack)->malloc_count();
|
||||||
|
}
|
||||||
|
|
||||||
|
void reset();
|
||||||
|
|
||||||
|
void copy_to(MallocMemorySnapshot* s) {
|
||||||
|
s->_tracking_header = _tracking_header;
|
||||||
|
for (int index = 0; index < mt_number_of_types; index ++) {
|
||||||
|
s->_malloc[index] = _malloc[index];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make adjustment by subtracting chunks used by arenas
|
||||||
|
// from total chunks to get total free chunk size
|
||||||
|
void make_adjustment();
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This class is for collecting malloc statistics at summary level
|
||||||
|
*/
|
||||||
|
class MallocMemorySummary : AllStatic {
|
||||||
|
private:
|
||||||
|
// Reserve memory for placement of MallocMemorySnapshot object
|
||||||
|
static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
|
||||||
|
|
||||||
|
public:
|
||||||
|
static void initialize();
|
||||||
|
|
||||||
|
static inline void record_malloc(size_t size, MEMFLAGS flag) {
|
||||||
|
as_snapshot()->by_type(flag)->record_malloc(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void record_free(size_t size, MEMFLAGS flag) {
|
||||||
|
as_snapshot()->by_type(flag)->record_free(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void record_new_arena(MEMFLAGS flag) {
|
||||||
|
as_snapshot()->by_type(flag)->record_new_arena();
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void record_arena_free(MEMFLAGS flag) {
|
||||||
|
as_snapshot()->by_type(flag)->record_arena_free();
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void record_arena_size_change(long size, MEMFLAGS flag) {
|
||||||
|
as_snapshot()->by_type(flag)->record_arena_size_change(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void snapshot(MallocMemorySnapshot* s) {
|
||||||
|
as_snapshot()->copy_to(s);
|
||||||
|
s->make_adjustment();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record memory used by malloc tracking header
|
||||||
|
static inline void record_new_malloc_header(size_t sz) {
|
||||||
|
as_snapshot()->malloc_overhead()->allocate(sz);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void record_free_malloc_header(size_t sz) {
|
||||||
|
as_snapshot()->malloc_overhead()->deallocate(sz);
|
||||||
|
}
|
||||||
|
|
||||||
|
// The memory used by malloc tracking headers
|
||||||
|
static inline size_t tracking_overhead() {
|
||||||
|
return as_snapshot()->malloc_overhead()->size();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset all counters to zero
|
||||||
|
static void reset() {
|
||||||
|
as_snapshot()->reset();
|
||||||
|
}
|
||||||
|
|
||||||
|
static MallocMemorySnapshot* as_snapshot() {
|
||||||
|
return (MallocMemorySnapshot*)_snapshot;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Malloc tracking header.
|
||||||
|
* To satisfy malloc alignment requirement, NMT uses 2 machine words for tracking purpose,
|
||||||
|
 * which ensures 8-byte alignment on 32-bit systems and 16-byte alignment on 64-bit systems (Product build).
|
||||||
|
*/
|
||||||
|
|
||||||
|
class MallocHeader VALUE_OBJ_CLASS_SPEC {
|
||||||
|
#ifdef _LP64
|
||||||
|
size_t _size : 62;
|
||||||
|
size_t _level : 2;
|
||||||
|
size_t _flags : 8;
|
||||||
|
size_t _pos_idx : 16;
|
||||||
|
size_t _bucket_idx: 40;
|
||||||
|
#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)1 << 40)
|
||||||
|
#define MAX_BUCKET_LENGTH ((size_t)(1 << 16))
|
||||||
|
#define MAX_MALLOC_SIZE (((size_t)1 << 62) - 1)
|
||||||
|
#else
|
||||||
|
size_t _size : 30;
|
||||||
|
size_t _level : 2;
|
||||||
|
size_t _flags : 8;
|
||||||
|
size_t _pos_idx : 8;
|
||||||
|
size_t _bucket_idx: 16;
|
||||||
|
#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)(1 << 16))
|
||||||
|
#define MAX_BUCKET_LENGTH ((size_t)(1 << 8))
|
||||||
|
  // Max malloc size = 1GB - 1 on a 32-bit system, which has 4GB of total memory
|
||||||
|
#define MAX_MALLOC_SIZE ((size_t)(1 << 30) - 1)
|
||||||
|
#endif // _LP64
|
||||||
|
|
||||||
|
public:
|
||||||
|
// Summary tracking header
|
||||||
|
MallocHeader(size_t size, MEMFLAGS flags) {
|
||||||
|
assert(sizeof(MallocHeader) == sizeof(void*) * 2,
|
||||||
|
"Wrong header size");
|
||||||
|
|
||||||
|
_level = NMT_summary;
|
||||||
|
_flags = flags;
|
||||||
|
set_size(size);
|
||||||
|
MallocMemorySummary::record_malloc(size, flags);
|
||||||
|
MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
|
||||||
|
}
|
||||||
|
// Detail tracking header
|
||||||
|
MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
|
||||||
|
assert(sizeof(MallocHeader) == sizeof(void*) * 2,
|
||||||
|
"Wrong header size");
|
||||||
|
|
||||||
|
_level = NMT_detail;
|
||||||
|
_flags = flags;
|
||||||
|
set_size(size);
|
||||||
|
size_t bucket_idx;
|
||||||
|
size_t pos_idx;
|
||||||
|
if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
|
||||||
|
assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
|
||||||
|
assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
|
||||||
|
_bucket_idx = bucket_idx;
|
||||||
|
_pos_idx = pos_idx;
|
||||||
|
}
|
||||||
|
MallocMemorySummary::record_malloc(size, flags);
|
||||||
|
MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
|
||||||
|
}
|
||||||
|
// Minimal tracking header
|
||||||
|
MallocHeader() {
|
||||||
|
assert(sizeof(MallocHeader) == sizeof(void*) * 2,
|
||||||
|
"Wrong header size");
|
||||||
|
|
||||||
|
_level = (unsigned short)NMT_minimal;
|
||||||
|
}
|
||||||
|
|
||||||
|
inline NMT_TrackingLevel tracking_level() const {
|
||||||
|
return (NMT_TrackingLevel)_level;
|
||||||
|
}
|
||||||
|
|
||||||
|
inline size_t size() const { return _size; }
|
||||||
|
inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
|
||||||
|
bool get_stack(NativeCallStack& stack) const;
|
||||||
|
|
||||||
|
// Cleanup tracking information before the memory is released.
|
||||||
|
void release() const;
|
||||||
|
|
||||||
|
private:
|
||||||
|
inline void set_size(size_t size) {
|
||||||
|
assert(size <= MAX_MALLOC_SIZE, "Malloc size too large, should use virtual memory?");
|
||||||
|
_size = size;
|
||||||
|
}
|
||||||
|
bool record_malloc_site(const NativeCallStack& stack, size_t size,
|
||||||
|
size_t* bucket_idx, size_t* pos_idx) const;
|
||||||
|
};
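A minimal sketch (hypothetical type, illustration only) of how the two-machine-word invariant asserted by the constructors above can be checked for the 64-bit field layout, assuming a typical LP64 compiler that packs these bit-fields into two 64-bit units:

#include <stdint.h>

struct HeaderLayoutSketch {
  uint64_t size       : 62;
  uint64_t level      : 2;
  uint64_t flags      : 8;
  uint64_t pos_idx    : 16;
  uint64_t bucket_idx : 40;
};
// 62 + 2 bits fill the first word, 8 + 16 + 40 bits fill the second.
static_assert(sizeof(HeaderLayoutSketch) == 2 * sizeof(void*),
              "tracking header must stay two machine words");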
|
||||||
|
|
||||||
|
|
||||||
|
// Main class called from MemTracker to track malloc activities
|
||||||
|
class MallocTracker : AllStatic {
|
||||||
|
public:
|
||||||
|
// Initialize malloc tracker for specific tracking level
|
||||||
|
static bool initialize(NMT_TrackingLevel level);
|
||||||
|
|
||||||
|
static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
|
||||||
|
|
||||||
|
// malloc tracking header size for specific tracking level
|
||||||
|
static inline size_t malloc_header_size(NMT_TrackingLevel level) {
|
||||||
|
return (level == NMT_off) ? 0 : sizeof(MallocHeader);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parameter name convention:
|
||||||
|
// memblock : the beginning address for user data
|
||||||
|
// malloc_base: the beginning address that includes malloc tracking header
|
||||||
|
//
|
||||||
|
// The relationship:
|
||||||
|
// memblock = (char*)malloc_base + sizeof(nmt header)
|
||||||
|
//
|
||||||
|
|
||||||
|
// Record malloc on specified memory block
|
||||||
|
static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
|
||||||
|
const NativeCallStack& stack, NMT_TrackingLevel level);
|
||||||
|
|
||||||
|
// Record free on specified memory block
|
||||||
|
static void* record_free(void* memblock);
|
||||||
|
|
||||||
|
// Get tracking level of specified memory block
|
||||||
|
static inline NMT_TrackingLevel get_memory_tracking_level(void* memblock);
|
||||||
|
|
||||||
|
|
||||||
|
// Offset memory address to header address
|
||||||
|
static inline void* get_base(void* memblock);
|
||||||
|
static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
|
||||||
|
if (memblock == NULL || level == NMT_off) return memblock;
|
||||||
|
return (char*)memblock - malloc_header_size(level);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get memory size
|
||||||
|
static inline size_t get_size(void* memblock) {
|
||||||
|
MallocHeader* header = malloc_header(memblock);
|
||||||
|
assert(header->tracking_level() >= NMT_summary,
|
||||||
|
"Wrong tracking level");
|
||||||
|
return header->size();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get memory type
|
||||||
|
static inline MEMFLAGS get_flags(void* memblock) {
|
||||||
|
MallocHeader* header = malloc_header(memblock);
|
||||||
|
assert(header->tracking_level() >= NMT_summary,
|
||||||
|
"Wrong tracking level");
|
||||||
|
return header->flags();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get header size
|
||||||
|
static inline size_t get_header_size(void* memblock) {
|
||||||
|
return (memblock == NULL) ? 0 : sizeof(MallocHeader);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void record_new_arena(MEMFLAGS flags) {
|
||||||
|
MallocMemorySummary::record_new_arena(flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void record_arena_free(MEMFLAGS flags) {
|
||||||
|
MallocMemorySummary::record_arena_free(flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void record_arena_size_change(int size, MEMFLAGS flags) {
|
||||||
|
MallocMemorySummary::record_arena_size_change(size, flags);
|
||||||
|
}
|
||||||
|
private:
|
||||||
|
static inline MallocHeader* malloc_header(void *memblock) {
|
||||||
|
assert(memblock != NULL, "NULL pointer");
|
||||||
|
MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
|
||||||
|
assert(header->tracking_level() >= NMT_minimal, "Bad header");
|
||||||
|
return header;
|
||||||
|
}
|
||||||
|
};
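A hedged sketch of how these entry points compose under the memblock/malloc_base convention documented above. tracked_malloc and tracked_free are hypothetical wrappers (not part of this changeset), and ::malloc/::free are assumed to be available via <stdlib.h>:

void* tracked_malloc(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
  NMT_TrackingLevel level = MemTracker::tracking_level();
  // Over-allocate by the header size; the tracker places the header at the
  // start of the block and returns the offset user pointer (memblock).
  void* malloc_base = ::malloc(size + MallocTracker::malloc_header_size(level));
  if (malloc_base == NULL) return NULL;
  return MallocTracker::record_malloc(malloc_base, size, flags, stack, level);
}

void tracked_free(void* memblock) {
  // record_free() returns the original malloc_base, which is what ::free() expects.
  ::free(MallocTracker::record_free(memblock));
}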
|
||||||
|
|
||||||
|
#endif // INCLUDE_NMT
|
||||||
|
|
||||||
|
|
||||||
|
#endif //SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -22,22 +22,22 @@
|
|||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include "precompiled.hpp"
|
#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
|
||||||
#include "runtime/atomic.inline.hpp"
|
#define SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
|
||||||
#include "services/memPtr.hpp"
|
|
||||||
|
#include "services/mallocTracker.hpp"
|
||||||
#include "services/memTracker.hpp"
|
#include "services/memTracker.hpp"
|
||||||
|
|
||||||
volatile jint SequenceGenerator::_seq_number = 1;
|
inline NMT_TrackingLevel MallocTracker::get_memory_tracking_level(void* memblock) {
|
||||||
volatile unsigned long SequenceGenerator::_generation = 1;
|
assert(memblock != NULL, "Sanity check");
|
||||||
NOT_PRODUCT(jint SequenceGenerator::_max_seq_number = 1;)
|
if (MemTracker::tracking_level() == NMT_off) return NMT_off;
|
||||||
|
MallocHeader* header = malloc_header(memblock);
|
||||||
jint SequenceGenerator::next() {
|
return header->tracking_level();
|
||||||
jint seq = Atomic::add(1, &_seq_number);
|
|
||||||
if (seq < 0) {
|
|
||||||
MemTracker::shutdown(MemTracker::NMT_sequence_overflow);
|
|
||||||
} else {
|
|
||||||
NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
|
|
||||||
}
|
|
||||||
return seq;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
inline void* MallocTracker::get_base(void* memblock){
|
||||||
|
return get_base(memblock, MemTracker::tracking_level());
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
|
||||||
|
|
@ -1914,7 +1914,7 @@ void ThreadTimesClosure::do_thread(Thread* thread) {
|
|||||||
ResourceMark rm(THREAD); // thread->name() uses ResourceArea
|
ResourceMark rm(THREAD); // thread->name() uses ResourceArea
|
||||||
|
|
||||||
assert(thread->name() != NULL, "All threads should have a name");
|
assert(thread->name() != NULL, "All threads should have a name");
|
||||||
_names_chars[_count] = strdup(thread->name());
|
_names_chars[_count] = os::strdup(thread->name());
|
||||||
_times->long_at_put(_count, os::is_thread_cpu_time_supported() ?
|
_times->long_at_put(_count, os::is_thread_cpu_time_supported() ?
|
||||||
os::thread_cpu_time(thread) : -1);
|
os::thread_cpu_time(thread) : -1);
|
||||||
_count++;
|
_count++;
|
||||||
@ -1932,7 +1932,7 @@ void ThreadTimesClosure::do_unlocked() {
|
|||||||
|
|
||||||
ThreadTimesClosure::~ThreadTimesClosure() {
|
ThreadTimesClosure::~ThreadTimesClosure() {
|
||||||
for (int i = 0; i < _count; i++) {
|
for (int i = 0; i < _count; i++) {
|
||||||
free(_names_chars[i]);
|
os::free(_names_chars[i]);
|
||||||
}
|
}
|
||||||
FREE_C_HEAP_ARRAY(char *, _names_chars, mtInternal);
|
FREE_C_HEAP_ARRAY(char *, _names_chars, mtInternal);
|
||||||
}
|
}
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -22,471 +22,301 @@
|
|||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
#include "precompiled.hpp"
|
#include "precompiled.hpp"
|
||||||
|
|
||||||
#include "memory/allocation.hpp"
|
#include "memory/allocation.hpp"
|
||||||
#include "runtime/safepoint.hpp"
|
#include "runtime/safepoint.hpp"
|
||||||
#include "runtime/thread.inline.hpp"
|
#include "runtime/thread.inline.hpp"
|
||||||
#include "services/memBaseline.hpp"
|
#include "services/memBaseline.hpp"
|
||||||
#include "services/memTracker.hpp"
|
#include "services/memTracker.hpp"
|
||||||
|
|
||||||
|
/*
|
||||||
|
 * Sizes are sorted in descending order for reporting
|
||||||
|
*/
|
||||||
|
int compare_malloc_size(const MallocSite& s1, const MallocSite& s2) {
|
||||||
|
if (s1.size() == s2.size()) {
|
||||||
|
return 0;
|
||||||
|
} else if (s1.size() > s2.size()) {
|
||||||
|
return -1;
|
||||||
|
} else {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
}
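// Note: returning -1 when s1 is larger sorts larger sites first, i.e. descending by size.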
|
||||||
|
|
||||||
MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
|
|
||||||
{mtJavaHeap, "Java Heap"},
|
int compare_virtual_memory_size(const VirtualMemoryAllocationSite& s1,
|
||||||
{mtClass, "Class"},
|
const VirtualMemoryAllocationSite& s2) {
|
||||||
{mtThreadStack,"Thread Stack"},
|
if (s1.reserved() == s2.reserved()) {
|
||||||
{mtThread, "Thread"},
|
return 0;
|
||||||
{mtCode, "Code"},
|
} else if (s1.reserved() > s2.reserved()) {
|
||||||
{mtGC, "GC"},
|
return -1;
|
||||||
{mtCompiler, "Compiler"},
|
} else {
|
||||||
{mtInternal, "Internal"},
|
return 1;
|
||||||
{mtOther, "Other"},
|
}
|
||||||
{mtSymbol, "Symbol"},
|
}
|
||||||
{mtNMT, "Memory Tracking"},
|
|
||||||
{mtTracing, "Tracing"},
|
// Sort into allocation site addresses order for baseline comparison
|
||||||
{mtChunk, "Pooled Free Chunks"},
|
int compare_malloc_site(const MallocSite& s1, const MallocSite& s2) {
|
||||||
{mtClassShared,"Shared spaces for classes"},
|
return s1.call_stack()->compare(*s2.call_stack());
|
||||||
{mtTest, "Test"},
|
}
|
||||||
{mtNone, "Unknown"} // It can happen when type tagging records are lagging
|
|
||||||
// behind
|
|
||||||
|
int compare_virtual_memory_site(const VirtualMemoryAllocationSite& s1,
|
||||||
|
const VirtualMemoryAllocationSite& s2) {
|
||||||
|
return s1.call_stack()->compare(*s2.call_stack());
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Walker to walk malloc allocation site table
|
||||||
|
*/
|
||||||
|
class MallocAllocationSiteWalker : public MallocSiteWalker {
|
||||||
|
private:
|
||||||
|
SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
|
||||||
|
_malloc_sites;
|
||||||
|
size_t _count;
|
||||||
|
|
||||||
|
// Entries in MallocSiteTable with size = 0 and count = 0,
|
||||||
|
  // when the malloc site is no longer there.
|
||||||
|
public:
|
||||||
|
MallocAllocationSiteWalker(Arena* arena) : _count(0), _malloc_sites(arena) {
|
||||||
|
}
|
||||||
|
|
||||||
|
inline size_t count() const { return _count; }
|
||||||
|
|
||||||
|
LinkedList<MallocSite>* malloc_sites() {
|
||||||
|
return &_malloc_sites;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool do_malloc_site(const MallocSite* site) {
|
||||||
|
if (site->size() >= MemBaseline::SIZE_THRESHOLD) {
|
||||||
|
if (_malloc_sites.add(*site) != NULL) {
|
||||||
|
_count++;
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
return false; // OOM
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// malloc site does not meet threshold, ignore and continue
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
MemBaseline::MemBaseline() {
|
// Compare virtual memory region's base address
|
||||||
_baselined = false;
|
int compare_virtual_memory_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
|
||||||
|
return r1.compare(r2);
|
||||||
for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
|
|
||||||
_malloc_data[index].set_type(MemType2NameMap[index]._flag);
|
|
||||||
_vm_data[index].set_type(MemType2NameMap[index]._flag);
|
|
||||||
_arena_data[index].set_type(MemType2NameMap[index]._flag);
|
|
||||||
}
|
|
||||||
|
|
||||||
_malloc_cs = NULL;
|
|
||||||
_vm_cs = NULL;
|
|
||||||
_vm_map = NULL;
|
|
||||||
|
|
||||||
_number_of_classes = 0;
|
|
||||||
_number_of_threads = 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Walk all virtual memory regions for baselining
|
||||||
|
class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
|
||||||
|
private:
|
||||||
|
SortedLinkedList<ReservedMemoryRegion, compare_virtual_memory_base, ResourceObj::ARENA>
|
||||||
|
_virtual_memory_regions;
|
||||||
|
size_t _count;
|
||||||
|
|
||||||
void MemBaseline::clear() {
|
public:
|
||||||
if (_malloc_cs != NULL) {
|
VirtualMemoryAllocationWalker(Arena* a) : _count(0), _virtual_memory_regions(a) {
|
||||||
delete _malloc_cs;
|
|
||||||
_malloc_cs = NULL;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (_vm_cs != NULL) {
|
bool do_allocation_site(const ReservedMemoryRegion* rgn) {
|
||||||
delete _vm_cs;
|
if (rgn->size() >= MemBaseline::SIZE_THRESHOLD) {
|
||||||
_vm_cs = NULL;
|
if (_virtual_memory_regions.add(*rgn) != NULL) {
|
||||||
}
|
_count ++;
|
||||||
|
return true;
|
||||||
if (_vm_map != NULL) {
|
} else {
|
||||||
delete _vm_map;
|
|
||||||
_vm_map = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
reset();
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
void MemBaseline::reset() {
|
|
||||||
_baselined = false;
|
|
||||||
_total_vm_reserved = 0;
|
|
||||||
_total_vm_committed = 0;
|
|
||||||
_total_malloced = 0;
|
|
||||||
_number_of_classes = 0;
|
|
||||||
|
|
||||||
if (_malloc_cs != NULL) _malloc_cs->clear();
|
|
||||||
if (_vm_cs != NULL) _vm_cs->clear();
|
|
||||||
if (_vm_map != NULL) _vm_map->clear();
|
|
||||||
|
|
||||||
for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
|
|
||||||
_malloc_data[index].clear();
|
|
||||||
_vm_data[index].clear();
|
|
||||||
_arena_data[index].clear();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
MemBaseline::~MemBaseline() {
|
|
||||||
clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
// baseline malloc'd memory records, generate overall summary and summaries by
|
|
||||||
// memory types
|
|
||||||
bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
|
|
||||||
MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
|
|
||||||
MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
|
|
||||||
size_t used_arena_size = 0;
|
|
||||||
int index;
|
|
||||||
while (malloc_ptr != NULL) {
|
|
||||||
index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
|
|
||||||
size_t size = malloc_ptr->size();
|
|
||||||
if (malloc_ptr->is_arena_memory_record()) {
|
|
||||||
// We do have anonymous arenas, they are either used as value objects,
|
|
||||||
// which are embedded inside other objects, or used as stack objects.
|
|
||||||
_arena_data[index].inc(size);
|
|
||||||
used_arena_size += size;
|
|
||||||
} else {
|
|
||||||
_total_malloced += size;
|
|
||||||
_malloc_data[index].inc(size);
|
|
||||||
if (malloc_ptr->is_arena_record()) {
|
|
||||||
// see if arena memory record present
|
|
||||||
MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
|
|
||||||
if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
|
|
||||||
assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
|
|
||||||
"Arena records do not match");
|
|
||||||
size = next_malloc_ptr->size();
|
|
||||||
_arena_data[index].inc(size);
|
|
||||||
used_arena_size += size;
|
|
||||||
malloc_itr.next();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
|
|
||||||
}
|
|
||||||
|
|
||||||
// substract used arena size to get size of arena chunk in free list
|
|
||||||
index = flag2index(mtChunk);
|
|
||||||
_malloc_data[index].reduce(used_arena_size);
|
|
||||||
// we really don't know how many chunks in free list, so just set to
|
|
||||||
// 0
|
|
||||||
_malloc_data[index].overwrite_counter(0);
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
// check if there is a safepoint in progress, if so, block the thread
|
|
||||||
// for the safepoint
|
|
||||||
void MemBaseline::check_safepoint(JavaThread* thr) {
|
|
||||||
if (SafepointSynchronize::is_synchronizing()) {
|
|
||||||
// grab and drop the SR_lock to honor the safepoint protocol
|
|
||||||
MutexLocker ml(thr->SR_lock());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// baseline mmap'd memory records, generate overall summary and summaries by
|
|
||||||
// memory types
|
|
||||||
bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
|
|
||||||
MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
|
|
||||||
VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
|
|
||||||
int index;
|
|
||||||
while (vm_ptr != NULL) {
|
|
||||||
if (vm_ptr->is_reserved_region()) {
|
|
||||||
index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
|
|
||||||
// we use the number of thread stack to count threads
|
|
||||||
if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
|
|
||||||
_number_of_threads ++;
|
|
||||||
}
|
|
||||||
_total_vm_reserved += vm_ptr->size();
|
|
||||||
_vm_data[index].inc(vm_ptr->size(), 0);
|
|
||||||
} else {
|
|
||||||
_total_vm_committed += vm_ptr->size();
|
|
||||||
_vm_data[index].inc(0, vm_ptr->size());
|
|
||||||
}
|
|
||||||
vm_ptr = (VMMemRegion*)vm_itr.next();
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
// baseline malloc'd memory by callsites, but only the callsites with memory allocation
|
|
||||||
// over 1KB are stored.
|
|
||||||
bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
|
|
||||||
assert(MemTracker::track_callsite(), "detail tracking is off");
|
|
||||||
|
|
||||||
MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
|
|
||||||
MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
|
|
||||||
MallocCallsitePointer malloc_callsite;
|
|
||||||
|
|
||||||
// initailize malloc callsite array
|
|
||||||
if (_malloc_cs == NULL) {
|
|
||||||
_malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
|
|
||||||
// out of native memory
|
|
||||||
if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
_malloc_cs->clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);
|
|
||||||
|
|
||||||
// sort into callsite pc order. Details are aggregated by callsites
|
|
||||||
malloc_data->sort((FN_SORT)malloc_sort_by_pc);
|
|
||||||
bool ret = true;
|
|
||||||
|
|
||||||
// baseline memory that is totaled over 1 KB
|
|
||||||
while (malloc_ptr != NULL) {
|
|
||||||
if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
|
|
||||||
// skip thread stacks
|
|
||||||
if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
|
|
||||||
if (malloc_callsite.addr() != malloc_ptr->pc()) {
|
|
||||||
if ((malloc_callsite.amount()/K) > 0) {
|
|
||||||
if (!_malloc_cs->append(&malloc_callsite)) {
|
|
||||||
ret = false;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
|
|
||||||
}
|
|
||||||
malloc_callsite.inc(malloc_ptr->size());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
|
|
||||||
}
|
|
||||||
|
|
||||||
// restore to address order. Snapshot malloc data is maintained in memory
|
|
||||||
// address order.
|
|
||||||
malloc_data->sort((FN_SORT)malloc_sort_by_addr);
|
|
||||||
|
|
||||||
if (!ret) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
// deal with last record
|
|
||||||
if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
|
|
||||||
if (!_malloc_cs->append(&malloc_callsite)) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
// baseline mmap'd memory by callsites
|
|
||||||
bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
|
|
||||||
assert(MemTracker::track_callsite(), "detail tracking is off");
|
|
||||||
|
|
||||||
VMCallsitePointer vm_callsite;
|
|
||||||
VMCallsitePointer* cur_callsite = NULL;
|
|
||||||
MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
|
|
||||||
VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();
|
|
||||||
|
|
||||||
// initialize virtual memory map array
|
|
||||||
if (_vm_map == NULL) {
|
|
||||||
_vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
|
|
||||||
if (_vm_map == NULL || _vm_map->out_of_memory()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
_vm_map->clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
// initialize virtual memory callsite array
|
|
||||||
if (_vm_cs == NULL) {
|
|
||||||
_vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
|
|
||||||
if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
_vm_cs->clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
// consolidate virtual memory data
|
|
||||||
VMMemRegionEx* reserved_rec = NULL;
|
|
||||||
VMMemRegionEx* committed_rec = NULL;
|
|
||||||
|
|
||||||
// vm_ptr is coming in increasing base address order
|
|
||||||
while (vm_ptr != NULL) {
|
|
||||||
if (vm_ptr->is_reserved_region()) {
|
|
||||||
// consolidate reserved memory regions for virtual memory map.
|
|
||||||
// The criteria for consolidation is:
|
|
||||||
// 1. two adjacent reserved memory regions
|
|
||||||
// 2. belong to the same memory type
|
|
||||||
// 3. reserved from the same callsite
|
|
||||||
if (reserved_rec == NULL ||
|
|
||||||
reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
|
|
||||||
FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
|
|
||||||
reserved_rec->pc() != vm_ptr->pc()) {
|
|
||||||
if (!_vm_map->append(vm_ptr)) {
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
// inserted reserved region, we need the pointer to the element in virtual
|
|
||||||
// memory map array.
|
|
||||||
reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
|
|
||||||
} else {
|
|
||||||
reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
|
|
||||||
}
|
}
|
||||||
|
return true;
|
||||||
if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
vm_callsite = VMCallsitePointer(vm_ptr->pc());
|
|
||||||
cur_callsite = &vm_callsite;
|
|
||||||
vm_callsite.inc(vm_ptr->size(), 0);
|
|
||||||
} else {
|
|
||||||
// consolidate committed memory regions for virtual memory map
|
|
||||||
// The criterial is:
|
|
||||||
// 1. two adjacent committed memory regions
|
|
||||||
// 2. committed from the same callsite
|
|
||||||
if (committed_rec == NULL ||
|
|
||||||
committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
|
|
||||||
committed_rec->pc() != vm_ptr->pc()) {
|
|
||||||
if (!_vm_map->append(vm_ptr)) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
|
|
||||||
} else {
|
|
||||||
committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
|
|
||||||
}
|
|
||||||
vm_callsite.inc(0, vm_ptr->size());
|
|
||||||
}
|
|
||||||
vm_ptr = (VMMemRegionEx*)vm_itr.next();
|
|
||||||
}
|
}
|
||||||
// deal with last record
|
|
||||||
if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
|
LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() {
|
||||||
|
return &_virtual_memory_regions;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
bool MemBaseline::baseline_summary() {
|
||||||
|
assert(_malloc_memory_snapshot == NULL, "Malloc baseline not yet reset");
|
||||||
|
assert(_virtual_memory_snapshot == NULL, "Virtual baseline not yet reset");
|
||||||
|
|
||||||
|
_malloc_memory_snapshot = new (arena()) MallocMemorySnapshot();
|
||||||
|
_virtual_memory_snapshot = new (arena()) VirtualMemorySnapshot();
|
||||||
|
if (_malloc_memory_snapshot == NULL || _virtual_memory_snapshot == NULL) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
MallocMemorySummary::snapshot(_malloc_memory_snapshot);
|
||||||
|
VirtualMemorySummary::snapshot(_virtual_memory_snapshot);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool MemBaseline::baseline_allocation_sites() {
|
||||||
|
assert(arena() != NULL, "Just check");
|
||||||
|
// Malloc allocation sites
|
||||||
|
MallocAllocationSiteWalker malloc_walker(arena());
|
||||||
|
if (!MallocSiteTable::walk_malloc_site(&malloc_walker)) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
// sort it into callsite pc order. Details are aggregated by callsites
|
_malloc_sites.set_head(malloc_walker.malloc_sites()->head());
|
||||||
_vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);
|
// The malloc sites are collected in size order
|
||||||
|
_malloc_sites_order = by_size;
|
||||||
|
|
||||||
// walk the array to consolidate record by pc
|
// Virtual memory allocation sites
|
||||||
MemPointerArrayIteratorImpl itr(_vm_cs);
|
VirtualMemoryAllocationWalker virtual_memory_walker(arena());
|
||||||
VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
|
if (!VirtualMemoryTracker::walk_virtual_memory(&virtual_memory_walker)) {
|
||||||
VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
|
return false;
|
||||||
while (next_rec != NULL) {
|
}
|
||||||
assert(callsite_rec != NULL, "Sanity check");
|
|
||||||
if (next_rec->addr() == callsite_rec->addr()) {
|
// Virtual memory allocations are collected in call stack order
|
||||||
callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
|
_virtual_memory_allocations.set_head(virtual_memory_walker.virtual_memory_allocations()->head());
|
||||||
itr.remove();
|
|
||||||
next_rec = (VMCallsitePointer*)itr.current();
|
if (!aggregate_virtual_memory_allocation_sites()) {
|
||||||
} else {
|
return false;
|
||||||
callsite_rec = next_rec;
|
}
|
||||||
next_rec = (VMCallsitePointer*)itr.next();
|
// Virtual memory allocation sites are aggregated in call stack order
|
||||||
}
|
_virtual_memory_sites_order = by_address;
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool MemBaseline::baseline(bool summaryOnly) {
  if (arena() == NULL) {
    _arena = new (std::nothrow, mtNMT) Arena(mtNMT);
    if (arena() == NULL) return false;
  }

  reset();

  _class_count = InstanceKlass::number_of_instance_classes();

  if (!baseline_summary()) {
    return false;
  }

  _baseline_type = Summary_baselined;

  // baseline details
  if (!summaryOnly &&
      MemTracker::tracking_level() == NMT_detail) {
    baseline_allocation_sites();
    _baseline_type = Detail_baselined;
  }

  return true;
}
|
||||||
|
|
||||||
// baseline a snapshot. If summary_only = false, memory usages aggregated by
|
int compare_allocation_site(const VirtualMemoryAllocationSite& s1,
|
||||||
// callsites are also baselined.
|
const VirtualMemoryAllocationSite& s2) {
|
||||||
// The method call can be lengthy, especially when detail tracking info is
|
return s1.call_stack()->compare(*s2.call_stack());
|
||||||
// requested. So the method checks for safepoint explicitly.
|
}
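compare_allocation_site() above defers entirely to the call-stack comparator, so two sites with identical stacks compare equal and collapse onto one node in the sorted list. A standalone sketch of such a three-way stack compare; CallStack is an illustrative stand-in for NativeCallStack:

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative stand-in for NativeCallStack: a list of frame pcs.
struct CallStack {
  std::vector<std::uintptr_t> frames;

  // Three-way compare, frame by frame, so identical stacks compare as 0.
  int compare(const CallStack& other) const {
    std::size_t n = frames.size() < other.frames.size() ? frames.size()
                                                        : other.frames.size();
    for (std::size_t i = 0; i < n; i++) {
      if (frames[i] != other.frames[i]) {
        return frames[i] < other.frames[i] ? -1 : 1;
      }
    }
    return (int)frames.size() - (int)other.frames.size();
  }
};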
|
||||||
bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
|
|
||||||
Thread* THREAD = Thread::current();
|
|
||||||
assert(THREAD->is_Java_thread(), "must be a JavaThread");
|
|
||||||
MutexLocker snapshot_locker(snapshot._lock);
|
|
||||||
reset();
|
|
||||||
_baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
|
|
||||||
if (_baselined) {
|
|
||||||
check_safepoint((JavaThread*)THREAD);
|
|
||||||
_baselined = baseline_vm_summary(snapshot._vm_ptrs);
|
|
||||||
}
|
|
||||||
_number_of_classes = snapshot.number_of_classes();
|
|
||||||
|
|
||||||
if (!summary_only && MemTracker::track_callsite() && _baselined) {
|
bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
|
||||||
check_safepoint((JavaThread*)THREAD);
|
SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site, ResourceObj::ARENA>
|
||||||
_baselined = baseline_malloc_details(snapshot._alloc_ptrs);
|
allocation_sites(arena());
|
||||||
if (_baselined) {
|
|
||||||
check_safepoint((JavaThread*)THREAD);
|
VirtualMemoryAllocationIterator itr = virtual_memory_allocations();
|
||||||
_baselined = baseline_vm_details(snapshot._vm_ptrs);
|
const ReservedMemoryRegion* rgn;
|
||||||
|
VirtualMemoryAllocationSite* site;
|
||||||
|
while ((rgn = itr.next()) != NULL) {
|
||||||
|
VirtualMemoryAllocationSite tmp(*rgn->call_stack());
|
||||||
|
site = allocation_sites.find(tmp);
|
||||||
|
if (site == NULL) {
|
||||||
|
LinkedListNode<VirtualMemoryAllocationSite>* node =
|
||||||
|
allocation_sites.add(tmp);
|
||||||
|
if (node == NULL) return false;
|
||||||
|
site = node->data();
|
||||||
}
|
}
|
||||||
}
|
site->reserve_memory(rgn->size());
|
||||||
return _baselined;
|
site->commit_memory(rgn->committed_size());
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
int MemBaseline::flag2index(MEMFLAGS flag) const {
|
|
||||||
for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
|
|
||||||
if (MemType2NameMap[index]._flag == flag) {
|
|
||||||
return index;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assert(false, "no type");
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
const char* MemBaseline::type2name(MEMFLAGS type) {
|
|
||||||
for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
|
|
||||||
if (MemType2NameMap[index]._flag == type) {
|
|
||||||
return MemType2NameMap[index]._name;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assert(false, err_msg("bad type %x", type));
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
|
|
||||||
_total_malloced = other._total_malloced;
|
|
||||||
_total_vm_reserved = other._total_vm_reserved;
|
|
||||||
_total_vm_committed = other._total_vm_committed;
|
|
||||||
|
|
||||||
_baselined = other._baselined;
|
|
||||||
_number_of_classes = other._number_of_classes;
|
|
||||||
|
|
||||||
for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
|
|
||||||
_malloc_data[index] = other._malloc_data[index];
|
|
||||||
_vm_data[index] = other._vm_data[index];
|
|
||||||
_arena_data[index] = other._arena_data[index];
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (MemTracker::track_callsite()) {
|
_virtual_memory_sites.set_head(allocation_sites.head());
|
||||||
assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
|
return true;
|
||||||
assert(other._malloc_cs != NULL && other._vm_cs != NULL,
|
}
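The aggregation walks every reserved region and adds its reserved and committed sizes to the entry for its reserving call stack, creating the entry the first time that stack is seen. The same shape in standalone form, keyed here by an opaque stack id instead of the full NativeCallStack:

#include <cstddef>
#include <cstdint>
#include <map>
#include <vector>

struct RegionRec {                // illustrative reserved-region record
  std::uintptr_t stack_id;        // stand-in for the reserving call stack
  std::size_t    reserved;
  std::size_t    committed;
};

struct SiteTotals { std::size_t reserved = 0, committed = 0; };

// Fold every region into a per-call-site total, creating the site entry on
// first use; this mirrors the find()/add() loop above.
std::map<std::uintptr_t, SiteTotals>
aggregate(const std::vector<RegionRec>& regions) {
  std::map<std::uintptr_t, SiteTotals> sites;
  for (const RegionRec& r : regions) {
    SiteTotals& t = sites[r.stack_id];
    t.reserved  += r.reserved;
    t.committed += r.committed;
  }
  return sites;
}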
|
||||||
"not properly baselined");
|
|
||||||
_malloc_cs->clear();
|
|
||||||
_vm_cs->clear();
|
|
||||||
int index;
|
|
||||||
for (index = 0; index < other._malloc_cs->length(); index ++) {
|
|
||||||
_malloc_cs->append(other._malloc_cs->at(index));
|
|
||||||
}
|
|
||||||
|
|
||||||
for (index = 0; index < other._vm_cs->length(); index ++) {
|
MallocSiteIterator MemBaseline::malloc_sites(SortingOrder order) {
|
||||||
_vm_cs->append(other._vm_cs->at(index));
|
assert(!_malloc_sites.is_empty(), "Detail baseline?");
|
||||||
}
|
switch(order) {
|
||||||
|
case by_size:
|
||||||
|
malloc_sites_to_size_order();
|
||||||
|
break;
|
||||||
|
case by_site:
|
||||||
|
malloc_sites_to_allocation_site_order();
|
||||||
|
break;
|
||||||
|
case by_address:
|
||||||
|
default:
|
||||||
|
ShouldNotReachHere();
|
||||||
}
|
}
|
||||||
return *this;
|
return MallocSiteIterator(_malloc_sites.head());
|
||||||
}
|
}
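malloc_sites() (and virtual_memory_sites() below) never copy the site records: after making sure the list is in the requested order, they hand back an iterator seeded with the list head. A minimal sketch of such a head-seeded iterator; Node and ListIterator are illustrative, not the LinkedListIterator template:

template <class E>
struct Node { E data; Node* next; };

// Hands out elements one at a time, starting from the list head.
template <class E>
class ListIterator {
 public:
  explicit ListIterator(Node<E>* head) : _cur(head) {}
  E* current() { return _cur != nullptr ? &_cur->data : nullptr; }
  E* next() {
    if (_cur != nullptr) _cur = _cur->next;
    return current();
  }
 private:
  Node<E>* _cur;
};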
|
||||||
|
|
||||||
/* compare functions for sorting */
|
VirtualMemorySiteIterator MemBaseline::virtual_memory_sites(SortingOrder order) {
|
||||||
|
assert(!_virtual_memory_sites.is_empty(), "Detail baseline?");
|
||||||
// sort snapshot malloc'd records in callsite pc order
|
switch(order) {
|
||||||
int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
|
case by_size:
|
||||||
assert(MemTracker::track_callsite(),"Just check");
|
virtual_memory_sites_to_size_order();
|
||||||
const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
|
break;
|
||||||
const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
|
case by_site:
|
||||||
return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
|
virtual_memory_sites_to_reservation_site_order();
|
||||||
}
|
break;
|
||||||
|
case by_address:
|
||||||
// sort baselined malloc'd records in size order
|
default:
|
||||||
int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
|
ShouldNotReachHere();
|
||||||
assert(MemTracker::is_on(), "Just check");
|
}
|
||||||
const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
|
return VirtualMemorySiteIterator(_virtual_memory_sites.head());
|
||||||
const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
|
|
||||||
return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
|
|
||||||
}
|
|
||||||
|
|
||||||
// sort baselined malloc'd records in callsite pc order
|
|
||||||
int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
|
|
||||||
assert(MemTracker::is_on(), "Just check");
|
|
||||||
const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
|
|
||||||
const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
|
|
||||||
return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// sort baselined mmap'd records in size (reserved size) order
|
// Sorting allocation sites in different orders
|
||||||
int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
|
void MemBaseline::malloc_sites_to_size_order() {
|
||||||
assert(MemTracker::is_on(), "Just check");
|
if (_malloc_sites_order != by_size) {
|
||||||
const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
|
SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
|
||||||
const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
|
tmp(arena());
|
||||||
return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
|
|
||||||
|
// Add malloc sites to sorted linked list to sort into size order
|
||||||
|
tmp.move(&_malloc_sites);
|
||||||
|
_malloc_sites.set_head(tmp.head());
|
||||||
|
tmp.set_head(NULL);
|
||||||
|
_malloc_sites_order = by_size;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// sort baselined mmap'd records in callsite pc order
|
void MemBaseline::malloc_sites_to_allocation_site_order() {
|
||||||
int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
|
if (_malloc_sites_order != by_site) {
|
||||||
assert(MemTracker::is_on(), "Just check");
|
SortedLinkedList<MallocSite, compare_malloc_site, ResourceObj::ARENA>
|
||||||
const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
|
tmp(arena());
|
||||||
const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
|
// Add malloc sites to sorted linked list to sort into site (address) order
|
||||||
return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
|
tmp.move(&_malloc_sites);
|
||||||
|
_malloc_sites.set_head(tmp.head());
|
||||||
|
tmp.set_head(NULL);
|
||||||
|
_malloc_sites_order = by_site;
|
||||||
|
}
|
||||||
}
|
}
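Both conversion helpers above pay for a re-sort only when the cached order disagrees with the requested one. The same guard in standalone form with std::list (the real code moves nodes into an arena-backed SortedLinkedList rather than sorting in place):

#include <cstddef>
#include <cstdint>
#include <list>

struct Site { std::uintptr_t pc; std::size_t size; };

enum class Order { by_size, by_site };

// Re-sort only when the cached order differs from the requested one.
void sort_sites(std::list<Site>& sites, Order& current, Order wanted) {
  if (current == wanted) return;
  if (wanted == Order::by_size) {
    sites.sort([](const Site& a, const Site& b) { return a.size > b.size; });
  } else {
    sites.sort([](const Site& a, const Site& b) { return a.pc < b.pc; });
  }
  current = wanted;
}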
|
||||||
|
|
||||||
|
void MemBaseline::virtual_memory_sites_to_size_order() {
|
||||||
|
if (_virtual_memory_sites_order != by_size) {
|
||||||
|
SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_size, ResourceObj::ARENA>
|
||||||
|
tmp(arena());
|
||||||
|
|
||||||
// sort snapshot malloc'd records in memory block address order
|
tmp.move(&_virtual_memory_sites);
|
||||||
int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
|
|
||||||
assert(MemTracker::is_on(), "Just check");
|
_virtual_memory_sites.set_head(tmp.head());
|
||||||
const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
|
tmp.set_head(NULL);
|
||||||
const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
|
_virtual_memory_sites_order = by_size;
|
||||||
int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
|
}
|
||||||
assert(p1 == p2 || delta != 0, "dup pointer");
|
}
|
||||||
return delta;
|
|
||||||
|
void MemBaseline::virtual_memory_sites_to_reservation_site_order() {
|
||||||
|
if (_virtual_memory_sites_order != by_size) {
|
||||||
|
SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_site, ResourceObj::ARENA>
|
||||||
|
tmp(arena());
|
||||||
|
|
||||||
|
tmp.add(&_virtual_memory_sites);
|
||||||
|
|
||||||
|
_virtual_memory_sites.set_head(tmp.head());
|
||||||
|
tmp.set_head(NULL);
|
||||||
|
|
||||||
|
_virtual_memory_sites_order = by_size;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -25,425 +25,205 @@
|
|||||||
#ifndef SHARE_VM_SERVICES_MEM_BASELINE_HPP
|
#ifndef SHARE_VM_SERVICES_MEM_BASELINE_HPP
|
||||||
#define SHARE_VM_SERVICES_MEM_BASELINE_HPP
|
#define SHARE_VM_SERVICES_MEM_BASELINE_HPP
|
||||||
|
|
||||||
|
#if INCLUDE_NMT
|
||||||
|
|
||||||
#include "memory/allocation.hpp"
|
#include "memory/allocation.hpp"
|
||||||
#include "runtime/mutex.hpp"
|
#include "runtime/mutex.hpp"
|
||||||
#include "services/memPtr.hpp"
|
#include "services/mallocSiteTable.hpp"
|
||||||
#include "services/memSnapshot.hpp"
|
#include "services/mallocTracker.hpp"
|
||||||
|
#include "services/nmtCommon.hpp"
|
||||||
|
#include "services/virtualMemoryTracker.hpp"
|
||||||
|
#include "utilities/linkedlist.hpp"
|
||||||
|
|
||||||
// compare unsigned number
|
typedef LinkedListIterator<MallocSite> MallocSiteIterator;
|
||||||
#define UNSIGNED_COMPARE(a, b) ((a > b) ? 1 : ((a == b) ? 0 : -1))
|
typedef LinkedListIterator<VirtualMemoryAllocationSite> VirtualMemorySiteIterator;
|
||||||
|
typedef LinkedListIterator<ReservedMemoryRegion> VirtualMemoryAllocationIterator;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* MallocCallsitePointer and VMCallsitePointer are used
|
* Baseline a memory snapshot
|
||||||
* to baseline memory blocks with their callsite information.
|
|
||||||
* They are only available when detail tracking is turned
|
|
||||||
* on.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* baselined malloc record aggregated by callsite */
|
|
||||||
class MallocCallsitePointer : public MemPointer {
|
|
||||||
private:
|
|
||||||
size_t _count; // number of malloc invocations from this callsite
|
|
||||||
size_t _amount; // total amount of memory malloc-ed from this callsite
|
|
||||||
|
|
||||||
public:
|
|
||||||
MallocCallsitePointer() {
|
|
||||||
_count = 0;
|
|
||||||
_amount = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
MallocCallsitePointer(address pc) : MemPointer(pc) {
|
|
||||||
_count = 0;
|
|
||||||
_amount = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
MallocCallsitePointer& operator=(const MallocCallsitePointer& p) {
|
|
||||||
MemPointer::operator=(p);
|
|
||||||
_count = p.count();
|
|
||||||
_amount = p.amount();
|
|
||||||
return *this;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void inc(size_t size) {
|
|
||||||
_count ++;
|
|
||||||
_amount += size;
|
|
||||||
};
|
|
||||||
|
|
||||||
inline size_t count() const {
|
|
||||||
return _count;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline size_t amount() const {
|
|
||||||
return _amount;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// baselined virtual memory record aggregated by callsite
|
|
||||||
class VMCallsitePointer : public MemPointer {
|
|
||||||
private:
|
|
||||||
size_t _count; // number of invocations from this callsite
|
|
||||||
size_t _reserved_amount; // total reserved amount
|
|
||||||
size_t _committed_amount; // total committed amount
|
|
||||||
|
|
||||||
public:
|
|
||||||
VMCallsitePointer() {
|
|
||||||
_count = 0;
|
|
||||||
_reserved_amount = 0;
|
|
||||||
_committed_amount = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
VMCallsitePointer(address pc) : MemPointer(pc) {
|
|
||||||
_count = 0;
|
|
||||||
_reserved_amount = 0;
|
|
||||||
_committed_amount = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
VMCallsitePointer& operator=(const VMCallsitePointer& p) {
|
|
||||||
MemPointer::operator=(p);
|
|
||||||
_count = p.count();
|
|
||||||
_reserved_amount = p.reserved_amount();
|
|
||||||
_committed_amount = p.committed_amount();
|
|
||||||
return *this;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void inc(size_t reserved, size_t committed) {
|
|
||||||
_count ++;
|
|
||||||
_reserved_amount += reserved;
|
|
||||||
_committed_amount += committed;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline size_t count() const {
|
|
||||||
return _count;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline size_t reserved_amount() const {
|
|
||||||
return _reserved_amount;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline size_t committed_amount() const {
|
|
||||||
return _committed_amount;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// maps a memory type flag to readable name
|
|
||||||
typedef struct _memType2Name {
|
|
||||||
MEMFLAGS _flag;
|
|
||||||
const char* _name;
|
|
||||||
} MemType2Name;
|
|
||||||
|
|
||||||
|
|
||||||
// This class aggregates malloc'd records by memory type
|
|
||||||
class MallocMem VALUE_OBJ_CLASS_SPEC {
|
|
||||||
private:
|
|
||||||
MEMFLAGS _type;
|
|
||||||
|
|
||||||
size_t _count;
|
|
||||||
size_t _amount;
|
|
||||||
|
|
||||||
public:
|
|
||||||
MallocMem() {
|
|
||||||
_type = mtNone;
|
|
||||||
_count = 0;
|
|
||||||
_amount = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
MallocMem(MEMFLAGS flags) {
|
|
||||||
assert(HAS_VALID_MEMORY_TYPE(flags), "no type");
|
|
||||||
_type = FLAGS_TO_MEMORY_TYPE(flags);
|
|
||||||
_count = 0;
|
|
||||||
_amount = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void set_type(MEMFLAGS flag) {
|
|
||||||
_type = flag;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void clear() {
|
|
||||||
_count = 0;
|
|
||||||
_amount = 0;
|
|
||||||
_type = mtNone;
|
|
||||||
}
|
|
||||||
|
|
||||||
MallocMem& operator=(const MallocMem& m) {
|
|
||||||
assert(_type == m.type(), "different type");
|
|
||||||
_count = m.count();
|
|
||||||
_amount = m.amount();
|
|
||||||
return *this;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void inc(size_t amt) {
|
|
||||||
_amount += amt;
|
|
||||||
_count ++;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void reduce(size_t amt) {
|
|
||||||
assert(_amount >= amt, "Just check");
|
|
||||||
_amount -= amt;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void overwrite_counter(size_t count) {
|
|
||||||
_count = count;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline MEMFLAGS type() const {
|
|
||||||
return _type;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline bool is_type(MEMFLAGS flags) const {
|
|
||||||
return FLAGS_TO_MEMORY_TYPE(flags) == _type;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline size_t count() const {
|
|
||||||
return _count;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline size_t amount() const {
|
|
||||||
return _amount;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// This class records live arena's memory usage
|
|
||||||
class ArenaMem : public MallocMem {
|
|
||||||
public:
|
|
||||||
ArenaMem(MEMFLAGS typeflag): MallocMem(typeflag) {
|
|
||||||
}
|
|
||||||
ArenaMem() { }
|
|
||||||
};
|
|
||||||
|
|
||||||
// This class aggregates virtual memory by its memory type
|
|
||||||
class VMMem VALUE_OBJ_CLASS_SPEC {
|
|
||||||
private:
|
|
||||||
MEMFLAGS _type;
|
|
||||||
|
|
||||||
size_t _count;
|
|
||||||
size_t _reserved_amount;
|
|
||||||
size_t _committed_amount;
|
|
||||||
|
|
||||||
public:
|
|
||||||
VMMem() {
|
|
||||||
_type = mtNone;
|
|
||||||
_count = 0;
|
|
||||||
_reserved_amount = 0;
|
|
||||||
_committed_amount = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
VMMem(MEMFLAGS flags) {
|
|
||||||
assert(HAS_VALID_MEMORY_TYPE(flags), "no type");
|
|
||||||
_type = FLAGS_TO_MEMORY_TYPE(flags);
|
|
||||||
_count = 0;
|
|
||||||
_reserved_amount = 0;
|
|
||||||
_committed_amount = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void clear() {
|
|
||||||
_type = mtNone;
|
|
||||||
_count = 0;
|
|
||||||
_reserved_amount = 0;
|
|
||||||
_committed_amount = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void set_type(MEMFLAGS flags) {
|
|
||||||
_type = FLAGS_TO_MEMORY_TYPE(flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
VMMem& operator=(const VMMem& m) {
|
|
||||||
assert(_type == m.type(), "different type");
|
|
||||||
|
|
||||||
_count = m.count();
|
|
||||||
_reserved_amount = m.reserved_amount();
|
|
||||||
_committed_amount = m.committed_amount();
|
|
||||||
return *this;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
inline MEMFLAGS type() const {
|
|
||||||
return _type;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline bool is_type(MEMFLAGS flags) const {
|
|
||||||
return FLAGS_TO_MEMORY_TYPE(flags) == _type;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void inc(size_t reserved_amt, size_t committed_amt) {
|
|
||||||
_reserved_amount += reserved_amt;
|
|
||||||
_committed_amount += committed_amt;
|
|
||||||
_count ++;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline size_t count() const {
|
|
||||||
return _count;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline size_t reserved_amount() const {
|
|
||||||
return _reserved_amount;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline size_t committed_amount() const {
|
|
||||||
return _committed_amount;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#define NUMBER_OF_MEMORY_TYPE (mt_number_of_types + 1)
|
|
||||||
|
|
||||||
class BaselineReporter;
|
|
||||||
class BaselineComparisonReporter;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* This class baselines current memory snapshot.
|
|
||||||
* A memory baseline summarizes memory usage by memory type,
|
|
||||||
* aggregates memory usage by callsites when detail tracking
|
|
||||||
* is on.
|
|
||||||
*/
|
*/
|
||||||
class MemBaseline VALUE_OBJ_CLASS_SPEC {
|
class MemBaseline VALUE_OBJ_CLASS_SPEC {
|
||||||
friend class BaselineReporter;
|
public:
|
||||||
friend class BaselineComparisonReporter;
|
enum BaselineThreshold {
|
||||||
|
SIZE_THRESHOLD = K // Only allocation size over this threshold will be baselined.
|
||||||
|
};
|
||||||
|
|
||||||
|
enum BaselineType {
|
||||||
|
Not_baselined,
|
||||||
|
Summary_baselined,
|
||||||
|
Detail_baselined
|
||||||
|
};
|
||||||
|
|
||||||
|
enum SortingOrder {
|
||||||
|
by_address, // by memory address
|
||||||
|
by_size, // by memory size
|
||||||
|
by_site // by call site where the memory is allocated from
|
||||||
|
};
|
||||||
|
|
||||||
private:
|
private:
|
||||||
// overall summaries
|
// All baseline data is stored in this arena
|
||||||
size_t _total_malloced;
|
Arena* _arena;
|
||||||
size_t _total_vm_reserved;
|
|
||||||
size_t _total_vm_committed;
|
|
||||||
size_t _number_of_classes;
|
|
||||||
size_t _number_of_threads;
|
|
||||||
|
|
||||||
// whether it has been properly baselined
|
// Summary information
|
||||||
bool _baselined;
|
MallocMemorySnapshot* _malloc_memory_snapshot;
|
||||||
|
VirtualMemorySnapshot* _virtual_memory_snapshot;
|
||||||
|
|
||||||
// we categorize memory into three categories within the memory type
|
size_t _class_count;
|
||||||
MallocMem _malloc_data[NUMBER_OF_MEMORY_TYPE];
|
|
||||||
VMMem _vm_data[NUMBER_OF_MEMORY_TYPE];
|
|
||||||
ArenaMem _arena_data[NUMBER_OF_MEMORY_TYPE];
|
|
||||||
|
|
||||||
// memory records that aggregate memory usage by callsites.
|
// Allocation sites information
|
||||||
// only available when detail tracking is on.
|
// Malloc allocation sites
|
||||||
MemPointerArray* _malloc_cs;
|
LinkedListImpl<MallocSite, ResourceObj::ARENA>
|
||||||
MemPointerArray* _vm_cs;
|
_malloc_sites;
|
||||||
// virtual memory map
|
|
||||||
MemPointerArray* _vm_map;
|
|
||||||
|
|
||||||
private:
|
// All virtual memory allocations
|
||||||
static MemType2Name MemType2NameMap[NUMBER_OF_MEMORY_TYPE];
|
LinkedListImpl<ReservedMemoryRegion, ResourceObj::ARENA>
|
||||||
|
_virtual_memory_allocations;
|
||||||
|
|
||||||
private:
|
// Virtual memory allocations by allocation sites, always in by_address
|
||||||
// should not use copy constructor
|
// order
|
||||||
MemBaseline(MemBaseline& copy) { ShouldNotReachHere(); }
|
LinkedListImpl<VirtualMemoryAllocationSite, ResourceObj::ARENA>
|
||||||
|
_virtual_memory_sites;
|
||||||
|
|
||||||
// check and block at a safepoint
|
SortingOrder _malloc_sites_order;
|
||||||
static inline void check_safepoint(JavaThread* thr);
|
SortingOrder _virtual_memory_sites_order;
|
||||||
|
|
||||||
|
BaselineType _baseline_type;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
// create a memory baseline
|
// create a memory baseline
|
||||||
MemBaseline();
|
MemBaseline():
|
||||||
|
_baseline_type(Not_baselined),
|
||||||
~MemBaseline();
|
_class_count(0),
|
||||||
|
_arena(NULL),
|
||||||
inline bool baselined() const {
|
_malloc_memory_snapshot(NULL),
|
||||||
return _baselined;
|
_virtual_memory_snapshot(NULL),
|
||||||
|
_malloc_sites(NULL) {
|
||||||
}
|
}
|
||||||
|
|
||||||
MemBaseline& operator=(const MemBaseline& other);
|
~MemBaseline() {
|
||||||
|
reset();
|
||||||
|
if (_arena != NULL) {
|
||||||
|
delete _arena;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool baseline(bool summaryOnly = true);
|
||||||
|
|
||||||
|
BaselineType baseline_type() const { return _baseline_type; }
|
||||||
|
|
||||||
|
MallocMemorySnapshot* malloc_memory_snapshot() const {
|
||||||
|
return _malloc_memory_snapshot;
|
||||||
|
}
|
||||||
|
|
||||||
|
VirtualMemorySnapshot* virtual_memory_snapshot() const {
|
||||||
|
return _virtual_memory_snapshot;
|
||||||
|
}
|
||||||
|
|
||||||
|
MallocSiteIterator malloc_sites(SortingOrder order);
|
||||||
|
VirtualMemorySiteIterator virtual_memory_sites(SortingOrder order);
|
||||||
|
|
||||||
|
// Virtual memory allocation iterator always returns in virtual memory
|
||||||
|
// base address order.
|
||||||
|
VirtualMemoryAllocationIterator virtual_memory_allocations() {
|
||||||
|
assert(!_virtual_memory_allocations.is_empty(), "Not detail baseline");
|
||||||
|
return VirtualMemoryAllocationIterator(_virtual_memory_allocations.head());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Total reserved memory = total malloc'd memory + total reserved virtual
|
||||||
|
// memory
|
||||||
|
size_t total_reserved_memory() const {
|
||||||
|
assert(baseline_type() != Not_baselined, "Not yet baselined");
|
||||||
|
assert(_virtual_memory_snapshot != NULL, "No virtual memory snapshot");
|
||||||
|
assert(_malloc_memory_snapshot != NULL, "No malloc memory snapshot");
|
||||||
|
size_t amount = _malloc_memory_snapshot->total() +
|
||||||
|
_virtual_memory_snapshot->total_reserved();
|
||||||
|
return amount;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Total committed memory = total malloc'd memory + total committed
|
||||||
|
// virtual memory
|
||||||
|
size_t total_committed_memory() const {
|
||||||
|
assert(baseline_type() != Not_baselined, "Not yet baselined");
|
||||||
|
assert(_virtual_memory_snapshot != NULL,
|
||||||
|
"Not a snapshot");
|
||||||
|
size_t amount = _malloc_memory_snapshot->total() +
|
||||||
|
_virtual_memory_snapshot->total_committed();
|
||||||
|
return amount;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t total_arena_memory() const {
|
||||||
|
assert(baseline_type() != Not_baselined, "Not yet baselined");
|
||||||
|
assert(_malloc_memory_snapshot != NULL, "Not yet baselined");
|
||||||
|
return _malloc_memory_snapshot->total_arena();
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t malloc_tracking_overhead() const {
|
||||||
|
assert(baseline_type() != Not_baselined, "Not yet baselined");
|
||||||
|
return _malloc_memory_snapshot->malloc_overhead()->size();
|
||||||
|
}
|
||||||
|
|
||||||
|
const MallocMemory* malloc_memory(MEMFLAGS flag) const {
|
||||||
|
assert(_malloc_memory_snapshot != NULL, "Not a snapshot");
|
||||||
|
return _malloc_memory_snapshot->by_type(flag);
|
||||||
|
}
|
||||||
|
|
||||||
|
const VirtualMemory* virtual_memory(MEMFLAGS flag) const {
|
||||||
|
assert(_virtual_memory_snapshot != NULL, "Not a snapshot");
|
||||||
|
return _virtual_memory_snapshot->by_type(flag);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
size_t class_count() const {
|
||||||
|
assert(baseline_type() != Not_baselined, "Not yet baselined");
|
||||||
|
return _class_count;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t thread_count() const {
|
||||||
|
assert(baseline_type() != Not_baselined, "Not yet baselined");
|
||||||
|
assert(_malloc_memory_snapshot != NULL, "Baselined?");
|
||||||
|
return _malloc_memory_snapshot->thread_count();
|
||||||
|
}
|
||||||
|
|
||||||
// reset the baseline for reuse
|
// reset the baseline for reuse
|
||||||
void clear();
|
void reset() {
|
||||||
|
_baseline_type = Not_baselined;
|
||||||
|
_malloc_memory_snapshot = NULL;
|
||||||
|
_virtual_memory_snapshot = NULL;
|
||||||
|
_class_count = 0;
|
||||||
|
|
||||||
// baseline the snapshot
|
_malloc_sites = NULL;
|
||||||
bool baseline(MemSnapshot& snapshot, bool summary_only = true);
|
_virtual_memory_sites = NULL;
|
||||||
|
_virtual_memory_allocations = NULL;
|
||||||
|
|
||||||
bool baseline(const MemPointerArray* malloc_records,
|
if (_arena != NULL) {
|
||||||
const MemPointerArray* vm_records,
|
_arena->destruct_contents();
|
||||||
bool summary_only = true);
|
}
|
||||||
|
|
||||||
// total malloc'd memory of specified memory type
|
|
||||||
inline size_t malloc_amount(MEMFLAGS flag) const {
|
|
||||||
return _malloc_data[flag2index(flag)].amount();
|
|
||||||
}
|
}
|
||||||
// number of malloc'd memory blocks of specified memory type
|
|
||||||
inline size_t malloc_count(MEMFLAGS flag) const {
|
|
||||||
return _malloc_data[flag2index(flag)].count();
|
|
||||||
}
|
|
||||||
// total memory used by arenas of specified memory type
|
|
||||||
inline size_t arena_amount(MEMFLAGS flag) const {
|
|
||||||
return _arena_data[flag2index(flag)].amount();
|
|
||||||
}
|
|
||||||
// number of arenas of specified memory type
|
|
||||||
inline size_t arena_count(MEMFLAGS flag) const {
|
|
||||||
return _arena_data[flag2index(flag)].count();
|
|
||||||
}
|
|
||||||
// total reserved memory of specified memory type
|
|
||||||
inline size_t reserved_amount(MEMFLAGS flag) const {
|
|
||||||
return _vm_data[flag2index(flag)].reserved_amount();
|
|
||||||
}
|
|
||||||
// total committed memory of specified memory type
|
|
||||||
inline size_t committed_amount(MEMFLAGS flag) const {
|
|
||||||
return _vm_data[flag2index(flag)].committed_amount();
|
|
||||||
}
|
|
||||||
// total memory (malloc'd + mmap'd + arena) of specified
|
|
||||||
// memory type
|
|
||||||
inline size_t total_amount(MEMFLAGS flag) const {
|
|
||||||
int index = flag2index(flag);
|
|
||||||
return _malloc_data[index].amount() +
|
|
||||||
_vm_data[index].reserved_amount() +
|
|
||||||
_arena_data[index].amount();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* overall summaries */
|
|
||||||
|
|
||||||
// total malloc'd memory in snapshot
|
|
||||||
inline size_t total_malloc_amount() const {
|
|
||||||
return _total_malloced;
|
|
||||||
}
|
|
||||||
// total mmap'd memory in snapshot
|
|
||||||
inline size_t total_reserved_amount() const {
|
|
||||||
return _total_vm_reserved;
|
|
||||||
}
|
|
||||||
// total committed memory in snapshot
|
|
||||||
inline size_t total_committed_amount() const {
|
|
||||||
return _total_vm_committed;
|
|
||||||
}
|
|
||||||
// number of loaded classes
|
|
||||||
inline size_t number_of_classes() const {
|
|
||||||
return _number_of_classes;
|
|
||||||
}
|
|
||||||
// number of running threads
|
|
||||||
inline size_t number_of_threads() const {
|
|
||||||
return _number_of_threads;
|
|
||||||
}
|
|
||||||
// lookup human readable name of a memory type
|
|
||||||
static const char* type2name(MEMFLAGS type);
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
// convert memory flag to the index to mapping table
|
// Baseline summary information
|
||||||
int flag2index(MEMFLAGS flag) const;
|
bool baseline_summary();
|
||||||
|
|
||||||
// reset baseline values
|
// Baseline allocation sites (detail tracking only)
|
||||||
void reset();
|
bool baseline_allocation_sites();
|
||||||
|
|
||||||
// summarize the records in global snapshot
|
// Aggregate virtual memory allocation by allocation sites
|
||||||
bool baseline_malloc_summary(const MemPointerArray* malloc_records);
|
bool aggregate_virtual_memory_allocation_sites();
|
||||||
bool baseline_vm_summary(const MemPointerArray* vm_records);
|
|
||||||
bool baseline_malloc_details(const MemPointerArray* malloc_records);
|
|
||||||
bool baseline_vm_details(const MemPointerArray* vm_records);
|
|
||||||
|
|
||||||
// print a line of malloc'd memory aggregated by callsite
|
Arena* arena() { return _arena; }
|
||||||
void print_malloc_callsite(outputStream* st, address pc, size_t size,
|
|
||||||
size_t count, int diff_amt, int diff_count) const;
|
|
||||||
// print a line of mmap'd memory aggregated by callsite
|
|
||||||
void print_vm_callsite(outputStream* st, address pc, size_t rsz,
|
|
||||||
size_t csz, int diff_rsz, int diff_csz) const;
|
|
||||||
|
|
||||||
// sorting functions for raw records
|
// Sorting allocation sites in different orders
|
||||||
static int malloc_sort_by_pc(const void* p1, const void* p2);
|
// Sort allocation sites in size order
|
||||||
static int malloc_sort_by_addr(const void* p1, const void* p2);
|
void malloc_sites_to_size_order();
|
||||||
|
// Sort allocation sites in call site address order
|
||||||
|
void malloc_sites_to_allocation_site_order();
|
||||||
|
|
||||||
private:
|
// Sort allocation sites in reserved size order
|
||||||
// sorting functions for baselined records
|
void virtual_memory_sites_to_size_order();
|
||||||
static int bl_malloc_sort_by_size(const void* p1, const void* p2);
|
// Sort allocation sites in call site address order
|
||||||
static int bl_vm_sort_by_size(const void* p1, const void* p2);
|
void virtual_memory_sites_to_reservation_site_order();
|
||||||
static int bl_malloc_sort_by_pc(const void* p1, const void* p2);
|
|
||||||
static int bl_vm_sort_by_pc(const void* p1, const void* p2);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#endif // INCLUDE_NMT
|
||||||
|
|
||||||
#endif // SHARE_VM_SERVICES_MEM_BASELINE_HPP
|
#endif // SHARE_VM_SERVICES_MEM_BASELINE_HPP
|
||||||
|
@ -1,509 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
||||||
*
|
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
|
||||||
* under the terms of the GNU General Public License version 2 only, as
|
|
||||||
* published by the Free Software Foundation.
|
|
||||||
*
|
|
||||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
||||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
||||||
* version 2 for more details (a copy is included in the LICENSE file that
|
|
||||||
* accompanied this code).
|
|
||||||
*
|
|
||||||
* You should have received a copy of the GNU General Public License version
|
|
||||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
||||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
||||||
*
|
|
||||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
||||||
* or visit www.oracle.com if you need additional information or have any
|
|
||||||
* questions.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef SHARE_VM_SERVICES_MEM_PTR_HPP
|
|
||||||
#define SHARE_VM_SERVICES_MEM_PTR_HPP
|
|
||||||
|
|
||||||
#include "memory/allocation.hpp"
|
|
||||||
#include "runtime/os.hpp"
|
|
||||||
#include "runtime/safepoint.hpp"
|
|
||||||
|
|
||||||
/*
|
|
||||||
* global sequence generator that generates sequence numbers to serialize
|
|
||||||
* memory records.
|
|
||||||
*/
|
|
||||||
class SequenceGenerator : AllStatic {
|
|
||||||
public:
|
|
||||||
static jint next();
|
|
||||||
|
|
||||||
// peek last sequence number
|
|
||||||
static jint peek() {
|
|
||||||
return _seq_number;
|
|
||||||
}
|
|
||||||
|
|
||||||
// reset sequence number
|
|
||||||
static void reset() {
|
|
||||||
assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
|
|
||||||
_seq_number = 1;
|
|
||||||
_generation ++;
|
|
||||||
};
|
|
||||||
|
|
||||||
static unsigned long current_generation() { return _generation; }
|
|
||||||
NOT_PRODUCT(static jint max_seq_num() { return _max_seq_number; })
|
|
||||||
|
|
||||||
private:
|
|
||||||
static volatile jint _seq_number;
|
|
||||||
static volatile unsigned long _generation;
|
|
||||||
NOT_PRODUCT(static jint _max_seq_number; )
|
|
||||||
};
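The removed SequenceGenerator hands out monotonically increasing numbers so that recorded memory events can be replayed in order, and bumps a generation counter whenever the sequence is reset. The increment itself lives in the .cpp file, which is not part of this diff, so the snippet below is only an equivalent sketch using std::atomic:

#include <atomic>

class SequenceGen {
 public:
  // next() returns 1, 2, 3, ...; reset() starts a new generation.
  static int  next()  { return _seq.fetch_add(1) + 1; }
  static void reset() { _seq.store(0); _generation.fetch_add(1); }
  static unsigned long generation() { return _generation.load(); }
 private:
  static std::atomic<int>           _seq;
  static std::atomic<unsigned long> _generation;
};

std::atomic<int>           SequenceGen::_seq{0};
std::atomic<unsigned long> SequenceGen::_generation{0};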
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The following classes are used to hold memory activity records in different stages.
|
|
||||||
* MemPointer
|
|
||||||
* |--------MemPointerRecord
|
|
||||||
* |
|
|
||||||
* |----MemPointerRecordEx
|
|
||||||
* | |
|
|
||||||
* | |-------SeqMemPointerRecordEx
|
|
||||||
* |
|
|
||||||
* |----SeqMemPointerRecord
|
|
||||||
* |
|
|
||||||
* |----VMMemRegion
|
|
||||||
* |
|
|
||||||
* |-----VMMemRegionEx
|
|
||||||
*
|
|
||||||
*
|
|
||||||
* prefix 'Seq' - sequenced, the record contains a sequence number
|
|
||||||
* suffix 'Ex' - extension, the record contains the caller's pc
|
|
||||||
*
|
|
||||||
* per-thread recorder : SeqMemPointerRecord(Ex)
|
|
||||||
* snapshot staging : SeqMemPointerRecord(Ex)
|
|
||||||
* snapshot : MemPointerRecord(Ex) and VMMemRegion(Ex)
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* class that wraps an address to a memory block,
|
|
||||||
* the memory pointer either points to a malloc'd
|
|
||||||
* memory block, or a mmap'd memory block
|
|
||||||
*/
|
|
||||||
class MemPointer VALUE_OBJ_CLASS_SPEC {
|
|
||||||
public:
|
|
||||||
MemPointer(): _addr(0) { }
|
|
||||||
MemPointer(address addr): _addr(addr) { }
|
|
||||||
|
|
||||||
MemPointer(const MemPointer& copy_from) {
|
|
||||||
_addr = copy_from.addr();
|
|
||||||
}
|
|
||||||
|
|
||||||
inline address addr() const {
|
|
||||||
return _addr;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline operator address() const {
|
|
||||||
return addr();
|
|
||||||
}
|
|
||||||
|
|
||||||
inline bool operator == (const MemPointer& other) const {
|
|
||||||
return addr() == other.addr();
|
|
||||||
}
|
|
||||||
|
|
||||||
inline MemPointer& operator = (const MemPointer& other) {
|
|
||||||
_addr = other.addr();
|
|
||||||
return *this;
|
|
||||||
}
|
|
||||||
|
|
||||||
protected:
|
|
||||||
inline void set_addr(address addr) { _addr = addr; }
|
|
||||||
|
|
||||||
protected:
|
|
||||||
// memory address
|
|
||||||
address _addr;
|
|
||||||
};
|
|
||||||
|
|
||||||
/* MemPointerRecord records an activity and associated
|
|
||||||
* attributes on a memory block.
|
|
||||||
*/
|
|
||||||
class MemPointerRecord : public MemPointer {
|
|
||||||
private:
|
|
||||||
MEMFLAGS _flags;
|
|
||||||
size_t _size;
|
|
||||||
|
|
||||||
public:
|
|
||||||
/* extension of MemoryType enum
|
|
||||||
* see share/vm/memory/allocation.hpp for details.
|
|
||||||
*
|
|
||||||
* The tag values are associated with sorting orders, so be
|
|
||||||
* careful if changes are needed.
|
|
||||||
* The allocation records should be sorted ahead of tagging
|
|
||||||
* records, which in turn are sorted ahead of deallocation records
|
|
||||||
*/
|
|
||||||
enum MemPointerTags {
|
|
||||||
tag_alloc = 0x0001, // malloc or reserve record
|
|
||||||
tag_commit = 0x0002, // commit record
|
|
||||||
tag_type = 0x0003, // tag virtual memory to a memory type
|
|
||||||
tag_uncommit = 0x0004, // uncommit record
|
|
||||||
tag_release = 0x0005, // free or release record
|
|
||||||
tag_size = 0x0006, // arena size
|
|
||||||
tag_masks = 0x0007, // all tag bits
|
|
||||||
vmBit = 0x0008
|
|
||||||
};
|
|
||||||
|
|
||||||
/* helper functions to interpret the tagging flags */
|
|
||||||
|
|
||||||
inline static bool is_allocation_record(MEMFLAGS flags) {
|
|
||||||
return (flags & tag_masks) == tag_alloc;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline static bool is_deallocation_record(MEMFLAGS flags) {
|
|
||||||
return (flags & tag_masks) == tag_release;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline static bool is_arena_record(MEMFLAGS flags) {
|
|
||||||
return (flags & (otArena | tag_size)) == otArena;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline static bool is_arena_memory_record(MEMFLAGS flags) {
|
|
||||||
return (flags & (otArena | tag_size)) == (otArena | tag_size);
|
|
||||||
}
|
|
||||||
|
|
||||||
inline static bool is_virtual_memory_record(MEMFLAGS flags) {
|
|
||||||
return (flags & vmBit) != 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline static bool is_virtual_memory_reserve_record(MEMFLAGS flags) {
|
|
||||||
return (flags & 0x0F) == (tag_alloc | vmBit);
|
|
||||||
}
|
|
||||||
|
|
||||||
inline static bool is_virtual_memory_commit_record(MEMFLAGS flags) {
|
|
||||||
return (flags & 0x0F) == (tag_commit | vmBit);
|
|
||||||
}
|
|
||||||
|
|
||||||
inline static bool is_virtual_memory_uncommit_record(MEMFLAGS flags) {
|
|
||||||
return (flags & 0x0F) == (tag_uncommit | vmBit);
|
|
||||||
}
|
|
||||||
|
|
||||||
inline static bool is_virtual_memory_release_record(MEMFLAGS flags) {
|
|
||||||
return (flags & 0x0F) == (tag_release | vmBit);
|
|
||||||
}
|
|
||||||
|
|
||||||
inline static bool is_virtual_memory_type_record(MEMFLAGS flags) {
|
|
||||||
return (flags & 0x0F) == (tag_type | vmBit);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* tagging flags */
|
|
||||||
inline static MEMFLAGS malloc_tag() { return tag_alloc; }
|
|
||||||
inline static MEMFLAGS free_tag() { return tag_release; }
|
|
||||||
inline static MEMFLAGS arena_size_tag() { return tag_size | otArena; }
|
|
||||||
inline static MEMFLAGS virtual_memory_tag() { return vmBit; }
|
|
||||||
inline static MEMFLAGS virtual_memory_reserve_tag() { return (tag_alloc | vmBit); }
|
|
||||||
inline static MEMFLAGS virtual_memory_commit_tag() { return (tag_commit | vmBit); }
|
|
||||||
inline static MEMFLAGS virtual_memory_uncommit_tag(){ return (tag_uncommit | vmBit); }
|
|
||||||
inline static MEMFLAGS virtual_memory_release_tag() { return (tag_release | vmBit); }
|
|
||||||
inline static MEMFLAGS virtual_memory_type_tag() { return (tag_type | vmBit); }
|
|
||||||
|
|
||||||
public:
|
|
||||||
MemPointerRecord(): _size(0), _flags(mtNone) { }
|
|
||||||
|
|
||||||
MemPointerRecord(address addr, MEMFLAGS memflags, size_t size = 0):
|
|
||||||
MemPointer(addr), _flags(memflags), _size(size) { }
|
|
||||||
|
|
||||||
MemPointerRecord(const MemPointerRecord& copy_from):
|
|
||||||
MemPointer(copy_from), _flags(copy_from.flags()),
|
|
||||||
_size(copy_from.size()) {
|
|
||||||
}
|
|
||||||
|
|
||||||
/* MemPointerRecord is not sequenced, it always return
|
|
||||||
* 0 to indicate non-sequenced
|
|
||||||
*/
|
|
||||||
virtual jint seq() const { return 0; }
|
|
||||||
|
|
||||||
inline size_t size() const { return _size; }
|
|
||||||
inline void set_size(size_t size) { _size = size; }
|
|
||||||
|
|
||||||
inline MEMFLAGS flags() const { return _flags; }
|
|
||||||
inline void set_flags(MEMFLAGS flags) { _flags = flags; }
|
|
||||||
|
|
||||||
MemPointerRecord& operator= (const MemPointerRecord& ptr) {
|
|
||||||
MemPointer::operator=(ptr);
|
|
||||||
_flags = ptr.flags();
|
|
||||||
#ifdef ASSERT
|
|
||||||
if (IS_ARENA_OBJ(_flags)) {
|
|
||||||
assert(!is_vm_pointer(), "wrong flags");
|
|
||||||
assert((_flags & ot_masks) == otArena, "wrong flags");
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
_size = ptr.size();
|
|
||||||
return *this;
|
|
||||||
}
|
|
||||||
|
|
||||||
// if the pointer represents a malloc-ed memory address
|
|
||||||
inline bool is_malloced_pointer() const {
|
|
||||||
return !is_vm_pointer();
|
|
||||||
}
|
|
||||||
|
|
||||||
// if the pointer represents a virtual memory address
|
|
||||||
inline bool is_vm_pointer() const {
|
|
||||||
return is_virtual_memory_record(_flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
// if this record records a 'malloc' or virtual memory
|
|
||||||
// 'reserve' call
|
|
||||||
inline bool is_allocation_record() const {
|
|
||||||
return is_allocation_record(_flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
// if this record records a size information of an arena
|
|
||||||
inline bool is_arena_memory_record() const {
|
|
||||||
return is_arena_memory_record(_flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
// if this pointer represents an address to an arena object
|
|
||||||
inline bool is_arena_record() const {
|
|
||||||
return is_arena_record(_flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
// if this record represents a size information of specific arena
|
|
||||||
inline bool is_memory_record_of_arena(const MemPointerRecord* arena_rc) {
|
|
||||||
assert(is_arena_memory_record(), "not size record");
|
|
||||||
assert(arena_rc->is_arena_record(), "not arena record");
|
|
||||||
return (arena_rc->addr() + sizeof(void*)) == addr();
|
|
||||||
}
|
|
||||||
|
|
||||||
// if this record records a 'free' or virtual memory 'free' call
|
|
||||||
inline bool is_deallocation_record() const {
|
|
||||||
return is_deallocation_record(_flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
// if this record records a virtual memory 'commit' call
|
|
||||||
inline bool is_commit_record() const {
|
|
||||||
return is_virtual_memory_commit_record(_flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
// if this record records a virtual memory 'uncommit' call
|
|
||||||
inline bool is_uncommit_record() const {
|
|
||||||
return is_virtual_memory_uncommit_record(_flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
// if this record is a tagging record of a virtual memory block
|
|
||||||
inline bool is_type_tagging_record() const {
|
|
||||||
return is_virtual_memory_type_record(_flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
// if the two memory pointer records actually represent the same
|
|
||||||
// memory block
|
|
||||||
inline bool is_same_region(const MemPointerRecord* other) const {
|
|
||||||
return (addr() == other->addr() && size() == other->size());
|
|
||||||
}
|
|
||||||
|
|
||||||
// if this memory region fully contains another one
|
|
||||||
inline bool contains_region(const MemPointerRecord* other) const {
|
|
||||||
return contains_region(other->addr(), other->size());
|
|
||||||
}
|
|
||||||
|
|
||||||
// if this memory region fully contains specified memory range
|
|
||||||
inline bool contains_region(address add, size_t sz) const {
|
|
||||||
return (addr() <= add && addr() + size() >= add + sz);
|
|
||||||
}
|
|
||||||
|
|
||||||
inline bool contains_address(address add) const {
|
|
||||||
return (addr() <= add && addr() + size() > add);
|
|
||||||
}
|
|
||||||
|
|
||||||
// if this memory region overlaps another region
|
|
||||||
inline bool overlaps_region(const MemPointerRecord* other) const {
|
|
||||||
assert(other != NULL, "Just check");
|
|
||||||
assert(size() > 0 && other->size() > 0, "empty range");
|
|
||||||
return contains_address(other->addr()) ||
|
|
||||||
contains_address(other->addr() + other->size() - 1) || // exclude end address
|
|
||||||
other->contains_address(addr()) ||
|
|
||||||
other->contains_address(addr() + size() - 1); // exclude end address
|
|
||||||
}
|
|
||||||
|
|
||||||
};
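The helpers above pack the kind of record into the low bits of the MEMFLAGS word (tag bits 0-2, virtual-memory marker in bit 3) and test it with masks. The same encoding re-created as a standalone sketch; the constants mirror the enum values above, but the uint16_t flag type is illustrative:

#include <cstdint>

// Low three bits carry the operation tag, bit 3 marks a virtual-memory record.
enum : std::uint16_t {
  TAG_ALLOC   = 0x0001,
  TAG_COMMIT  = 0x0002,
  TAG_RELEASE = 0x0005,
  TAG_MASK    = 0x0007,
  VM_BIT      = 0x0008
};

inline bool is_allocation(std::uint16_t flags) {
  return (flags & TAG_MASK) == TAG_ALLOC;
}
inline bool is_vm_reserve(std::uint16_t flags) {
  // a reserve is an allocation tag with the virtual-memory bit set
  return (flags & (TAG_MASK | VM_BIT)) == (TAG_ALLOC | VM_BIT);
}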
|
|
||||||
|
|
||||||
// MemPointerRecordEx also records callsite pc, from where
|
|
||||||
// the memory block is allocated
|
|
||||||
class MemPointerRecordEx : public MemPointerRecord {
|
|
||||||
private:
|
|
||||||
address _pc; // callsite pc
|
|
||||||
|
|
||||||
public:
|
|
||||||
MemPointerRecordEx(): _pc(0) { }
|
|
||||||
|
|
||||||
MemPointerRecordEx(address addr, MEMFLAGS memflags, size_t size = 0, address pc = 0):
|
|
||||||
MemPointerRecord(addr, memflags, size), _pc(pc) {}
|
|
||||||
|
|
||||||
MemPointerRecordEx(const MemPointerRecordEx& copy_from):
|
|
||||||
MemPointerRecord(copy_from), _pc(copy_from.pc()) {}
|
|
||||||
|
|
||||||
inline address pc() const { return _pc; }
|
|
||||||
|
|
||||||
void init(const MemPointerRecordEx* mpe) {
|
|
||||||
MemPointerRecord::operator=(*mpe);
|
|
||||||
_pc = mpe->pc();
|
|
||||||
}
|
|
||||||
|
|
||||||
void init(const MemPointerRecord* mp) {
|
|
||||||
MemPointerRecord::operator=(*mp);
|
|
||||||
_pc = 0;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// a virtual memory region. The region can represent a reserved
|
|
||||||
// virtual memory region or a committed memory region
|
|
||||||
class VMMemRegion : public MemPointerRecord {
|
|
||||||
public:
|
|
||||||
VMMemRegion() { }
|
|
||||||
|
|
||||||
void init(const MemPointerRecord* mp) {
|
|
||||||
assert(mp->is_vm_pointer(), "Sanity check");
|
|
||||||
_addr = mp->addr();
|
|
||||||
set_size(mp->size());
|
|
||||||
set_flags(mp->flags());
|
|
||||||
}
|
|
||||||
|
|
||||||
VMMemRegion& operator=(const VMMemRegion& other) {
|
|
||||||
MemPointerRecord::operator=(other);
|
|
||||||
return *this;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline bool is_reserved_region() const {
|
|
||||||
return is_allocation_record();
|
|
||||||
}
|
|
||||||
|
|
||||||
inline bool is_committed_region() const {
|
|
||||||
return is_commit_record();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* base address of this virtual memory range */
|
|
||||||
inline address base() const {
|
|
||||||
return addr();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* tag this virtual memory range to the specified memory type */
|
|
||||||
inline void tag(MEMFLAGS f) {
|
|
||||||
set_flags(flags() | (f & mt_masks));
|
|
||||||
}
|
|
||||||
|
|
||||||
// expand this region to also cover specified range.
|
|
||||||
// The range has to be on either end of the memory region.
|
|
||||||
void expand_region(address addr, size_t sz) {
|
|
||||||
if (addr < base()) {
|
|
||||||
assert(addr + sz == base(), "Sanity check");
|
|
||||||
_addr = addr;
|
|
||||||
set_size(size() + sz);
|
|
||||||
} else {
|
|
||||||
assert(base() + size() == addr, "Sanity check");
|
|
||||||
set_size(size() + sz);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// exclude the specified address range from this region.
|
|
||||||
// The excluded memory range has to be on either end of this memory
|
|
||||||
// region.
|
|
||||||
inline void exclude_region(address add, size_t sz) {
|
|
||||||
assert(is_reserved_region() || is_committed_region(), "Sanity check");
|
|
||||||
assert(addr() != NULL && size() != 0, "Sanity check");
|
|
||||||
assert(add >= addr() && add < addr() + size(), "Sanity check");
|
|
||||||
assert(add == addr() || (add + sz) == (addr() + size()),
|
|
||||||
"exclude in the middle");
|
|
||||||
if (add == addr()) {
|
|
||||||
set_addr(add + sz);
|
|
||||||
set_size(size() - sz);
|
|
||||||
} else {
|
|
||||||
set_size(size() - sz);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
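expand_region() and exclude_region() only ever touch one end of the range, so the bookkeeping stays a base/size adjustment plus an assert that the caller respected that contract. A standalone sketch of the two operations, with Range standing in for VMMemRegion:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct Range { std::uintptr_t base; std::size_t size; };

// Grow a range by a chunk that touches either end, as expand_region() does.
inline void expand(Range& r, std::uintptr_t addr, std::size_t sz) {
  if (addr < r.base) {
    assert(addr + sz == r.base && "chunk must end where the range starts");
    r.base = addr;
  } else {
    assert(r.base + r.size == addr && "chunk must start where the range ends");
  }
  r.size += sz;
}

// Cut a chunk off either end of a range, as exclude_region() does;
// cutting from the middle is not supported.
inline void exclude(Range& r, std::uintptr_t addr, std::size_t sz) {
  assert(addr == r.base || addr + sz == r.base + r.size);
  if (addr == r.base) r.base += sz;
  r.size -= sz;
}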
|
|
||||||
|
|
||||||
class VMMemRegionEx : public VMMemRegion {
|
|
||||||
private:
|
|
||||||
jint _seq; // sequence number
|
|
||||||
|
|
||||||
public:
|
|
||||||
VMMemRegionEx(): _pc(0) { }
|
|
||||||
|
|
||||||
void init(const MemPointerRecordEx* mpe) {
|
|
||||||
VMMemRegion::init(mpe);
|
|
||||||
_pc = mpe->pc();
|
|
||||||
}
|
|
||||||
|
|
||||||
void init(const MemPointerRecord* mpe) {
|
|
||||||
VMMemRegion::init(mpe);
|
|
||||||
_pc = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
VMMemRegionEx& operator=(const VMMemRegionEx& other) {
|
|
||||||
VMMemRegion::operator=(other);
|
|
||||||
_pc = other.pc();
|
|
||||||
return *this;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline address pc() const { return _pc; }
|
|
||||||
private:
|
|
||||||
address _pc;
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Sequenced memory record
|
|
||||||
*/
|
|
||||||
class SeqMemPointerRecord : public MemPointerRecord {
|
|
||||||
private:
|
|
||||||
jint _seq; // sequence number
|
|
||||||
|
|
||||||
public:
|
|
||||||
SeqMemPointerRecord(): _seq(0){ }
|
|
||||||
|
|
||||||
SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size, jint seq)
|
|
||||||
: MemPointerRecord(addr, flags, size), _seq(seq) {
|
|
||||||
}
|
|
||||||
|
|
||||||
SeqMemPointerRecord(const SeqMemPointerRecord& copy_from)
|
|
||||||
: MemPointerRecord(copy_from) {
|
|
||||||
_seq = copy_from.seq();
|
|
||||||
}
|
|
||||||
|
|
||||||
SeqMemPointerRecord& operator= (const SeqMemPointerRecord& ptr) {
|
|
||||||
MemPointerRecord::operator=(ptr);
|
|
||||||
_seq = ptr.seq();
|
|
||||||
return *this;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline jint seq() const {
|
|
||||||
return _seq;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
class SeqMemPointerRecordEx : public MemPointerRecordEx {
|
|
||||||
private:
|
|
||||||
jint _seq; // sequence number
|
|
||||||
|
|
||||||
public:
|
|
||||||
SeqMemPointerRecordEx(): _seq(0) { }
|
|
||||||
|
|
||||||
SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size,
|
|
||||||
jint seq, address pc):
|
|
||||||
MemPointerRecordEx(addr, flags, size, pc), _seq(seq) {
|
|
||||||
}
|
|
||||||
|
|
||||||
SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from)
|
|
||||||
: MemPointerRecordEx(copy_from) {
|
|
||||||
_seq = copy_from.seq();
|
|
||||||
}
|
|
||||||
|
|
||||||
SeqMemPointerRecordEx& operator= (const SeqMemPointerRecordEx& ptr) {
|
|
||||||
MemPointerRecordEx::operator=(ptr);
|
|
||||||
_seq = ptr.seq();
|
|
||||||
return *this;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline jint seq() const {
|
|
||||||
return _seq;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
#endif // SHARE_VM_SERVICES_MEM_PTR_HPP
|
|
@ -1,306 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
|
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
||||||
*
|
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
|
||||||
* under the terms of the GNU General Public License version 2 only, as
|
|
||||||
* published by the Free Software Foundation.
|
|
||||||
*
|
|
||||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
||||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
||||||
* version 2 for more details (a copy is included in the LICENSE file that
|
|
||||||
* accompanied this code).
|
|
||||||
*
|
|
||||||
* You should have received a copy of the GNU General Public License version
|
|
||||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
||||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
||||||
*
|
|
||||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
||||||
* or visit www.oracle.com if you need additional information or have any
|
|
||||||
* questions.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
#ifndef SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
|
|
||||||
#define SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
|
|
||||||
|
|
||||||
#include "memory/allocation.hpp"
|
|
||||||
#include "services/memPtr.hpp"
|
|
||||||
|
|
||||||
class MemPtr;
|
|
||||||
class MemRecorder;
|
|
||||||
class ArenaInfo;
|
|
||||||
class MemSnapshot;
|
|
||||||
|
|
||||||
extern "C" {
|
|
||||||
typedef int (*FN_SORT)(const void *, const void *);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
// Memory pointer array interface. This array is used by NMT to hold
|
|
||||||
// various memory block information.
|
|
||||||
// The memory pointer arrays are usually walked with their iterators.
|
|
||||||
|
|
||||||
class MemPointerArray : public CHeapObj<mtNMT> {
|
|
||||||
public:
|
|
||||||
virtual ~MemPointerArray() { }
|
|
||||||
|
|
||||||
// return true if it cannot allocate storage for the data
|
|
||||||
virtual bool out_of_memory() const = 0;
|
|
||||||
virtual bool is_empty() const = 0;
|
|
||||||
virtual bool is_full() = 0;
|
|
||||||
virtual int length() const = 0;
|
|
||||||
virtual void clear() = 0;
|
|
||||||
virtual bool append(MemPointer* ptr) = 0;
|
|
||||||
virtual bool insert_at(MemPointer* ptr, int pos) = 0;
|
|
||||||
virtual bool remove_at(int pos) = 0;
|
|
||||||
virtual MemPointer* at(int index) const = 0;
|
|
||||||
virtual void sort(FN_SORT fn) = 0;
|
|
||||||
virtual size_t instance_size() const = 0;
|
|
||||||
virtual bool shrink() = 0;
|
|
||||||
|
|
||||||
NOT_PRODUCT(virtual int capacity() const = 0;)
|
|
||||||
};
|
|
||||||
|
|
||||||
// Iterator interface
class MemPointerArrayIterator VALUE_OBJ_CLASS_SPEC {
 public:
  // return the pointer at current position
  virtual MemPointer* current() const = 0;
  // return the next pointer and advance current position
  virtual MemPointer* next() = 0;
  // return next pointer without advancing current position
  virtual MemPointer* peek_next() const = 0;
  // return previous pointer without changing current position
  virtual MemPointer* peek_prev() const = 0;
  // remove the pointer at current position
  virtual void remove() = 0;
  // insert the pointer at current position
  virtual bool insert(MemPointer* ptr) = 0;
  // insert specified element after current position and
  // move current position to newly inserted position
  virtual bool insert_after(MemPointer* ptr) = 0;
};

// implementation class
class MemPointerArrayIteratorImpl : public MemPointerArrayIterator {
 protected:
  MemPointerArray* _array;
  int              _pos;

 public:
  MemPointerArrayIteratorImpl(MemPointerArray* arr) {
    assert(arr != NULL, "Parameter check");
    _array = arr;
    _pos = 0;
  }

  virtual MemPointer* current() const {
    if (_pos < _array->length()) {
      return _array->at(_pos);
    }
    return NULL;
  }

  virtual MemPointer* next() {
    if (_pos + 1 < _array->length()) {
      return _array->at(++_pos);
    }
    _pos = _array->length();
    return NULL;
  }

  virtual MemPointer* peek_next() const {
    if (_pos + 1 < _array->length()) {
      return _array->at(_pos + 1);
    }
    return NULL;
  }

  virtual MemPointer* peek_prev() const {
    if (_pos > 0) {
      return _array->at(_pos - 1);
    }
    return NULL;
  }

  virtual void remove() {
    if (_pos < _array->length()) {
      _array->remove_at(_pos);
    }
  }

  virtual bool insert(MemPointer* ptr) {
    return _array->insert_at(ptr, _pos);
  }

  virtual bool insert_after(MemPointer* ptr) {
    if (_array->insert_at(ptr, _pos + 1)) {
      _pos ++;
      return true;
    }
    return false;
  }
};
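
As a rough usage illustration (an editorial sketch, not part of the original header), a caller that owns a populated MemPointerArray would typically walk it with this iterator roughly as follows; the walk_all helper is hypothetical:

// Hypothetical sketch only: visit every element of an already-populated array.
static void walk_all(MemPointerArray* arr) {
  MemPointerArrayIteratorImpl itr(arr);
  for (MemPointer* p = itr.current(); p != NULL; p = itr.next()) {
    // examine *p here; next() returns NULL after the last element has been visited
  }
}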


// Memory pointer array implementation.
// This implementation provides an expandable array.
#define DEFAULT_PTR_ARRAY_SIZE 1024

template <class E> class MemPointerArrayImpl : public MemPointerArray {
 private:
  int   _max_size;
  int   _size;
  bool  _init_elements;
  E*    _data;

 public:
  MemPointerArrayImpl(int initial_size = DEFAULT_PTR_ARRAY_SIZE, bool init_elements = true):
    _max_size(initial_size), _size(0), _init_elements(init_elements) {
    _data = (E*)raw_allocate(sizeof(E), initial_size);
    if (_init_elements) {
      for (int index = 0; index < _max_size; index ++) {
        ::new ((void*)&_data[index]) E();
      }
    }
  }

  virtual ~MemPointerArrayImpl() {
    if (_data != NULL) {
      raw_free(_data);
    }
  }

 public:
  bool out_of_memory() const {
    return (_data == NULL);
  }

  size_t instance_size() const {
    return sizeof(MemPointerArrayImpl<E>) + _max_size * sizeof(E);
  }

  bool is_empty() const {
    assert(_data != NULL, "Just check");
    return _size == 0;
  }

  bool is_full() {
    assert(_data != NULL, "Just check");
    if (_size < _max_size) {
      return false;
    } else {
      return !expand_array();
    }
  }

  int length() const {
    assert(_data != NULL, "Just check");
    return _size;
  }

  NOT_PRODUCT(int capacity() const { return _max_size; })

  void clear() {
    assert(_data != NULL, "Just check");
    _size = 0;
  }

  bool append(MemPointer* ptr) {
    assert(_data != NULL, "Just check");
    if (is_full()) {
      return false;
    }
    _data[_size ++] = *(E*)ptr;
    return true;
  }

  bool insert_at(MemPointer* ptr, int pos) {
    assert(_data != NULL, "Just check");
    if (is_full()) {
      return false;
    }
    for (int index = _size; index > pos; index --) {
      _data[index] = _data[index - 1];
    }
    _data[pos] = *(E*)ptr;
    _size ++;
    return true;
  }

  bool remove_at(int pos) {
    assert(_data != NULL, "Just check");
    // reject out-of-range positions, including negative ones
    if (pos < 0 || pos >= _size) {
      return false;
    }
    -- _size;

    for (int index = pos; index < _size; index ++) {
      _data[index] = _data[index + 1];
    }
    return true;
  }

  MemPointer* at(int index) const {
    assert(_data != NULL, "Just check");
    assert(index >= 0 && index < _size, "illegal index");
    return &_data[index];
  }

  bool shrink() {
    float used = ((float)_size) / ((float)_max_size);
    if (used < 0.40) {
      E* old_ptr = _data;
      int new_size = ((_max_size) / (2 * DEFAULT_PTR_ARRAY_SIZE) + 1) * DEFAULT_PTR_ARRAY_SIZE;
      _data = (E*)raw_reallocate(_data, sizeof(E), new_size);
      if (_data == NULL) {
        _data = old_ptr;
        return false;
      } else {
        _max_size = new_size;
        return true;
      }
    }
    return false;
  }

  void sort(FN_SORT fn) {
    assert(_data != NULL, "Just check");
    qsort((void*)_data, _size, sizeof(E), fn);
  }

 private:
  bool expand_array() {
    assert(_data != NULL, "Not yet allocated");
    E* old_ptr = _data;
    if ((_data = (E*)raw_reallocate((void*)_data, sizeof(E),
         _max_size + DEFAULT_PTR_ARRAY_SIZE)) == NULL) {
      _data = old_ptr;
      return false;
    } else {
      _max_size += DEFAULT_PTR_ARRAY_SIZE;
      if (_init_elements) {
        for (int index = _size; index < _max_size; index ++) {
          ::new ((void*)&_data[index]) E();
        }
      }
      return true;
    }
  }

  void* raw_allocate(size_t elementSize, int items) {
    return os::malloc(elementSize * items, mtNMT);
  }

  void* raw_reallocate(void* ptr, size_t elementSize, int items) {
    return os::realloc(ptr, elementSize * items, mtNMT);
  }

  void raw_free(void* ptr) {
    os::free(ptr, mtNMT);
  }
};

#endif // SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
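
As an editorial aside on the sizing policy above (not from the original file): shrink() only fires when less than 40% of the capacity is in use, and it rounds the new capacity to a little over half of the old one in DEFAULT_PTR_ARRAY_SIZE chunks, while expand_array() grows by one chunk at a time. A minimal, self-contained sketch of the shrink arithmetic, assuming a hypothetical current capacity of 8192 entries:

// Hypothetical arithmetic sketch for the shrink() sizing rule above.
#include <cstdio>

int main() {
  const int kChunk   = 1024;         // DEFAULT_PTR_ARRAY_SIZE
  const int max_size = 8 * kChunk;   // assumed current capacity: 8192 entries
  // same formula as shrink(): (max_size / (2 * chunk) + 1) * chunk
  int new_size = (max_size / (2 * kChunk) + 1) * kChunk;
  std::printf("%d -> %d\n", max_size, new_size);  // prints 8192 -> 5120
  return 0;
}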
@ -1,171 +0,0 @@
/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "runtime/atomic.inline.hpp"
#include "services/memBaseline.hpp"
#include "services/memRecorder.hpp"
#include "services/memPtr.hpp"
#include "services/memTracker.hpp"

MemPointer* SequencedRecordIterator::next_record() {
  MemPointerRecord* itr_cur = (MemPointerRecord*)_itr.current();
  if (itr_cur == NULL) {
    return itr_cur;
  }

  MemPointerRecord* itr_next = (MemPointerRecord*)_itr.next();

  // don't collapse virtual memory records
  while (itr_next != NULL && !itr_cur->is_vm_pointer() &&
         !itr_next->is_vm_pointer() &&
         same_kind(itr_cur, itr_next)) {
    itr_cur = itr_next;
    itr_next = (MemPointerRecord*)_itr.next();
  }

  return itr_cur;
}


volatile jint MemRecorder::_instance_count = 0;

MemRecorder::MemRecorder() {
  assert(MemTracker::is_on(), "Native memory tracking is off");
  Atomic::inc(&_instance_count);
  set_generation();

  if (MemTracker::track_callsite()) {
    _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecordEx,
                                DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
  } else {
    _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecord,
                                DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
  }
  _next = NULL;

  if (_pointer_records != NULL) {
    // record itself
    address pc = CURRENT_PC;
    record((address)this, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
        sizeof(MemRecorder), SequenceGenerator::next(), pc);
    record((address)_pointer_records, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
        _pointer_records->instance_size(), SequenceGenerator::next(), pc);
  }
}

MemRecorder::~MemRecorder() {
  if (_pointer_records != NULL) {
    if (MemTracker::is_on()) {
      MemTracker::record_free((address)_pointer_records, mtNMT);
      MemTracker::record_free((address)this, mtNMT);
    }
    delete _pointer_records;
  }
  // delete all linked recorders
  while (_next != NULL) {
    MemRecorder* tmp = _next;
    _next = _next->next();
    tmp->set_next(NULL);
    delete tmp;
  }
  Atomic::dec(&_instance_count);
}

// Sorting order:
//  1. memory block address
//  2. mem pointer record tags
//  3. sequence number
int MemRecorder::sort_record_fn(const void* e1, const void* e2) {
  const MemPointerRecord* p1 = (const MemPointerRecord*)e1;
  const MemPointerRecord* p2 = (const MemPointerRecord*)e2;
  int delta = UNSIGNED_COMPARE(p1->addr(), p2->addr());
  if (delta == 0) {
    int df = UNSIGNED_COMPARE((p1->flags() & MemPointerRecord::tag_masks),
                              (p2->flags() & MemPointerRecord::tag_masks));
    if (df == 0) {
      assert(p1->seq() != p2->seq(), "dup seq");
      return p1->seq() - p2->seq();
    } else {
      return df;
    }
  } else {
    return delta;
  }
}

bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, jint seq, address pc) {
  assert(seq > 0, "No sequence number");
#ifdef ASSERT
  if (MemPointerRecord::is_virtual_memory_record(flags)) {
    assert((flags & MemPointerRecord::tag_masks) != 0, "bad virtual memory record");
  } else {
    assert((flags & MemPointerRecord::tag_masks) == MemPointerRecord::malloc_tag() ||
           (flags & MemPointerRecord::tag_masks) == MemPointerRecord::free_tag() ||
           IS_ARENA_OBJ(flags),
           "bad malloc record");
  }
  // a recorder should only hold records within the same generation
  unsigned long cur_generation = SequenceGenerator::current_generation();
  assert(cur_generation == _generation,
         "this thread did not enter sync point");
#endif

  if (MemTracker::track_callsite()) {
    SeqMemPointerRecordEx ap(p, flags, size, seq, pc);
    debug_only(check_dup_seq(ap.seq());)
    return _pointer_records->append(&ap);
  } else {
    SeqMemPointerRecord ap(p, flags, size, seq);
    debug_only(check_dup_seq(ap.seq());)
    return _pointer_records->append(&ap);
  }
}

// iterator for alloc pointers
SequencedRecordIterator MemRecorder::pointer_itr() {
  assert(_pointer_records != NULL, "just check");
  _pointer_records->sort((FN_SORT)sort_record_fn);
  return SequencedRecordIterator(_pointer_records);
}


void MemRecorder::set_generation() {
  _generation = SequenceGenerator::current_generation();
}

#ifdef ASSERT

void MemRecorder::check_dup_seq(jint seq) const {
  MemPointerArrayIteratorImpl itr(_pointer_records);
  MemPointerRecord* rc = (MemPointerRecord*)itr.current();
  while (rc != NULL) {
    assert(rc->seq() != seq, "dup seq");
    rc = (MemPointerRecord*)itr.next();
  }
}

#endif
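
For context, the three-key ordering that sort_record_fn establishes (address, then tag, then sequence number) is what lets SequencedRecordIterator collapse duplicates by keeping only the last record in each (address, tag) run. The stand-alone sketch below illustrates the idea with a hypothetical Rec type and made-up values; it is not HotSpot code:

// Hypothetical sketch of the three-key ordering used by sort_record_fn.
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for MemPointerRecord: address, tag bits, sequence number.
struct Rec { unsigned long addr; int tag; int seq; };

// Same ordering idea: address first, then tag, then sequence number.
static int cmp_rec(const void* e1, const void* e2) {
  const Rec* p1 = (const Rec*)e1;
  const Rec* p2 = (const Rec*)e2;
  if (p1->addr != p2->addr) return (p1->addr < p2->addr) ? -1 : 1;
  if (p1->tag  != p2->tag)  return p1->tag - p2->tag;
  return p1->seq - p2->seq;
}

int main() {
  Rec recs[] = { {0x2000, 1, 5}, {0x1000, 1, 7}, {0x1000, 1, 3}, {0x1000, 2, 4} };
  std::qsort(recs, sizeof(recs) / sizeof(recs[0]), sizeof(Rec), cmp_rec);
  // After sorting, records with the same (addr, tag) are adjacent and ordered by seq,
  // so an iterator like SequencedRecordIterator can keep only the last (highest seq) one.
  for (size_t i = 0; i < sizeof(recs) / sizeof(recs[0]); i++) {
    std::printf("0x%lx tag=%d seq=%d\n", recs[i].addr, recs[i].tag, recs[i].seq);
  }
  return 0;
}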
@ -1,271 +0,0 @@
/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_RECORDER_HPP
#define SHARE_VM_SERVICES_MEM_RECORDER_HPP

#include "memory/allocation.hpp"
#include "runtime/os.hpp"
#include "services/memPtrArray.hpp"

class MemSnapshot;
class MemTracker;
class MemTrackWorker;

// Fixed size memory pointer array implementation
template <class E, int SIZE> class FixedSizeMemPointerArray :
  public MemPointerArray {
  // This implementation is for memory recorder only
  friend class MemRecorder;

 private:
  E    _data[SIZE];
  int  _size;

 protected:
  FixedSizeMemPointerArray(bool init_elements = false):
    _size(0) {
    if (init_elements) {
      for (int index = 0; index < SIZE; index ++) {
        ::new ((void*)&_data[index]) E();
      }
    }
  }

  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
    // the instance is part of memRecorder, needs to be tagged with 'otNMTRecorder'
    // to avoid recursion
    return os::malloc(size, (mtNMT | otNMTRecorder));
  }

  void* operator new(size_t size) throw() {
    assert(false, "use nothrow version");
    return NULL;
  }

  void operator delete(void* p) {
    os::free(p, (mtNMT | otNMTRecorder));
  }

  // instance size
  inline size_t instance_size() const {
    return sizeof(FixedSizeMemPointerArray<E, SIZE>);
  }

  NOT_PRODUCT(int capacity() const { return SIZE; })

 public:
  // implementation of public interface
  bool out_of_memory() const { return false; }
  bool is_empty() const      { return _size == 0; }
  bool is_full()             { return length() >= SIZE; }
  int  length() const        { return _size; }

  void clear() {
    _size = 0;
  }

  bool append(MemPointer* ptr) {
    if (is_full()) return false;
    _data[_size ++] = *(E*)ptr;
    return true;
  }

  virtual bool insert_at(MemPointer* p, int pos) {
    assert(false, "append only");
    return false;
  }

  virtual bool remove_at(int pos) {
    assert(false, "not supported");
    return false;
  }

  MemPointer* at(int index) const {
    assert(index >= 0 && index < length(),
           "parameter check");
    return ((E*)&_data[index]);
  }

  void sort(FN_SORT fn) {
    qsort((void*)_data, _size, sizeof(E), fn);
  }

  bool shrink() {
    return false;
  }
};

// This iterator requires a pre-sorted MemPointerArray, which is sorted by:
//  1. address
//  2. allocation type
//  3. sequence number
// While walking the array, the iterator collapses pointers with the same
// address and allocation type, and only returns the one with the highest
// sequence number.
//
// This is a read-only iterator; update methods are asserted.
class SequencedRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl _itr;
  MemPointer*                 _cur;

 public:
  SequencedRecordIterator(const MemPointerArray* arr):
    _itr(const_cast<MemPointerArray*>(arr)) {
    _cur = next_record();
  }

  SequencedRecordIterator(const SequencedRecordIterator& itr):
    _itr(itr._itr) {
    _cur = next_record();
  }

  // return the pointer at current position
  virtual MemPointer* current() const {
    return _cur;
  }

  // return the next pointer and advance current position
  virtual MemPointer* next() {
    _cur = next_record();
    return _cur;
  }

  // return the next pointer without advancing current position
  virtual MemPointer* peek_next() const {
    assert(false, "not implemented");
    return NULL;
  }

  // return the previous pointer without changing current position
  virtual MemPointer* peek_prev() const {
    assert(false, "not implemented");
    return NULL;
  }

  // remove the pointer at current position
  virtual void remove() {
    assert(false, "read-only iterator");
  }

  // insert the pointer at current position
  virtual bool insert(MemPointer* ptr) {
    assert(false, "read-only iterator");
    return false;
  }

  virtual bool insert_after(MemPointer* ptr) {
    assert(false, "read-only iterator");
    return false;
  }

 private:
  // collapse the 'same kind' of records, and return this 'kind' of
  // record with the highest sequence number
  MemPointer* next_record();

  // Test if the two records are the same kind: the same memory block and
  // allocation type.
  inline bool same_kind(const MemPointerRecord* p1, const MemPointerRecord* p2) const {
    assert(!p1->is_vm_pointer() && !p2->is_vm_pointer(), "malloc pointer only");
    return (p1->addr() == p2->addr() &&
            (p1->flags() & MemPointerRecord::tag_masks) ==
            (p2->flags() & MemPointerRecord::tag_masks));
  }
};


#define DEFAULT_RECORDER_PTR_ARRAY_SIZE 512

class MemRecorder : public CHeapObj<mtNMT|otNMTRecorder> {
  friend class MemSnapshot;
  friend class MemTracker;
  friend class MemTrackWorker;
  friend class GenerationData;

 protected:
  // the array that holds memory records
  MemPointerArray* _pointer_records;

 private:
  // used for linked list
  MemRecorder*     _next;
  // an active recorder can only record data for a single generation
  unsigned long    _generation;

 protected:
  _NOINLINE_ MemRecorder();
  ~MemRecorder();

  // record a memory operation
  bool record(address addr, MEMFLAGS flags, size_t size, jint seq, address caller_pc = 0);

  // linked list support
  inline void set_next(MemRecorder* rec) {
    _next = rec;
  }

  inline MemRecorder* next() const {
    return _next;
  }

  // whether the recorder is full
  inline bool is_full() const {
    assert(_pointer_records != NULL, "just check");
    return _pointer_records->is_full();
  }

  // whether the recorder ran out of memory when initializing its
  // internal data
  inline bool out_of_memory() const {
    return (_pointer_records == NULL ||
            _pointer_records->out_of_memory());
  }

  inline void clear() {
    assert(_pointer_records != NULL, "Just check");
    _pointer_records->clear();
  }

  SequencedRecordIterator pointer_itr();

  // return the generation to which this recorder belongs
  unsigned long get_generation() const { return _generation; }

 protected:
  // number of MemRecorder instances
  static volatile jint _instance_count;

 private:
  // sorting function, sorts records into the following order
  //  1. memory address
  //  2. allocation type
  //  3. sequence number
  static int sort_record_fn(const void* e1, const void* e2);

  debug_only(void check_dup_seq(jint seq) const;)
  void set_generation();
};

#endif // SHARE_VM_SERVICES_MEM_RECORDER_HPP
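
As a closing illustration (a hypothetical sketch, not part of the original header): since record(), pointer_itr() and clear() are protected, only a friend class such as MemTracker can drive a recorder. The drain_recorder helper below is an assumption used purely to show the intended flow:

// Hypothetical sketch; assume this function body lives inside a friend class
// such as MemTracker, since record()/pointer_itr()/clear() are protected.
void drain_recorder(MemRecorder* rec) {
  if (rec->out_of_memory()) return;                   // internal array failed to allocate
  SequencedRecordIterator itr = rec->pointer_itr();   // sorts records, then collapses duplicates
  for (MemPointer* p = itr.current(); p != NULL; p = itr.next()) {
    // promote the surviving record (highest sequence number per block) into a snapshot
  }
  rec->clear();                                       // recorder can be reused for the next generation
}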
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff