8046598: Scalable Native memory tracking development

Enhance scalability of native memory tracking

Reviewed-by: coleenp, ctornqvi, gtriantafill
Author: Zhengyu Gu
Date:   2014-08-07 12:18:58 -07:00
Parent: 40b035d141
Commit: f0cf82f571
72 changed files with 5166 additions and 6109 deletions

@ -119,8 +119,8 @@ ifeq ($(INCLUDE_NMT), false)
CFLAGS += -DINCLUDE_NMT=0
Src_Files_EXCLUDE += \
memBaseline.cpp memPtr.cpp memRecorder.cpp memReporter.cpp memSnapshot.cpp memTrackWorker.cpp \
memTracker.cpp nmtDCmd.cpp
memBaseline.cpp memReporter.cpp mallocTracker.cpp virtualMemoryTracker.cpp nmtCommon.cpp \
memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp
endif
-include $(HS_ALT_MAKE)/excludeSrc.make

@ -2439,23 +2439,25 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
}
// The memory is committed
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
return addr;
}
bool os::release_memory_special(char* base, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
if (rslt == 0) {
tkr.record((address)base, bytes);
return true;
if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
if (rslt == 0) {
tkr.record((address)base, bytes);
return true;
} else {
return false;
}
} else {
tkr.discard();
return false;
return shmdt(base) == 0;
}
}
size_t os::large_page_size() {
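
The reworked release path above shows the pattern used throughout this change: the Tracker (which prepares an NMT record up front) is constructed only when tracking_level() is above NMT_minimal, and the old tkr.discard() escape hatch disappears, so an untracked VM pays nothing extra on release. A minimal standalone sketch of that guard, using stand-in types rather than the HotSpot classes (the printf stands in for the real record-keeping):

#include <cstddef>
#include <cstdio>

enum NMT_TrackingLevel { NMT_off, NMT_minimal, NMT_summary, NMT_detail };

struct Tracker {                                    // stand-in for MemTracker::Tracker
  void record(void* addr, size_t bytes) {
    std::printf("release recorded: %p, %zu bytes\n", addr, bytes);
  }
};

static NMT_TrackingLevel tracking_level() { return NMT_summary; }   // assumed level
static bool os_release(void* addr, size_t bytes) { (void)addr; (void)bytes; return true; }

bool release_memory(void* addr, size_t bytes) {
  if (tracking_level() > NMT_minimal) {
    Tracker tkr;                                    // tracking cost only when NMT is on
    bool res = os_release(addr, bytes);
    if (res) {
      tkr.record(addr, bytes);                      // record only a successful release
    }
    return res;
  }
  return os_release(addr, bytes);                   // fast path: no tracker, no discard()
}

int main() {
  int dummy = 0;
  return release_memory(&dummy, sizeof(dummy)) ? 0 : 1;
}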

@ -753,7 +753,7 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
return mapAddress;
}
@ -918,7 +918,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
*addr = mapAddress;
*sizep = size;

@ -3504,9 +3504,12 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al
assert(is_ptr_aligned(start, alignment), "Must be");
// os::reserve_memory_special will record this memory area.
// Need to release it here to prevent overlapping reservations.
MemTracker::record_virtual_memory_release((address)start, bytes);
if (MemTracker::tracking_level() > NMT_minimal) {
// os::reserve_memory_special will record this memory area.
// Need to release it here to prevent overlapping reservations.
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
tkr.record((address)start, bytes);
}
char* end = start + bytes;
@ -3601,7 +3604,7 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
}
// The memory is committed
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
}
return addr;
@ -3617,24 +3620,30 @@ bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
}
bool os::release_memory_special(char* base, size_t bytes) {
assert(UseLargePages, "only for large pages");
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
bool res;
if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
res = os::Linux::release_memory_special_impl(base, bytes);
if (res) {
tkr.record((address)base, bytes);
}
} else {
res = os::Linux::release_memory_special_impl(base, bytes);
}
return res;
}
bool os::Linux::release_memory_special_impl(char* base, size_t bytes) {
assert(UseLargePages, "only for large pages");
bool res;
if (UseSHM) {
res = os::Linux::release_memory_special_shm(base, bytes);
} else {
assert(UseHugeTLBFS, "must be");
res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
}
if (res) {
tkr.record((address)base, bytes);
} else {
tkr.discard();
}
return res;
}

@ -108,6 +108,7 @@ class Linux {
static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
static bool release_memory_special_impl(char* base, size_t bytes);
static bool release_memory_special_shm(char* base, size_t bytes);
static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);

@ -753,7 +753,7 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
return mapAddress;
}
@ -924,7 +924,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
*addr = mapAddress;
*sizep = size;

@ -75,21 +75,41 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
VMError::report_coredump_status(buffer, success);
}
address os::get_caller_pc(int n) {
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
n ++;
toSkip++;
#endif
int frame_idx = 0;
int num_of_frames; // number of frames captured
frame fr = os::current_frame();
while (n > 0 && fr.pc() &&
!os::is_first_C_frame(&fr) && fr.sender_pc()) {
fr = os::get_sender_for_C_frame(&fr);
n --;
while (fr.pc() && frame_idx < frames) {
if (toSkip > 0) {
toSkip --;
} else {
stack[frame_idx ++] = fr.pc();
}
if (fr.fp() == NULL || os::is_first_C_frame(&fr)
||fr.sender_pc() == NULL || fr.cb() != NULL) break;
if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
fr = os::get_sender_for_C_frame(&fr);
} else {
break;
}
}
if (n == 0) {
return fr.pc();
} else {
return NULL;
num_of_frames = frame_idx;
for (; frame_idx < frames; frame_idx ++) {
stack[frame_idx] = NULL;
}
return num_of_frames;
}
bool os::unsetenv(const char* name) {
assert(name != NULL, "Null pointer");
return (::unsetenv(name) == 0);
}
int os::get_last_error() {
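
The single-frame os::get_caller_pc(n) helper is replaced above by os::get_native_stack(stack, frames, toSkip), which captures up to frames return addresses after skipping toSkip frames, NULL-pads the unused slots, and returns the count, so detail-level NMT can keep a full native call stack per allocation site. A rough standalone analogue of that contract, written against POSIX backtrace() rather than the HotSpot frame walker:

#include <execinfo.h>   // POSIX/glibc backtrace()
#include <cstddef>
#include <cstdio>

static int get_native_stack_sketch(void** stack, int frames, int to_skip) {
  void* raw[64];
  int want = frames + to_skip;
  if (want > 64) want = 64;
  int captured = backtrace(raw, want);     // capture up to 'want' return addresses

  int out = 0;
  for (int i = to_skip; i < captured && out < frames; i++) {
    stack[out++] = raw[i];                 // keep only frames past the skipped ones
  }
  for (int i = out; i < frames; i++) {
    stack[i] = NULL;                       // NULL-pad unused slots, as the new code does
  }
  return out;                              // number of frames actually captured
}

int main() {
  void* stack[4];
  int n = get_native_stack_sketch(stack, 4, 1);
  std::printf("captured %d frames, top = %p\n", n, stack[0]);
  return 0;
}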

@ -770,7 +770,8 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
size, CURRENT_PC, mtInternal);
return mapAddress;
}
@ -941,7 +942,8 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
size, CURRENT_PC, mtInternal);
*addr = mapAddress;
*sizep = size;

@ -138,9 +138,8 @@ BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
// Workaround for issue when a custom launcher doesn't call
// DestroyJavaVM and NMT is trying to track memory when free is
// called from a static destructor
if (MemTracker::is_on()) {
MemTracker::shutdown(MemTracker::NMT_normal);
}
MemTracker::shutdown();
break;
default:
break;
@ -163,6 +162,10 @@ bool os::getenv(const char* name, char* buffer, int len) {
return result > 0 && result < len;
}
bool os::unsetenv(const char* name) {
assert(name != NULL, "Null pointer");
return (SetEnvironmentVariable(name, NULL) == TRUE);
}
// No setuid programs under Windows.
bool os::have_special_privileges() {
@ -319,15 +322,16 @@ extern "C" void breakpoint() {
* So far, this method is only used by Native Memory Tracking, which is
* only supported on Windows XP or later.
*/
address os::get_caller_pc(int n) {
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
n++;
toSkip ++;
#endif
address pc;
if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) {
return pc;
int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
(PVOID*)stack, NULL);
for (int index = captured; index < frames; index ++) {
stack[index] = NULL;
}
return NULL;
return captured;
}
@ -2901,7 +2905,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
PAGE_READWRITE);
// If reservation failed, return NULL
if (p_buf == NULL) return NULL;
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
os::release_memory(p_buf, bytes + chunk_size);
// we still need to round up to a page boundary (in case we are using large pages)
@ -2967,7 +2971,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
// need to create a dummy 'reserve' record to match
// the release.
MemTracker::record_virtual_memory_reserve((address)p_buf,
bytes_to_release, mtNone, CALLER_PC);
bytes_to_release, CALLER_PC);
os::release_memory(p_buf, bytes_to_release);
}
#ifdef ASSERT
@ -2986,11 +2990,10 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
}
// Although the memory is allocated individually, it is returned as one.
// NMT records it as one block.
address pc = CALLER_PC;
if ((flags & MEM_COMMIT) != 0) {
MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
} else {
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
}
// made it this far, success
@ -3188,8 +3191,7 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, boo
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
if (res != NULL) {
address pc = CALLER_PC;
MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
}
return res;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1498,7 +1498,8 @@ static char* mapping_create_shared(size_t size) {
(void)memset(mapAddress, '\0', size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
size, CURRENT_PC, mtInternal);
return (char*) mapAddress;
}
@ -1680,7 +1681,8 @@ static void open_file_mapping(const char* user, int vmid,
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size,
CURRENT_PC, mtInternal);
*addrp = (char*)mapAddress;
@ -1834,10 +1836,14 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {
return;
}
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
remove_file_mapping(addr);
// it does not go through os api, the operation has to record from here
tkr.record((address)addr, bytes);
if (MemTracker::tracking_level() > NMT_minimal) {
// it does not go through os api, the operation has to record from here
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
remove_file_mapping(addr);
tkr.record((address)addr, bytes);
} else {
remove_file_mapping(addr);
}
}
char* PerfMemory::backing_store_filename() {

@ -269,7 +269,7 @@ address CodeBuffer::decode_begin() {
GrowableArray<int>* CodeBuffer::create_patch_overflow() {
if (_overflow_arena == NULL) {
_overflow_arena = new (mtCode) Arena();
_overflow_arena = new (mtCode) Arena(mtCode);
}
return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}

@ -48,7 +48,7 @@ Compiler::Compiler() : AbstractCompiler(c1) {
void Compiler::init_c1_runtime() {
BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
Arena* arena = new (mtCompiler) Arena();
Arena* arena = new (mtCompiler) Arena(mtCompiler);
Runtime1::initialize(buffer_blob);
FrameMap::initialize();
// initialize data structures

@ -86,7 +86,8 @@ static bool firstEnv = true;
// ------------------------------------------------------------------
// ciEnv::ciEnv
ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter)
: _ciEnv_arena(mtCompiler) {
VM_ENTRY_MARK;
// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
@ -144,7 +145,7 @@ ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
_jvmti_can_pop_frame = false;
}
ciEnv::ciEnv(Arena* arena) {
ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
ASSERT_IN_VM;
// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -112,7 +112,7 @@ void ciObjectFactory::initialize() {
// This Arena is long lived and exists in the resource mark of the
// compiler thread that initializes the initial ciObjectFactory which
// creates the shared ciObjects that all later ciObjectFactories use.
Arena* arena = new (mtCompiler) Arena();
Arena* arena = new (mtCompiler) Arena(mtCompiler);
ciEnv initial(arena);
ciEnv* env = ciEnv::current();
env->_factory->init_shared_objects();

@ -70,9 +70,9 @@ Symbol* SymbolTable::allocate_symbol(const u1* name, int len, bool c_heap, TRAPS
void SymbolTable::initialize_symbols(int arena_alloc_size) {
// Initialize the arena for global symbols, size passed in depends on CDS.
if (arena_alloc_size == 0) {
_arena = new (mtSymbol) Arena();
_arena = new (mtSymbol) Arena(mtSymbol);
} else {
_arena = new (mtSymbol) Arena(arena_alloc_size);
_arena = new (mtSymbol) Arena(mtSymbol, arena_alloc_size);
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,7 +52,8 @@ void ConcurrentMarkSweepPolicy::initialize_alignments() {
}
void ConcurrentMarkSweepPolicy::initialize_generations() {
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC,
CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_generations == NULL)
vm_exit_during_initialization("Unable to allocate gen spec");

@ -289,7 +289,7 @@ OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
}
_fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
mtGC, 0, AllocFailStrategy::RETURN_NULL);
mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_fine_grain_regions == NULL) {
vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,

@ -422,26 +422,23 @@ void Chunk::start_chunk_pool_cleaner_task() {
}
//------------------------------Arena------------------------------------------
NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)
Arena::Arena(size_t init_size) {
Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0) {
size_t round_size = (sizeof (char *)) - 1;
init_size = (init_size+round_size) & ~round_size;
_first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
_size_in_bytes = 0;
MemTracker::record_new_arena(flag);
set_size_in_bytes(init_size);
NOT_PRODUCT(Atomic::inc(&_instance_count);)
}
Arena::Arena() {
Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
_first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
_size_in_bytes = 0;
MemTracker::record_new_arena(flag);
set_size_in_bytes(Chunk::init_size);
NOT_PRODUCT(Atomic::inc(&_instance_count);)
}
Arena *Arena::move_contents(Arena *copy) {
@ -463,7 +460,7 @@ Arena *Arena::move_contents(Arena *copy) {
Arena::~Arena() {
destruct_contents();
NOT_PRODUCT(Atomic::dec(&_instance_count);)
MemTracker::record_arena_free(_flags);
}
void* Arena::operator new(size_t size) throw() {
@ -479,21 +476,21 @@ void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant)
// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
#ifdef ASSERT
void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
return p;
#else
return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
return (void *) AllocateHeap(size, flags, CALLER_PC);
#endif
}
void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
#ifdef ASSERT
void* p = os::malloc(size, flags|otArena, CALLER_PC);
void* p = os::malloc(size, flags, CALLER_PC);
if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
return p;
#else
return os::malloc(size, flags|otArena, CALLER_PC);
return os::malloc(size, flags, CALLER_PC);
#endif
}
@ -518,8 +515,9 @@ void Arena::destruct_contents() {
// change the size
void Arena::set_size_in_bytes(size_t size) {
if (_size_in_bytes != size) {
long delta = (long)(size - size_in_bytes());
_size_in_bytes = size;
MemTracker::record_arena_size((address)this, size);
MemTracker::record_arena_size_change(delta, _flags);
}
}
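
Arenas now carry their MEMFLAGS and report only the signed change in size via MemTracker::record_arena_size_change(delta, _flags), instead of recording an absolute size keyed by the arena's address. A minimal sketch of that delta-based accounting, with a stand-in counter array and a trimmed flag set rather than the real tracker:

#include <atomic>
#include <cstddef>
#include <cstdio>

enum MEMFLAGS { mtCompiler = 0, mtSymbol = 1, mt_number_of_types = 2 };  // trimmed set for the sketch

static std::atomic<long> g_arena_bytes[mt_number_of_types];

struct Arena {
  MEMFLAGS _flags;
  size_t   _size_in_bytes;

  explicit Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {}

  void set_size_in_bytes(size_t size) {
    if (_size_in_bytes != size) {
      long delta = (long)(size - _size_in_bytes);   // may be negative when the arena shrinks
      _size_in_bytes = size;
      g_arena_bytes[_flags].fetch_add(delta);       // one atomic add against the per-flag counter
    }
  }
};

int main() {
  Arena a(mtCompiler);
  a.set_size_in_bytes(4096);
  a.set_size_in_bytes(1024);                        // shrink: reports a delta of -3072
  std::printf("mtCompiler arena bytes: %ld\n", g_arena_bytes[mtCompiler].load());
  return 0;
}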

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -133,51 +133,34 @@ class AllocatedObj {
/*
* MemoryType bitmap layout:
* | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 |
* | memory type | object | reserved |
* | | type | |
* Memory types
*/
enum MemoryType {
// Memory type by sub systems. It occupies lower byte.
mtNone = 0x0000, // undefined
mtClass = 0x0100, // memory class for Java classes
mtThread = 0x0200, // memory for thread objects
mtThreadStack = 0x0300,
mtCode = 0x0400, // memory for generated code
mtGC = 0x0500, // memory for GC
mtCompiler = 0x0600, // memory for compiler
mtInternal = 0x0700, // memory used by VM, but does not belong to
mtJavaHeap = 0x00, // Java heap
mtClass = 0x01, // memory class for Java classes
mtThread = 0x02, // memory for thread objects
mtThreadStack = 0x03,
mtCode = 0x04, // memory for generated code
mtGC = 0x05, // memory for GC
mtCompiler = 0x06, // memory for compiler
mtInternal = 0x07, // memory used by VM, but does not belong to
// any of above categories, and not used for
// native memory tracking
mtOther = 0x0800, // memory not used by VM
mtSymbol = 0x0900, // symbol
mtNMT = 0x0A00, // memory used by native memory tracking
mtChunk = 0x0B00, // chunk that holds content of arenas
mtJavaHeap = 0x0C00, // Java heap
mtClassShared = 0x0D00, // class data sharing
mtTest = 0x0E00, // Test type for verifying NMT
mtTracing = 0x0F00, // memory used for Tracing
mt_number_of_types = 0x000F, // number of memory types (mtDontTrack
mtOther = 0x08, // memory not used by VM
mtSymbol = 0x09, // symbol
mtNMT = 0x0A, // memory used by native memory tracking
mtClassShared = 0x0B, // class data sharing
mtChunk = 0x0C, // chunk that holds content of arenas
mtTest = 0x0D, // Test type for verifying NMT
mtTracing = 0x0E, // memory used for Tracing
mtNone = 0x0F, // undefined
mt_number_of_types = 0x10 // number of memory types (mtDontTrack
// is not included as a valid type)
mtDontTrack = 0x0F00, // memory we do not or cannot track
mt_masks = 0x7F00,
// object type mask
otArena = 0x0010, // an arena object
otNMTRecorder = 0x0020, // memory recorder object
ot_masks = 0x00F0
};
#define IS_MEMORY_TYPE(flags, type) ((flags & mt_masks) == type)
#define HAS_VALID_MEMORY_TYPE(flags)((flags & mt_masks) != mtNone)
#define FLAGS_TO_MEMORY_TYPE(flags) (flags & mt_masks)
typedef MemoryType MEMFLAGS;
#define IS_ARENA_OBJ(flags) ((flags & ot_masks) == otArena)
#define IS_NMT_RECORDER(flags) ((flags & ot_masks) == otNMTRecorder)
#define NMT_CAN_TRACK(flags) (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack)))
typedef unsigned short MEMFLAGS;
#if INCLUDE_NMT
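
The MEMFLAGS values above drop the old bit-mask layout (memory type in the high byte, object type in bits 5-8) in favour of small sequential indices 0x00-0x0F. A standalone sketch, with stand-in names, of why index-valued flags help scalability: the flag can directly address a flat array of per-type counters that is updated with a single atomic add, with no masking, no lookup and no lock.

#include <atomic>
#include <cstddef>
#include <cstdio>

enum MemoryType : unsigned short {
  mtJavaHeap = 0x00, mtClass  = 0x01, mtThread   = 0x02, mtThreadStack = 0x03,
  mtCode     = 0x04, mtGC     = 0x05, mtCompiler = 0x06, mtInternal    = 0x07,
  mtOther    = 0x08, mtSymbol = 0x09, mtNMT      = 0x0A, mtClassShared = 0x0B,
  mtChunk    = 0x0C, mtTest   = 0x0D, mtTracing  = 0x0E, mtNone        = 0x0F,
  mt_number_of_types = 0x10
};

static std::atomic<size_t> g_malloc_bytes[mt_number_of_types];

inline void record_malloc_size(size_t size, MemoryType flag) {
  g_malloc_bytes[flag].fetch_add(size, std::memory_order_relaxed);  // one atomic add per allocation
}

int main() {
  record_malloc_size(128, mtGC);
  record_malloc_size(64, mtGC);
  std::printf("mtGC malloc'd bytes: %zu\n", g_malloc_bytes[mtGC].load());
  return 0;
}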
@ -189,27 +172,23 @@ const bool NMT_track_callsite = false;
#endif // INCLUDE_NMT
// debug build does not inline
#if defined(_NMT_NOINLINE_)
#define CURRENT_PC (NMT_track_callsite ? os::get_caller_pc(1) : 0)
#define CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0)
#else
#define CURRENT_PC (NMT_track_callsite? os::get_caller_pc(0) : 0)
#define CALLER_PC (NMT_track_callsite ? os::get_caller_pc(1) : 0)
#define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#endif
class NativeCallStack;
template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
public:
_NOINLINE_ void* operator new(size_t size, address caller_pc = 0) throw();
_NOINLINE_ void* operator new(size_t size, const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new(size_t size) throw();
_NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant,
address caller_pc = 0) throw();
_NOINLINE_ void* operator new [](size_t size, address caller_pc = 0) throw();
const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant)
throw();
_NOINLINE_ void* operator new [](size_t size, const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new [](size_t size) throw();
_NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
address caller_pc = 0) throw();
const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant)
throw();
void operator delete(void* p);
void operator delete [] (void* p);
};
@ -384,13 +363,15 @@ class Chunk: CHeapObj<mtChunk> {
//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObj<mtNone|otArena> {
class Arena : public CHeapObj<mtNone> {
protected:
friend class ResourceMark;
friend class HandleMark;
friend class NoHandleMark;
friend class VMStructs;
MEMFLAGS _flags; // Memory tracking flags
Chunk *_first; // First chunk
Chunk *_chunk; // current chunk
char *_hwm, *_max; // High water mark and max in current chunk
@ -418,8 +399,8 @@ protected:
}
public:
Arena();
Arena(size_t init_size);
Arena(MEMFLAGS memflag);
Arena(MEMFLAGS memflag, size_t init_size);
~Arena();
void destruct_contents();
char* hwm() const { return _hwm; }
@ -518,8 +499,6 @@ protected:
static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN;
static void free_all(char** start, char** end) PRODUCT_RETURN;
// how many arena instances
NOT_PRODUCT(static volatile jint _instance_count;)
private:
// Reset this Arena to empty, access will trigger grow if necessary
void reset(void) {
@ -681,7 +660,7 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)
#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL)
NEW_C_HEAP_ARRAY3(type, (size), memflags, CURRENT_PC, AllocFailStrategy::RETURN_NULL)
#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
(type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
// Explicit C-heap memory management
@ -49,12 +50,10 @@ inline void inc_stat_counter(volatile julong* dest, julong add_value) {
#endif
// allocate using malloc; will fail if no memory available
inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
inline char* AllocateHeap(size_t size, MEMFLAGS flags,
const NativeCallStack& stack,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
if (pc == 0) {
pc = CURRENT_PC;
}
char* p = (char*) os::malloc(size, flags, pc);
char* p = (char*) os::malloc(size, flags, stack);
#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
#endif
@ -63,10 +62,14 @@ inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
}
return p;
}
inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flags,
inline char* AllocateHeap(size_t size, MEMFLAGS flags,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
char* p = (char*) os::realloc(old, size, flags, CURRENT_PC);
return AllocateHeap(size, flags, CURRENT_PC, alloc_failmode);
}
inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
#endif
@ -85,8 +88,22 @@ inline void FreeHeap(void* p, MEMFLAGS memflags = mtInternal) {
template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
address caller_pc) throw() {
void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
const NativeCallStack& stack) throw() {
void* p = (void*)AllocateHeap(size, F, stack);
#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
return p;
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size) throw() {
return CHeapObj<F>::operator new(size, CALLER_PC);
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() {
void* p = (void*)AllocateHeap(size, F, stack,
AllocFailStrategy::RETURN_NULL);
#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
@ -94,23 +111,28 @@ template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
AllocFailStrategy::RETURN_NULL);
#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
return p;
const std::nothrow_t& nothrow_constant) throw() {
return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
address caller_pc) throw() {
return CHeapObj<F>::operator new(size, caller_pc);
const NativeCallStack& stack) throw() {
return CHeapObj<F>::operator new(size, stack);
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size)
throw() {
return CHeapObj<F>::operator new(size, CALLER_PC);
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() {
return CHeapObj<F>::operator new(size, nothrow_constant, stack);
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
const std::nothrow_t& nothrow_constant) throw() {
return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
}
template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){
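
The allocation entry points above now take a const NativeCallStack& instead of a raw caller pc, paired with thin overloads that capture the current stack so existing call sites keep compiling. A small sketch of that overload pattern, using stand-in types and an assumed capture helper in place of CURRENT_PC/CALLER_PC:

#include <cstdlib>
#include <cstddef>

struct NativeCallStack {                 // stand-in for the real utilities/nativeCallStack class
  void* frames[4];
  int   depth;
};

static NativeCallStack capture_current_stack() {   // assumed helper playing the role of CURRENT_PC/CALLER_PC
  NativeCallStack s = {};
  return s;
}

// Core entry point: the caller supplies the call stack explicitly.
char* AllocateHeap(size_t size, unsigned short flags, const NativeCallStack& stack) {
  (void)flags; (void)stack;              // the real code hands (size, flags, stack) to NMT
  return (char*)std::malloc(size);
}

// Convenience overload: captures the stack at the call site.
char* AllocateHeap(size_t size, unsigned short flags) {
  return AllocateHeap(size, flags, capture_current_stack());
}

int main() {
  char* p = AllocateHeap(64, /* mtInternal */ 0x07);
  std::free(p);
  return 0;
}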

@ -56,7 +56,7 @@ CardTableRS::CardTableRS(MemRegion whole_heap,
#endif
set_bs(_ct_bs);
_last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
mtGC, 0, AllocFailStrategy::RETURN_NULL);
mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_last_cur_val_in_gen == NULL) {
vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
}

@ -909,7 +909,8 @@ void MarkSweepPolicy::initialize_alignments() {
}
void MarkSweepPolicy::initialize_generations() {
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC,
AllocFailStrategy::RETURN_NULL);
if (_generations == NULL) {
vm_exit_during_initialization("Unable to allocate gen spec");
}

@ -135,7 +135,7 @@ KlassInfoTable::KlassInfoTable(bool need_class_stats) {
_ref = (HeapWord*) Universe::boolArrayKlassObj();
_buckets =
(KlassInfoBucket*) AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets,
mtInternal, 0, AllocFailStrategy::RETURN_NULL);
mtInternal, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_buckets != NULL) {
_size = _num_buckets;
for (int index = 0; index < _size; index++) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -103,11 +103,13 @@ MemRegion MemRegion::minus(const MemRegion mr2) const {
}
void* MemRegion::operator new(size_t size) throw() {
return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
return (address)AllocateHeap(size, mtGC, CURRENT_PC,
AllocFailStrategy::RETURN_NULL);
}
void* MemRegion::operator new [](size_t size) throw() {
return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
return (address)AllocateHeap(size, mtGC, CURRENT_PC,
AllocFailStrategy::RETURN_NULL);
}
void MemRegion::operator delete(void* p) {
FreeHeap(p, mtGC);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,11 +49,11 @@ class ResourceArea: public Arena {
debug_only(static int _warned;) // to suppress multiple warnings
public:
ResourceArea() {
ResourceArea() : Arena(mtThread) {
debug_only(_nesting = 0;)
}
ResourceArea(size_t init_size) : Arena(init_size) {
ResourceArea(size_t init_size) : Arena(mtThread, init_size) {
debug_only(_nesting = 0;);
}
@ -64,7 +64,7 @@ public:
if (UseMallocOnly) {
// use malloc, but save pointer in res. area for later freeing
char** save = (char**)internal_malloc_4(sizeof(char*));
return (*save = (char*)os::malloc(size, mtThread));
return (*save = (char*)os::malloc(size, mtThread, CURRENT_PC));
}
#endif
return (char*)Amalloc(size, alloc_failmode);

@ -647,6 +647,10 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
_printer(IdealGraphPrinter::printer()),
#endif
_congraph(NULL),
_comp_arena(mtCompiler),
_node_arena(mtCompiler),
_old_arena(mtCompiler),
_Compile_types(mtCompiler),
_replay_inline_data(NULL),
_late_inlines(comp_arena(), 2, 0, NULL),
_string_late_inlines(comp_arena(), 2, 0, NULL),
@ -954,6 +958,10 @@ Compile::Compile( ciEnv* ci_env,
_in_dump_cnt(0),
_printer(NULL),
#endif
_comp_arena(mtCompiler),
_node_arena(mtCompiler),
_old_arena(mtCompiler),
_Compile_types(mtCompiler),
_dead_node_list(comp_arena()),
_dead_node_count(0),
_congraph(NULL),

@ -265,7 +265,7 @@ void Type::Initialize_shared(Compile* current) {
// locking.
Arena* save = current->type_arena();
Arena* shared_type_arena = new (mtCompiler)Arena();
Arena* shared_type_arena = new (mtCompiler)Arena(mtCompiler);
current->set_type_arena(shared_type_arena);
_shared_type_dict =

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -222,10 +222,17 @@
# include "runtime/vmThread.hpp"
# include "runtime/vm_operations.hpp"
# include "runtime/vm_version.hpp"
# include "services/allocationSite.hpp"
# include "services/lowMemoryDetector.hpp"
# include "services/mallocTracker.hpp"
# include "services/memBaseline.hpp"
# include "services/memoryPool.hpp"
# include "services/memoryService.hpp"
# include "services/memoryUsage.hpp"
# include "services/memReporter.hpp"
# include "services/memTracker.hpp"
# include "services/nmtCommon.hpp"
# include "services/virtualMemoryTracker.hpp"
# include "utilities/accessFlags.hpp"
# include "utilities/array.hpp"
# include "utilities/bitMap.hpp"
@ -240,6 +247,7 @@
# include "utilities/hashtable.hpp"
# include "utilities/histogram.hpp"
# include "utilities/macros.hpp"
# include "utilities/nativeCallStack.hpp"
# include "utilities/numberSeq.hpp"
# include "utilities/ostream.hpp"
# include "utilities/preserveException.hpp"

@ -74,6 +74,7 @@
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "trace/tracing.hpp"
#include "utilities/defaultStream.hpp"
@ -2697,6 +2698,7 @@ static char* get_bad_address() {
if (bad_address != NULL) {
os::protect_memory(bad_address, size, os::MEM_PROT_READ,
/*is_committed*/false);
MemTracker::record_virtual_memory_type((void*)bad_address, mtInternal);
}
}
return bad_address;
@ -3857,6 +3859,7 @@ void TestOldSize_test();
void TestKlass_test();
void TestBitMap_test();
void TestAsUtf8();
void Test_linked_list();
#if INCLUDE_ALL_GCS
void TestOldFreeSpaceCalculation_test();
void TestG1BiasedArray_test();
@ -3887,6 +3890,7 @@ void execute_internal_vm_tests() {
run_unit_test(TestBitMap_test());
run_unit_test(TestAsUtf8());
run_unit_test(ObjectMonitor::sanity_checks());
run_unit_test(Test_linked_list());
#if INCLUDE_VM_STRUCTS
run_unit_test(VMStructs::test());
#endif

@ -52,8 +52,10 @@
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef INCLUDE_NMT
#if INCLUDE_NMT
#include "services/mallocSiteTable.hpp"
#include "services/memTracker.hpp"
#include "utilities/nativeCallStack.hpp"
#endif // INCLUDE_NMT
#include "compiler/compileBroker.hpp"
@ -255,14 +257,18 @@ WB_END
// NMT picks it up correctly
WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
jlong addr = 0;
if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
}
return addr;
WB_END
// Alloc memory with pseudo call stack. The test can create a pseudo malloc
// allocation site to stress the malloc tracking.
WB_ENTRY(jlong, WB_NMTMallocWithPseudoStack(JNIEnv* env, jobject o, jlong size, jint pseudo_stack))
address pc = (address)(size_t)pseudo_stack;
NativeCallStack stack(&pc, 1);
return (jlong)os::malloc(size, mtTest, stack);
WB_END
// Free the memory allocated by NMTAllocTest
WB_ENTRY(void, WB_NMTFree(JNIEnv* env, jobject o, jlong mem))
os::free((void*)(uintptr_t)mem, mtTest);
@ -271,10 +277,8 @@ WB_END
WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
jlong addr = 0;
if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
addr = (jlong)(uintptr_t)os::reserve_memory(size);
MemTracker::record_virtual_memory_type((address)addr, mtTest);
}
return addr;
WB_END
@ -293,20 +297,20 @@ WB_ENTRY(void, WB_NMTReleaseMemory(JNIEnv* env, jobject o, jlong addr, jlong siz
os::release_memory((char *)(uintptr_t)addr, size);
WB_END
// Block until the current generation of NMT data to be merged, used to reliably test the NMT feature
WB_ENTRY(jboolean, WB_NMTWaitForDataMerge(JNIEnv* env))
if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
return false;
}
return MemTracker::wbtest_wait_for_data_merge();
WB_END
WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
return MemTracker::tracking_level() == MemTracker::NMT_detail;
return MemTracker::tracking_level() == NMT_detail;
WB_END
WB_ENTRY(void, WB_NMTOverflowHashBucket(JNIEnv* env, jobject o, jlong num))
address pc = (address)1;
for (jlong index = 0; index < num; index ++) {
NativeCallStack stack(&pc, 1);
os::malloc(0, mtTest, stack);
pc += MallocSiteTable::hash_buckets();
}
WB_END
#endif // INCLUDE_NMT
static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
@ -843,12 +847,13 @@ static JNINativeMethod methods[] = {
#endif // INCLUDE_ALL_GCS
#if INCLUDE_NMT
{CC"NMTMalloc", CC"(J)J", (void*)&WB_NMTMalloc },
{CC"NMTMallocWithPseudoStack", CC"(JI)J", (void*)&WB_NMTMallocWithPseudoStack},
{CC"NMTFree", CC"(J)V", (void*)&WB_NMTFree },
{CC"NMTReserveMemory", CC"(J)J", (void*)&WB_NMTReserveMemory },
{CC"NMTCommitMemory", CC"(JJ)V", (void*)&WB_NMTCommitMemory },
{CC"NMTUncommitMemory", CC"(JJ)V", (void*)&WB_NMTUncommitMemory },
{CC"NMTReleaseMemory", CC"(JJ)V", (void*)&WB_NMTReleaseMemory },
{CC"NMTWaitForDataMerge", CC"()Z", (void*)&WB_NMTWaitForDataMerge},
{CC"NMTOverflowHashBucket", CC"(J)V", (void*)&WB_NMTOverflowHashBucket},
{CC"NMTIsDetailSupported",CC"()Z", (void*)&WB_NMTIsDetailSupported},
#endif // INCLUDE_NMT
{CC"deoptimizeAll", CC"()V", (void*)&WB_DeoptimizeAll },

@ -300,6 +300,7 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
{ "UseNewReflection", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "ReflectionWrapResolutionErrors",JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "VerifyReflectionBytecodes", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "AutoShutdownNMT", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ NULL, JDK_Version(0), JDK_Version(0) }
};
@ -2372,7 +2373,7 @@ bool Arguments::check_vm_args_consistency() {
if (PrintNMTStatistics) {
#if INCLUDE_NMT
if (MemTracker::tracking_level() == MemTracker::NMT_off) {
if (MemTracker::tracking_level() == NMT_off) {
#endif // INCLUDE_NMT
warning("PrintNMTStatistics is disabled, because native memory tracking is not enabled");
PrintNMTStatistics = false;
@ -3582,15 +3583,24 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
CommandLineFlags::printFlags(tty, false);
vm_exit(0);
}
if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
#if INCLUDE_NMT
MemTracker::init_tracking_options(tail);
#else
jio_fprintf(defaultStream::error_stream(),
"Native Memory Tracking is not supported in this VM\n");
return JNI_ERR;
#endif
if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
// The launcher did not setup nmt environment variable properly.
// if (!MemTracker::check_launcher_nmt_support(tail)) {
// warning("Native Memory Tracking did not setup properly, using wrong launcher?");
// }
// Verify if nmt option is valid.
if (MemTracker::verify_nmt_option()) {
// Late initialization, still in single-threaded mode.
if (MemTracker::tracking_level() >= NMT_summary) {
MemTracker::init();
}
} else {
vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
}
}
#endif
#ifndef PRODUCT

@ -945,11 +945,6 @@ class CommandLineFlags {
diagnostic(bool, PrintNMTStatistics, false, \
"Print native memory tracking summary data if it is on") \
\
diagnostic(bool, AutoShutdownNMT, true, \
"Automatically shutdown native memory tracking under stress " \
"situations. When set to false, native memory tracking tries to " \
"stay alive at the expense of JVM performance") \
\
diagnostic(bool, LogCompilation, false, \
"Log compilation activity in detail to LogFile") \
\

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -227,7 +227,7 @@ class HandleArea: public Arena {
HandleArea* _prev; // link to outer (older) area
public:
// Constructor
HandleArea(HandleArea* prev) : Arena(Chunk::tiny_size) {
HandleArea(HandleArea* prev) : Arena(mtThread, Chunk::tiny_size) {
debug_only(_handle_mark_nesting = 0);
debug_only(_no_handle_mark_nesting = 0);
_prev = prev;

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,8 +34,10 @@
#include "runtime/init.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
// Initialization done by VM thread in vm_init_globals()
void check_ThreadShadow();
void eventlog_init();
@ -131,6 +133,12 @@ jint init_globals() {
javaClasses_init(); // must happen after vtable initialization
stubRoutines_init2(); // note: StubRoutines need 2-phase init
#if INCLUDE_NMT
// Solaris stack is walkable only after stubRoutines are set up.
// On other platforms, the stack is always walkable.
NMT_stack_walkable = true;
#endif // INCLUDE_NMT
// All the flags that get adjusted by VM_Version_init and os::init_2
// have been set so dump the flags now.
if (PrintFlagsFinal) {

@ -57,7 +57,6 @@
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memReporter.hpp"
#include "services/memTracker.hpp"
#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp"
@ -349,12 +348,7 @@ void print_statistics() {
#endif // ENABLE_ZAP_DEAD_LOCALS
// Native memory tracking data
if (PrintNMTStatistics) {
if (MemTracker::is_on()) {
BaselineTTYOutputer outputer(tty);
MemTracker::print_memory_usage(outputer, K, false);
} else {
tty->print_cr("%s", MemTracker::reason());
}
MemTracker::final_report(tty);
}
}
@ -390,12 +384,7 @@ void print_statistics() {
// Native memory tracking data
if (PrintNMTStatistics) {
if (MemTracker::is_on()) {
BaselineTTYOutputer outputer(tty);
MemTracker::print_memory_usage(outputer, K, false);
} else {
tty->print_cr("%s", MemTracker::reason());
}
MemTracker::final_report(tty);
}
}
@ -544,10 +533,6 @@ void before_exit(JavaThread * thread) {
BeforeExit_lock->notify_all();
}
// Shutdown NMT before exit. Otherwise,
// it will run into trouble when system destroys static variables.
MemTracker::shutdown(MemTracker::NMT_normal);
if (VerifyStringTableAtExit) {
int fail_cnt = 0;
{

@ -52,6 +52,7 @@
#include "runtime/thread.inline.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/nmtCommon.hpp"
#include "services/memTracker.hpp"
#include "services/threadService.hpp"
#include "utilities/defaultStream.hpp"
@ -553,7 +554,11 @@ static u_char* testMalloc(size_t alloc_size) {
return ptr;
}
void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
void* os::malloc(size_t size, MEMFLAGS flags) {
return os::malloc(size, flags, CALLER_PC);
}
void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
@ -579,11 +584,15 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
size = 1;
}
// NMT support
NMT_TrackingLevel level = MemTracker::tracking_level();
size_t nmt_header_size = MemTracker::malloc_header_size(level);
#ifndef ASSERT
const size_t alloc_size = size;
const size_t alloc_size = size + nmt_header_size;
#else
const size_t alloc_size = GuardedMemory::get_total_size(size);
if (size > alloc_size) { // Check for rollover.
const size_t alloc_size = GuardedMemory::get_total_size(size + nmt_header_size);
if (size + nmt_header_size > alloc_size) { // Check for rollover.
return NULL;
}
#endif
@ -602,7 +611,7 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
return NULL;
}
// Wrap memory with guard
GuardedMemory guarded(ptr, size);
GuardedMemory guarded(ptr, size + nmt_header_size);
ptr = guarded.get_user_ptr();
#endif
if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
@ -615,48 +624,50 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
}
// we do not track guard memory
MemTracker::record_malloc((address)ptr, size, memflags, caller == 0 ? CALLER_PC : caller);
return ptr;
return MemTracker::record_malloc((address)ptr, size, memflags, stack, level);
}
void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) {
return os::realloc(memblock, size, flags, CALLER_PC);
}
void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller) {
void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
#ifndef ASSERT
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
void* ptr = ::realloc(memblock, size);
if (ptr != NULL) {
tkr.record((address)memblock, (address)ptr, size, memflags,
caller == 0 ? CALLER_PC : caller);
} else {
tkr.discard();
}
return ptr;
// NMT support
void* membase = MemTracker::record_free(memblock);
NMT_TrackingLevel level = MemTracker::tracking_level();
size_t nmt_header_size = MemTracker::malloc_header_size(level);
void* ptr = ::realloc(membase, size + nmt_header_size);
return MemTracker::record_malloc(ptr, size, memflags, stack, level);
#else
if (memblock == NULL) {
return os::malloc(size, memflags, (caller == 0 ? CALLER_PC : caller));
return os::malloc(size, memflags, stack);
}
if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
tty->print_cr("os::realloc caught " PTR_FORMAT, memblock);
breakpoint();
}
verify_memory(memblock);
// NMT support
void* membase = MemTracker::malloc_base(memblock);
verify_memory(membase);
NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
if (size == 0) {
return NULL;
}
// always move the block
void* ptr = os::malloc(size, memflags, caller == 0 ? CALLER_PC : caller);
void* ptr = os::malloc(size, memflags, stack);
if (PrintMalloc) {
tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
}
// Copy to new memory if malloc didn't fail
if ( ptr != NULL ) {
GuardedMemory guarded(memblock);
memcpy(ptr, memblock, MIN2(size, guarded.get_user_size()));
if (paranoid) verify_memory(ptr);
GuardedMemory guarded(MemTracker::malloc_base(memblock));
// Guard's user data contains NMT header
size_t memblock_size = guarded.get_user_size() - MemTracker::malloc_header_size(memblock);
memcpy(ptr, memblock, MIN2(size, memblock_size));
if (paranoid) verify_memory(MemTracker::malloc_base(ptr));
if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
tty->print_cr("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
breakpoint();
@ -669,7 +680,6 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller
void os::free(void *memblock, MEMFLAGS memflags) {
address trackp = (address) memblock;
NOT_PRODUCT(inc_stat_counter(&num_frees, 1));
#ifdef ASSERT
if (memblock == NULL) return;
@ -677,20 +687,22 @@ void os::free(void *memblock, MEMFLAGS memflags) {
if (tty != NULL) tty->print_cr("os::free caught " PTR_FORMAT, memblock);
breakpoint();
}
verify_memory(memblock);
void* membase = MemTracker::record_free(memblock);
verify_memory(membase);
NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
GuardedMemory guarded(memblock);
GuardedMemory guarded(membase);
size_t size = guarded.get_user_size();
inc_stat_counter(&free_bytes, size);
memblock = guarded.release_for_freeing();
membase = guarded.release_for_freeing();
if (PrintMalloc && tty != NULL) {
fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)memblock);
fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)membase);
}
::free(membase);
#else
void* membase = MemTracker::record_free(memblock);
::free(membase);
#endif
MemTracker::record_free(trackp, memflags);
::free(memblock);
}
void os::init_random(long initval) {
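
os::malloc above now over-allocates by MemTracker::malloc_header_size(level); record_malloc fills an in-band header and returns the user pointer just past it, while record_free/malloc_base walk back to the raw ::malloc base for realloc and free. A minimal sketch of that prefix-header scheme, with an assumed simplified header layout rather than the real NMT header:

#include <cstdlib>
#include <cstring>
#include <cstdio>

struct MallocHeader {            // hypothetical simplified header, not the real layout
  size_t         size;
  unsigned short flags;
};

void* tracked_malloc(size_t size, unsigned short flags) {
  void* base = std::malloc(size + sizeof(MallocHeader));
  if (base == NULL) return NULL;
  MallocHeader* hdr = (MallocHeader*)base;
  hdr->size  = size;             // the real tracker also bumps per-type/per-site counters here
  hdr->flags = flags;
  return hdr + 1;                // user pointer sits just past the header
}

void tracked_free(void* user_ptr) {
  if (user_ptr == NULL) return;
  MallocHeader* hdr = (MallocHeader*)user_ptr - 1;  // walk back to the raw ::malloc base
  std::free(hdr);
}

int main() {
  void* p = tracked_malloc(32, /* mtTest */ 0x0D);
  std::memset(p, 0, 32);
  tracked_free(p);
  std::puts("ok");
  return 0;
}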
@ -1404,7 +1416,7 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) {
char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
char* result = pd_reserve_memory(bytes, addr, alignment_hint);
if (result != NULL) {
MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
}
return result;
@ -1414,7 +1426,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
MEMFLAGS flags) {
char* result = pd_reserve_memory(bytes, addr, alignment_hint);
if (result != NULL) {
MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
MemTracker::record_virtual_memory_type((address)result, flags);
}
@ -1424,7 +1436,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
char* result = pd_attempt_reserve_memory_at(bytes, addr);
if (result != NULL) {
MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
}
return result;
}
@ -1464,23 +1476,29 @@ void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
}
bool os::uncommit_memory(char* addr, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
bool res = pd_uncommit_memory(addr, bytes);
if (res) {
tkr.record((address)addr, bytes);
bool res;
if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
res = pd_uncommit_memory(addr, bytes);
if (res) {
tkr.record((address)addr, bytes);
}
} else {
tkr.discard();
res = pd_uncommit_memory(addr, bytes);
}
return res;
}
bool os::release_memory(char* addr, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
bool res = pd_release_memory(addr, bytes);
if (res) {
tkr.record((address)addr, bytes);
bool res;
if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
res = pd_release_memory(addr, bytes);
if (res) {
tkr.record((address)addr, bytes);
}
} else {
tkr.discard();
res = pd_release_memory(addr, bytes);
}
return res;
}
@ -1491,7 +1509,7 @@ char* os::map_memory(int fd, const char* file_name, size_t file_offset,
bool allow_exec) {
char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
if (result != NULL) {
MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
}
return result;
}
@ -1504,12 +1522,15 @@ char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
}
bool os::unmap_memory(char *addr, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
bool result = pd_unmap_memory(addr, bytes);
if (result) {
tkr.record((address)addr, bytes);
bool result;
if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
result = pd_unmap_memory(addr, bytes);
if (result) {
tkr.record((address)addr, bytes);
}
} else {
tkr.discard();
result = pd_unmap_memory(addr, bytes);
}
return result;
}

@ -65,6 +65,8 @@ class JavaThread;
class Event;
class DLL;
class FileHandle;
class NativeCallStack;
template<class E> class GrowableArray;
// %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose
@ -96,9 +98,11 @@ const bool ExecMem = true;
// Typedef for structured exception handling support
typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
class MallocTracker;
class os: AllStatic {
friend class VMStructs;
friend class MallocTracker;
public:
enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
@ -160,7 +164,10 @@ class os: AllStatic {
// Override me as needed
static int file_name_strcmp(const char* s1, const char* s2);
// get/unset environment variable
static bool getenv(const char* name, char* buffer, int len);
static bool unsetenv(const char* name);
static bool have_special_privileges();
static jlong javaTimeMillis();
@ -207,8 +214,13 @@ class os: AllStatic {
// Interface for detecting multiprocessor system
static inline bool is_MP() {
#if !INCLUDE_NMT
assert(_processor_count > 0, "invalid processor count");
return _processor_count > 1 || AssumeMP;
#else
// NMT needs atomic operations before this initialization.
return true;
#endif
}
static julong available_memory();
static julong physical_memory();
@ -635,12 +647,20 @@ class os: AllStatic {
static void* thread_local_storage_at(int index);
static void free_thread_local_storage(int index);
// Stack walk
static address get_caller_pc(int n = 0);
// Retrieve native stack frames.
// Parameters:
// stack: an array to store stack pointers.
// frames: size of above array.
// toSkip: number of stack frames to skip at the beginning.
// Return: number of stack frames captured.
static int get_native_stack(address* stack, int size, int toSkip = 0);
// General allocation (must be MT-safe)
static void* malloc (size_t size, MEMFLAGS flags, address caller_pc = 0);
static void* realloc (void *memblock, size_t size, MEMFLAGS flags, address caller_pc = 0);
static void* malloc (size_t size, MEMFLAGS flags, const NativeCallStack& stack);
static void* malloc (size_t size, MEMFLAGS flags);
static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack);
static void* realloc (void *memblock, size_t size, MEMFLAGS flag);
static void free (void *memblock, MEMFLAGS flags = mtNone);
static bool check_heap(bool force = false); // verify C heap integrity
static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup

@ -52,7 +52,6 @@
#include "runtime/sweeper.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
@ -527,10 +526,6 @@ void SafepointSynchronize::do_cleanup_tasks() {
TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);
ClassLoaderDataGraph::purge_if_needed();
}
if (MemTracker::is_on()) {
MemTracker::sync();
}
}

@ -297,8 +297,7 @@ void Thread::record_stack_base_and_size() {
#if INCLUDE_NMT
// record thread's native stack, stack grows downward
address stack_low_addr = stack_base() - stack_size();
MemTracker::record_thread_stack(stack_low_addr, stack_size(), this,
CURRENT_PC);
MemTracker::record_thread_stack(stack_low_addr, stack_size());
#endif // INCLUDE_NMT
}
@ -316,7 +315,7 @@ Thread::~Thread() {
#if INCLUDE_NMT
if (_stack_base != NULL) {
address low_stack_addr = stack_base() - stack_size();
MemTracker::release_thread_stack(low_stack_addr, stack_size(), this);
MemTracker::release_thread_stack(low_stack_addr, stack_size());
#ifdef ASSERT
set_stack_base(NULL);
#endif
@ -1425,9 +1424,6 @@ void JavaThread::initialize() {
set_monitor_chunks(NULL);
set_next(NULL);
set_thread_state(_thread_new);
#if INCLUDE_NMT
set_recorder(NULL);
#endif
_terminated = _not_terminated;
_privileged_stack_top = NULL;
_array_for_gc = NULL;
@ -1503,7 +1499,6 @@ JavaThread::JavaThread(bool is_attaching_via_jni) :
_jni_attach_state = _not_attaching_via_jni;
}
assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
_safepoint_visible = false;
}
bool JavaThread::reguard_stack(address cur_sp) {
@ -1566,7 +1561,6 @@ JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
thr_type = entry_point == &compiler_thread_entry ? os::compiler_thread :
os::java_thread;
os::create_thread(this, thr_type, stack_sz);
_safepoint_visible = false;
// The _osthread may be NULL here because we ran out of memory (too many threads active).
// We need to throw an OutOfMemoryError - however we cannot do this here because the caller
// may hold a lock and all locks must be unlocked before throwing the exception (throwing
@ -1584,13 +1578,6 @@ JavaThread::~JavaThread() {
tty->print_cr("terminate thread %p", this);
}
// By now, this thread should already be invisible to safepoint,
// and its per-thread recorder also collected.
assert(!is_safepoint_visible(), "wrong state");
#if INCLUDE_NMT
assert(get_recorder() == NULL, "Already collected");
#endif // INCLUDE_NMT
// JSR166 -- return the parker to the free list
Parker::Release(_parker);
_parker = NULL;
@ -3359,11 +3346,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// initialize TLS
ThreadLocalStorage::init();
// Bootstrap native memory tracking, so it can start recording memory
// activities before worker thread is started. This is the first phase
// of bootstrapping, VM is currently running in single-thread mode.
MemTracker::bootstrap_single_thread();
// Initialize output stream logging
ostream_init_log();
@ -3414,9 +3396,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// Initialize Java-Level synchronization subsystem
ObjectMonitor::Initialize();
// Second phase of bootstrapping, VM is about entering multi-thread mode
MemTracker::bootstrap_multi_thread();
// Initialize global modules
jint status = init_globals();
if (status != JNI_OK) {
@ -3438,9 +3417,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// real raw monitor. VM is setup enough here for raw monitor enter.
JvmtiExport::transition_pending_onload_raw_monitors();
// Fully start NMT
MemTracker::start();
// Create the VMThread
{ TraceTime timer("Start VMThread", TraceStartupTime);
VMThread::create();
@ -3995,8 +3971,6 @@ void Threads::add(JavaThread* p, bool force_daemon) {
daemon = false;
}
p->set_safepoint_visible(true);
ThreadService::add_thread(p, daemon);
// Possible GC point.
@ -4042,13 +4016,6 @@ void Threads::remove(JavaThread* p) {
// to do callbacks into the safepoint code. However, the safepoint code is not aware
// of this thread since it is removed from the queue.
p->set_terminated_value();
// Now, this thread is not visible to safepoint
p->set_safepoint_visible(false);
// once the thread becomes safepoint invisible, we can not use its per-thread
// recorder. And Threads::do_threads() no longer walks this thread, so we have
// to release its per-thread recorder here.
MemTracker::thread_exiting(p);
} // unlock Threads_lock
// Since Events::log uses a lock, we grab it outside the Threads_lock

@ -43,10 +43,6 @@
#include "runtime/unhandledOops.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_NMT
#include "services/memRecorder.hpp"
#endif // INCLUDE_NMT
#include "trace/traceBackend.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/exceptions.hpp"
@ -1036,16 +1032,6 @@ class JavaThread: public Thread {
bool do_not_unlock_if_synchronized() { return _do_not_unlock_if_synchronized; }
void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
#if INCLUDE_NMT
// native memory tracking
inline MemRecorder* get_recorder() const { return (MemRecorder*)_recorder; }
inline void set_recorder(MemRecorder* rc) { _recorder = rc; }
private:
// per-thread memory recorder
MemRecorder* volatile _recorder;
#endif // INCLUDE_NMT
// Suspend/resume support for JavaThread
private:
inline void set_ext_suspended();
@ -1485,19 +1471,6 @@ public:
return result;
}
// NMT (Native memory tracking) support.
// This flag helps NMT to determine if this JavaThread will be blocked
// at safepoint. If not, ThreadCritical is needed for writing memory records.
// A JavaThread is only safepoint visible while it is in Threads' thread list:
// it is not visible until it is added to the list, and it becomes invisible
// once it is removed from the list.
public:
bool is_safepoint_visible() const { return _safepoint_visible; }
void set_safepoint_visible(bool visible) { _safepoint_visible = visible; }
private:
bool _safepoint_visible;
// Static operations
public:
// Returns the running thread as a JavaThread
static inline JavaThread* current();

@ -0,0 +1,57 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
#define SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
#include "memory/allocation.hpp"
#include "utilities/nativeCallStack.hpp"
// Allocation site represents a code path that makes a memory
// allocation
template <class E> class AllocationSite VALUE_OBJ_CLASS_SPEC {
private:
NativeCallStack _call_stack;
E e;
public:
AllocationSite(const NativeCallStack& stack) : _call_stack(stack) { }
int hash() const { return _call_stack.hash(); }
bool equals(const NativeCallStack& stack) const {
return _call_stack.equals(stack);
}
bool equals(const AllocationSite<E>& other) const {
return other.equals(_call_stack);
}
const NativeCallStack* call_stack() const {
return &_call_stack;
}
// Information regarding this allocation
E* data() { return &e; }
const E* peek() const { return &e; }
};
#endif // SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
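A self-contained sketch of how an AllocationSite<E>-style pairing of a call-stack key with per-site data is meant to be used. SimpleCallStack, Counter and Site below are simplified stand-ins for NativeCallStack and the real template, not code from this changeset.
#include <cstddef>
#include <cstdio>
static int anchor;                             // any address usable as a fake pc
struct SimpleCallStack {                       // stand-in for NativeCallStack
  void* frames[4];
  int   hash() const { return (int)((size_t)frames[0] >> 3); }
  bool  equals(const SimpleCallStack& o) const { return frames[0] == o.frames[0]; }
};
struct Counter {                               // per-site data ("E")
  size_t size;
  Counter() : size(0) { }
  void allocate(size_t s) { size += s; }
};
template <class E> class Site {                // mirrors the shape of AllocationSite<E>
  SimpleCallStack _call_stack;
  E               _e;
 public:
  explicit Site(const SimpleCallStack& s) : _call_stack(s), _e() { }
  bool equals(const SimpleCallStack& s) const { return _call_stack.equals(s); }
  E*   data() { return &_e; }                  // mutable per-site data
  const E* peek() const { return &_e; }        // read-only view
};
int main() {
  SimpleCallStack cs = { { (void*)&anchor, 0, 0, 0 } };
  Site<Counter> site(cs);                      // one site per unique call path
  site.data()->allocate(128);                  // record an allocation at this site
  std::printf("bytes attributed to this site: %zu\n", site.peek()->size);
  return 0;
}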

@ -0,0 +1,261 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "services/mallocSiteTable.hpp"
/*
 * Early os::malloc() calls come from initializations of static variables, long before entering any
 * VM code. Upon the arrival of the first os::malloc() call, the malloc site hashtable has to be
 * initialized, along with the allocation site for the hashtable entries.
 * To ensure that the malloc site hashtable can be initialized without triggering any additional
 * os::malloc() call, the hashtable bucket array and the hashtable entry allocation site have to be static.
 * This is not a problem for the hashtable buckets, since they form an array of pointers; the C runtime
 * just allocates a block of memory and zeroes it.
 * But for the hashtable entry allocation site object, things get tricky. The C runtime not only allocates
 * memory for it, but also calls its constructor at some later time. If we initialized the allocation site
 * at the first os::malloc() call, the object would be reinitialized when its constructor is later called
 * by the C runtime.
 * To work around this issue, we declare a static size_t array sized to hold a MallocSiteHashtableEntry;
 * that memory is used to instantiate the MallocSiteHashtableEntry for the hashtable entry allocation site.
 * Since it is a primitive type array, the C runtime does nothing other than assign the memory block to
 * the variable, which is exactly what we want.
 * The same trick is also applied to create the NativeCallStack object used for MallocSiteHashtableEntry
 * memory allocations.
 *
 * Note: C++ objects usually require a particular alignment, which depends on the compiler implementation.
 * We declare the memory as size_t arrays to ensure it is aligned to the native machine word alignment.
 */
// Reserve enough memory for NativeCallStack and MallocSiteHashtableEntry objects
size_t MallocSiteTable::_hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
size_t MallocSiteTable::_hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
// Malloc site hashtable buckets
MallocSiteHashtableEntry* MallocSiteTable::_table[MallocSiteTable::table_size];
// concurrent access counter
volatile int MallocSiteTable::_access_count = 0;
// Tracking hashtable contention
NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;)
/*
 * Initialize the malloc site table.
 * Hashtable entries are malloc'd, which could cause infinite recursion.
 * To avoid that problem, we pre-initialize a hash entry for
 * this allocation site.
 * The method is called during C runtime static variable initialization,
 * so it runs in single-threaded mode from the JVM's perspective.
 */
bool MallocSiteTable::initialize() {
assert(sizeof(_hash_entry_allocation_stack) >= sizeof(NativeCallStack), "Sanity Check");
assert(sizeof(_hash_entry_allocation_site) >= sizeof(MallocSiteHashtableEntry),
"Sanity Check");
assert((size_t)table_size <= MAX_MALLOCSITE_TABLE_SIZE, "Hashtable overflow");
// Fake the call stack for hashtable entry allocation
assert(NMT_TrackingStackDepth > 1, "At least one tracking stack");
// Create pseudo call stack for hashtable entry allocation
address pc[3];
if (NMT_TrackingStackDepth >= 3) {
pc[2] = (address)MallocSiteTable::allocation_at;
}
if (NMT_TrackingStackDepth >= 2) {
pc[1] = (address)MallocSiteTable::lookup_or_add;
}
pc[0] = (address)MallocSiteTable::new_entry;
// Instantiate NativeCallStack object, have to use placement new operator. (see comments above)
NativeCallStack* stack = ::new ((void*)_hash_entry_allocation_stack)
NativeCallStack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth)));
// Instantiate hash entry for hashtable entry allocation callsite
MallocSiteHashtableEntry* entry = ::new ((void*)_hash_entry_allocation_site)
MallocSiteHashtableEntry(*stack);
// Add the allocation site to hashtable.
int index = hash_to_index(stack->hash());
_table[index] = entry;
return true;
}
// Walks entries in the hashtable.
// It stops walk if the walker returns false.
bool MallocSiteTable::walk(MallocSiteWalker* walker) {
MallocSiteHashtableEntry* head;
for (int index = 0; index < table_size; index ++) {
head = _table[index];
while (head != NULL) {
if (!walker->do_malloc_site(head->peek())) {
return false;
}
head = (MallocSiteHashtableEntry*)head->next();
}
}
return true;
}
/*
 * The hashtable has no deletion policy for individual entries,
 * and each linked-list node is inserted via compare-and-swap,
 * so each linked list is stable; contention only happens
 * at the tail of a list.
 * This method should not return NULL under normal circumstances.
 * If NULL is returned, it indicates either:
 * 1. Out of memory: a new hash entry cannot be allocated.
 * 2. Hash bucket overflow.
 * In either case, the caller should handle the situation.
 */
MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
size_t* pos_idx) {
int index = hash_to_index(key.hash());
assert(index >= 0, "Negative index");
*bucket_idx = (size_t)index;
*pos_idx = 0;
// First entry for this hash bucket
if (_table[index] == NULL) {
MallocSiteHashtableEntry* entry = new_entry(key);
// OOM check
if (entry == NULL) return NULL;
// swap in the head
if (Atomic::cmpxchg_ptr((void*)entry, (volatile void *)&_table[index], NULL) == NULL) {
return entry->data();
}
delete entry;
}
MallocSiteHashtableEntry* head = _table[index];
while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) {
MallocSite* site = head->data();
if (site->equals(key)) {
// found matched entry
return head->data();
}
if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) {
MallocSiteHashtableEntry* entry = new_entry(key);
// OOM check
if (entry == NULL) return NULL;
if (head->atomic_insert(entry)) {
(*pos_idx) ++;
return entry->data();
}
// contended, other thread won
delete entry;
}
head = (MallocSiteHashtableEntry*)head->next();
(*pos_idx) ++;
}
return NULL;
}
// Access malloc site
MallocSite* MallocSiteTable::malloc_site(size_t bucket_idx, size_t pos_idx) {
assert(bucket_idx < table_size, "Invalid bucket index");
MallocSiteHashtableEntry* head = _table[bucket_idx];
for (size_t index = 0; index < pos_idx && head != NULL;
index ++, head = (MallocSiteHashtableEntry*)head->next());
assert(head != NULL, "Invalid position index");
return head->data();
}
// Allocates MallocSiteHashtableEntry object. Special call stack
// (pre-installed allocation site) has to be used to avoid infinite
// recursion.
MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key) {
void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT,
*hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL);
return ::new (p) MallocSiteHashtableEntry(key);
}
void MallocSiteTable::reset() {
for (int index = 0; index < table_size; index ++) {
MallocSiteHashtableEntry* head = _table[index];
_table[index] = NULL;
delete_linked_list(head);
}
}
void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) {
MallocSiteHashtableEntry* p;
while (head != NULL) {
p = head;
head = (MallocSiteHashtableEntry*)head->next();
if (p != (MallocSiteHashtableEntry*)_hash_entry_allocation_site) {
delete p;
}
}
}
void MallocSiteTable::shutdown() {
AccessLock locker(&_access_count);
locker.exclusiveLock();
reset();
}
bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) {
assert(walker != NULL, "NULL walker");
AccessLock locker(&_access_count);
if (locker.sharedLock()) {
NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
return walk(walker);
}
return false;
}
void MallocSiteTable::AccessLock::exclusiveLock() {
jint target;
jint val;
assert(_lock_state != ExclusiveLock, "Can only call once");
assert(*_lock >= 0, "Can not contend for exclusive lock");
// make counter negative to block out shared locks
do {
val = *_lock;
target = _MAGIC_ + *_lock;
} while (Atomic::cmpxchg(target, _lock, val) != val);
// wait for all readers to exit
while (*_lock != _MAGIC_) {
#ifdef _WINDOWS
os::naked_short_sleep(1);
#else
os::naked_yield();
#endif
}
_lock_state = ExclusiveLock;
}
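The bootstrap trick described in the file-level comment of mallocSiteTable.cpp above (constructing objects inside static size_t arrays so that the very first os::malloc() call never triggers another malloc) can be shown in isolation. Entry, OBJ_SIZE_IN_WORDS and entry_storage below are illustrative names, not part of the patch.
#include <cstddef>
#include <cstdio>
#include <new>
struct Entry {                        // stand-in for MallocSiteHashtableEntry
  size_t count;
  Entry() : count(0) { }
};
// Word-aligned static storage large enough to hold one Entry. The C runtime
// only has to zero this array; no constructor runs until we ask for it.
#define OBJ_SIZE_IN_WORDS(type) ((sizeof(type) + sizeof(size_t) - 1) / sizeof(size_t))
static size_t entry_storage[OBJ_SIZE_IN_WORDS(Entry)];
int main() {
  // Construct the object in the pre-reserved storage with placement new,
  // exactly once and without touching the heap.
  Entry* e = ::new ((void*)entry_storage) Entry();
  e->count = 42;
  std::printf("count = %zu\n", e->count);
  return 0;
}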

@ -0,0 +1,268 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
#define SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
#if INCLUDE_NMT
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/allocationSite.hpp"
#include "services/mallocTracker.hpp"
#include "services/nmtCommon.hpp"
// MallocSite represents a code path that eventually calls
// os::malloc() to allocate memory
class MallocSite : public AllocationSite<MemoryCounter> {
public:
MallocSite() :
AllocationSite<MemoryCounter>(emptyStack) { }
MallocSite(const NativeCallStack& stack) :
AllocationSite<MemoryCounter>(stack) { }
void allocate(size_t size) { data()->allocate(size); }
void deallocate(size_t size) { data()->deallocate(size); }
// Memory allocated from this code path
size_t size() const { return peek()->size(); }
// The number of malloc calls made from this code path
size_t count() const { return peek()->count(); }
};
// Malloc site hashtable entry
class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
private:
MallocSite _malloc_site;
MallocSiteHashtableEntry* _next;
public:
MallocSiteHashtableEntry() : _next(NULL) { }
MallocSiteHashtableEntry(NativeCallStack stack):
_malloc_site(stack), _next(NULL) { }
inline const MallocSiteHashtableEntry* next() const {
return _next;
}
// Insert an entry atomically.
// Return true if the entry is inserted successfully.
// The operation can fail due to contention from other threads.
bool atomic_insert(const MallocSiteHashtableEntry* entry) {
return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next,
NULL) == NULL);
}
void set_callsite(const MallocSite& site) {
_malloc_site = site;
}
inline const MallocSite* peek() const { return &_malloc_site; }
inline MallocSite* data() { return &_malloc_site; }
inline long hash() const { return _malloc_site.hash(); }
inline bool equals(const NativeCallStack& stack) const {
return _malloc_site.equals(stack);
}
// Allocation/deallocation on this allocation site
inline void allocate(size_t size) { _malloc_site.allocate(size); }
inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
// Memory counters
inline size_t size() const { return _malloc_site.size(); }
inline size_t count() const { return _malloc_site.count(); }
};
// The walker visits every entry in the MallocSiteTable
class MallocSiteWalker : public StackObj {
public:
virtual bool do_malloc_site(const MallocSite* e) { return false; }
};
/*
* Native memory tracking call site table.
* The table is only needed when detail tracking is enabled.
*/
class MallocSiteTable : AllStatic {
private:
// The number of hash buckets in this hashtable. The number should
// be tuned if malloc activity changes significantly.
// The statistics can be obtained via jcmd:
// jcmd <pid> VM.native_memory statistics
// Currently, the (number of buckets / number of entries) ratio is
// about 1 / 6.
enum {
table_base_size = 128, // The base size is calculated from statistics to give
// table ratio around 1:6
table_size = (table_base_size * NMT_TrackingStackDepth - 1)
};
// This is a very special lock that allows multiple shared accesses (sharedLock), but
// once exclusive access (exclusiveLock) is requested, all shared accesses are
// rejected forever.
class AccessLock : public StackObj {
enum LockState {
NoLock,
SharedLock,
ExclusiveLock
};
private:
// A very large negative number. The only possibility to "overflow"
// this number is when there are more than -min_jint threads in
// this process, which is not going to happen in the foreseeable future.
const static int _MAGIC_ = min_jint;
LockState _lock_state;
volatile int* _lock;
public:
AccessLock(volatile int* lock) :
_lock(lock), _lock_state(NoLock) {
}
~AccessLock() {
if (_lock_state == SharedLock) {
Atomic::dec((volatile jint*)_lock);
}
}
// Acquire shared lock.
// Return true if shared access is granted.
inline bool sharedLock() {
jint res = Atomic::add(1, _lock);
if (res < 0) {
Atomic::add(-1, _lock);
return false;
}
_lock_state = SharedLock;
return true;
}
// Acquire exclusive lock
void exclusiveLock();
};
public:
static bool initialize();
static void shutdown();
NOT_PRODUCT(static int access_peak_count() { return _peak_count; })
// Number of hash buckets
static inline int hash_buckets() { return (int)table_size; }
// Access and copy a call stack from this table. The shared lock should be
// acquired before accessing the entry.
static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
size_t pos_idx) {
AccessLock locker(&_access_count);
if (locker.sharedLock()) {
NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
MallocSite* site = malloc_site(bucket_idx, pos_idx);
if (site != NULL) {
stack = *site->call_stack();
return true;
}
}
return false;
}
// Record a new allocation from the specified call path.
// Returns true if the allocation is recorded successfully; bucket_idx
// and pos_idx are also updated to indicate the entry where the allocation
// information was recorded.
// Returns false only under rare scenarios:
// 1. out of memory
// 2. hash bucket overflow
static inline bool allocation_at(const NativeCallStack& stack, size_t size,
size_t* bucket_idx, size_t* pos_idx) {
AccessLock locker(&_access_count);
if (locker.sharedLock()) {
NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx);
if (site != NULL) site->allocate(size);
return site != NULL;
}
return false;
}
// Record memory deallocation. bucket_idx and pos_idx indicate where the allocation
// information was recorded.
static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
AccessLock locker(&_access_count);
if (locker.sharedLock()) {
NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
MallocSite* site = malloc_site(bucket_idx, pos_idx);
if (site != NULL) {
site->deallocate(size);
return true;
}
}
return false;
}
// Walk this table.
static bool walk_malloc_site(MallocSiteWalker* walker);
private:
static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key);
static void reset();
// Delete a bucket linked list
static void delete_linked_list(MallocSiteHashtableEntry* head);
static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx);
static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
static bool walk(MallocSiteWalker* walker);
static inline int hash_to_index(int hash) {
hash = (hash > 0) ? hash : (-hash);
return (hash % table_size);
}
static inline const NativeCallStack* hash_entry_allocation_stack() {
return (NativeCallStack*)_hash_entry_allocation_stack;
}
private:
// Counter for counting concurrent access
static volatile int _access_count;
// The callsite hashtable. It has to be a static table,
// since malloc calls can come from the C runtime linker.
static MallocSiteHashtableEntry* _table[table_size];
// Reserve enough memory for placing the objects
// The memory for hashtable entry allocation stack object
static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
// The memory for hashtable entry allocation callsite object
static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
NOT_PRODUCT(static int _peak_count;)
};
#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
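A standalone sketch of the AccessLock scheme above, with std::atomic standing in for HotSpot's Atomic class: readers bump a shared counter, and the one-time exclusive locker swings the counter negative so later readers are rejected, then waits for existing readers to drain. All names below are illustrative, not the HotSpot implementation.
#include <atomic>
#include <climits>
#include <thread>
#include <cstdio>
static std::atomic<int> lock_word(0);
static const int MAGIC = INT_MIN;       // very large negative sentinel
bool shared_lock() {                    // returns true if read access is granted
  if (lock_word.fetch_add(1) + 1 < 0) {
    lock_word.fetch_add(-1);            // exclusive access already requested
    return false;
  }
  return true;
}
void shared_unlock() { lock_word.fetch_add(-1); }
void exclusive_lock() {                 // rejects all future readers
  int val = lock_word.load();
  // Make the counter negative so new readers back off.
  while (!lock_word.compare_exchange_weak(val, MAGIC + val)) {
    // 'val' is reloaded by compare_exchange_weak on failure
  }
  while (lock_word.load() != MAGIC) {   // wait for current readers to exit
    std::this_thread::yield();
  }
}
int main() {
  if (shared_lock()) { std::printf("reader admitted\n"); shared_unlock(); }
  exclusive_lock();
  std::printf("after exclusive lock, reader admitted: %d\n", (int)shared_lock());
  return 0;
}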

@ -0,0 +1,200 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "services/mallocSiteTable.hpp"
#include "services/mallocTracker.hpp"
#include "services/mallocTracker.inline.hpp"
#include "services/memTracker.hpp"
size_t MallocMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
// Total malloc'd memory amount
size_t MallocMemorySnapshot::total() const {
size_t amount = 0;
for (int index = 0; index < mt_number_of_types; index ++) {
amount += _malloc[index].malloc_size();
}
amount += _tracking_header.size() + total_arena();
return amount;
}
// Total malloc'd memory used by arenas
size_t MallocMemorySnapshot::total_arena() const {
size_t amount = 0;
for (int index = 0; index < mt_number_of_types; index ++) {
amount += _malloc[index].arena_size();
}
return amount;
}
void MallocMemorySnapshot::reset() {
_tracking_header.reset();
for (int index = 0; index < mt_number_of_types; index ++) {
_malloc[index].reset();
}
}
// Make an adjustment by subtracting chunks used by arenas
// from total chunks to get the total free chunk size
void MallocMemorySnapshot::make_adjustment() {
size_t arena_size = total_arena();
int chunk_idx = NMTUtil::flag_to_index(mtChunk);
_malloc[chunk_idx].record_free(arena_size);
}
void MallocMemorySummary::initialize() {
assert(sizeof(_snapshot) >= sizeof(MallocMemorySnapshot), "Sanity Check");
// Uses placement new operator to initialize static area.
::new ((void*)_snapshot)MallocMemorySnapshot();
}
void MallocHeader::release() const {
// Tracking has already shut down; no housekeeping is needed anymore
if (MemTracker::tracking_level() <= NMT_minimal) return;
MallocMemorySummary::record_free(size(), flags());
MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
if (tracking_level() == NMT_detail) {
MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx);
}
}
bool MallocHeader::record_malloc_site(const NativeCallStack& stack, size_t size,
size_t* bucket_idx, size_t* pos_idx) const {
bool ret = MallocSiteTable::allocation_at(stack, size, bucket_idx, pos_idx);
// Something went wrong: it could be OOM or malloc site table overflow.
// We want to keep tracking data even under OOM circumstances, so transition to
// summary tracking.
if (!ret) {
MemTracker::transition_to(NMT_summary);
}
return ret;
}
bool MallocHeader::get_stack(NativeCallStack& stack) const {
return MallocSiteTable::access_stack(stack, _bucket_idx, _pos_idx);
}
bool MallocTracker::initialize(NMT_TrackingLevel level) {
if (level >= NMT_summary) {
MallocMemorySummary::initialize();
}
if (level == NMT_detail) {
return MallocSiteTable::initialize();
}
return true;
}
bool MallocTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
assert(from != NMT_off, "Can not transition from off state");
assert(to != NMT_off, "Can not transition to off state");
if (from == NMT_minimal) {
MallocMemorySummary::reset();
}
if (to == NMT_detail) {
assert(from == NMT_minimal || from == NMT_summary, "Just check");
return MallocSiteTable::initialize();
} else if (from == NMT_detail) {
assert(to == NMT_minimal || to == NMT_summary, "Just check");
MallocSiteTable::shutdown();
}
return true;
}
// Record a malloc memory allocation
void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
const NativeCallStack& stack, NMT_TrackingLevel level) {
void* memblock; // the address for user data
MallocHeader* header = NULL;
if (malloc_base == NULL) {
return NULL;
}
// Check malloc size: size has to be <= MAX_MALLOC_SIZE. Overflow is only possible on 32-bit
// systems, when the malloc size is >= 1GB, but it is safe to assume that won't happen.
if (size > MAX_MALLOC_SIZE) {
fatal("Should not use malloc for big memory block, use virtual memory instead");
}
// Uses placement global new operator to initialize malloc header
switch(level) {
case NMT_off:
return malloc_base;
case NMT_minimal: {
MallocHeader* hdr = ::new (malloc_base) MallocHeader();
break;
}
case NMT_summary: {
header = ::new (malloc_base) MallocHeader(size, flags);
break;
}
case NMT_detail: {
header = ::new (malloc_base) MallocHeader(size, flags, stack);
break;
}
default:
ShouldNotReachHere();
}
memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));
// The alignment check: 8-byte alignment for 32-bit systems,
// 16-byte alignment for 64-bit systems.
assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check");
// Sanity check
assert(get_memory_tracking_level(memblock) == level,
"Wrong tracking level");
#ifdef ASSERT
if (level > NMT_minimal) {
// Read back
assert(get_size(memblock) == size, "Wrong size");
assert(get_flags(memblock) == flags, "Wrong flags");
}
#endif
return memblock;
}
void* MallocTracker::record_free(void* memblock) {
// Never turned on
if (MemTracker::tracking_level() == NMT_off ||
memblock == NULL) {
return memblock;
}
MallocHeader* header = malloc_header(memblock);
header->release();
return (void*)header;
}
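A minimal sketch of the header-prefix scheme that record_malloc()/record_free() implement: the tracker claims the first words of every raw malloc block for a header and hands the caller the address just past it, so memblock = malloc_base + sizeof(header). tracked_malloc, tracked_free and Header are illustrative stand-ins, not the HotSpot implementation.
#include <cstdio>
#include <cstdlib>
#include <new>
struct Header {            // stand-in for MallocHeader: two words of metadata
  size_t size;
  size_t flags;
};
void* tracked_malloc(size_t size, size_t flags) {
  void* malloc_base = std::malloc(size + sizeof(Header));
  if (malloc_base == NULL) return NULL;
  Header* h = ::new (malloc_base) Header();   // place the header at the base
  h->size  = size;
  h->flags = flags;
  // Hand out the address just past the header (the "memblock").
  return (char*)malloc_base + sizeof(Header);
}
void tracked_free(void* memblock) {
  if (memblock == NULL) return;
  Header* h = (Header*)((char*)memblock - sizeof(Header));
  std::printf("freeing %zu bytes (flags %zu)\n", h->size, h->flags);
  std::free((void*)h);                        // free from the real base
}
int main() {
  void* p = tracked_malloc(100, 7);
  tracked_free(p);
  return 0;
}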

@ -0,0 +1,424 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
#define SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
#if INCLUDE_NMT
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"
/*
 * This counter class counts memory allocations and deallocations,
 * and records the total allocated size and the number of allocations.
* The counters are updated atomically.
*/
class MemoryCounter VALUE_OBJ_CLASS_SPEC {
private:
size_t _count;
size_t _size;
DEBUG_ONLY(size_t _peak_count;)
DEBUG_ONLY(size_t _peak_size; )
public:
MemoryCounter() : _count(0), _size(0) {
DEBUG_ONLY(_peak_count = 0;)
DEBUG_ONLY(_peak_size = 0;)
}
// Reset counters
void reset() {
_size = 0;
_count = 0;
DEBUG_ONLY(_peak_size = 0;)
DEBUG_ONLY(_peak_count = 0;)
}
inline void allocate(size_t sz) {
Atomic::add(1, (volatile MemoryCounterType*)&_count);
if (sz > 0) {
Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
}
DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
}
inline void deallocate(size_t sz) {
assert(_count > 0, "Negative counter");
assert(_size >= sz, "Negative size");
Atomic::add(-1, (volatile MemoryCounterType*)&_count);
if (sz > 0) {
Atomic::add(-(MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
}
}
inline void resize(long sz) {
if (sz != 0) {
Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
}
}
inline size_t count() const { return _count; }
inline size_t size() const { return _size; }
DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
DEBUG_ONLY(inline size_t peak_size() const { return _peak_size; })
};
/*
* Malloc memory used by a particular subsystem.
 * It includes memory acquired through os::malloc()
 * calls and arenas' backing memory.
*/
class MallocMemory VALUE_OBJ_CLASS_SPEC {
private:
MemoryCounter _malloc;
MemoryCounter _arena;
public:
MallocMemory() { }
inline void record_malloc(size_t sz) {
_malloc.allocate(sz);
}
inline void record_free(size_t sz) {
_malloc.deallocate(sz);
}
inline void record_new_arena() {
_arena.allocate(0);
}
inline void record_arena_free() {
_arena.deallocate(0);
}
inline void record_arena_size_change(long sz) {
_arena.resize(sz);
}
void reset() {
_malloc.reset();
_arena.reset();
}
inline size_t malloc_size() const { return _malloc.size(); }
inline size_t malloc_count() const { return _malloc.count();}
inline size_t arena_size() const { return _arena.size(); }
inline size_t arena_count() const { return _arena.count(); }
DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
DEBUG_ONLY(inline const MemoryCounter& arena_counter() const { return _arena; })
};
class MallocMemorySummary;
// A snapshot of malloc'd memory, including malloc memory
// usage by type and memory used by tracking itself.
class MallocMemorySnapshot : public ResourceObj {
friend class MallocMemorySummary;
private:
MallocMemory _malloc[mt_number_of_types];
MemoryCounter _tracking_header;
public:
inline MallocMemory* by_type(MEMFLAGS flags) {
int index = NMTUtil::flag_to_index(flags);
return &_malloc[index];
}
inline MallocMemory* by_index(int index) {
assert(index >= 0, "Index out of bound");
assert(index < mt_number_of_types, "Index out of bound");
return &_malloc[index];
}
inline MemoryCounter* malloc_overhead() {
return &_tracking_header;
}
// Total malloc'd memory amount
size_t total() const;
// Total malloc'd memory used by arenas
size_t total_arena() const;
inline size_t thread_count() {
return by_type(mtThreadStack)->malloc_count();
}
void reset();
void copy_to(MallocMemorySnapshot* s) {
s->_tracking_header = _tracking_header;
for (int index = 0; index < mt_number_of_types; index ++) {
s->_malloc[index] = _malloc[index];
}
}
// Make adjustment by subtracting chunks used by arenas
// from total chunks to get total free chunk size
void make_adjustment();
};
/*
* This class is for collecting malloc statistics at summary level
*/
class MallocMemorySummary : AllStatic {
private:
// Reserve memory for placement of MallocMemorySnapshot object
static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
public:
static void initialize();
static inline void record_malloc(size_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->record_malloc(size);
}
static inline void record_free(size_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->record_free(size);
}
static inline void record_new_arena(MEMFLAGS flag) {
as_snapshot()->by_type(flag)->record_new_arena();
}
static inline void record_arena_free(MEMFLAGS flag) {
as_snapshot()->by_type(flag)->record_arena_free();
}
static inline void record_arena_size_change(long size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->record_arena_size_change(size);
}
static void snapshot(MallocMemorySnapshot* s) {
as_snapshot()->copy_to(s);
s->make_adjustment();
}
// Record memory used by malloc tracking header
static inline void record_new_malloc_header(size_t sz) {
as_snapshot()->malloc_overhead()->allocate(sz);
}
static inline void record_free_malloc_header(size_t sz) {
as_snapshot()->malloc_overhead()->deallocate(sz);
}
// The memory used by malloc tracking headers
static inline size_t tracking_overhead() {
return as_snapshot()->malloc_overhead()->size();
}
// Reset all counters to zero
static void reset() {
as_snapshot()->reset();
}
static MallocMemorySnapshot* as_snapshot() {
return (MallocMemorySnapshot*)_snapshot;
}
};
/*
* Malloc tracking header.
 * To satisfy the malloc alignment requirement, NMT uses 2 machine words for tracking purposes,
 * which ensures 8-byte alignment on 32-bit systems and 16-byte alignment on 64-bit systems (product build).
*/
class MallocHeader VALUE_OBJ_CLASS_SPEC {
#ifdef _LP64
size_t _size : 62;
size_t _level : 2;
size_t _flags : 8;
size_t _pos_idx : 16;
size_t _bucket_idx: 40;
#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)1 << 40)
#define MAX_BUCKET_LENGTH ((size_t)(1 << 16))
#define MAX_MALLOC_SIZE (((size_t)1 << 62) - 1)
#else
size_t _size : 30;
size_t _level : 2;
size_t _flags : 8;
size_t _pos_idx : 8;
size_t _bucket_idx: 16;
#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)(1 << 16))
#define MAX_BUCKET_LENGTH ((size_t)(1 << 8))
// Max malloc size = 1GB - 1 on 32-bit systems, which have at most 4GB of address space
#define MAX_MALLOC_SIZE ((size_t)(1 << 30) - 1)
#endif // _LP64
public:
// Summary tracking header
MallocHeader(size_t size, MEMFLAGS flags) {
assert(sizeof(MallocHeader) == sizeof(void*) * 2,
"Wrong header size");
_level = NMT_summary;
_flags = flags;
set_size(size);
MallocMemorySummary::record_malloc(size, flags);
MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
}
// Detail tracking header
MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
assert(sizeof(MallocHeader) == sizeof(void*) * 2,
"Wrong header size");
_level = NMT_detail;
_flags = flags;
set_size(size);
size_t bucket_idx;
size_t pos_idx;
if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
_bucket_idx = bucket_idx;
_pos_idx = pos_idx;
}
MallocMemorySummary::record_malloc(size, flags);
MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
}
// Minimal tracking header
MallocHeader() {
assert(sizeof(MallocHeader) == sizeof(void*) * 2,
"Wrong header size");
_level = (unsigned short)NMT_minimal;
}
inline NMT_TrackingLevel tracking_level() const {
return (NMT_TrackingLevel)_level;
}
inline size_t size() const { return _size; }
inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
bool get_stack(NativeCallStack& stack) const;
// Clean up tracking information before the memory is released.
void release() const;
private:
inline void set_size(size_t size) {
assert(size <= MAX_MALLOC_SIZE, "Malloc size too large, should use virtual memory?");
_size = size;
}
bool record_malloc_site(const NativeCallStack& stack, size_t size,
size_t* bucket_idx, size_t* pos_idx) const;
};
// Main class called from MemTracker to track malloc activities
class MallocTracker : AllStatic {
public:
// Initialize malloc tracker for specific tracking level
static bool initialize(NMT_TrackingLevel level);
static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
// malloc tracking header size for specific tracking level
static inline size_t malloc_header_size(NMT_TrackingLevel level) {
return (level == NMT_off) ? 0 : sizeof(MallocHeader);
}
// Parameter name convention:
// memblock : the beginning address for user data
// malloc_base: the beginning address that includes malloc tracking header
//
// The relationship:
// memblock = (char*)malloc_base + sizeof(nmt header)
//
// Record malloc on specified memory block
static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
const NativeCallStack& stack, NMT_TrackingLevel level);
// Record free on specified memory block
static void* record_free(void* memblock);
// Get tracking level of specified memory block
static inline NMT_TrackingLevel get_memory_tracking_level(void* memblock);
// Offset memory address to header address
static inline void* get_base(void* memblock);
static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
if (memblock == NULL || level == NMT_off) return memblock;
return (char*)memblock - malloc_header_size(level);
}
// Get memory size
static inline size_t get_size(void* memblock) {
MallocHeader* header = malloc_header(memblock);
assert(header->tracking_level() >= NMT_summary,
"Wrong tracking level");
return header->size();
}
// Get memory type
static inline MEMFLAGS get_flags(void* memblock) {
MallocHeader* header = malloc_header(memblock);
assert(header->tracking_level() >= NMT_summary,
"Wrong tracking level");
return header->flags();
}
// Get header size
static inline size_t get_header_size(void* memblock) {
return (memblock == NULL) ? 0 : sizeof(MallocHeader);
}
static inline void record_new_arena(MEMFLAGS flags) {
MallocMemorySummary::record_new_arena(flags);
}
static inline void record_arena_free(MEMFLAGS flags) {
MallocMemorySummary::record_arena_free(flags);
}
static inline void record_arena_size_change(int size, MEMFLAGS flags) {
MallocMemorySummary::record_arena_size_change(size, flags);
}
private:
static inline MallocHeader* malloc_header(void *memblock) {
assert(memblock != NULL, "NULL pointer");
MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
assert(header->tracking_level() >= NMT_minimal, "Bad header");
return header;
}
};
#endif // INCLUDE_NMT
#endif //SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
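The MemoryCounter bookkeeping above can be sketched on its own, with std::atomic standing in for HotSpot's Atomic class. CounterSketch is an illustrative name, and the peak update is deliberately as loose as the DEBUG_ONLY version in the patch.
#include <algorithm>
#include <atomic>
#include <cstdio>
class CounterSketch {
  std::atomic<size_t> _count;
  std::atomic<size_t> _size;
  size_t _peak_size;                  // debug-only aid, updated racily
 public:
  CounterSketch() : _count(0), _size(0), _peak_size(0) { }
  void allocate(size_t sz) {
    _count.fetch_add(1);              // one more outstanding allocation
    if (sz > 0) {
      size_t now = _size.fetch_add(sz) + sz;
      _peak_size = std::max(_peak_size, now);
    }
  }
  void deallocate(size_t sz) {
    _count.fetch_sub(1);
    if (sz > 0) _size.fetch_sub(sz);
  }
  size_t count() const { return _count.load(); }
  size_t size()  const { return _size.load(); }
};
int main() {
  CounterSketch c;
  c.allocate(256);                    // e.g. one malloc of 256 bytes
  c.allocate(64);
  c.deallocate(64);
  std::printf("outstanding: %zu bytes in %zu allocations\n", c.size(), c.count());
  return 0;
}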

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,22 +22,22 @@
*
*/
#include "precompiled.hpp"
#include "runtime/atomic.inline.hpp"
#include "services/memPtr.hpp"
#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
#define SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
#include "services/mallocTracker.hpp"
#include "services/memTracker.hpp"
volatile jint SequenceGenerator::_seq_number = 1;
volatile unsigned long SequenceGenerator::_generation = 1;
NOT_PRODUCT(jint SequenceGenerator::_max_seq_number = 1;)
jint SequenceGenerator::next() {
jint seq = Atomic::add(1, &_seq_number);
if (seq < 0) {
MemTracker::shutdown(MemTracker::NMT_sequence_overflow);
} else {
NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
}
return seq;
inline NMT_TrackingLevel MallocTracker::get_memory_tracking_level(void* memblock) {
assert(memblock != NULL, "Sanity check");
if (MemTracker::tracking_level() == NMT_off) return NMT_off;
MallocHeader* header = malloc_header(memblock);
return header->tracking_level();
}
inline void* MallocTracker::get_base(void* memblock){
return get_base(memblock, MemTracker::tracking_level());
}
#endif // SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,471 +22,301 @@
*
*/
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"
/*
 * Sizes are sorted in descending order for reporting
*/
int compare_malloc_size(const MallocSite& s1, const MallocSite& s2) {
if (s1.size() == s2.size()) {
return 0;
} else if (s1.size() > s2.size()) {
return -1;
} else {
return 1;
}
}
MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
{mtJavaHeap, "Java Heap"},
{mtClass, "Class"},
{mtThreadStack,"Thread Stack"},
{mtThread, "Thread"},
{mtCode, "Code"},
{mtGC, "GC"},
{mtCompiler, "Compiler"},
{mtInternal, "Internal"},
{mtOther, "Other"},
{mtSymbol, "Symbol"},
{mtNMT, "Memory Tracking"},
{mtTracing, "Tracing"},
{mtChunk, "Pooled Free Chunks"},
{mtClassShared,"Shared spaces for classes"},
{mtTest, "Test"},
{mtNone, "Unknown"} // It can happen when type tagging records are lagging
// behind
int compare_virtual_memory_size(const VirtualMemoryAllocationSite& s1,
const VirtualMemoryAllocationSite& s2) {
if (s1.reserved() == s2.reserved()) {
return 0;
} else if (s1.reserved() > s2.reserved()) {
return -1;
} else {
return 1;
}
}
// Sort into allocation site addresses order for baseline comparison
int compare_malloc_site(const MallocSite& s1, const MallocSite& s2) {
return s1.call_stack()->compare(*s2.call_stack());
}
int compare_virtual_memory_site(const VirtualMemoryAllocationSite& s1,
const VirtualMemoryAllocationSite& s2) {
return s1.call_stack()->compare(*s2.call_stack());
}
/*
* Walker to walk malloc allocation site table
*/
class MallocAllocationSiteWalker : public MallocSiteWalker {
private:
SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
_malloc_sites;
size_t _count;
// Entries in MallocSiteTable with size = 0 and count = 0,
// when the malloc site is no longer there.
public:
MallocAllocationSiteWalker(Arena* arena) : _count(0), _malloc_sites(arena) {
}
inline size_t count() const { return _count; }
LinkedList<MallocSite>* malloc_sites() {
return &_malloc_sites;
}
bool do_malloc_site(const MallocSite* site) {
if (site->size() >= MemBaseline::SIZE_THRESHOLD) {
if (_malloc_sites.add(*site) != NULL) {
_count++;
return true;
} else {
return false; // OOM
}
} else {
// malloc site does not meet threshold, ignore and continue
return true;
}
}
};
MemBaseline::MemBaseline() {
_baselined = false;
for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
_malloc_data[index].set_type(MemType2NameMap[index]._flag);
_vm_data[index].set_type(MemType2NameMap[index]._flag);
_arena_data[index].set_type(MemType2NameMap[index]._flag);
}
_malloc_cs = NULL;
_vm_cs = NULL;
_vm_map = NULL;
_number_of_classes = 0;
_number_of_threads = 0;
// Compare virtual memory region's base address
int compare_virtual_memory_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
return r1.compare(r2);
}
// Walk all virtual memory regions for baselining
class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
private:
SortedLinkedList<ReservedMemoryRegion, compare_virtual_memory_base, ResourceObj::ARENA>
_virtual_memory_regions;
size_t _count;
void MemBaseline::clear() {
if (_malloc_cs != NULL) {
delete _malloc_cs;
_malloc_cs = NULL;
public:
VirtualMemoryAllocationWalker(Arena* a) : _count(0), _virtual_memory_regions(a) {
}
if (_vm_cs != NULL) {
delete _vm_cs;
_vm_cs = NULL;
}
if (_vm_map != NULL) {
delete _vm_map;
_vm_map = NULL;
}
reset();
}
void MemBaseline::reset() {
_baselined = false;
_total_vm_reserved = 0;
_total_vm_committed = 0;
_total_malloced = 0;
_number_of_classes = 0;
if (_malloc_cs != NULL) _malloc_cs->clear();
if (_vm_cs != NULL) _vm_cs->clear();
if (_vm_map != NULL) _vm_map->clear();
for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
_malloc_data[index].clear();
_vm_data[index].clear();
_arena_data[index].clear();
}
}
MemBaseline::~MemBaseline() {
clear();
}
// baseline malloc'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
size_t used_arena_size = 0;
int index;
while (malloc_ptr != NULL) {
index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
size_t size = malloc_ptr->size();
if (malloc_ptr->is_arena_memory_record()) {
// We do have anonymous arenas, they are either used as value objects,
// which are embedded inside other objects, or used as stack objects.
_arena_data[index].inc(size);
used_arena_size += size;
} else {
_total_malloced += size;
_malloc_data[index].inc(size);
if (malloc_ptr->is_arena_record()) {
// see if arena memory record present
MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
"Arena records do not match");
size = next_malloc_ptr->size();
_arena_data[index].inc(size);
used_arena_size += size;
malloc_itr.next();
}
}
}
malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
}
// subtract used arena size to get the size of arena chunks in the free list
index = flag2index(mtChunk);
_malloc_data[index].reduce(used_arena_size);
// we really don't know how many chunks are in the free list, so just
// set it to 0
_malloc_data[index].overwrite_counter(0);
return true;
}
// check if there is a safepoint in progress, if so, block the thread
// for the safepoint
void MemBaseline::check_safepoint(JavaThread* thr) {
if (SafepointSynchronize::is_synchronizing()) {
// grab and drop the SR_lock to honor the safepoint protocol
MutexLocker ml(thr->SR_lock());
}
}
// baseline mmap'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
int index;
while (vm_ptr != NULL) {
if (vm_ptr->is_reserved_region()) {
index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
// we use the number of thread stack to count threads
if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
_number_of_threads ++;
}
_total_vm_reserved += vm_ptr->size();
_vm_data[index].inc(vm_ptr->size(), 0);
} else {
_total_vm_committed += vm_ptr->size();
_vm_data[index].inc(0, vm_ptr->size());
}
vm_ptr = (VMMemRegion*)vm_itr.next();
}
return true;
}
// baseline malloc'd memory by callsites, but only the callsites with memory allocation
// over 1KB are stored.
bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
assert(MemTracker::track_callsite(), "detail tracking is off");
MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
MallocCallsitePointer malloc_callsite;
// initialize malloc callsite array
if (_malloc_cs == NULL) {
_malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
// out of native memory
if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
return false;
}
} else {
_malloc_cs->clear();
}
MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);
// sort into callsite pc order. Details are aggregated by callsites
malloc_data->sort((FN_SORT)malloc_sort_by_pc);
bool ret = true;
// baseline memory that is totaled over 1 KB
while (malloc_ptr != NULL) {
if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
// skip thread stacks
if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
if (malloc_callsite.addr() != malloc_ptr->pc()) {
if ((malloc_callsite.amount()/K) > 0) {
if (!_malloc_cs->append(&malloc_callsite)) {
ret = false;
break;
}
}
malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
}
malloc_callsite.inc(malloc_ptr->size());
}
}
malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
}
// restore to address order. Snapshot malloc data is maintained in memory
// address order.
malloc_data->sort((FN_SORT)malloc_sort_by_addr);
if (!ret) {
return false;
}
// deal with last record
if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
if (!_malloc_cs->append(&malloc_callsite)) {
return false;
}
}
return true;
}
// baseline mmap'd memory by callsites
bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
assert(MemTracker::track_callsite(), "detail tracking is off");
VMCallsitePointer vm_callsite;
VMCallsitePointer* cur_callsite = NULL;
MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();
// initialize virtual memory map array
if (_vm_map == NULL) {
_vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
if (_vm_map == NULL || _vm_map->out_of_memory()) {
return false;
}
} else {
_vm_map->clear();
}
// initialize virtual memory callsite array
if (_vm_cs == NULL) {
_vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
return false;
}
} else {
_vm_cs->clear();
}
// consolidate virtual memory data
VMMemRegionEx* reserved_rec = NULL;
VMMemRegionEx* committed_rec = NULL;
// vm_ptr is coming in increasing base address order
while (vm_ptr != NULL) {
if (vm_ptr->is_reserved_region()) {
// consolidate reserved memory regions for virtual memory map.
// The criteria for consolidation are:
// 1. two adjacent reserved memory regions
// 2. belong to the same memory type
// 3. reserved from the same callsite
if (reserved_rec == NULL ||
reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
reserved_rec->pc() != vm_ptr->pc()) {
if (!_vm_map->append(vm_ptr)) {
bool do_allocation_site(const ReservedMemoryRegion* rgn) {
if (rgn->size() >= MemBaseline::SIZE_THRESHOLD) {
if (_virtual_memory_regions.add(*rgn) != NULL) {
_count ++;
return true;
} else {
return false;
}
// inserted reserved region, we need the pointer to the element in virtual
// memory map array.
reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
} else {
reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
}
if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
return false;
}
vm_callsite = VMCallsitePointer(vm_ptr->pc());
cur_callsite = &vm_callsite;
vm_callsite.inc(vm_ptr->size(), 0);
} else {
// consolidate committed memory regions for virtual memory map
// The criteria are:
// 1. two adjacent committed memory regions
// 2. committed from the same callsite
if (committed_rec == NULL ||
committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
committed_rec->pc() != vm_ptr->pc()) {
if (!_vm_map->append(vm_ptr)) {
return false;
}
committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
} else {
committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
}
vm_callsite.inc(0, vm_ptr->size());
}
vm_ptr = (VMMemRegionEx*)vm_itr.next();
return true;
}
// deal with last record
if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() {
return &_virtual_memory_regions;
}
};
bool MemBaseline::baseline_summary() {
assert(_malloc_memory_snapshot == NULL, "Malloc baseline not yet reset");
assert(_virtual_memory_snapshot == NULL, "Virtual baseline not yet reset");
_malloc_memory_snapshot = new (arena()) MallocMemorySnapshot();
_virtual_memory_snapshot = new (arena()) VirtualMemorySnapshot();
if (_malloc_memory_snapshot == NULL || _virtual_memory_snapshot == NULL) {
return false;
}
MallocMemorySummary::snapshot(_malloc_memory_snapshot);
VirtualMemorySummary::snapshot(_virtual_memory_snapshot);
return true;
}
bool MemBaseline::baseline_allocation_sites() {
assert(arena() != NULL, "Just check");
// Malloc allocation sites
MallocAllocationSiteWalker malloc_walker(arena());
if (!MallocSiteTable::walk_malloc_site(&malloc_walker)) {
return false;
}
// sort it into callsite pc order. Details are aggregated by callsites
_vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);
_malloc_sites.set_head(malloc_walker.malloc_sites()->head());
// The malloc sites are collected in size order
_malloc_sites_order = by_size;
// walk the array to consolidate record by pc
MemPointerArrayIteratorImpl itr(_vm_cs);
VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
while (next_rec != NULL) {
assert(callsite_rec != NULL, "Sanity check");
if (next_rec->addr() == callsite_rec->addr()) {
callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
itr.remove();
next_rec = (VMCallsitePointer*)itr.current();
} else {
callsite_rec = next_rec;
next_rec = (VMCallsitePointer*)itr.next();
}
// Virtual memory allocation sites
VirtualMemoryAllocationWalker virtual_memory_walker(arena());
if (!VirtualMemoryTracker::walk_virtual_memory(&virtual_memory_walker)) {
return false;
}
// Virtual memory allocations are collected in call stack order
_virtual_memory_allocations.set_head(virtual_memory_walker.virtual_memory_allocations()->head());
if (!aggregate_virtual_memory_allocation_sites()) {
return false;
}
// Virtual memory allocation sites are aggregated in call stack order
_virtual_memory_sites_order = by_address;
return true;
}
bool MemBaseline::baseline(bool summaryOnly) {
if (arena() == NULL) {
_arena = new (std::nothrow, mtNMT) Arena(mtNMT);
if (arena() == NULL) return false;
}
reset();
_class_count = InstanceKlass::number_of_instance_classes();
if (!baseline_summary()) {
return false;
}
_baseline_type = Summary_baselined;
// baseline details
if (!summaryOnly &&
MemTracker::tracking_level() == NMT_detail) {
baseline_allocation_sites();
_baseline_type = Detail_baselined;
}
return true;
}
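// Illustrative sketch (not part of this change) of how a caller might take a
// summary baseline and report the overall totals; the output format below is
// hypothetical, but the MemBaseline calls follow the declarations in this patch.
void print_summary_sketch(outputStream* out) {
  MemBaseline baseline;
  if (baseline.baseline(true /* summary only */)) {
    out->print_cr("reserved:  " SIZE_FORMAT " bytes", baseline.total_reserved_memory());
    out->print_cr("committed: " SIZE_FORMAT " bytes", baseline.total_committed_memory());
    out->print_cr("classes:   " SIZE_FORMAT, baseline.class_count());
  }
}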
// baseline a snapshot. If summary_only = false, memory usage aggregated by
// callsites is also baselined.
// The method call can be lengthy, especially when detail tracking info is
// requested. So the method checks for safepoint explicitly.
bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
Thread* THREAD = Thread::current();
assert(THREAD->is_Java_thread(), "must be a JavaThread");
MutexLocker snapshot_locker(snapshot._lock);
reset();
_baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
if (_baselined) {
check_safepoint((JavaThread*)THREAD);
_baselined = baseline_vm_summary(snapshot._vm_ptrs);
}
_number_of_classes = snapshot.number_of_classes();
int compare_allocation_site(const VirtualMemoryAllocationSite& s1,
const VirtualMemoryAllocationSite& s2) {
return s1.call_stack()->compare(*s2.call_stack());
}
if (!summary_only && MemTracker::track_callsite() && _baselined) {
check_safepoint((JavaThread*)THREAD);
_baselined = baseline_malloc_details(snapshot._alloc_ptrs);
if (_baselined) {
check_safepoint((JavaThread*)THREAD);
_baselined = baseline_vm_details(snapshot._vm_ptrs);
bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site, ResourceObj::ARENA>
allocation_sites(arena());
VirtualMemoryAllocationIterator itr = virtual_memory_allocations();
const ReservedMemoryRegion* rgn;
VirtualMemoryAllocationSite* site;
while ((rgn = itr.next()) != NULL) {
VirtualMemoryAllocationSite tmp(*rgn->call_stack());
site = allocation_sites.find(tmp);
if (site == NULL) {
LinkedListNode<VirtualMemoryAllocationSite>* node =
allocation_sites.add(tmp);
if (node == NULL) return false;
site = node->data();
}
}
return _baselined;
}
int MemBaseline::flag2index(MEMFLAGS flag) const {
for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
if (MemType2NameMap[index]._flag == flag) {
return index;
}
}
assert(false, "no type");
return -1;
}
const char* MemBaseline::type2name(MEMFLAGS type) {
for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
if (MemType2NameMap[index]._flag == type) {
return MemType2NameMap[index]._name;
}
}
assert(false, err_msg("bad type %x", type));
return NULL;
}
MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
_total_malloced = other._total_malloced;
_total_vm_reserved = other._total_vm_reserved;
_total_vm_committed = other._total_vm_committed;
_baselined = other._baselined;
_number_of_classes = other._number_of_classes;
for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
_malloc_data[index] = other._malloc_data[index];
_vm_data[index] = other._vm_data[index];
_arena_data[index] = other._arena_data[index];
site->reserve_memory(rgn->size());
site->commit_memory(rgn->committed_size());
}
if (MemTracker::track_callsite()) {
assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
assert(other._malloc_cs != NULL && other._vm_cs != NULL,
"not properly baselined");
_malloc_cs->clear();
_vm_cs->clear();
int index;
for (index = 0; index < other._malloc_cs->length(); index ++) {
_malloc_cs->append(other._malloc_cs->at(index));
}
_virtual_memory_sites.set_head(allocation_sites.head());
return true;
}
for (index = 0; index < other._vm_cs->length(); index ++) {
_vm_cs->append(other._vm_cs->at(index));
}
MallocSiteIterator MemBaseline::malloc_sites(SortingOrder order) {
assert(!_malloc_sites.is_empty(), "Detail baseline?");
switch(order) {
case by_size:
malloc_sites_to_size_order();
break;
case by_site:
malloc_sites_to_allocation_site_order();
break;
case by_address:
default:
ShouldNotReachHere();
}
return *this;
return MallocSiteIterator(_malloc_sites.head());
}
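// Sketch of consuming the iterator returned above: walk the baselined malloc
// sites in size order. This assumes a detail baseline has been taken and that
// the iterator's next() returns the next site or NULL, as the reporter does.
void walk_malloc_sites_sketch(MemBaseline& baseline) {
  MallocSiteIterator itr = baseline.malloc_sites(MemBaseline::by_size);
  const MallocSite* site;
  while ((site = itr.next()) != NULL) {
    // ... report the site's allocation size, count and call stack here ...
  }
}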
/* compare functions for sorting */
// sort snapshot malloc'd records in callsite pc order
int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
assert(MemTracker::track_callsite(),"Just check");
const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
}
// sort baselined malloc'd records in size order
int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
assert(MemTracker::is_on(), "Just check");
const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
}
// sort baselined malloc'd records in callsite pc order
int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
assert(MemTracker::is_on(), "Just check");
const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
VirtualMemorySiteIterator MemBaseline::virtual_memory_sites(SortingOrder order) {
assert(!_virtual_memory_sites.is_empty(), "Detail baseline?");
switch(order) {
case by_size:
virtual_memory_sites_to_size_order();
break;
case by_site:
virtual_memory_sites_to_reservation_site_order();
break;
case by_address:
default:
ShouldNotReachHere();
}
return VirtualMemorySiteIterator(_virtual_memory_sites.head());
}
// sort baselined mmap'd records in size (reserved size) order
int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
assert(MemTracker::is_on(), "Just check");
const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
// Sorting allocation sites in different orders
void MemBaseline::malloc_sites_to_size_order() {
if (_malloc_sites_order != by_size) {
SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
tmp(arena());
// Add malloc sites to sorted linked list to sort into size order
tmp.move(&_malloc_sites);
_malloc_sites.set_head(tmp.head());
tmp.set_head(NULL);
_malloc_sites_order = by_size;
}
}
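// Sketch of the re-sort idiom used above, with a stand-in element type and
// comparator (the real code uses MallocSite / VirtualMemoryAllocationSite):
// move the nodes into a SortedLinkedList, which re-inserts them in comparator
// order, then take the sorted chain back and detach it from the temporary.
static int compare_ints(const int& a, const int& b) {
  return a - b;
}

void resort_sketch(Arena* arena) {
  LinkedListImpl<int, ResourceObj::ARENA> list(arena);
  list.add(3); list.add(1); list.add(2);

  SortedLinkedList<int, compare_ints, ResourceObj::ARENA> tmp(arena);
  tmp.move(&list);            // nodes are re-inserted in sorted order
  list.set_head(tmp.head());  // the original list takes the sorted chain back
  tmp.set_head(NULL);         // so the temporary does not free the nodes
}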
// sort baselined mmap'd records in callsite pc order
int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
assert(MemTracker::is_on(), "Just check");
const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
void MemBaseline::malloc_sites_to_allocation_site_order() {
if (_malloc_sites_order != by_site) {
SortedLinkedList<MallocSite, compare_malloc_site, ResourceObj::ARENA>
tmp(arena());
// Add malloc sites to sorted linked list to sort into site (address) order
tmp.move(&_malloc_sites);
_malloc_sites.set_head(tmp.head());
tmp.set_head(NULL);
_malloc_sites_order = by_site;
}
}
// sort snapshot malloc'd records in memory block address order
int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
assert(MemTracker::is_on(), "Just check");
const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
assert(p1 == p2 || delta != 0, "dup pointer");
return delta;
}
void MemBaseline::virtual_memory_sites_to_size_order() {
if (_virtual_memory_sites_order != by_size) {
SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_size, ResourceObj::ARENA>
tmp(arena());
tmp.move(&_virtual_memory_sites);
_virtual_memory_sites.set_head(tmp.head());
tmp.set_head(NULL);
_virtual_memory_sites_order = by_size;
}
}
void MemBaseline::virtual_memory_sites_to_reservation_site_order() {
if (_virtual_memory_sites_order != by_site) {
SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_site, ResourceObj::ARENA>
tmp(arena());
tmp.add(&_virtual_memory_sites);
_virtual_memory_sites.set_head(tmp.head());
tmp.set_head(NULL);
_virtual_memory_sites_order = by_site;
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,425 +25,205 @@
#ifndef SHARE_VM_SERVICES_MEM_BASELINE_HPP
#define SHARE_VM_SERVICES_MEM_BASELINE_HPP
#if INCLUDE_NMT
#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "services/memPtr.hpp"
#include "services/memSnapshot.hpp"
#include "services/mallocSiteTable.hpp"
#include "services/mallocTracker.hpp"
#include "services/nmtCommon.hpp"
#include "services/virtualMemoryTracker.hpp"
#include "utilities/linkedlist.hpp"
// compare unsigned numbers
#define UNSIGNED_COMPARE(a, b) ((a > b) ? 1 : ((a == b) ? 0 : -1))
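// Illustration of the macro's three-way contract (not part of the change):
// it yields 1, 0 or -1, so it can be used directly in qsort-style comparators
// such as the sort functions in this file.
//   UNSIGNED_COMPARE(2u, 1u) ==  1
//   UNSIGNED_COMPARE(1u, 1u) ==  0
//   UNSIGNED_COMPARE(1u, 2u) == -1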
typedef LinkedListIterator<MallocSite> MallocSiteIterator;
typedef LinkedListIterator<VirtualMemoryAllocationSite> VirtualMemorySiteIterator;
typedef LinkedListIterator<ReservedMemoryRegion> VirtualMemoryAllocationIterator;
/*
* MallocCallsitePointer and VMCallsitePointer are used
* to baseline memory blocks with their callsite information.
* They are only available when detail tracking is turned
* on.
*/
/* baselined malloc record aggregated by callsite */
class MallocCallsitePointer : public MemPointer {
private:
size_t _count; // number of malloc invocations from this callsite
size_t _amount; // total amount of memory malloc-ed from this callsite
public:
MallocCallsitePointer() {
_count = 0;
_amount = 0;
}
MallocCallsitePointer(address pc) : MemPointer(pc) {
_count = 0;
_amount = 0;
}
MallocCallsitePointer& operator=(const MallocCallsitePointer& p) {
MemPointer::operator=(p);
_count = p.count();
_amount = p.amount();
return *this;
}
inline void inc(size_t size) {
_count ++;
_amount += size;
};
inline size_t count() const {
return _count;
}
inline size_t amount() const {
return _amount;
}
};
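// Illustrative sketch (not from the original sources): aggregating two malloc
// records that share a callsite pc; the sizes are made up.
void example_malloc_callsite(address pc) {
  MallocCallsitePointer site(pc);
  site.inc(128);  // first malloc from this callsite
  site.inc(64);   // second malloc from the same callsite
  assert(site.count() == 2 && site.amount() == 192, "aggregated totals");
}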
// baselined virtual memory record aggregated by callsite
class VMCallsitePointer : public MemPointer {
private:
size_t _count; // number of invocations from this callsite
size_t _reserved_amount; // total reserved amount
size_t _committed_amount; // total committed amount
public:
VMCallsitePointer() {
_count = 0;
_reserved_amount = 0;
_committed_amount = 0;
}
VMCallsitePointer(address pc) : MemPointer(pc) {
_count = 0;
_reserved_amount = 0;
_committed_amount = 0;
}
VMCallsitePointer& operator=(const VMCallsitePointer& p) {
MemPointer::operator=(p);
_count = p.count();
_reserved_amount = p.reserved_amount();
_committed_amount = p.committed_amount();
return *this;
}
inline void inc(size_t reserved, size_t committed) {
_count ++;
_reserved_amount += reserved;
_committed_amount += committed;
}
inline size_t count() const {
return _count;
}
inline size_t reserved_amount() const {
return _reserved_amount;
}
inline size_t committed_amount() const {
return _committed_amount;
}
};
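// Similar sketch for virtual memory: one reservation and a later commit
// recorded against the same callsite (sizes are hypothetical).
void example_vm_callsite(address pc) {
  VMCallsitePointer site(pc);
  site.inc(4 * M, 0);  // reserve 4M, nothing committed yet
  site.inc(0, 1 * M);  // commit 1M inside the reservation
  assert(site.count() == 2, "two operations");
  assert(site.reserved_amount() == 4 * M, "reserved total");
  assert(site.committed_amount() == 1 * M, "committed total");
}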
// maps a memory type flag to readable name
typedef struct _memType2Name {
MEMFLAGS _flag;
const char* _name;
} MemType2Name;
// This class aggregates malloc'd records by memory type
class MallocMem VALUE_OBJ_CLASS_SPEC {
private:
MEMFLAGS _type;
size_t _count;
size_t _amount;
public:
MallocMem() {
_type = mtNone;
_count = 0;
_amount = 0;
}
MallocMem(MEMFLAGS flags) {
assert(HAS_VALID_MEMORY_TYPE(flags), "no type");
_type = FLAGS_TO_MEMORY_TYPE(flags);
_count = 0;
_amount = 0;
}
inline void set_type(MEMFLAGS flag) {
_type = flag;
}
inline void clear() {
_count = 0;
_amount = 0;
_type = mtNone;
}
MallocMem& operator=(const MallocMem& m) {
assert(_type == m.type(), "different type");
_count = m.count();
_amount = m.amount();
return *this;
}
inline void inc(size_t amt) {
_amount += amt;
_count ++;
}
inline void reduce(size_t amt) {
assert(_amount >= amt, "Just check");
_amount -= amt;
}
inline void overwrite_counter(size_t count) {
_count = count;
}
inline MEMFLAGS type() const {
return _type;
}
inline bool is_type(MEMFLAGS flags) const {
return FLAGS_TO_MEMORY_TYPE(flags) == _type;
}
inline size_t count() const {
return _count;
}
inline size_t amount() const {
return _amount;
}
};
// This class records live arena's memory usage
class ArenaMem : public MallocMem {
public:
ArenaMem(MEMFLAGS typeflag): MallocMem(typeflag) {
}
ArenaMem() { }
};
// This class aggregates virtual memory by its memory type
class VMMem VALUE_OBJ_CLASS_SPEC {
private:
MEMFLAGS _type;
size_t _count;
size_t _reserved_amount;
size_t _committed_amount;
public:
VMMem() {
_type = mtNone;
_count = 0;
_reserved_amount = 0;
_committed_amount = 0;
}
VMMem(MEMFLAGS flags) {
assert(HAS_VALID_MEMORY_TYPE(flags), "no type");
_type = FLAGS_TO_MEMORY_TYPE(flags);
_count = 0;
_reserved_amount = 0;
_committed_amount = 0;
}
inline void clear() {
_type = mtNone;
_count = 0;
_reserved_amount = 0;
_committed_amount = 0;
}
inline void set_type(MEMFLAGS flags) {
_type = FLAGS_TO_MEMORY_TYPE(flags);
}
VMMem& operator=(const VMMem& m) {
assert(_type == m.type(), "different type");
_count = m.count();
_reserved_amount = m.reserved_amount();
_committed_amount = m.committed_amount();
return *this;
}
inline MEMFLAGS type() const {
return _type;
}
inline bool is_type(MEMFLAGS flags) const {
return FLAGS_TO_MEMORY_TYPE(flags) == _type;
}
inline void inc(size_t reserved_amt, size_t committed_amt) {
_reserved_amount += reserved_amt;
_committed_amount += committed_amt;
_count ++;
}
inline size_t count() const {
return _count;
}
inline size_t reserved_amount() const {
return _reserved_amount;
}
inline size_t committed_amount() const {
return _committed_amount;
}
};
#define NUMBER_OF_MEMORY_TYPE (mt_number_of_types + 1)
class BaselineReporter;
class BaselineComparisonReporter;
/*
* Baseline a memory snapshot.
* This class baselines the current memory snapshot. A memory baseline
* summarizes memory usage by memory type, and aggregates memory usage
* by callsites when detail tracking is on.
*/
class MemBaseline VALUE_OBJ_CLASS_SPEC {
friend class BaselineReporter;
friend class BaselineComparisonReporter;
public:
enum BaselineThreshold {
SIZE_THRESHOLD = K // Only allocations at or above this threshold are baselined.
};
enum BaselineType {
Not_baselined,
Summary_baselined,
Detail_baselined
};
enum SortingOrder {
by_address, // by memory address
by_size, // by memory size
by_site // by call site where the memory is allocated from
};
private:
// overall summaries
size_t _total_malloced;
size_t _total_vm_reserved;
size_t _total_vm_committed;
size_t _number_of_classes;
size_t _number_of_threads;
// All baseline data is stored in this arena
Arena* _arena;
// whether the snapshot has been properly baselined
bool _baselined;
// Summary information
MallocMemorySnapshot* _malloc_memory_snapshot;
VirtualMemorySnapshot* _virtual_memory_snapshot;
// within each memory type, memory falls into three categories: malloc'd, virtual (vm), and arena
MallocMem _malloc_data[NUMBER_OF_MEMORY_TYPE];
VMMem _vm_data[NUMBER_OF_MEMORY_TYPE];
ArenaMem _arena_data[NUMBER_OF_MEMORY_TYPE];
size_t _class_count;
// memory records that aggregate memory usage by callsites.
// only available when detail tracking is on.
MemPointerArray* _malloc_cs;
MemPointerArray* _vm_cs;
// virtual memory map
MemPointerArray* _vm_map;
// Allocation sites information
// Malloc allocation sites
LinkedListImpl<MallocSite, ResourceObj::ARENA>
_malloc_sites;
private:
static MemType2Name MemType2NameMap[NUMBER_OF_MEMORY_TYPE];
// All virtual memory allocations
LinkedListImpl<ReservedMemoryRegion, ResourceObj::ARENA>
_virtual_memory_allocations;
private:
// should not use copy constructor
MemBaseline(MemBaseline& copy) { ShouldNotReachHere(); }
// Virtual memory allocations by allocation sites, always in by_address
// order
LinkedListImpl<VirtualMemoryAllocationSite, ResourceObj::ARENA>
_virtual_memory_sites;
// check and block at a safepoint
static inline void check_safepoint(JavaThread* thr);
SortingOrder _malloc_sites_order;
SortingOrder _virtual_memory_sites_order;
BaselineType _baseline_type;
public:
// create a memory baseline
MemBaseline();
~MemBaseline();
inline bool baselined() const {
return _baselined;
MemBaseline():
_baseline_type(Not_baselined),
_class_count(0),
_arena(NULL),
_malloc_memory_snapshot(NULL),
_virtual_memory_snapshot(NULL),
_malloc_sites(NULL) {
}
MemBaseline& operator=(const MemBaseline& other);
~MemBaseline() {
reset();
if (_arena != NULL) {
delete _arena;
}
}
bool baseline(bool summaryOnly = true);
BaselineType baseline_type() const { return _baseline_type; }
MallocMemorySnapshot* malloc_memory_snapshot() const {
return _malloc_memory_snapshot;
}
VirtualMemorySnapshot* virtual_memory_snapshot() const {
return _virtual_memory_snapshot;
}
MallocSiteIterator malloc_sites(SortingOrder order);
VirtualMemorySiteIterator virtual_memory_sites(SortingOrder order);
// Virtual memory allocation iterator always returns in virtual memory
// base address order.
VirtualMemoryAllocationIterator virtual_memory_allocations() {
assert(!_virtual_memory_allocations.is_empty(), "Not detail baseline");
return VirtualMemoryAllocationIterator(_virtual_memory_allocations.head());
}
// Total reserved memory = total malloc'd memory + total reserved virtual
// memory
size_t total_reserved_memory() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
assert(_virtual_memory_snapshot != NULL, "No virtual memory snapshot");
assert(_malloc_memory_snapshot != NULL, "No malloc memory snapshot");
size_t amount = _malloc_memory_snapshot->total() +
_virtual_memory_snapshot->total_reserved();
return amount;
}
// Total committed memory = total malloc'd memory + total committed
// virtual memory
size_t total_committed_memory() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
assert(_virtual_memory_snapshot != NULL,
"Not a snapshot");
size_t amount = _malloc_memory_snapshot->total() +
_virtual_memory_snapshot->total_committed();
return amount;
}
size_t total_arena_memory() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
assert(_malloc_memory_snapshot != NULL, "Not yet baselined");
return _malloc_memory_snapshot->total_arena();
}
size_t malloc_tracking_overhead() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
return _malloc_memory_snapshot->malloc_overhead()->size();
}
const MallocMemory* malloc_memory(MEMFLAGS flag) const {
assert(_malloc_memory_snapshot != NULL, "Not a snapshot");
return _malloc_memory_snapshot->by_type(flag);
}
const VirtualMemory* virtual_memory(MEMFLAGS flag) const {
assert(_virtual_memory_snapshot != NULL, "Not a snapshot");
return _virtual_memory_snapshot->by_type(flag);
}
size_t class_count() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
return _class_count;
}
size_t thread_count() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
assert(_malloc_memory_snapshot != NULL, "Baselined?");
return _malloc_memory_snapshot->thread_count();
}
// reset the baseline for reuse
void clear();
void reset() {
_baseline_type = Not_baselined;
_malloc_memory_snapshot = NULL;
_virtual_memory_snapshot = NULL;
_class_count = 0;
// baseline the snapshot
bool baseline(MemSnapshot& snapshot, bool summary_only = true);
_malloc_sites = NULL;
_virtual_memory_sites = NULL;
_virtual_memory_allocations = NULL;
bool baseline(const MemPointerArray* malloc_records,
const MemPointerArray* vm_records,
bool summary_only = true);
// total malloc'd memory of specified memory type
inline size_t malloc_amount(MEMFLAGS flag) const {
return _malloc_data[flag2index(flag)].amount();
if (_arena != NULL) {
_arena->destruct_contents();
}
}
// number of malloc'd memory blocks of specified memory type
inline size_t malloc_count(MEMFLAGS flag) const {
return _malloc_data[flag2index(flag)].count();
}
// total memory used by arenas of specified memory type
inline size_t arena_amount(MEMFLAGS flag) const {
return _arena_data[flag2index(flag)].amount();
}
// number of arenas of specified memory type
inline size_t arena_count(MEMFLAGS flag) const {
return _arena_data[flag2index(flag)].count();
}
// total reserved memory of specified memory type
inline size_t reserved_amount(MEMFLAGS flag) const {
return _vm_data[flag2index(flag)].reserved_amount();
}
// total committed memory of specified memory type
inline size_t committed_amount(MEMFLAGS flag) const {
return _vm_data[flag2index(flag)].committed_amount();
}
// total memory (malloc'd + mmap'd + arena) of specified
// memory type
inline size_t total_amount(MEMFLAGS flag) const {
int index = flag2index(flag);
return _malloc_data[index].amount() +
_vm_data[index].reserved_amount() +
_arena_data[index].amount();
}
/* overall summaries */
// total malloc'd memory in snapshot
inline size_t total_malloc_amount() const {
return _total_malloced;
}
// total mmap'd memory in snapshot
inline size_t total_reserved_amount() const {
return _total_vm_reserved;
}
// total committed memory in snapshot
inline size_t total_committed_amount() const {
return _total_vm_committed;
}
// number of loaded classes
inline size_t number_of_classes() const {
return _number_of_classes;
}
// number of running threads
inline size_t number_of_threads() const {
return _number_of_threads;
}
// lookup human readable name of a memory type
static const char* type2name(MEMFLAGS type);
private:
// convert a memory flag to an index into the mapping table
int flag2index(MEMFLAGS flag) const;
// Baseline summary information
bool baseline_summary();
// reset baseline values
void reset();
// Baseline allocation sites (detail tracking only)
bool baseline_allocation_sites();
// summarize the records in global snapshot
bool baseline_malloc_summary(const MemPointerArray* malloc_records);
bool baseline_vm_summary(const MemPointerArray* vm_records);
bool baseline_malloc_details(const MemPointerArray* malloc_records);
bool baseline_vm_details(const MemPointerArray* vm_records);
// Aggregate virtual memory allocation by allocation sites
bool aggregate_virtual_memory_allocation_sites();
// print a line of malloc'd memory aggregated by callsite
void print_malloc_callsite(outputStream* st, address pc, size_t size,
size_t count, int diff_amt, int diff_count) const;
// print a line of mmap'd memory aggregated by callsite
void print_vm_callsite(outputStream* st, address pc, size_t rsz,
size_t csz, int diff_rsz, int diff_csz) const;
Arena* arena() { return _arena; }
// sorting functions for raw records
static int malloc_sort_by_pc(const void* p1, const void* p2);
static int malloc_sort_by_addr(const void* p1, const void* p2);
// Sorting allocation sites in different orders
// Sort allocation sites in size order
void malloc_sites_to_size_order();
// Sort allocation sites in call site address order
void malloc_sites_to_allocation_site_order();
private:
// sorting functions for baselined records
static int bl_malloc_sort_by_size(const void* p1, const void* p2);
static int bl_vm_sort_by_size(const void* p1, const void* p2);
static int bl_malloc_sort_by_pc(const void* p1, const void* p2);
static int bl_vm_sort_by_pc(const void* p1, const void* p2);
// Sort allocation sites in reserved size order
void virtual_memory_sites_to_size_order();
// Sort allocation sites in call site address order
void virtual_memory_sites_to_reservation_site_order();
};
#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MEM_BASELINE_HPP

@ -1,509 +0,0 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_SERVICES_MEM_PTR_HPP
#define SHARE_VM_SERVICES_MEM_PTR_HPP
#include "memory/allocation.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
/*
* global sequence generator that generates sequence numbers to serialize
* memory records.
*/
class SequenceGenerator : AllStatic {
public:
static jint next();
// peek last sequence number
static jint peek() {
return _seq_number;
}
// reset sequence number
static void reset() {
assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
_seq_number = 1;
_generation ++;
};
static unsigned long current_generation() { return _generation; }
NOT_PRODUCT(static jint max_seq_num() { return _max_seq_number; })
private:
static volatile jint _seq_number;
static volatile unsigned long _generation;
NOT_PRODUCT(static jint _max_seq_number; )
};
/*
* The following classes are used to hold memory activity records in different stages.
* MemPointer
* |--------MemPointerRecord
* |
* |----MemPointerRecordEx
* | |
* | |-------SeqMemPointerRecordEx
* |
* |----SeqMemPointerRecord
* |
* |----VMMemRegion
* |
* |-----VMMemRegionEx
*
*
* prefix 'Seq' - sequenced, the record contains a sequence number
* suffix 'Ex' - extension, the record contains a caller's pc
*
* per-thread recorder : SeqMemPointerRecord(Ex)
* snapshot staging : SeqMemPointerRecord(Ex)
* snapshot : MemPointerRecord(Ex) and VMMemRegion(Ex)
*
*/
/*
* class that wraps the address of a memory block;
* the memory pointer either points to a malloc'd
* memory block or an mmap'd memory block
*/
class MemPointer VALUE_OBJ_CLASS_SPEC {
public:
MemPointer(): _addr(0) { }
MemPointer(address addr): _addr(addr) { }
MemPointer(const MemPointer& copy_from) {
_addr = copy_from.addr();
}
inline address addr() const {
return _addr;
}
inline operator address() const {
return addr();
}
inline bool operator == (const MemPointer& other) const {
return addr() == other.addr();
}
inline MemPointer& operator = (const MemPointer& other) {
_addr = other.addr();
return *this;
}
protected:
inline void set_addr(address addr) { _addr = addr; }
protected:
// memory address
address _addr;
};
/* MemPointerRecord records an activity and associated
* attributes on a memory block.
*/
class MemPointerRecord : public MemPointer {
private:
MEMFLAGS _flags;
size_t _size;
public:
/* extension of MemoryType enum
* see share/vm/memory/allocation.hpp for details.
*
* The tag values are associated with sorting orders, so be
* careful if changes are needed.
* The allocation records should be sorted ahead of tagging
* records, which in turn should be sorted ahead of deallocation records.
*/
enum MemPointerTags {
tag_alloc = 0x0001, // malloc or reserve record
tag_commit = 0x0002, // commit record
tag_type = 0x0003, // tag virtual memory to a memory type
tag_uncommit = 0x0004, // uncommit record
tag_release = 0x0005, // free or release record
tag_size = 0x0006, // arena size
tag_masks = 0x0007, // all tag bits
vmBit = 0x0008
};
/* helper functions to interpret the tagging flags */
inline static bool is_allocation_record(MEMFLAGS flags) {
return (flags & tag_masks) == tag_alloc;
}
inline static bool is_deallocation_record(MEMFLAGS flags) {
return (flags & tag_masks) == tag_release;
}
inline static bool is_arena_record(MEMFLAGS flags) {
return (flags & (otArena | tag_size)) == otArena;
}
inline static bool is_arena_memory_record(MEMFLAGS flags) {
return (flags & (otArena | tag_size)) == (otArena | tag_size);
}
inline static bool is_virtual_memory_record(MEMFLAGS flags) {
return (flags & vmBit) != 0;
}
inline static bool is_virtual_memory_reserve_record(MEMFLAGS flags) {
return (flags & 0x0F) == (tag_alloc | vmBit);
}
inline static bool is_virtual_memory_commit_record(MEMFLAGS flags) {
return (flags & 0x0F) == (tag_commit | vmBit);
}
inline static bool is_virtual_memory_uncommit_record(MEMFLAGS flags) {
return (flags & 0x0F) == (tag_uncommit | vmBit);
}
inline static bool is_virtual_memory_release_record(MEMFLAGS flags) {
return (flags & 0x0F) == (tag_release | vmBit);
}
inline static bool is_virtual_memory_type_record(MEMFLAGS flags) {
return (flags & 0x0F) == (tag_type | vmBit);
}
/* tagging flags */
inline static MEMFLAGS malloc_tag() { return tag_alloc; }
inline static MEMFLAGS free_tag() { return tag_release; }
inline static MEMFLAGS arena_size_tag() { return tag_size | otArena; }
inline static MEMFLAGS virtual_memory_tag() { return vmBit; }
inline static MEMFLAGS virtual_memory_reserve_tag() { return (tag_alloc | vmBit); }
inline static MEMFLAGS virtual_memory_commit_tag() { return (tag_commit | vmBit); }
inline static MEMFLAGS virtual_memory_uncommit_tag(){ return (tag_uncommit | vmBit); }
inline static MEMFLAGS virtual_memory_release_tag() { return (tag_release | vmBit); }
inline static MEMFLAGS virtual_memory_type_tag() { return (tag_type | vmBit); }
public:
MemPointerRecord(): _size(0), _flags(mtNone) { }
MemPointerRecord(address addr, MEMFLAGS memflags, size_t size = 0):
MemPointer(addr), _flags(memflags), _size(size) { }
MemPointerRecord(const MemPointerRecord& copy_from):
MemPointer(copy_from), _flags(copy_from.flags()),
_size(copy_from.size()) {
}
/* MemPointerRecord is not sequenced; it always returns
* 0 to indicate non-sequenced
*/
virtual jint seq() const { return 0; }
inline size_t size() const { return _size; }
inline void set_size(size_t size) { _size = size; }
inline MEMFLAGS flags() const { return _flags; }
inline void set_flags(MEMFLAGS flags) { _flags = flags; }
MemPointerRecord& operator= (const MemPointerRecord& ptr) {
MemPointer::operator=(ptr);
_flags = ptr.flags();
#ifdef ASSERT
if (IS_ARENA_OBJ(_flags)) {
assert(!is_vm_pointer(), "wrong flags");
assert((_flags & ot_masks) == otArena, "wrong flags");
}
#endif
_size = ptr.size();
return *this;
}
// if the pointer represents a malloc-ed memory address
inline bool is_malloced_pointer() const {
return !is_vm_pointer();
}
// if the pointer represents a virtual memory address
inline bool is_vm_pointer() const {
return is_virtual_memory_record(_flags);
}
// if this record records a 'malloc' or virtual memory
// 'reserve' call
inline bool is_allocation_record() const {
return is_allocation_record(_flags);
}
// if this record records the size information of an arena
inline bool is_arena_memory_record() const {
return is_arena_memory_record(_flags);
}
// if this pointer represents an address to an arena object
inline bool is_arena_record() const {
return is_arena_record(_flags);
}
// if this record represents the size information of a specific arena
inline bool is_memory_record_of_arena(const MemPointerRecord* arena_rc) {
assert(is_arena_memory_record(), "not size record");
assert(arena_rc->is_arena_record(), "not arena record");
return (arena_rc->addr() + sizeof(void*)) == addr();
}
// if this record records a 'free' or virtual memory 'free' call
inline bool is_deallocation_record() const {
return is_deallocation_record(_flags);
}
// if this record records a virtual memory 'commit' call
inline bool is_commit_record() const {
return is_virtual_memory_commit_record(_flags);
}
// if this record records a virtual memory 'uncommit' call
inline bool is_uncommit_record() const {
return is_virtual_memory_uncommit_record(_flags);
}
// if this record is a tagging record of a virtual memory block
inline bool is_type_tagging_record() const {
return is_virtual_memory_type_record(_flags);
}
// if the two memory pointer records actually represent the same
// memory block
inline bool is_same_region(const MemPointerRecord* other) const {
return (addr() == other->addr() && size() == other->size());
}
// if this memory region fully contains another one
inline bool contains_region(const MemPointerRecord* other) const {
return contains_region(other->addr(), other->size());
}
// if this memory region fully contains specified memory range
inline bool contains_region(address add, size_t sz) const {
return (addr() <= add && addr() + size() >= add + sz);
}
inline bool contains_address(address add) const {
return (addr() <= add && addr() + size() > add);
}
// if this memory region overlaps another region
inline bool overlaps_region(const MemPointerRecord* other) const {
assert(other != NULL, "Just check");
assert(size() > 0 && other->size() > 0, "empty range");
return contains_address(other->addr()) ||
contains_address(other->addr() + other->size() - 1) || // exclude end address
other->contains_address(addr()) ||
other->contains_address(addr() + size() - 1); // exclude end address
}
};
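// Sketch (not from the original sources) of how the tagging bits above compose
// with a memory type from allocation.hpp and are later interpreted; the flag
// arithmetic follows the old NMT bit layout.
void tagging_sketch(address addr) {
  MEMFLAGS flags = MemPointerRecord::virtual_memory_reserve_tag() | mtThread;
  MemPointerRecord rec(addr, flags, 64 * K);
  assert(rec.is_vm_pointer(), "vm bit is set");
  assert(rec.is_allocation_record(), "a reserve is an allocation record");
  assert(FLAGS_TO_MEMORY_TYPE(flags) == mtThread, "memory type is preserved");
}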
// MemPointerRecordEx also records the callsite pc from which
// the memory block was allocated
class MemPointerRecordEx : public MemPointerRecord {
private:
address _pc; // callsite pc
public:
MemPointerRecordEx(): _pc(0) { }
MemPointerRecordEx(address addr, MEMFLAGS memflags, size_t size = 0, address pc = 0):
MemPointerRecord(addr, memflags, size), _pc(pc) {}
MemPointerRecordEx(const MemPointerRecordEx& copy_from):
MemPointerRecord(copy_from), _pc(copy_from.pc()) {}
inline address pc() const { return _pc; }
void init(const MemPointerRecordEx* mpe) {
MemPointerRecord::operator=(*mpe);
_pc = mpe->pc();
}
void init(const MemPointerRecord* mp) {
MemPointerRecord::operator=(*mp);
_pc = 0;
}
};
// a virtual memory region. The region can represent a reserved
// virtual memory region or a committed memory region
class VMMemRegion : public MemPointerRecord {
public:
VMMemRegion() { }
void init(const MemPointerRecord* mp) {
assert(mp->is_vm_pointer(), "Sanity check");
_addr = mp->addr();
set_size(mp->size());
set_flags(mp->flags());
}
VMMemRegion& operator=(const VMMemRegion& other) {
MemPointerRecord::operator=(other);
return *this;
}
inline bool is_reserved_region() const {
return is_allocation_record();
}
inline bool is_committed_region() const {
return is_commit_record();
}
/* base address of this virtual memory range */
inline address base() const {
return addr();
}
/* tag this virtual memory range to the specified memory type */
inline void tag(MEMFLAGS f) {
set_flags(flags() | (f & mt_masks));
}
// expand this region to also cover specified range.
// The range has to be on either end of the memory region.
void expand_region(address addr, size_t sz) {
if (addr < base()) {
assert(addr + sz == base(), "Sanity check");
_addr = addr;
set_size(size() + sz);
} else {
assert(base() + size() == addr, "Sanity check");
set_size(size() + sz);
}
}
// exclude the specified address range from this region.
// The excluded memory range has to be on either end of this memory
// region.
inline void exclude_region(address add, size_t sz) {
assert(is_reserved_region() || is_committed_region(), "Sanity check");
assert(addr() != NULL && size() != 0, "Sanity check");
assert(add >= addr() && add < addr() + size(), "Sanity check");
assert(add == addr() || (add + sz) == (addr() + size()),
"exclude in the middle");
if (add == addr()) {
set_addr(add + sz);
set_size(size() - sz);
} else {
set_size(size() - sz);
}
}
};
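// Sketch of the expand/exclude operations above applied to a committed region
// (addresses and sizes are made up for illustration).
void region_ops_sketch(address base) {
  MemPointerRecord commit_rec(base, MemPointerRecord::virtual_memory_commit_tag(), 2 * K);
  VMMemRegion region;
  region.init(&commit_rec);
  region.expand_region(base + 2 * K, K);  // grow by 1K at the high end
  assert(region.size() == 3 * K, "expanded");
  region.exclude_region(base, K);         // trim 1K from the low end
  assert(region.base() == base + K && region.size() == 2 * K, "excluded");
}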
class VMMemRegionEx : public VMMemRegion {
private:
jint _seq; // sequence number
public:
VMMemRegionEx(): _pc(0) { }
void init(const MemPointerRecordEx* mpe) {
VMMemRegion::init(mpe);
_pc = mpe->pc();
}
void init(const MemPointerRecord* mpe) {
VMMemRegion::init(mpe);
_pc = 0;
}
VMMemRegionEx& operator=(const VMMemRegionEx& other) {
VMMemRegion::operator=(other);
_pc = other.pc();
return *this;
}
inline address pc() const { return _pc; }
private:
address _pc;
};
/*
* Sequenced memory record
*/
class SeqMemPointerRecord : public MemPointerRecord {
private:
jint _seq; // sequence number
public:
SeqMemPointerRecord(): _seq(0){ }
SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size, jint seq)
: MemPointerRecord(addr, flags, size), _seq(seq) {
}
SeqMemPointerRecord(const SeqMemPointerRecord& copy_from)
: MemPointerRecord(copy_from) {
_seq = copy_from.seq();
}
SeqMemPointerRecord& operator= (const SeqMemPointerRecord& ptr) {
MemPointerRecord::operator=(ptr);
_seq = ptr.seq();
return *this;
}
inline jint seq() const {
return _seq;
}
};
class SeqMemPointerRecordEx : public MemPointerRecordEx {
private:
jint _seq; // sequence number
public:
SeqMemPointerRecordEx(): _seq(0) { }
SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size,
jint seq, address pc):
MemPointerRecordEx(addr, flags, size, pc), _seq(seq) {
}
SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from)
: MemPointerRecordEx(copy_from) {
_seq = copy_from.seq();
}
SeqMemPointerRecordEx& operator= (const SeqMemPointerRecordEx& ptr) {
MemPointerRecordEx::operator=(ptr);
_seq = ptr.seq();
return *this;
}
inline jint seq() const {
return _seq;
}
};
#endif // SHARE_VM_SERVICES_MEM_PTR_HPP

@ -1,306 +0,0 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
#define SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
#include "memory/allocation.hpp"
#include "services/memPtr.hpp"
class MemPtr;
class MemRecorder;
class ArenaInfo;
class MemSnapshot;
extern "C" {
typedef int (*FN_SORT)(const void *, const void *);
}
// Memory pointer array interface. This array is used by NMT to hold
// various memory block information.
// The memory pointer arrays are usually walked with their iterators.
class MemPointerArray : public CHeapObj<mtNMT> {
public:
virtual ~MemPointerArray() { }
// return true if storage for the data could not be allocated
virtual bool out_of_memory() const = 0;
virtual bool is_empty() const = 0;
virtual bool is_full() = 0;
virtual int length() const = 0;
virtual void clear() = 0;
virtual bool append(MemPointer* ptr) = 0;
virtual bool insert_at(MemPointer* ptr, int pos) = 0;
virtual bool remove_at(int pos) = 0;
virtual MemPointer* at(int index) const = 0;
virtual void sort(FN_SORT fn) = 0;
virtual size_t instance_size() const = 0;
virtual bool shrink() = 0;
NOT_PRODUCT(virtual int capacity() const = 0;)
};
// Iterator interface
class MemPointerArrayIterator VALUE_OBJ_CLASS_SPEC {
public:
// return the pointer at current position
virtual MemPointer* current() const = 0;
// return the next pointer and advance current position
virtual MemPointer* next() = 0;
// return next pointer without advancing current position
virtual MemPointer* peek_next() const = 0;
// return previous pointer without changing current position
virtual MemPointer* peek_prev() const = 0;
// remove the pointer at current position
virtual void remove() = 0;
// insert the pointer at current position
virtual bool insert(MemPointer* ptr) = 0;
// insert specified element after current position and
// move current position to newly inserted position
virtual bool insert_after(MemPointer* ptr) = 0;
};
// implementation class
class MemPointerArrayIteratorImpl : public MemPointerArrayIterator {
protected:
MemPointerArray* _array;
int _pos;
public:
MemPointerArrayIteratorImpl(MemPointerArray* arr) {
assert(arr != NULL, "Parameter check");
_array = arr;
_pos = 0;
}
virtual MemPointer* current() const {
if (_pos < _array->length()) {
return _array->at(_pos);
}
return NULL;
}
virtual MemPointer* next() {
if (_pos + 1 < _array->length()) {
return _array->at(++_pos);
}
_pos = _array->length();
return NULL;
}
virtual MemPointer* peek_next() const {
if (_pos + 1 < _array->length()) {
return _array->at(_pos + 1);
}
return NULL;
}
virtual MemPointer* peek_prev() const {
if (_pos > 0) {
return _array->at(_pos - 1);
}
return NULL;
}
virtual void remove() {
if (_pos < _array->length()) {
_array->remove_at(_pos);
}
}
virtual bool insert(MemPointer* ptr) {
return _array->insert_at(ptr, _pos);
}
virtual bool insert_after(MemPointer* ptr) {
if (_array->insert_at(ptr, _pos + 1)) {
_pos ++;
return true;
}
return false;
}
};
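// Sketch of the usual walking pattern over a MemPointerArray with this
// iterator (mirrors how the snapshot code walks its records).
void walk_records_sketch(MemPointerArray* records) {
  MemPointerArrayIteratorImpl itr(records);
  MemPointerRecord* rec = (MemPointerRecord*)itr.current();
  while (rec != NULL) {
    // ... examine rec->addr(), rec->size(), rec->flags() here ...
    rec = (MemPointerRecord*)itr.next();
  }
}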
// Memory pointer array implementation.
// This implementation provides an expandable array
#define DEFAULT_PTR_ARRAY_SIZE 1024
template <class E> class MemPointerArrayImpl : public MemPointerArray {
private:
int _max_size;
int _size;
bool _init_elements;
E* _data;
public:
MemPointerArrayImpl(int initial_size = DEFAULT_PTR_ARRAY_SIZE, bool init_elements = true):
_max_size(initial_size), _size(0), _init_elements(init_elements) {
_data = (E*)raw_allocate(sizeof(E), initial_size);
if (_init_elements) {
for (int index = 0; index < _max_size; index ++) {
::new ((void*)&_data[index]) E();
}
}
}
virtual ~MemPointerArrayImpl() {
if (_data != NULL) {
raw_free(_data);
}
}
public:
bool out_of_memory() const {
return (_data == NULL);
}
size_t instance_size() const {
return sizeof(MemPointerArrayImpl<E>) + _max_size * sizeof(E);
}
bool is_empty() const {
assert(_data != NULL, "Just check");
return _size == 0;
}
bool is_full() {
assert(_data != NULL, "Just check");
if (_size < _max_size) {
return false;
} else {
return !expand_array();
}
}
int length() const {
assert(_data != NULL, "Just check");
return _size;
}
NOT_PRODUCT(int capacity() const { return _max_size; })
void clear() {
assert(_data != NULL, "Just check");
_size = 0;
}
bool append(MemPointer* ptr) {
assert(_data != NULL, "Just check");
if (is_full()) {
return false;
}
_data[_size ++] = *(E*)ptr;
return true;
}
bool insert_at(MemPointer* ptr, int pos) {
assert(_data != NULL, "Just check");
if (is_full()) {
return false;
}
for (int index = _size; index > pos; index --) {
_data[index] = _data[index - 1];
}
_data[pos] = *(E*)ptr;
_size ++;
return true;
}
bool remove_at(int pos) {
assert(_data != NULL, "Just check");
if (pos < 0 || pos >= _size) { // reject out-of-range positions
return false;
}
-- _size;
for (int index = pos; index < _size; index ++) {
_data[index] = _data[index + 1];
}
return true;
}
MemPointer* at(int index) const {
assert(_data != NULL, "Just check");
assert(index >= 0 && index < _size, "illegal index");
return &_data[index];
}
bool shrink() {
float used = ((float)_size) / ((float)_max_size);
if (used < 0.40) {
E* old_ptr = _data;
int new_size = ((_max_size) / (2 * DEFAULT_PTR_ARRAY_SIZE) + 1) * DEFAULT_PTR_ARRAY_SIZE;
_data = (E*)raw_reallocate(_data, sizeof(E), new_size);
if (_data == NULL) {
_data = old_ptr;
return false;
} else {
_max_size = new_size;
return true;
}
}
return false;
}
void sort(FN_SORT fn) {
assert(_data != NULL, "Just check");
qsort((void*)_data, _size, sizeof(E), fn);
}
private:
bool expand_array() {
assert(_data != NULL, "Not yet allocated");
E* old_ptr = _data;
if ((_data = (E*)raw_reallocate((void*)_data, sizeof(E),
_max_size + DEFAULT_PTR_ARRAY_SIZE)) == NULL) {
_data = old_ptr;
return false;
} else {
_max_size += DEFAULT_PTR_ARRAY_SIZE;
if (_init_elements) {
for (int index = _size; index < _max_size; index ++) {
::new ((void*)&_data[index]) E();
}
}
return true;
}
}
void* raw_allocate(size_t elementSize, int items) {
return os::malloc(elementSize * items, mtNMT);
}
void* raw_reallocate(void* ptr, size_t elementSize, int items) {
return os::realloc(ptr, elementSize * items, mtNMT);
}
void raw_free(void* ptr) {
os::free(ptr, mtNMT);
}
};
#endif // SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP

@ -1,171 +0,0 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/atomic.inline.hpp"
#include "services/memBaseline.hpp"
#include "services/memRecorder.hpp"
#include "services/memPtr.hpp"
#include "services/memTracker.hpp"
MemPointer* SequencedRecordIterator::next_record() {
MemPointerRecord* itr_cur = (MemPointerRecord*)_itr.current();
if (itr_cur == NULL) {
return itr_cur;
}
MemPointerRecord* itr_next = (MemPointerRecord*)_itr.next();
// don't collapse virtual memory records
while (itr_next != NULL && !itr_cur->is_vm_pointer() &&
!itr_next->is_vm_pointer() &&
same_kind(itr_cur, itr_next)) {
itr_cur = itr_next;
itr_next = (MemPointerRecord*)_itr.next();
}
return itr_cur;
}
volatile jint MemRecorder::_instance_count = 0;
MemRecorder::MemRecorder() {
assert(MemTracker::is_on(), "Native memory tracking is off");
Atomic::inc(&_instance_count);
set_generation();
if (MemTracker::track_callsite()) {
_pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecordEx,
DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
} else {
_pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecord,
DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
}
_next = NULL;
if (_pointer_records != NULL) {
// record the recorder itself
address pc = CURRENT_PC;
record((address)this, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
sizeof(MemRecorder), SequenceGenerator::next(), pc);
record((address)_pointer_records, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
_pointer_records->instance_size(), SequenceGenerator::next(), pc);
}
}
MemRecorder::~MemRecorder() {
if (_pointer_records != NULL) {
if (MemTracker::is_on()) {
MemTracker::record_free((address)_pointer_records, mtNMT);
MemTracker::record_free((address)this, mtNMT);
}
delete _pointer_records;
}
// delete all linked recorders
while (_next != NULL) {
MemRecorder* tmp = _next;
_next = _next->next();
tmp->set_next(NULL);
delete tmp;
}
Atomic::dec(&_instance_count);
}
// Sorting order:
// 1. memory block address
// 2. mem pointer record tags
// 3. sequence number
int MemRecorder::sort_record_fn(const void* e1, const void* e2) {
const MemPointerRecord* p1 = (const MemPointerRecord*)e1;
const MemPointerRecord* p2 = (const MemPointerRecord*)e2;
int delta = UNSIGNED_COMPARE(p1->addr(), p2->addr());
if (delta == 0) {
int df = UNSIGNED_COMPARE((p1->flags() & MemPointerRecord::tag_masks),
(p2->flags() & MemPointerRecord::tag_masks));
if (df == 0) {
assert(p1->seq() != p2->seq(), "dup seq");
return p1->seq() - p2->seq();
} else {
return df;
}
} else {
return delta;
}
}
bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, jint seq, address pc) {
assert(seq > 0, "No sequence number");
#ifdef ASSERT
if (MemPointerRecord::is_virtual_memory_record(flags)) {
assert((flags & MemPointerRecord::tag_masks) != 0, "bad virtual memory record");
} else {
assert((flags & MemPointerRecord::tag_masks) == MemPointerRecord::malloc_tag() ||
(flags & MemPointerRecord::tag_masks) == MemPointerRecord::free_tag() ||
IS_ARENA_OBJ(flags),
"bad malloc record");
}
// a recorder should only hold records within the same generation
unsigned long cur_generation = SequenceGenerator::current_generation();
assert(cur_generation == _generation,
"this thread did not enter sync point");
#endif
if (MemTracker::track_callsite()) {
SeqMemPointerRecordEx ap(p, flags, size, seq, pc);
debug_only(check_dup_seq(ap.seq());)
return _pointer_records->append(&ap);
} else {
SeqMemPointerRecord ap(p, flags, size, seq);
debug_only(check_dup_seq(ap.seq());)
return _pointer_records->append(&ap);
}
}
// iterator for alloc pointers
SequencedRecordIterator MemRecorder::pointer_itr() {
assert(_pointer_records != NULL, "just check");
_pointer_records->sort((FN_SORT)sort_record_fn);
return SequencedRecordIterator(_pointer_records);
}
void MemRecorder::set_generation() {
_generation = SequenceGenerator::current_generation();
}
#ifdef ASSERT
void MemRecorder::check_dup_seq(jint seq) const {
MemPointerArrayIteratorImpl itr(_pointer_records);
MemPointerRecord* rc = (MemPointerRecord*)itr.current();
while (rc != NULL) {
assert(rc->seq() != seq, "dup seq");
rc = (MemPointerRecord*)itr.next();
}
}
#endif

@ -1,271 +0,0 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_SERVICES_MEM_RECORDER_HPP
#define SHARE_VM_SERVICES_MEM_RECORDER_HPP
#include "memory/allocation.hpp"
#include "runtime/os.hpp"
#include "services/memPtrArray.hpp"
class MemSnapshot;
class MemTracker;
class MemTrackWorker;
// Fixed size memory pointer array implementation
template <class E, int SIZE> class FixedSizeMemPointerArray :
public MemPointerArray {
// This implementation is for memory recorder only
friend class MemRecorder;
private:
E _data[SIZE];
int _size;
protected:
FixedSizeMemPointerArray(bool init_elements = false):
_size(0){
if (init_elements) {
for (int index = 0; index < SIZE; index ++) {
::new ((void*)&_data[index]) E();
}
}
}
void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
// the instance is part of a MemRecorder and needs to be tagged with 'otNMTRecorder'
// to avoid recursion
return os::malloc(size, (mtNMT | otNMTRecorder));
}
void* operator new(size_t size) throw() {
assert(false, "use nothrow version");
return NULL;
}
void operator delete(void* p) {
os::free(p, (mtNMT | otNMTRecorder));
}
// instance size
inline size_t instance_size() const {
return sizeof(FixedSizeMemPointerArray<E, SIZE>);
}
NOT_PRODUCT(int capacity() const { return SIZE; })
public:
// implementation of public interface
bool out_of_memory() const { return false; }
bool is_empty() const { return _size == 0; }
bool is_full() { return length() >= SIZE; }
int length() const { return _size; }
void clear() {
_size = 0;
}
bool append(MemPointer* ptr) {
if (is_full()) return false;
_data[_size ++] = *(E*)ptr;
return true;
}
virtual bool insert_at(MemPointer* p, int pos) {
assert(false, "append only");
return false;
}
virtual bool remove_at(int pos) {
assert(false, "not supported");
return false;
}
MemPointer* at(int index) const {
assert(index >= 0 && index < length(),
"parameter check");
return ((E*)&_data[index]);
}
void sort(FN_SORT fn) {
qsort((void*)_data, _size, sizeof(E), fn);
}
bool shrink() {
return false;
}
};
// This iterator requires a pre-sorted MemPointerArray, sorted by:
// 1. address
// 2. allocation type
// 3. sequence number
// While walking the array, the iterator collapses pointers with the same
// address and allocation type, and only returns the one with the highest
// sequence number.
//
// This is a read-only iterator; update methods assert.
class SequencedRecordIterator : public MemPointerArrayIterator {
private:
MemPointerArrayIteratorImpl _itr;
MemPointer* _cur;
public:
SequencedRecordIterator(const MemPointerArray* arr):
_itr(const_cast<MemPointerArray*>(arr)) {
_cur = next_record();
}
SequencedRecordIterator(const SequencedRecordIterator& itr):
_itr(itr._itr) {
_cur = next_record();
}
// return the pointer at current position
virtual MemPointer* current() const {
return _cur;
};
// return the next pointer and advance current position
virtual MemPointer* next() {
_cur = next_record();
return _cur;
}
// return the next pointer without advancing current position
virtual MemPointer* peek_next() const {
assert(false, "not implemented");
return NULL;
}
// return the previous pointer without changing current position
virtual MemPointer* peek_prev() const {
assert(false, "not implemented");
return NULL;
}
// remove the pointer at current position
virtual void remove() {
assert(false, "read-only iterator");
};
// insert the pointer at current position
virtual bool insert(MemPointer* ptr) {
assert(false, "read-only iterator");
return false;
}
virtual bool insert_after(MemPointer* ptr) {
assert(false, "read-only iterator");
return false;
}
private:
// collapse records of the 'same kind' and return the record of that kind
// with the highest sequence number
MemPointer* next_record();
// Test if the two records are the same kind: the same memory block and allocation
// type.
inline bool same_kind(const MemPointerRecord* p1, const MemPointerRecord* p2) const {
assert(!p1->is_vm_pointer() && !p2->is_vm_pointer(), "malloc pointer only");
return (p1->addr() == p2->addr() &&
(p1->flags() &MemPointerRecord::tag_masks) ==
(p2->flags() & MemPointerRecord::tag_masks));
}
};
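// Sketch of the collapsing rule described above on a tiny pre-sorted array
// (contents are hypothetical): two malloc records for the same address
// collapse to the one with the higher sequence number.
void collapse_sketch() {
  MemPointerArrayImpl<SeqMemPointerRecord> arr;
  SeqMemPointerRecord oldest((address)0x1000, MemPointerRecord::malloc_tag(), 64, 3);
  SeqMemPointerRecord latest((address)0x1000, MemPointerRecord::malloc_tag(), 64, 7);
  arr.append(&oldest);
  arr.append(&latest);
  SequencedRecordIterator itr(&arr);
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  assert(cur != NULL && cur->seq() == 7, "latest record wins");
}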
#define DEFAULT_RECORDER_PTR_ARRAY_SIZE 512
class MemRecorder : public CHeapObj<mtNMT|otNMTRecorder> {
friend class MemSnapshot;
friend class MemTracker;
friend class MemTrackWorker;
friend class GenerationData;
protected:
// the array that holds memory records
MemPointerArray* _pointer_records;
private:
// used for linked list
MemRecorder* _next;
// an active recorder can only record data of a single generation
unsigned long _generation;
protected:
_NOINLINE_ MemRecorder();
~MemRecorder();
// record a memory operation
bool record(address addr, MEMFLAGS flags, size_t size, jint seq, address caller_pc = 0);
// linked list support
inline void set_next(MemRecorder* rec) {
_next = rec;
}
inline MemRecorder* next() const {
return _next;
}
// if the recorder is full
inline bool is_full() const {
assert(_pointer_records != NULL, "just check");
return _pointer_records->is_full();
}
// whether the recorder ran out of memory when initializing its
// internal data
inline bool out_of_memory() const {
return (_pointer_records == NULL ||
_pointer_records->out_of_memory());
}
inline void clear() {
assert(_pointer_records != NULL, "Just check");
_pointer_records->clear();
}
SequencedRecordIterator pointer_itr();
// return the generation to which this recorder belongs
unsigned long get_generation() const { return _generation; }
protected:
// number of MemRecorder instance
static volatile jint _instance_count;
private:
// sorting function, sort records into following order
// 1. memory address
// 2. allocation type
// 3. sequence number
static int sort_record_fn(const void* e1, const void* e2);
debug_only(void check_dup_seq(jint seq) const;)
void set_generation();
};
#endif // SHARE_VM_SERVICES_MEM_RECORDER_HPP

File diff suppressed because it is too large

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,262 +25,217 @@
#ifndef SHARE_VM_SERVICES_MEM_REPORTER_HPP
#define SHARE_VM_SERVICES_MEM_REPORTER_HPP
#include "runtime/mutexLocker.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_NMT
#include "oops/instanceKlass.hpp"
#include "services/memBaseline.hpp"
#include "services/nmtCommon.hpp"
#include "services/mallocTracker.hpp"
#include "services/virtualMemoryTracker.hpp"
/*
* MemBaselineReporter reports data to this outputer class,
* ReportOutputer is responsible for formatting, storing and redirecting
* the data to the final destination.
*/
class BaselineOutputer : public StackObj {
* Base class that provides helpers
*/
class MemReporterBase : public StackObj {
private:
size_t _scale; // report in this scale
outputStream* _output; // destination
public:
// start to report memory usage in specified scale.
// if report_diff = true, the reporter reports baseline comparison
// information.
MemReporterBase(outputStream* out = NULL, size_t scale = K)
: _scale(scale) {
_output = (out == NULL) ? tty : out;
}
virtual void start(size_t scale, bool report_diff = false) = 0;
// Done reporting
virtual void done() = 0;
protected:
inline outputStream* output() const {
return _output;
}
// Current reporting scale
inline const char* current_scale() const {
return NMTUtil::scale_name(_scale);
}
// Convert memory amount in bytes to current reporting scale
inline size_t amount_in_current_scale(size_t amount) const {
return NMTUtil::amount_in_scale(amount, _scale);
}
/* report baseline summary information */
virtual void total_usage(size_t total_reserved,
size_t total_committed) = 0;
virtual void num_of_classes(size_t classes) = 0;
virtual void num_of_threads(size_t threads) = 0;
// Convert diff amount in bytes to current reporting scale
inline long diff_in_current_scale(size_t s1, size_t s2) const {
long amount = (long)(s1 - s2);
long scale = (long)_scale;
amount = (amount > 0) ? (amount + scale / 2) : (amount - scale / 2);
return amount / scale;
}
virtual void thread_info(size_t stack_reserved_amt, size_t stack_committed_amt) = 0;
/* report baseline summary comparison */
virtual void diff_total_usage(size_t total_reserved,
size_t total_committed,
int reserved_diff,
int committed_diff) = 0;
virtual void diff_num_of_classes(size_t classes, int diff) = 0;
virtual void diff_num_of_threads(size_t threads, int diff) = 0;
virtual void diff_thread_info(size_t stack_reserved, size_t stack_committed,
int stack_reserved_diff, int stack_committed_diff) = 0;
// Helper functions
// Calculate total reserved and committed amount
size_t reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) const;
size_t committed_total(const MallocMemory* malloc, const VirtualMemory* vm) const;
/*
* memory summary by memory types.
* for each memory type, the following summaries are reported:
* - reserved amount, committed amount
* - malloc'd amount, malloc count
* - arena amount, arena count
*/
// Print summary total, malloc and virtual memory
void print_total(size_t reserved, size_t committed) const;
void print_malloc(size_t amount, size_t count) const;
void print_virtual_memory(size_t reserved, size_t committed) const;
// start reporting memory summary by memory type
virtual void start_category_summary() = 0;
void print_malloc_line(size_t amount, size_t count) const;
void print_virtual_memory_line(size_t reserved, size_t committed) const;
void print_arena_line(size_t amount, size_t count) const;
virtual void category_summary(MEMFLAGS type, size_t reserved_amt,
size_t committed_amt,
size_t malloc_amt, size_t malloc_count,
size_t arena_amt, size_t arena_count) = 0;
virtual void diff_category_summary(MEMFLAGS type, size_t cur_reserved_amt,
size_t cur_committed_amt,
size_t cur_malloc_amt, size_t cur_malloc_count,
size_t cur_arena_amt, size_t cur_arena_count,
int reserved_diff, int committed_diff, int malloc_diff,
int malloc_count_diff, int arena_diff,
int arena_count_diff) = 0;
virtual void done_category_summary() = 0;
virtual void start_virtual_memory_map() = 0;
virtual void reserved_memory_region(MEMFLAGS type, address base, address end, size_t size, address pc) = 0;
virtual void committed_memory_region(address base, address end, size_t size, address pc) = 0;
virtual void done_virtual_memory_map() = 0;
/*
* Report callsite information
*/
virtual void start_callsite() = 0;
virtual void malloc_callsite(address pc, size_t malloc_amt, size_t malloc_count) = 0;
virtual void virtual_memory_callsite(address pc, size_t reserved_amt, size_t committed_amt) = 0;
virtual void diff_malloc_callsite(address pc, size_t cur_malloc_amt, size_t cur_malloc_count,
int malloc_diff, int malloc_count_diff) = 0;
virtual void diff_virtual_memory_callsite(address pc, size_t cur_reserved_amt, size_t cur_committed_amt,
int reserved_diff, int committed_diff) = 0;
virtual void done_callsite() = 0;
// return current scale in "KB", "MB" or "GB"
static const char* memory_unit(size_t scale);
void print_virtual_memory_region(const char* type, address base, size_t size) const;
};
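A minimal sketch of the scaling helpers used by MemReporterBase, assuming NMTUtil::amount_in_scale is a plain integer division and reproducing the round-to-nearest logic of diff_in_current_scale; the free functions below are illustrative only.
#include <cstddef>

static const size_t K = 1024;

// stand-in for NMTUtil::amount_in_scale: truncating conversion
inline size_t amount_in_scale(size_t amount, size_t scale) {
  return amount / scale;
}

// signed difference rounded to the nearest scale unit
inline long diff_in_scale(size_t s1, size_t s2, size_t scale) {
  long amount = (long)(s1 - s2);
  long sc = (long)scale;
  amount = (amount > 0) ? (amount + sc / 2) : (amount - sc / 2);
  return amount / sc;
}
// e.g. diff_in_scale(10 * K + 600, 10 * K, K) == 1, while
//      amount_in_scale(600, K)                == 0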
/*
* This class reports processed data from a baseline or
* the changes between two baselines.
* The class is for generating the summary tracking report.
*/
class BaselineReporter : public StackObj {
class MemSummaryReporter : public MemReporterBase {
private:
BaselineOutputer& _outputer;
size_t _scale;
MallocMemorySnapshot* _malloc_snapshot;
VirtualMemorySnapshot* _vm_snapshot;
size_t _class_count;
public:
// construct a reporter that reports memory usage
// in specified scale
BaselineReporter(BaselineOutputer& outputer, size_t scale = K):
_outputer(outputer) {
_scale = scale;
// Report summary tracking data from global snapshots directly.
// This constructor is used for final reporting and hs_err reporting.
MemSummaryReporter(MallocMemorySnapshot* malloc_snapshot,
VirtualMemorySnapshot* vm_snapshot, outputStream* output,
size_t class_count = 0, size_t scale = K) :
MemReporterBase(output, scale),
_malloc_snapshot(malloc_snapshot),
_vm_snapshot(vm_snapshot) {
if (class_count == 0) {
_class_count = InstanceKlass::number_of_instance_classes();
} else {
_class_count = class_count;
}
}
virtual void report_baseline(const MemBaseline& baseline, bool summary_only = false);
virtual void diff_baselines(const MemBaseline& cur, const MemBaseline& prev,
bool summary_only = false);
// This constructor is for normal reporting from a recent baseline.
MemSummaryReporter(MemBaseline& baseline, outputStream* output,
size_t scale = K) : MemReporterBase(output, scale),
_malloc_snapshot(baseline.malloc_memory_snapshot()),
_vm_snapshot(baseline.virtual_memory_snapshot()),
_class_count(baseline.class_count()) { }
void set_scale(size_t scale);
size_t scale() const { return _scale; }
// Generate summary report
virtual void report();
private:
void report_summaries(const MemBaseline& baseline);
void report_virtual_memory_map(const MemBaseline& baseline);
void report_callsites(const MemBaseline& baseline);
void diff_summaries(const MemBaseline& cur, const MemBaseline& prev);
void diff_callsites(const MemBaseline& cur, const MemBaseline& prev);
// calculate memory size in current memory scale
size_t amount_in_current_scale(size_t amt) const;
// diff two unsigned values in current memory scale
int diff_in_current_scale(size_t value1, size_t value2) const;
// diff two unsigned value
int diff(size_t value1, size_t value2) const;
// Report summary for each memory type
void report_summary_of_type(MEMFLAGS type, MallocMemory* malloc_memory,
VirtualMemory* virtual_memory);
};
/*
* tty output implementation. Native memory tracking
* DCmd uses this outputer.
* The class is for generating detail tracking report.
*/
class BaselineTTYOutputer : public BaselineOutputer {
class MemDetailReporter : public MemSummaryReporter {
private:
size_t _scale;
size_t _num_of_classes;
size_t _num_of_threads;
size_t _thread_stack_reserved;
size_t _thread_stack_committed;
int _num_of_classes_diff;
int _num_of_threads_diff;
int _thread_stack_reserved_diff;
int _thread_stack_committed_diff;
outputStream* _output;
MemBaseline& _baseline;
public:
BaselineTTYOutputer(outputStream* st) {
_scale = K;
_num_of_classes = 0;
_num_of_threads = 0;
_thread_stack_reserved = 0;
_thread_stack_committed = 0;
_num_of_classes_diff = 0;
_num_of_threads_diff = 0;
_thread_stack_reserved_diff = 0;
_thread_stack_committed_diff = 0;
_output = st;
MemDetailReporter(MemBaseline& baseline, outputStream* output, size_t scale = K) :
MemSummaryReporter(baseline, output, scale),
_baseline(baseline) { }
// Generate detail report.
// The report contains summary and detail sections.
virtual void report() {
MemSummaryReporter::report();
report_virtual_memory_map();
report_detail();
}
// begin reporting memory usage in specified scale
void start(size_t scale, bool report_diff = false);
// done reporting
void done();
private:
// Report detail tracking data.
void report_detail();
// Report virtual memory map
void report_virtual_memory_map();
// Report malloc allocation sites
void report_malloc_sites();
// Report virtual memory reservation sites
void report_virtual_memory_allocation_sites();
// total memory usage
void total_usage(size_t total_reserved,
size_t total_committed);
// report total loaded classes
void num_of_classes(size_t classes) {
_num_of_classes = classes;
}
void num_of_threads(size_t threads) {
_num_of_threads = threads;
}
void thread_info(size_t stack_reserved_amt, size_t stack_committed_amt) {
_thread_stack_reserved = stack_reserved_amt;
_thread_stack_committed = stack_committed_amt;
}
void diff_total_usage(size_t total_reserved,
size_t total_committed,
int reserved_diff,
int committed_diff);
void diff_num_of_classes(size_t classes, int diff) {
_num_of_classes = classes;
_num_of_classes_diff = diff;
}
void diff_num_of_threads(size_t threads, int diff) {
_num_of_threads = threads;
_num_of_threads_diff = diff;
}
void diff_thread_info(size_t stack_reserved_amt, size_t stack_committed_amt,
int stack_reserved_diff, int stack_committed_diff) {
_thread_stack_reserved = stack_reserved_amt;
_thread_stack_committed = stack_committed_amt;
_thread_stack_reserved_diff = stack_reserved_diff;
_thread_stack_committed_diff = stack_committed_diff;
}
/*
* Report memory summary categorized by memory types.
* For each memory type, the following summaries are reported:
* - reserved amount, committed amount
* - malloc-ed amount, malloc count
* - arena amount, arena count
*/
// start reporting memory summary by memory type
void start_category_summary();
void category_summary(MEMFLAGS type, size_t reserved_amt, size_t committed_amt,
size_t malloc_amt, size_t malloc_count,
size_t arena_amt, size_t arena_count);
void diff_category_summary(MEMFLAGS type, size_t cur_reserved_amt,
size_t cur_committed_amt,
size_t cur_malloc_amt, size_t cur_malloc_count,
size_t cur_arena_amt, size_t cur_arena_count,
int reserved_diff, int committed_diff, int malloc_diff,
int malloc_count_diff, int arena_diff,
int arena_count_diff);
void done_category_summary();
// virtual memory map
void start_virtual_memory_map();
void reserved_memory_region(MEMFLAGS type, address base, address end, size_t size, address pc);
void committed_memory_region(address base, address end, size_t size, address pc);
void done_virtual_memory_map();
/*
* Report callsite information
*/
void start_callsite();
void malloc_callsite(address pc, size_t malloc_amt, size_t malloc_count);
void virtual_memory_callsite(address pc, size_t reserved_amt, size_t committed_amt);
void diff_malloc_callsite(address pc, size_t cur_malloc_amt, size_t cur_malloc_count,
int malloc_diff, int malloc_count_diff);
void diff_virtual_memory_callsite(address pc, size_t cur_reserved_amt, size_t cur_committed_amt,
int reserved_diff, int committed_diff);
void done_callsite();
// Report a virtual memory region
void report_virtual_memory_region(const ReservedMemoryRegion* rgn);
};
/*
* The class is for generating summary comparison report.
* It compares current memory baseline against an early baseline.
*/
class MemSummaryDiffReporter : public MemReporterBase {
protected:
MemBaseline& _early_baseline;
MemBaseline& _current_baseline;
public:
MemSummaryDiffReporter(MemBaseline& early_baseline, MemBaseline& current_baseline,
outputStream* output, size_t scale = K) : MemReporterBase(output, scale),
_early_baseline(early_baseline), _current_baseline(current_baseline) {
assert(early_baseline.baseline_type() != MemBaseline::Not_baselined, "Not baselined");
assert(current_baseline.baseline_type() != MemBaseline::Not_baselined, "Not baselined");
}
// Generate summary comparison report
virtual void report_diff();
private:
// report the comparison of each memory type
void diff_summary_of_type(MEMFLAGS type,
const MallocMemory* early_malloc, const VirtualMemory* early_vm,
const MallocMemory* current_malloc, const VirtualMemory* current_vm) const;
protected:
void print_malloc_diff(size_t current_amount, size_t current_count,
size_t early_amount, size_t early_count) const;
void print_virtual_memory_diff(size_t current_reserved, size_t current_committed,
size_t early_reserved, size_t early_committed) const;
void print_arena_diff(size_t current_amount, size_t current_count,
size_t early_amount, size_t early_count) const;
};
/*
* The class is for generating the detail comparison report.
* It compares the current memory baseline against an early baseline;
* both baselines have to be detail baselines.
*/
class MemDetailDiffReporter : public MemSummaryDiffReporter {
public:
MemDetailDiffReporter(MemBaseline& early_baseline, MemBaseline& current_baseline,
outputStream* output, size_t scale = K) :
MemSummaryDiffReporter(early_baseline, current_baseline, output, scale) { }
// Generate detail comparison report
virtual void report_diff();
// Malloc allocation site comparison
void diff_malloc_sites() const;
// Virtual memory reservation site comparison
void diff_virtual_memory_sites() const;
// New malloc allocation site in recent baseline
void new_malloc_site (const MallocSite* site) const;
// The malloc allocation site is not in recent baseline
void old_malloc_site (const MallocSite* site) const;
// Compare malloc allocation site, it is in both baselines
void diff_malloc_site(const MallocSite* early, const MallocSite* current) const;
// New virtual memory allocation site in recent baseline
void new_virtual_memory_site (const VirtualMemoryAllocationSite* callsite) const;
// The virtual memory allocation site is not in recent baseline
void old_virtual_memory_site (const VirtualMemoryAllocationSite* callsite) const;
// Compare virtual memory allocation site; it is in both baselines
void diff_virtual_memory_site(const VirtualMemoryAllocationSite* early,
const VirtualMemoryAllocationSite* current) const;
void diff_malloc_site(const NativeCallStack* stack, size_t current_size,
size_t current_count, size_t early_size, size_t early_count) const;
void diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
size_t current_committed, size_t early_reserved, size_t early_committed) const;
};
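For orientation, a hedged usage sketch of the reporter classes declared in this header; it assumes the caller already holds populated MemBaseline objects and an outputStream (for example from the NMT DCmd), and the wrapper function names are invented:
#include "services/memReporter.hpp"

// Print a summary report for one baseline (default scale = K).
static void print_summary(MemBaseline& baseline, outputStream* out) {
  MemSummaryReporter reporter(baseline, out);
  reporter.report();
}

// Print a summary comparison; both baselines must have been taken,
// see the asserts in MemSummaryDiffReporter's constructor.
static void print_summary_diff(MemBaseline& early, MemBaseline& current,
                               outputStream* out) {
  MemSummaryDiffReporter reporter(early, current, out);
  reporter.report_diff();
}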
#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MEM_REPORTER_HPP
#endif

@ -1,748 +0,0 @@
/*
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/decoder.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
#ifdef ASSERT
void decode_pointer_record(MemPointerRecord* rec) {
tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT "] size = %d bytes", rec->addr(),
rec->addr() + rec->size(), (int)rec->size());
tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
if (rec->is_vm_pointer()) {
if (rec->is_allocation_record()) {
tty->print_cr(" (reserve)");
} else if (rec->is_commit_record()) {
tty->print_cr(" (commit)");
} else if (rec->is_uncommit_record()) {
tty->print_cr(" (uncommit)");
} else if (rec->is_deallocation_record()) {
tty->print_cr(" (release)");
} else {
tty->print_cr(" (tag)");
}
} else {
if (rec->is_arena_memory_record()) {
tty->print_cr(" (arena size)");
} else if (rec->is_allocation_record()) {
tty->print_cr(" (malloc)");
} else {
tty->print_cr(" (free)");
}
}
if (MemTracker::track_callsite()) {
char buf[1024];
address pc = ((MemPointerRecordEx*)rec)->pc();
if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
tty->print_cr("\tfrom %s", buf);
} else {
tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
}
}
}
void decode_vm_region_record(VMMemRegion* rec) {
tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(),
rec->addr() + rec->size());
tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
if (rec->is_allocation_record()) {
tty->print_cr(" (reserved)");
} else if (rec->is_commit_record()) {
tty->print_cr(" (committed)");
} else {
ShouldNotReachHere();
}
if (MemTracker::track_callsite()) {
char buf[1024];
address pc = ((VMMemRegionEx*)rec)->pc();
if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
tty->print_cr("\tfrom %s", buf);
} else {
tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
}
}
}
#endif
bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
VMMemRegionEx new_rec;
assert(rec->is_allocation_record() || rec->is_commit_record(),
"Sanity check");
if (MemTracker::track_callsite()) {
new_rec.init((MemPointerRecordEx*)rec);
} else {
new_rec.init(rec);
}
return insert(&new_rec);
}
bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
VMMemRegionEx new_rec;
assert(rec->is_allocation_record() || rec->is_commit_record(),
"Sanity check");
if (MemTracker::track_callsite()) {
new_rec.init((MemPointerRecordEx*)rec);
} else {
new_rec.init(rec);
}
return insert_after(&new_rec);
}
// we don't consolidate reserved regions, since they may be categorized
// in different types.
bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
assert(rec->is_allocation_record(), "Sanity check");
VMMemRegion* reserved_region = (VMMemRegion*)current();
// we don't have anything yet
if (reserved_region == NULL) {
return insert_record(rec);
}
assert(reserved_region->is_reserved_region(), "Sanity check");
// duplicated records
if (reserved_region->is_same_region(rec)) {
return true;
}
// Overlapping stack regions indicate that a JNI thread failed to
// detach from the VM before exiting. This leaks the JavaThread object.
if (CheckJNICalls) {
guarantee(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) != mtThreadStack ||
!reserved_region->overlaps_region(rec),
"Attached JNI thread exited without being detached");
}
// otherwise, we should not have overlapping reserved regions
assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
reserved_region->base() > rec->addr(), "Just check: locate()");
assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
!reserved_region->overlaps_region(rec), "overlapping reserved regions");
return insert_record(rec);
}
// we do consolidate committed regions
bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
assert(rec->is_commit_record(), "Sanity check");
VMMemRegion* reserved_rgn = (VMMemRegion*)current();
assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec),
"Sanity check");
// thread's native stack is always marked as "committed", ignore
// the "commit" operation for creating stack guard pages
if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack &&
FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
return true;
}
// if the reserved region has any committed regions
VMMemRegion* committed_rgn = (VMMemRegion*)next();
while (committed_rgn != NULL && committed_rgn->is_committed_region()) {
// duplicated commit records
if(committed_rgn->contains_region(rec)) {
return true;
} else if (committed_rgn->overlaps_region(rec)) {
// overlaps front part
if (rec->addr() < committed_rgn->addr()) {
committed_rgn->expand_region(rec->addr(),
committed_rgn->addr() - rec->addr());
} else {
// overlaps tail part
address committed_rgn_end = committed_rgn->addr() +
committed_rgn->size();
assert(committed_rgn_end < rec->addr() + rec->size(),
"overlap tail part");
committed_rgn->expand_region(committed_rgn_end,
(rec->addr() + rec->size()) - committed_rgn_end);
}
} else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) {
// the regions are adjacent to each other; merge them
committed_rgn->expand_region(rec->addr(), rec->size());
VMMemRegion* next_reg = (VMMemRegion*)next();
// see if we can consolidate next committed region
if (next_reg != NULL && next_reg->is_committed_region() &&
next_reg->base() == committed_rgn->base() + committed_rgn->size()) {
committed_rgn->expand_region(next_reg->base(), next_reg->size());
// delete merged region
remove();
}
return true;
} else if (committed_rgn->base() > rec->addr()) {
// found the location, insert this committed region
return insert_record(rec);
}
committed_rgn = (VMMemRegion*)next();
}
return insert_record(rec);
}
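The committed-region consolidation above amounts to merging overlapping or touching intervals inside a reserved region. A self-contained model of that behavior (not HotSpot code; CommittedMap and add_committed are invented for the sketch):
#include <cstdint>
#include <cstddef>
#include <map>

// committed intervals within one reserved region, keyed by base -> size
typedef std::map<uintptr_t, size_t> CommittedMap;

static void add_committed(CommittedMap& regions, uintptr_t base, size_t size) {
  uintptr_t end = base + size;
  // start from any existing interval that overlaps or touches [base, end)
  CommittedMap::iterator it = regions.lower_bound(base);
  if (it != regions.begin()) {
    CommittedMap::iterator prev = it;
    --prev;
    if (prev->first + prev->second >= base) it = prev;
  }
  // absorb every interval we overlap or adjoin, then insert the union
  while (it != regions.end() && it->first <= end) {
    if (it->first < base)             base = it->first;
    if (it->first + it->second > end) end  = it->first + it->second;
    it = regions.erase(it);
  }
  regions[base] = end - base;
}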
bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
assert(rec->is_uncommit_record(), "sanity check");
VMMemRegion* cur;
cur = (VMMemRegion*)current();
assert(cur->is_reserved_region() && cur->contains_region(rec),
"Sanity check");
// thread's native stack is always marked as "committed", ignore
// the "uncommit" operation on stack guard pages
if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
return true;
}
cur = (VMMemRegion*)next();
while (cur != NULL && cur->is_committed_region()) {
// region already uncommitted, must be due to duplicated record
if (cur->addr() >= rec->addr() + rec->size()) {
break;
} else if (cur->contains_region(rec)) {
// uncommit whole region
if (cur->is_same_region(rec)) {
remove();
break;
} else if (rec->addr() == cur->addr() ||
rec->addr() + rec->size() == cur->addr() + cur->size()) {
// uncommitted from either end of current memory region.
cur->exclude_region(rec->addr(), rec->size());
break;
} else { // split the committed region and release the middle
address high_addr = cur->addr() + cur->size();
size_t sz = high_addr - rec->addr();
cur->exclude_region(rec->addr(), sz);
sz = high_addr - (rec->addr() + rec->size());
if (MemTracker::track_callsite()) {
MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
((VMMemRegionEx*)cur)->pc());
return insert_record_after(&tmp);
} else {
MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
return insert_record_after(&tmp);
}
}
}
cur = (VMMemRegion*)next();
}
// we may not find committed record due to duplicated records
return true;
}
bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
assert(rec->is_deallocation_record(), "Sanity check");
VMMemRegion* cur = (VMMemRegion*)current();
assert(cur->is_reserved_region() && cur->contains_region(rec),
"Sanity check");
if (rec->is_same_region(cur)) {
// In the snapshot, the virtual memory records are sorted in the following order:
// 1. virtual memory's base address
// 2. virtual memory reservation record, followed by commit records within this reservation.
// The commit records are also in base address order.
// When a reserved region is released, we want to remove the reservation record and all
// commit records following it.
#ifdef ASSERT
address low_addr = cur->addr();
address high_addr = low_addr + cur->size();
#endif
// remove virtual memory reservation record
remove();
// remove committed regions within above reservation
VMMemRegion* next_region = (VMMemRegion*)current();
while (next_region != NULL && next_region->is_committed_region()) {
assert(next_region->addr() >= low_addr &&
next_region->addr() + next_region->size() <= high_addr,
"Range check");
remove();
next_region = (VMMemRegion*)current();
}
} else if (rec->addr() == cur->addr() ||
rec->addr() + rec->size() == cur->addr() + cur->size()) {
// released region is at either end of this region
cur->exclude_region(rec->addr(), rec->size());
assert(check_reserved_region(), "Integrity check");
} else { // split the reserved region and release the middle
address high_addr = cur->addr() + cur->size();
size_t sz = high_addr - rec->addr();
cur->exclude_region(rec->addr(), sz);
sz = high_addr - rec->addr() - rec->size();
if (MemTracker::track_callsite()) {
MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
((VMMemRegionEx*)cur)->pc());
bool ret = insert_reserved_region(&tmp);
assert(!ret || check_reserved_region(), "Integrity check");
return ret;
} else {
MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
bool ret = insert_reserved_region(&tmp);
assert(!ret || check_reserved_region(), "Integrity check");
return ret;
}
}
return true;
}
bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
// skip all 'commit' records associated with previous reserved region
VMMemRegion* p = (VMMemRegion*)next();
while (p != NULL && p->is_committed_region() &&
p->base() + p->size() < rec->addr()) {
p = (VMMemRegion*)next();
}
return insert_record(rec);
}
bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region
size_t sz = rgn->size() - new_rgn_size;
// the original region becomes 'new' region
rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
// remaining becomes next region
MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
return insert_reserved_region(&next_rgn);
} else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
rgn->exclude_region(new_rgn_addr, new_rgn_size);
MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
return insert_reserved_region(&next_rgn);
} else {
// the original region will be split into three
address rgn_high_addr = rgn->base() + rgn->size();
// first region
rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
// the second region is the new region
MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
if (!insert_reserved_region(&new_rgn)) return false;
// the remaining region
MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
return insert_reserved_region(&rem_rgn);
}
}
static int sort_in_seq_order(const void* p1, const void* p2) {
assert(p1 != NULL && p2 != NULL, "Sanity check");
const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
return (mp1->seq() - mp2->seq());
}
bool StagingArea::init() {
if (MemTracker::track_callsite()) {
_malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
_vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
} else {
_malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
_vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
}
if (_malloc_data != NULL && _vm_data != NULL &&
!_malloc_data->out_of_memory() &&
!_vm_data->out_of_memory()) {
return true;
} else {
if (_malloc_data != NULL) delete _malloc_data;
if (_vm_data != NULL) delete _vm_data;
_malloc_data = NULL;
_vm_data = NULL;
return false;
}
}
VMRecordIterator StagingArea::virtual_memory_record_walker() {
MemPointerArray* arr = vm_data();
// sort into seq number order
arr->sort((FN_SORT)sort_in_seq_order);
return VMRecordIterator(arr);
}
MemSnapshot::MemSnapshot() {
if (MemTracker::track_callsite()) {
_alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
_vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegionEx>(64, true);
} else {
_alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
_vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegion>(64, true);
}
_staging_area.init();
_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
NOT_PRODUCT(_untracked_count = 0;)
_number_of_classes = 0;
}
MemSnapshot::~MemSnapshot() {
assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
{
MutexLockerEx locker(_lock);
if (_alloc_ptrs != NULL) {
delete _alloc_ptrs;
_alloc_ptrs = NULL;
}
if (_vm_ptrs != NULL) {
delete _vm_ptrs;
_vm_ptrs = NULL;
}
}
if (_lock != NULL) {
delete _lock;
_lock = NULL;
}
}
void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
assert(dest != NULL && src != NULL, "Just check");
assert(dest->addr() == src->addr(), "Just check");
assert(dest->seq() > 0 && src->seq() > 0, "not sequenced");
if (MemTracker::track_callsite()) {
*(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src;
} else {
*(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src;
}
}
void MemSnapshot::assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src) {
assert(src != NULL && dest != NULL, "Just check");
assert(dest->seq() == 0 && src->seq() >0, "cast away sequence");
if (MemTracker::track_callsite()) {
*(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
} else {
*(MemPointerRecord*)dest = *(MemPointerRecord*)src;
}
}
// merge a recorder to the staging area
bool MemSnapshot::merge(MemRecorder* rec) {
assert(rec != NULL && !rec->out_of_memory(), "Just check");
SequencedRecordIterator itr(rec->pointer_itr());
MutexLockerEx lock(_lock, true);
MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
MemPointerRecord* incoming_rec = (MemPointerRecord*) itr.current();
MemPointerRecord* matched_rec;
while (incoming_rec != NULL) {
if (incoming_rec->is_vm_pointer()) {
// we don't do anything with virtual memory records during merge
if (!_staging_area.vm_data()->append(incoming_rec)) {
return false;
}
} else {
// locate the matched record and/or position the iterator at the proper
// location for this incoming record.
matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr());
// we have not seen this memory block in this generation,
// so just add to staging area
if (matched_rec == NULL) {
if (!malloc_staging_itr.insert(incoming_rec)) {
return false;
}
} else if (incoming_rec->addr() == matched_rec->addr()) {
// whoever has higher sequence number wins
if (incoming_rec->seq() > matched_rec->seq()) {
copy_seq_pointer(matched_rec, incoming_rec);
}
} else if (incoming_rec->addr() < matched_rec->addr()) {
if (!malloc_staging_itr.insert(incoming_rec)) {
return false;
}
} else {
ShouldNotReachHere();
}
}
incoming_rec = (MemPointerRecord*)itr.next();
}
NOT_PRODUCT(void check_staging_data();)
return true;
}
// promote data to next generation
bool MemSnapshot::promote(int number_of_classes) {
assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
"Just check");
MutexLockerEx lock(_lock, true);
MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
bool promoted = false;
if (promote_malloc_records(&malloc_itr)) {
VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
if (promote_virtual_memory_records(&vm_itr)) {
promoted = true;
}
}
NOT_PRODUCT(check_malloc_pointers();)
_staging_area.clear();
_number_of_classes = number_of_classes;
return promoted;
}
bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
MemPointerRecord* matched_rec;
while (new_rec != NULL) {
matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
// found matched memory block
if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
// snapshot already contains 'live' records
assert(matched_rec->is_allocation_record() || matched_rec->is_arena_memory_record(),
"Sanity check");
// update block states
if (new_rec->is_allocation_record()) {
assign_pointer(matched_rec, new_rec);
} else if (new_rec->is_arena_memory_record()) {
if (new_rec->size() == 0) {
// remove size record once size drops to 0
malloc_snapshot_itr.remove();
} else {
assign_pointer(matched_rec, new_rec);
}
} else {
// a deallocation record
assert(new_rec->is_deallocation_record(), "Sanity check");
// an arena record can be followed by a size record, we need to remove both
if (matched_rec->is_arena_record()) {
MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
if (next != NULL && next->is_arena_memory_record() &&
next->is_memory_record_of_arena(matched_rec)) {
malloc_snapshot_itr.remove();
}
}
// the memory is deallocated, remove related record(s)
malloc_snapshot_itr.remove();
}
} else {
// don't insert size 0 record
if (new_rec->is_arena_memory_record() && new_rec->size() == 0) {
new_rec = NULL;
}
if (new_rec != NULL) {
if (new_rec->is_allocation_record() || new_rec->is_arena_memory_record()) {
if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
if (!malloc_snapshot_itr.insert_after(new_rec)) {
return false;
}
} else {
if (!malloc_snapshot_itr.insert(new_rec)) {
return false;
}
}
}
#ifndef PRODUCT
else if (!has_allocation_record(new_rec->addr())) {
// NMT can not track some startup memory, which is allocated before NMT is on
_untracked_count ++;
}
#endif
}
}
new_rec = (MemPointerRecord*)itr->next();
}
return true;
}
bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
VMMemRegion* reserved_rec;
while (new_rec != NULL) {
assert(new_rec->is_vm_pointer(), "Sanity check");
// locate a reserved region that contains the specified address, or
// the nearest reserved region whose base address is just above the specified
// address
reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
// snapshot can only have 'live' records
assert(reserved_rec->is_reserved_region(), "Sanity check");
if (new_rec->is_allocation_record()) {
if (!reserved_rec->is_same_region(new_rec)) {
// only deal with splitting a bigger reserved region into smaller regions.
// So far, CDS is the only use case.
if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
return false;
}
}
} else if (new_rec->is_uncommit_record()) {
if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
return false;
}
} else if (new_rec->is_commit_record()) {
// insert or expand existing committed region to cover this
// newly committed region
if (!vm_snapshot_itr.add_committed_region(new_rec)) {
return false;
}
} else if (new_rec->is_deallocation_record()) {
// release part or all memory region
if (!vm_snapshot_itr.remove_released_region(new_rec)) {
return false;
}
} else if (new_rec->is_type_tagging_record()) {
// tag this reserved virtual memory range with a memory type. A memory range cannot be
// re-tagged to a different type.
assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
"Sanity check");
reserved_rec->tag(new_rec->flags());
} else {
ShouldNotReachHere();
}
} else {
/*
* An assertion failure here indicates mismatched virtual memory records. The likely
* scenario is that some virtual memory operations do not go through the os::xxxx_memory()
* API and have to be tracked manually (perfMemory is an example).
*/
assert(new_rec->is_allocation_record(), "Sanity check");
if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
return false;
}
}
new_rec = (MemPointerRecord*)itr->next();
}
return true;
}
#ifndef PRODUCT
void MemSnapshot::print_snapshot_stats(outputStream* st) {
st->print_cr("Snapshot:");
st->print_cr("\tMalloced: %d/%d [%5.2f%%] %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
(100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);
st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
(100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);
st->print_cr("\tMalloc staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
_staging_area.malloc_data()->capacity(),
(100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
_staging_area.malloc_data()->instance_size()/K);
st->print_cr("\tVirtual memory staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
_staging_area.vm_data()->capacity(),
(100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
_staging_area.vm_data()->instance_size()/K);
st->print_cr("\tUntracked allocation: %d", _untracked_count);
}
void MemSnapshot::check_malloc_pointers() {
MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
MemPointerRecord* p = (MemPointerRecord*)mItr.current();
MemPointerRecord* prev = NULL;
while (p != NULL) {
if (prev != NULL) {
assert(p->addr() >= prev->addr(), "sorting order");
}
prev = p;
p = (MemPointerRecord*)mItr.next();
}
}
bool MemSnapshot::has_allocation_record(address addr) {
MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
MemPointerRecord* cur = (MemPointerRecord*)itr.current();
while (cur != NULL) {
if (cur->addr() == addr && cur->is_allocation_record()) {
return true;
}
cur = (MemPointerRecord*)itr.next();
}
return false;
}
#endif // PRODUCT
#ifdef ASSERT
void MemSnapshot::check_staging_data() {
MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
MemPointerRecord* cur = (MemPointerRecord*)itr.current();
MemPointerRecord* next = (MemPointerRecord*)itr.next();
while (next != NULL) {
assert((next->addr() > cur->addr()) ||
((next->flags() & MemPointerRecord::tag_masks) >
(cur->flags() & MemPointerRecord::tag_masks)),
"sorting order");
cur = next;
next = (MemPointerRecord*)itr.next();
}
MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
cur = (MemPointerRecord*)vm_itr.current();
while (cur != NULL) {
assert(cur->is_vm_pointer(), "virtual memory pointer only");
cur = (MemPointerRecord*)vm_itr.next();
}
}
void MemSnapshot::dump_all_vm_pointers() {
MemPointerArrayIteratorImpl itr(_vm_ptrs);
VMMemRegion* ptr = (VMMemRegion*)itr.current();
tty->print_cr("dump virtual memory pointers:");
while (ptr != NULL) {
if (ptr->is_committed_region()) {
tty->print("\t");
}
tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
(ptr->addr() + ptr->size()), ptr->flags());
if (MemTracker::track_callsite()) {
VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
if (ex->pc() != NULL) {
char buf[1024];
if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
tty->print_cr("\t%s", buf);
} else {
tty->cr();
}
}
}
ptr = (VMMemRegion*)itr.next();
}
tty->flush();
}
#endif // ASSERT

@ -1,408 +0,0 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtrArray.hpp"
// Snapshot pointer array iterator
// The pointer array contains malloc-ed pointers
class MemPointerIterator : public MemPointerArrayIteratorImpl {
public:
MemPointerIterator(MemPointerArray* arr):
MemPointerArrayIteratorImpl(arr) {
assert(arr != NULL, "null array");
}
#ifdef ASSERT
virtual bool is_dup_pointer(const MemPointer* ptr1,
const MemPointer* ptr2) const {
MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
MemPointerRecord* p2 = (MemPointerRecord*)ptr2;
if (p1->addr() != p2->addr()) return false;
if ((p1->flags() & MemPointerRecord::tag_masks) !=
(p2->flags() & MemPointerRecord::tag_masks)) {
return false;
}
// we do see multiple commit/uncommit on the same memory, it is ok
return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
(p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
}
virtual bool insert(MemPointer* ptr) {
if (_pos > 0) {
MemPointer* p1 = (MemPointer*)ptr;
MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
assert(!is_dup_pointer(p1, p2),
err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
}
if (_pos < _array->length() -1) {
MemPointer* p1 = (MemPointer*)ptr;
MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
assert(!is_dup_pointer(p1, p2),
err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
}
return _array->insert_at(ptr, _pos);
}
virtual bool insert_after(MemPointer* ptr) {
if (_pos > 0) {
MemPointer* p1 = (MemPointer*)ptr;
MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
assert(!is_dup_pointer(p1, p2),
err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
}
if (_pos < _array->length() - 1) {
MemPointer* p1 = (MemPointer*)ptr;
MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
assert(!is_dup_pointer(p1, p2),
err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
}
if (_array->insert_at(ptr, _pos + 1)) {
_pos ++;
return true;
}
return false;
}
#endif
virtual MemPointer* locate(address addr) {
MemPointer* cur = current();
while (cur != NULL && cur->addr() < addr) {
cur = next();
}
return cur;
}
};
class VMMemPointerIterator : public MemPointerIterator {
public:
VMMemPointerIterator(MemPointerArray* arr):
MemPointerIterator(arr) {
}
// locate an existing reserved memory region that contains specified address,
// or the reserved region just above this address, where the incoming
// reserved region should be inserted.
virtual MemPointer* locate(address addr) {
reset();
VMMemRegion* reg = (VMMemRegion*)current();
while (reg != NULL) {
if (reg->is_reserved_region()) {
if (reg->contains_address(addr) || addr < reg->base()) {
return reg;
}
}
reg = (VMMemRegion*)next();
}
return NULL;
}
// following methods update virtual memory in the context
// of 'current' position, which is properly positioned by
// callers via locate method.
bool add_reserved_region(MemPointerRecord* rec);
bool add_committed_region(MemPointerRecord* rec);
bool remove_uncommitted_region(MemPointerRecord* rec);
bool remove_released_region(MemPointerRecord* rec);
// split a reserved region to create a new memory region with specified base and size
bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);
private:
bool insert_record(MemPointerRecord* rec);
bool insert_record_after(MemPointerRecord* rec);
bool insert_reserved_region(MemPointerRecord* rec);
// reset current position
inline void reset() { _pos = 0; }
#ifdef ASSERT
// check integrity of records on current reserved memory region.
bool check_reserved_region() {
VMMemRegion* reserved_region = (VMMemRegion*)current();
assert(reserved_region != NULL && reserved_region->is_reserved_region(),
"Sanity check");
// all committed regions that follow the current reserved region should
// belong to that reserved region.
VMMemRegion* next_region = (VMMemRegion*)next();
for (; next_region != NULL && next_region->is_committed_region();
next_region = (VMMemRegion*)next() ) {
if(!reserved_region->contains_region(next_region)) {
return false;
}
}
return true;
}
virtual bool is_dup_pointer(const MemPointer* ptr1,
const MemPointer* ptr2) const {
VMMemRegion* p1 = (VMMemRegion*)ptr1;
VMMemRegion* p2 = (VMMemRegion*)ptr2;
if (p1->addr() != p2->addr()) return false;
if ((p1->flags() & MemPointerRecord::tag_masks) !=
(p2->flags() & MemPointerRecord::tag_masks)) {
return false;
}
// we do see multiple commit/uncommit on the same memory, it is ok
return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
(p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
}
#endif
};
class MallocRecordIterator : public MemPointerArrayIterator {
private:
MemPointerArrayIteratorImpl _itr;
public:
MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
}
virtual MemPointer* current() const {
#ifdef ASSERT
MemPointer* cur_rec = _itr.current();
if (cur_rec != NULL) {
MemPointer* prev_rec = _itr.peek_prev();
MemPointer* next_rec = _itr.peek_next();
assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
}
#endif
return _itr.current();
}
virtual MemPointer* next() {
MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
// an arena memory record is a special case: we have to compare its
// sequence number against its associated arena record.
if (next_rec != NULL && next_rec->is_arena_memory_record()) {
MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
// if there is an associated arena record, it has to be the previous
// record because of the sorting order (by address) - NMT generates a pseudo address
// for an arena's size record by offsetting the arena's address, which guarantees
// the ordering of an arena record and its size record.
if (prev_rec != NULL && prev_rec->is_arena_record() &&
next_rec->is_memory_record_of_arena(prev_rec)) {
if (prev_rec->seq() > next_rec->seq()) {
// Skip this arena memory record
// Two scenarios:
// - if the arena record is an allocation record, this early
// size record must be left over by a previous arena,
// and the last size record should have size = 0.
// - if the arena record is a deallocation record, this
// size record should be its cleanup record, which should
// also have size = 0. In other words, an arena always resets
// its size before it goes away (see Arena's destructor)
assert(next_rec->size() == 0, "size not reset");
return _itr.next();
} else {
assert(prev_rec->is_allocation_record(),
"Arena size record ahead of allocation record");
}
}
}
return next_rec;
}
MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
void remove() { ShouldNotReachHere(); }
bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
};
// collapse duplicated records. Eliminating duplicated records here is much
// cheaper than during the promotion phase. However, it has a limitation: it
// can only eliminate duplicated records within a generation, so there is
// still a chance of seeing duplicated records during promotion.
// We want to use the record with the higher sequence number, because it has
// a more accurate callsite pc.
class VMRecordIterator : public MemPointerArrayIterator {
private:
MemPointerArrayIteratorImpl _itr;
public:
VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
while (next != NULL) {
assert(cur != NULL, "Sanity check");
assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
"pre-sort order");
if (is_duplicated_record(cur, next)) {
_itr.next();
next = (MemPointerRecord*)_itr.peek_next();
} else {
break;
}
}
}
virtual MemPointer* current() const {
return _itr.current();
}
// get next record, but skip the duplicated records
virtual MemPointer* next() {
MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
while (next != NULL) {
assert(cur != NULL, "Sanity check");
assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
"pre-sort order");
if (is_duplicated_record(cur, next)) {
_itr.next();
cur = next;
next = (MemPointerRecord*)_itr.peek_next();
} else {
break;
}
}
return cur;
}
MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
void remove() { ShouldNotReachHere(); }
bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
private:
bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
return ret;
}
};
class StagingArea VALUE_OBJ_CLASS_SPEC {
private:
MemPointerArray* _malloc_data;
MemPointerArray* _vm_data;
public:
StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
init();
}
~StagingArea() {
if (_malloc_data != NULL) delete _malloc_data;
if (_vm_data != NULL) delete _vm_data;
}
MallocRecordIterator malloc_record_walker() {
return MallocRecordIterator(malloc_data());
}
VMRecordIterator virtual_memory_record_walker();
bool init();
void clear() {
assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
_malloc_data->shrink();
_malloc_data->clear();
_vm_data->clear();
}
inline MemPointerArray* malloc_data() { return _malloc_data; }
inline MemPointerArray* vm_data() { return _vm_data; }
};
class MemBaseline;
class MemSnapshot : public CHeapObj<mtNMT> {
private:
// the following two arrays contain records of all known live memory blocks
// live malloc-ed memory pointers
MemPointerArray* _alloc_ptrs;
// live virtual memory pointers
MemPointerArray* _vm_ptrs;
StagingArea _staging_area;
// the lock to protect this snapshot
Monitor* _lock;
// the number of instance classes
int _number_of_classes;
NOT_PRODUCT(size_t _untracked_count;)
friend class MemBaseline;
public:
MemSnapshot();
virtual ~MemSnapshot();
// if we are running out of native memory
bool out_of_memory() {
return (_alloc_ptrs == NULL ||
_staging_area.malloc_data() == NULL ||
_staging_area.vm_data() == NULL ||
_vm_ptrs == NULL || _lock == NULL ||
_alloc_ptrs->out_of_memory() ||
_vm_ptrs->out_of_memory());
}
// merge a per-thread memory recorder into staging area
bool merge(MemRecorder* rec);
// promote staged data to snapshot
bool promote(int number_of_classes);
int number_of_classes() const { return _number_of_classes; }
void wait(long timeout) {
assert(_lock != NULL, "Just check");
MonitorLockerEx locker(_lock);
locker.wait(true, timeout);
}
NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
NOT_PRODUCT(void check_staging_data();)
NOT_PRODUCT(void check_malloc_pointers();)
NOT_PRODUCT(bool has_allocation_record(address addr);)
// dump all virtual memory pointers in snapshot
DEBUG_ONLY( void dump_all_vm_pointers();)
private:
// copy sequenced pointer from src to dest
void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
// assign a sequenced pointer to non-sequenced pointer
void assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src);
bool promote_malloc_records(MemPointerArrayIterator* itr);
bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
};
#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP

@ -1,212 +0,0 @@
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "services/memTrackWorker.hpp"
#include "utilities/decoder.hpp"
#include "utilities/vmError.hpp"
void GenerationData::reset() {
_number_of_classes = 0;
while (_recorder_list != NULL) {
MemRecorder* tmp = _recorder_list;
_recorder_list = _recorder_list->next();
MemTracker::release_thread_recorder(tmp);
}
}
MemTrackWorker::MemTrackWorker(MemSnapshot* snapshot): _snapshot(snapshot) {
// thread creation uses the cgc thread type for now; we should revisit
// this option or create a new thread type.
_has_error = !os::create_thread(this, os::cgc_thread);
set_name("MemTrackWorker");
// initialize the generation circular buffer
if (!has_error()) {
_head = _tail = 0;
for(int index = 0; index < MAX_GENERATIONS; index ++) {
::new ((void*)&_gen[index]) GenerationData();
}
}
NOT_PRODUCT(_sync_point_count = 0;)
NOT_PRODUCT(_merge_count = 0;)
NOT_PRODUCT(_last_gen_in_use = 0;)
}
MemTrackWorker::~MemTrackWorker() {
for (int index = 0; index < MAX_GENERATIONS; index ++) {
_gen[index].reset();
}
}
void* MemTrackWorker::operator new(size_t size) throw() {
assert(false, "use nothrow version");
return NULL;
}
void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
return allocate(size, false, mtNMT);
}
void MemTrackWorker::start() {
os::start_thread(this);
}
/*
* Native memory tracking worker thread loop:
* 1. merge one generation of memory recorders to staging area
* 2. promote staging data to memory snapshot
*
* This thread can run through safepoint.
*/
void MemTrackWorker::run() {
assert(MemTracker::is_on(), "native memory tracking is off");
this->initialize_thread_local_storage();
this->record_stack_base_and_size();
assert(_snapshot != NULL, "Worker should not be started");
MemRecorder* rec;
unsigned long processing_generation = 0;
bool worker_idle = false;
while (!MemTracker::shutdown_in_progress()) {
NOT_PRODUCT(_last_gen_in_use = generations_in_use();)
{
// take a recorder from earliest generation in buffer
ThreadCritical tc;
rec = _gen[_head].next_recorder();
}
if (rec != NULL) {
if (rec->get_generation() != processing_generation || worker_idle) {
processing_generation = rec->get_generation();
worker_idle = false;
MemTracker::set_current_processing_generation(processing_generation);
}
// merge the recorder into staging area
if (!_snapshot->merge(rec)) {
MemTracker::shutdown(MemTracker::NMT_out_of_memory);
} else {
NOT_PRODUCT(_merge_count ++;)
}
MemTracker::release_thread_recorder(rec);
} else {
// no more recorders to merge; promote the staging area
// to the snapshot
if (_head != _tail) {
long number_of_classes;
{
ThreadCritical tc;
if (_gen[_head].has_more_recorder() || _head == _tail) {
continue;
}
number_of_classes = _gen[_head].number_of_classes();
_gen[_head].reset();
// done with this generation, increment _head pointer
_head = (_head + 1) % MAX_GENERATIONS;
}
// promote this generation data to snapshot
if (!_snapshot->promote(number_of_classes)) {
// failed to promote, means out of memory
MemTracker::shutdown(MemTracker::NMT_out_of_memory);
}
} else {
// worker thread is idle
worker_idle = true;
MemTracker::report_worker_idle();
_snapshot->wait(1000);
ThreadCritical tc;
// check if more data arrived
if (!_gen[_head].has_more_recorder()) {
_gen[_head].add_recorders(MemTracker::get_pending_recorders());
}
}
}
}
assert(MemTracker::shutdown_in_progress(), "just check");
// transits to final shutdown
MemTracker::final_shutdown();
}
// At the synchronization point, 'safepoint visible' Java threads are blocked
// at a safepoint, and the rest of the threads are blocked on the ThreadCritical lock.
// The caller, MemTracker::sync(), already takes ThreadCritical before calling this
// method.
//
// Following tasks are performed:
// 1. add all recorders in pending queue to current generation
// 2. increase generation
void MemTrackWorker::at_sync_point(MemRecorder* rec, int number_of_classes) {
NOT_PRODUCT(_sync_point_count ++;)
assert(count_recorder(rec) <= MemRecorder::_instance_count,
"pending queue has infinite loop");
bool out_of_generation_buffer = false;
// check shutdown state inside ThreadCritical
if (MemTracker::shutdown_in_progress()) return;
_gen[_tail].set_number_of_classes(number_of_classes);
// append the recorders to the end of the generation
_gen[_tail].add_recorders(rec);
assert(count_recorder(_gen[_tail].peek()) <= MemRecorder::_instance_count,
"after add to current generation has infinite loop");
// we have collected all recorders for this generation. If there is data,
// we need to increment _tail to start a new generation.
if (_gen[_tail].has_more_recorder() || _head == _tail) {
_tail = (_tail + 1) % MAX_GENERATIONS;
out_of_generation_buffer = (_tail == _head);
}
if (out_of_generation_buffer) {
MemTracker::shutdown(MemTracker::NMT_out_of_generation);
}
}
#ifndef PRODUCT
int MemTrackWorker::count_recorder(const MemRecorder* head) {
int count = 0;
while(head != NULL) {
count ++;
head = head->next();
}
return count;
}
int MemTrackWorker::count_pending_recorders() const {
int count = 0;
for (int index = 0; index < MAX_GENERATIONS; index ++) {
MemRecorder* head = _gen[index].peek();
if (head != NULL) {
count += count_recorder(head);
}
}
return count;
}
#endif

@ -1,118 +0,0 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP
#define SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP
#include "memory/allocation.hpp"
#include "runtime/thread.hpp"
#include "services/memRecorder.hpp"
// At most MAX_GENERATIONS generations of data can be tracked.
#define MAX_GENERATIONS 512
class GenerationData VALUE_OBJ_CLASS_SPEC {
private:
int _number_of_classes;
MemRecorder* _recorder_list;
public:
GenerationData(): _number_of_classes(0), _recorder_list(NULL) { }
inline int number_of_classes() const { return _number_of_classes; }
inline void set_number_of_classes(long num) { _number_of_classes = num; }
inline MemRecorder* next_recorder() {
if (_recorder_list == NULL) {
return NULL;
} else {
MemRecorder* tmp = _recorder_list;
_recorder_list = _recorder_list->next();
return tmp;
}
}
inline bool has_more_recorder() const {
return (_recorder_list != NULL);
}
// add recorders to this generation
void add_recorders(MemRecorder* head) {
if (head != NULL) {
if (_recorder_list == NULL) {
_recorder_list = head;
} else {
MemRecorder* tmp = _recorder_list;
for (; tmp->next() != NULL; tmp = tmp->next());
tmp->set_next(head);
}
}
}
void reset();
NOT_PRODUCT(MemRecorder* peek() const { return _recorder_list; })
};
class MemTrackWorker : public NamedThread {
private:
// Circular buffer. This buffer contains generation data to be merged into the global
// snapshot.
// Each slot holds a generation
GenerationData _gen[MAX_GENERATIONS];
int _head, _tail; // head and tail pointers to above circular buffer
bool _has_error;
MemSnapshot* _snapshot;
public:
MemTrackWorker(MemSnapshot* snapshot);
~MemTrackWorker();
_NOINLINE_ void* operator new(size_t size) throw();
_NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw();
void start();
void run();
inline bool has_error() const { return _has_error; }
// task at synchronization point
void at_sync_point(MemRecorder* pending_recorders, int number_of_classes);
// for debugging purposes; they are not thread safe.
NOT_PRODUCT(static int count_recorder(const MemRecorder* head);)
NOT_PRODUCT(int count_pending_recorders() const;)
NOT_PRODUCT(int _sync_point_count;)
NOT_PRODUCT(int _merge_count;)
NOT_PRODUCT(int _last_gen_in_use;)
// how many generations are queued
inline int generations_in_use() const {
return (_tail >= _head ? (_tail - _head + 1) : (MAX_GENERATIONS - (_head - _tail) + 1));
}
};
#endif // SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP
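The head/tail arithmetic in generations_in_use() above is the usual circular-buffer occupancy count. A minimal standalone check of the same formula, assuming the MAX_GENERATIONS value of 512 used here (the helper below is hypothetical, not part of the sources):

#include <cassert>

// Hypothetical restatement of the generations_in_use() formula; 'head' and
// 'tail' index a circular buffer of 'max_gen' slots.
static int generations_in_use(int head, int tail, int max_gen) {
  return (tail >= head) ? (tail - head + 1)
                        : (max_gen - (head - tail) + 1);
}

int main() {
  assert(generations_in_use(3, 5, 512) == 3);    // no wrap-around: slots 3..5
  assert(generations_in_use(510, 2, 512) == 5);  // wrap-around: 510,511,0,1,2
  return 0;
}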

File diff suppressed because it is too large

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,566 +25,289 @@
#ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP
#define SHARE_VM_SERVICES_MEM_TRACKER_HPP
#include "utilities/macros.hpp"
#include "services/nmtCommon.hpp"
class NativeCallStack;
extern NativeCallStack emptyStack;
#if !INCLUDE_NMT
#include "utilities/ostream.hpp"
#define CURRENT_PC emptyStack
#define CALLER_PC emptyStack
class BaselineOutputer : public StackObj {
};
class BaselineTTYOutputer : public BaselineOutputer {
public:
BaselineTTYOutputer(outputStream* st) { }
class Tracker : public StackObj {
public:
Tracker() { }
void record(address addr, size_t size) { }
};
class MemTracker : AllStatic {
public:
enum ShutdownReason {
NMT_shutdown_none, // no shutdown requested
NMT_shutdown_user, // user requested shutdown
NMT_normal, // normal shutdown, process exit
NMT_out_of_memory, // shutdown due to out of memory
NMT_initialization, // shutdown due to initialization failure
NMT_use_malloc_only, // can not combine NMT with UseMallocOnly flag
NMT_error_reporting, // shutdown by vmError::report_and_die()
NMT_out_of_generation, // running out of generation queue
NMT_sequence_overflow // overflow the sequence number
};
public:
static inline NMT_TrackingLevel tracking_level() { return NMT_off; }
static inline void shutdown() { }
static inline void init() { }
static bool check_launcher_nmt_support(const char* value) { return true; }
static bool verify_nmt_option() { return true; }
class Tracker {
public:
void discard() { }
static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
const NativeCallStack& stack, NMT_TrackingLevel level) { return mem_base; }
static inline size_t malloc_header_size(NMT_TrackingLevel level) { return 0; }
static inline size_t malloc_header_size(void* memblock) { return 0; }
static inline void* malloc_base(void* memblock) { return memblock; }
static inline void* record_free(void* memblock) { return memblock; }
void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL) { }
void record(address old_addr, address new_addr, size_t size,
MEMFLAGS flags, address pc = NULL) { }
};
static inline void record_new_arena(MEMFLAGS flag) { }
static inline void record_arena_free(MEMFLAGS flag) { }
static inline void record_arena_size_change(int diff, MEMFLAGS flag) { }
static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
MEMFLAGS flag = mtNone) { }
static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
const NativeCallStack& stack, MEMFLAGS flag = mtNone) { }
static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { }
static inline Tracker get_virtual_memory_uncommit_tracker() { return Tracker(); }
static inline Tracker get_virtual_memory_release_tracker() { }
static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { }
static inline void record_thread_stack(void* addr, size_t size) { }
static inline void release_thread_stack(void* addr, size_t size) { }
private:
static Tracker _tkr;
public:
static inline void init_tracking_options(const char* option_line) { }
static inline bool is_on() { return false; }
static const char* reason() { return "Native memory tracking is not implemented"; }
static inline bool can_walk_stack() { return false; }
static inline void bootstrap_single_thread() { }
static inline void bootstrap_multi_thread() { }
static inline void start() { }
static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
address pc = 0, Thread* thread = NULL) { }
static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { }
static inline void record_arena_size(address addr, size_t size) { }
static inline void record_virtual_memory_reserve(address addr, size_t size,
MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
static inline void record_virtual_memory_commit(address addr, size_t size,
address pc = 0, Thread* thread = NULL) { }
static inline void record_virtual_memory_release(address addr, size_t size,
Thread* thread = NULL) { }
static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
Thread* thread = NULL) { }
static inline Tracker get_realloc_tracker() { return _tkr; }
static inline Tracker get_virtual_memory_uncommit_tracker() { return _tkr; }
static inline Tracker get_virtual_memory_release_tracker() { return _tkr; }
static inline bool baseline() { return false; }
static inline bool has_baseline() { return false; }
static inline void set_autoShutdown(bool value) { }
static void shutdown(ShutdownReason reason) { }
static inline bool shutdown_in_progress() { return false; }
static bool print_memory_usage(BaselineOutputer& out, size_t unit,
bool summary_only = true) { return false; }
static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
bool summary_only = true) { return false; }
static bool wbtest_wait_for_data_merge() { return false; }
static inline void sync() { }
static inline void thread_exiting(JavaThread* thread) { }
static void final_report(outputStream*) { }
};
#else // !INCLUDE_NMT
#include "memory/allocation.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "services/memPtr.hpp"
#include "services/memRecorder.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTrackWorker.hpp"
extern bool NMT_track_callsite;
#ifndef MAX_UNSIGNED_LONG
#define MAX_UNSIGNED_LONG (unsigned long)(-1)
#endif
#ifdef ASSERT
#define DEBUG_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#else
#define DEBUG_CALLER_PC 0
#endif
// The thread closure walks threads to collect per-thread
// memory recorders at NMT sync point
class SyncThreadRecorderClosure : public ThreadClosure {
private:
int _thread_count;
#include "runtime/atomic.hpp"
#include "runtime/threadCritical.hpp"
#include "services/mallocTracker.hpp"
#include "services/virtualMemoryTracker.hpp"
extern volatile bool NMT_stack_walkable;
#define CURRENT_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \
NativeCallStack(0, true) : emptyStack)
#define CALLER_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \
NativeCallStack(1, true) : emptyStack)
class MemBaseline;
class Mutex;
// Tracker is used to guard the 'release' semantics of a virtual memory operation, to prevent
// another thread from obtaining and recording the same region that has just been 'released' by
// the current thread before the current thread can record the operation.
class Tracker : public StackObj {
public:
enum TrackerType {
uncommit,
release
};
public:
SyncThreadRecorderClosure() {
_thread_count =0;
}
void do_thread(Thread* thread);
int get_thread_count() const {
return _thread_count;
}
Tracker(enum TrackerType type) : _type(type) { }
void record(address addr, size_t size);
private:
enum TrackerType _type;
// Virtual memory tracking data structures are protected by ThreadCritical lock.
ThreadCritical _tc;
};
class BaselineOutputer;
class MemSnapshot;
class MemTrackWorker;
class Thread;
/*
* MemTracker is the 'gate' class to native memory tracking runtime.
*/
class MemTracker : AllStatic {
friend class GenerationData;
friend class MemTrackWorker;
friend class MemSnapshot;
friend class SyncThreadRecorderClosure;
// NMT state
enum NMTStates {
NMT_uninited, // not yet initialized
NMT_bootstrapping_single_thread, // bootstrapping, VM is in single thread mode
NMT_bootstrapping_multi_thread, // bootstrapping, VM is about to enter multi-thread mode
NMT_started, // NMT fully started
NMT_shutdown_pending, // shutdown pending
NMT_final_shutdown, // in final phase of shutdown
NMT_shutdown // shutdown
};
public:
class Tracker : public StackObj {
friend class MemTracker;
public:
enum MemoryOperation {
NoOp, // no op
Malloc, // malloc
Realloc, // realloc
Free, // free
Reserve, // virtual memory reserve
Commit, // virtual memory commit
ReserveAndCommit, // virtual memory reserve and commit
StackAlloc = ReserveAndCommit, // allocate thread stack
Type, // assign virtual memory type
Uncommit, // virtual memory uncommit
Release, // virtual memory release
ArenaSize, // set arena size
StackRelease // release thread stack
};
protected:
Tracker(MemoryOperation op, Thread* thr = NULL);
public:
void discard();
void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL);
void record(address old_addr, address new_addr, size_t size,
MEMFLAGS flags, address pc = NULL);
private:
bool _need_thread_critical_lock;
JavaThread* _java_thread;
MemoryOperation _op; // memory operation
jint _seq; // reserved sequence number
};
public:
// native memory tracking level
enum NMTLevel {
NMT_off, // native memory tracking is off
NMT_summary, // don't track callsite
NMT_detail // track callsite also
};
enum ShutdownReason {
NMT_shutdown_none, // no shutdown requested
NMT_shutdown_user, // user requested shutdown
NMT_normal, // normal shutdown, process exit
NMT_out_of_memory, // shutdown due to out of memory
NMT_initialization, // shutdown due to initialization failure
NMT_use_malloc_only, // can not combine NMT with UseMallocOnly flag
NMT_error_reporting, // shutdown by vmError::report_and_die()
NMT_out_of_generation, // running out of generation queue
NMT_sequence_overflow // overflow the sequence number
};
public:
// initialize NMT tracking level from command line options, called
// from VM command line parsing code
static void init_tracking_options(const char* option_line);
// if NMT is enabled to record memory activities
static inline bool is_on() {
return (_tracking_level >= NMT_summary &&
_state >= NMT_bootstrapping_single_thread);
}
static inline enum NMTLevel tracking_level() {
static inline NMT_TrackingLevel tracking_level() {
if (_tracking_level == NMT_unknown) {
// No fencing is needed here, since JVM is in single-threaded
// mode.
_tracking_level = init_tracking_level();
_cmdline_tracking_level = _tracking_level;
}
return _tracking_level;
}
// user readable reason for shutting down NMT
static const char* reason() {
switch(_reason) {
case NMT_shutdown_none:
return "Native memory tracking is not enabled";
case NMT_shutdown_user:
return "Native memory tracking has been shutdown by user";
case NMT_normal:
return "Native memory tracking has been shutdown due to process exiting";
case NMT_out_of_memory:
return "Native memory tracking has been shutdown due to out of native memory";
case NMT_initialization:
return "Native memory tracking failed to initialize";
case NMT_error_reporting:
return "Native memory tracking has been shutdown due to error reporting";
case NMT_out_of_generation:
return "Native memory tracking has been shutdown due to running out of generation buffer";
case NMT_sequence_overflow:
return "Native memory tracking has been shutdown due to overflow the sequence number";
case NMT_use_malloc_only:
return "Native memory tracking is not supported when UseMallocOnly is on";
default:
ShouldNotReachHere();
return NULL;
// A late initialization, for work that cannot be
// done in init_tracking_level(), which must NOT malloc
// any memory.
static void init();
// Shutdown native memory tracking
static void shutdown();
// Verify the native memory tracking command line option.
// This check allows the JVM to detect whether a compatible launcher
// is used.
// If an incompatible launcher is used, NMT may not be
// able to start, even if it is enabled by the command line option.
// A warning message should be given if this is encountered.
static bool check_launcher_nmt_support(const char* value);
// This method checks the native memory tracking environment
// variable value passed by the launcher.
// The launcher is only obligated to pass the native memory tracking
// option value, not to validate it, and the launcher may discard the
// native memory tracking option from the command line once it has set
// up the environment variable, so NMT has to catch a bad value here.
static bool verify_nmt_option();
// Transition the tracking level to specified level
static bool transition_to(NMT_TrackingLevel level);
static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
const NativeCallStack& stack, NMT_TrackingLevel level) {
return MallocTracker::record_malloc(mem_base, size, flag, stack, level);
}
static inline size_t malloc_header_size(NMT_TrackingLevel level) {
return MallocTracker::malloc_header_size(level);
}
static size_t malloc_header_size(void* memblock) {
if (tracking_level() != NMT_off) {
return MallocTracker::get_header_size(memblock);
}
return 0;
}
// Returns the malloc base address, which is the starting address
// of the malloc tracking header if tracking is enabled.
// Otherwise, it returns the same address.
static void* malloc_base(void* memblock);
// Record malloc free and return malloc base address
static inline void* record_free(void* memblock) {
return MallocTracker::record_free(memblock);
}
// Record creation of an arena
static inline void record_new_arena(MEMFLAGS flag) {
if (tracking_level() < NMT_summary) return;
MallocTracker::record_new_arena(flag);
}
// Record destruction of an arena
static inline void record_arena_free(MEMFLAGS flag) {
if (tracking_level() < NMT_summary) return;
MallocTracker::record_arena_free(flag);
}
// Record arena size change. Arena size is the size of all arena
// chunks that back the arena.
static inline void record_arena_size_change(int diff, MEMFLAGS flag) {
if (tracking_level() < NMT_summary) return;
MallocTracker::record_arena_size_change(diff, flag);
}
static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
MEMFLAGS flag = mtNone) {
if (tracking_level() < NMT_summary) return;
if (addr != NULL) {
ThreadCritical tc;
// Recheck to avoid potential racing during NMT shutdown
if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
}
}
// test if we can walk native stack
static bool can_walk_stack() {
// native stack is not walkable during bootstrapping on sparc
#if defined(SPARC)
return (_state == NMT_started);
#else
return (_state >= NMT_bootstrapping_single_thread && _state <= NMT_started);
#endif
}
// if native memory tracking tracks callsite
static inline bool track_callsite() { return _tracking_level == NMT_detail; }
// NMT automatically shuts itself down under extreme situations by default.
// When the value is set to false, NMT will try its best to stay alive,
// even if it has to slow down the VM.
static inline void set_autoShutdown(bool value) {
AutoShutdownNMT = value;
if (AutoShutdownNMT && _slowdown_calling_thread) {
_slowdown_calling_thread = false;
static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
const NativeCallStack& stack, MEMFLAGS flag = mtNone) {
if (tracking_level() < NMT_summary) return;
if (addr != NULL) {
ThreadCritical tc;
if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::add_reserved_region((address)addr, size,
stack, flag, true);
}
}
// Shut down the native memory tracking capability. Native memory tracking
// can be shut down by the VM when it encounters low-memory scenarios.
// The memory tracker should shut itself down gracefully, and preserve the
// latest memory statistics for post-mortem diagnosis.
static void shutdown(ShutdownReason reason);
// if there is shutdown requested
static inline bool shutdown_in_progress() {
return (_state >= NMT_shutdown_pending);
}
// bootstrap native memory tracking, so it can start to collect raw data
// before worker thread can start
// the first phase of bootstrapping, when VM still in single-threaded mode
static void bootstrap_single_thread();
// the second phase of bootstrapping, VM is about or already in multi-threaded mode
static void bootstrap_multi_thread();
// start() has to be called when VM still in single thread mode, but after
// command line option parsing is done.
static void start();
// record a 'malloc' call
static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
address pc = 0, Thread* thread = NULL) {
Tracker tkr(Tracker::Malloc, thread);
tkr.record(addr, size, flags, pc);
}
// record a 'free' call
static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) {
Tracker tkr(Tracker::Free, thread);
tkr.record(addr, 0, flags, DEBUG_CALLER_PC);
}
static inline void record_arena_size(address addr, size_t size) {
Tracker tkr(Tracker::ArenaSize);
tkr.record(addr, size);
}
// record a virtual memory 'reserve' call
static inline void record_virtual_memory_reserve(address addr, size_t size,
MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
assert(size > 0, "Sanity check");
Tracker tkr(Tracker::Reserve, thread);
tkr.record(addr, size, flags, pc);
}
static inline void record_thread_stack(address addr, size_t size, Thread* thr,
address pc = 0) {
Tracker tkr(Tracker::StackAlloc, thr);
tkr.record(addr, size, mtThreadStack, pc);
}
static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
Tracker tkr(Tracker::StackRelease, thr);
tkr.record(addr, size, mtThreadStack, DEBUG_CALLER_PC);
}
// record a virtual memory 'commit' call
static inline void record_virtual_memory_commit(address addr, size_t size,
address pc, Thread* thread = NULL) {
Tracker tkr(Tracker::Commit, thread);
tkr.record(addr, size, mtNone, pc);
}
static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
MEMFLAGS flags, address pc, Thread* thread = NULL) {
Tracker tkr(Tracker::ReserveAndCommit, thread);
tkr.record(addr, size, flags, pc);
}
static inline void record_virtual_memory_release(address addr, size_t size,
Thread* thread = NULL) {
if (is_on()) {
Tracker tkr(Tracker::Release, thread);
tkr.record(addr, size);
static inline void record_virtual_memory_commit(void* addr, size_t size,
const NativeCallStack& stack) {
if (tracking_level() < NMT_summary) return;
if (addr != NULL) {
ThreadCritical tc;
if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
}
}
// record memory type on virtual memory base address
static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
Thread* thread = NULL) {
Tracker tkr(Tracker::Type);
tkr.record(base, 0, flags);
}
// Get memory trackers for memory operations that can result in race conditions.
// The memory tracker has to be obtained before realloc, virtual memory uncommit
// and virtual memory release; call the tracker.record() method if the operation
// succeeded, or tracker.discard() to abort the tracking.
static inline Tracker get_realloc_tracker() {
return Tracker(Tracker::Realloc);
}
static inline Tracker get_virtual_memory_uncommit_tracker() {
return Tracker(Tracker::Uncommit);
assert(tracking_level() >= NMT_summary, "Check by caller");
return Tracker(Tracker::uncommit);
}
static inline Tracker get_virtual_memory_release_tracker() {
return Tracker(Tracker::Release);
assert(tracking_level() >= NMT_summary, "Check by caller");
return Tracker(Tracker::release);
}
// create memory baseline of current memory snapshot
static bool baseline();
// is there a memory baseline
static bool has_baseline() {
return _baseline.baselined();
}
// print memory usage from current snapshot
static bool print_memory_usage(BaselineOutputer& out, size_t unit,
bool summary_only = true);
// compare memory usage between current snapshot and baseline
static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
bool summary_only = true);
// the version for whitebox testing support, it ensures that all memory
// activities before this method call, are reflected in the snapshot
// database.
static bool wbtest_wait_for_data_merge();
// sync is called within global safepoint to synchronize nmt data
static void sync();
// called when a thread is about to exit
static void thread_exiting(JavaThread* thread);
// retrieve global snapshot
static MemSnapshot* get_snapshot() {
if (shutdown_in_progress()) {
return NULL;
}
return _snapshot;
}
// print tracker stats
NOT_PRODUCT(static void print_tracker_stats(outputStream* st);)
NOT_PRODUCT(static void walk_stack(int toSkip, char* buf, int len);)
private:
// start native memory tracking worker thread
static bool start_worker(MemSnapshot* snapshot);
// called by worker thread to complete shutdown process
static void final_shutdown();
protected:
// Retrieve the per-thread recorder of the specified thread.
// If the recorder is full, it is enqueued to the overflow
// queue, and a new recorder is acquired from the recorder pool or a
// new instance is created.
// When thread == NULL, it refers to the global recorder
static MemRecorder* get_thread_recorder(JavaThread* thread);
// per-thread recorder pool
static void release_thread_recorder(MemRecorder* rec);
static void delete_all_pooled_recorders();
// pending recorder queue. Recorders are queued to pending queue
// when they are overflowed or collected at nmt sync point.
static void enqueue_pending_recorder(MemRecorder* rec);
static MemRecorder* get_pending_recorders();
static void delete_all_pending_recorders();
// write a memory tracking record in recorder
static void write_tracking_record(address addr, MEMFLAGS type,
size_t size, jint seq, address pc, JavaThread* thread);
static bool is_single_threaded_bootstrap() {
return _state == NMT_bootstrapping_single_thread;
}
static void check_NMT_load(Thread* thr) {
assert(thr != NULL, "Sanity check");
if (_slowdown_calling_thread && thr != _worker_thread) {
#ifdef _WINDOWS
// On Windows, os::NakedYield() does not work as well
// as short sleep.
os::naked_short_sleep(1);
#else
os::naked_yield();
#endif
static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) {
if (tracking_level() < NMT_summary) return;
if (addr != NULL) {
ThreadCritical tc;
if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::set_reserved_region_type((address)addr, flag);
}
}
static void inc_pending_op_count() {
Atomic::inc(&_pending_op_count);
static inline void record_thread_stack(void* addr, size_t size) {
if (tracking_level() < NMT_summary) return;
if (addr != NULL) {
// uses the thread stack malloc slot for bookkeeping the number of threads
MallocMemorySummary::record_malloc(0, mtThreadStack);
record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack);
}
}
static void dec_pending_op_count() {
Atomic::dec(&_pending_op_count);
assert(_pending_op_count >= 0, "Sanity check");
static inline void release_thread_stack(void* addr, size_t size) {
if (tracking_level() < NMT_summary) return;
if (addr != NULL) {
// uses the thread stack malloc slot for bookkeeping the number of threads
MallocMemorySummary::record_free(0, mtThreadStack);
ThreadCritical tc;
if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::remove_released_region((address)addr, size);
}
}
// Query lock is used to synchronize the access to tracking data.
// So far, it is only used by JCmd query, but it may be used by
// other tools.
static inline Mutex* query_lock() { return _query_lock; }
// Make a final report and shut down.
// This function generates a summary report without creating snapshots,
// to avoid additional memory allocation. It uses the native memory summary
// counters and makes adjustments to them; once the adjustments are made,
// the counters are no longer accurate. As a result, this function
// should only be used for final reporting before shutting down.
static void final_report(outputStream*);
// Stored baseline
static inline MemBaseline& get_baseline() {
return _baseline;
}
static NMT_TrackingLevel cmdline_tracking_level() {
return _cmdline_tracking_level;
}
static void tuning_statistics(outputStream* out);
private:
// retrieve a pooled memory record or create new one if there is not
// one available
static MemRecorder* get_new_or_pooled_instance();
static void create_memory_record(address addr, MEMFLAGS type,
size_t size, address pc, Thread* thread);
static void create_record_in_recorder(address addr, MEMFLAGS type,
size_t size, address pc, JavaThread* thread);
static void set_current_processing_generation(unsigned long generation) {
_worker_thread_idle = false;
_processing_generation = generation;
}
static void report_worker_idle() {
_worker_thread_idle = true;
}
static NMT_TrackingLevel init_tracking_level();
private:
// global memory snapshot
static MemSnapshot* _snapshot;
// a memory baseline of snapshot
// Tracking level
static volatile NMT_TrackingLevel _tracking_level;
// If NMT option value passed by launcher through environment
// variable is valid
static bool _is_nmt_env_valid;
// command line tracking level
static NMT_TrackingLevel _cmdline_tracking_level;
// Stored baseline
static MemBaseline _baseline;
// query lock
// Query lock
static Mutex* _query_lock;
// A thread can start to allocate memory before it is attached
// to a VM 'Thread'; those memory activities are recorded here.
// ThreadCritical is required to guard this global recorder.
static MemRecorder* volatile _global_recorder;
// main thread id
debug_only(static intx _main_thread_tid;)
// pending recorders to be merged
static MemRecorder* volatile _merge_pending_queue;
NOT_PRODUCT(static volatile jint _pending_recorder_count;)
// pooled memory recorders
static MemRecorder* volatile _pooled_recorders;
// Memory recorder pool management uses the following
// counter to determine whether a released memory recorder
// should be pooled
// latest thread count
static int _thread_count;
// pooled recorder count
static volatile jint _pooled_recorder_count;
// worker thread to merge pending recorders into snapshot
static MemTrackWorker* _worker_thread;
// how many safepoints we skipped without entering sync point
static int _sync_point_skip_count;
// if the tracker is properly intialized
static bool _is_tracker_ready;
// tracking level (off, summary and detail)
static enum NMTLevel _tracking_level;
// current nmt state
static volatile enum NMTStates _state;
// the reason for shutting down nmt
static enum ShutdownReason _reason;
// the generation that NMT is processing
static volatile unsigned long _processing_generation;
// Although NMT is still processing the current generation,
// there are no more recorders to process, so set the idle state
static volatile bool _worker_thread_idle;
// if NMT should slow down calling thread to allow
// worker thread to catch up
static volatile bool _slowdown_calling_thread;
// Pending memory op count.
// Certain memory ops need to pre-reserve a sequence number
// before the memory operation can happen, to avoid race conditions.
// See MemTracker::Tracker for details
static volatile jint _pending_op_count;
};
#endif // !INCLUDE_NMT
#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP
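The Tracker comments above describe the intended call pattern: obtain the tracker (which takes the ThreadCritical lock) before the region is handed back to the OS, then record the release only if the OS call succeeded. A hedged sketch of such a caller, assuming the surrounding HotSpot environment; os_unmap() is a hypothetical placeholder for the platform-specific release primitive, not a HotSpot API:

// Hypothetical platform primitive used only for illustration.
extern bool os_unmap(address addr, size_t size);

static bool release_mapping(address addr, size_t size) {
  if (MemTracker::tracking_level() > NMT_minimal) {
    // Hold the release tracker (and its ThreadCritical) across the OS call,
    // so no other thread can reserve and record the same range in between.
    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    if (!os_unmap(addr, size)) {
      return false;            // nothing released, so nothing is recorded
    }
    tkr.record(addr, size);    // update NMT bookkeeping under the lock
    return true;
  }
  return os_unmap(addr, size); // tracking off or minimal: no bookkeeping
}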

@ -0,0 +1,73 @@
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "services/nmtCommon.hpp"
const char* NMTUtil::_memory_type_names[] = {
"Java Heap",
"Class",
"Thread",
"Thread Stack",
"Code",
"GC",
"Compiler",
"Internal",
"Other",
"Symbol",
"Native Memory Tracking",
"Shared class space",
"Arena Chunk",
"Test",
"Tracing",
"Unknown"
};
const char* NMTUtil::scale_name(size_t scale) {
switch(scale) {
case K: return "KB";
case M: return "MB";
case G: return "GB";
}
ShouldNotReachHere();
return NULL;
}
size_t NMTUtil::scale_from_name(const char* scale) {
assert(scale != NULL, "Null pointer check");
if (strncmp(scale, "KB", 2) == 0 ||
strncmp(scale, "kb", 2) == 0) {
return K;
} else if (strncmp(scale, "MB", 2) == 0 ||
strncmp(scale, "mb", 2) == 0) {
return M;
} else if (strncmp(scale, "GB", 2) == 0 ||
strncmp(scale, "gb", 2) == 0) {
return G;
} else {
return 0; // Invalid value
}
return K;
}

@ -0,0 +1,87 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_SERVICES_NMT_COMMON_HPP
#define SHARE_VM_SERVICES_NMT_COMMON_HPP
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
#define CALC_OBJ_SIZE_IN_TYPE(obj, type) (align_size_up_(sizeof(obj), sizeof(type))/sizeof(type))
// Data type for memory counters
#ifdef _LP64
typedef jlong MemoryCounterType;
#else
typedef jint MemoryCounterType;
#endif
// Native memory tracking level
enum NMT_TrackingLevel {
NMT_unknown = 0xFF,
NMT_off = 0x00,
NMT_minimal = 0x01,
NMT_summary = 0x02,
NMT_detail = 0x03
};
// Number of stack frames to capture. This is a
// build time decision.
const int NMT_TrackingStackDepth = 4;
class NativeCallStack;
extern NativeCallStack emptyStack;
// A few common utilities for native memory tracking
class NMTUtil : AllStatic {
public:
// Map memory type to index
static inline int flag_to_index(MEMFLAGS flag) {
return (flag & 0xff);
}
// Map memory type to human readable name
static const char* flag_to_name(MEMFLAGS flag) {
return _memory_type_names[flag_to_index(flag)];
}
// Map an index to memory type
static MEMFLAGS index_to_flag(int index) {
return (MEMFLAGS)index;
}
// Memory size scale
static const char* scale_name(size_t scale);
static size_t scale_from_name(const char* scale);
// Translate memory size in specified scale
static size_t amount_in_scale(size_t amount, size_t scale) {
return (amount + scale / 2) / scale;
}
private:
static const char* _memory_type_names[mt_number_of_types];
};
#endif
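NMTUtil::amount_in_scale() above rounds to the nearest unit rather than truncating. A minimal standalone check of that behaviour; the local helper simply restates the formula and is not part of the sources:

#include <cassert>
#include <cstddef>

// Restatement of the amount_in_scale() formula, for illustration only.
static size_t amount_in_scale(size_t amount, size_t scale) {
  return (amount + scale / 2) / scale;
}

int main() {
  const size_t K = 1024;
  assert(amount_in_scale(1536, K) == 2);  // 1.5 KB is reported as 2 KB
  assert(amount_in_scale(100,  K) == 0);  // below half a unit reports as 0
  assert(amount_in_scale(512,  K) == 1);  // exactly half a unit rounds up
  return 0;
}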

@ -22,6 +22,8 @@
*
*/
#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/nmtDCmd.hpp"
#include "services/memReporter.hpp"
#include "services/memTracker.hpp"
@ -49,13 +51,8 @@ NMTDCmd::NMTDCmd(outputStream* output,
_shutdown("shutdown", "request runtime to shutdown itself and free the " \
"memory used by runtime.",
"BOOLEAN", false, "false"),
_auto_shutdown("autoShutdown", "automatically shutdown itself under " \
"stress situation",
"BOOLEAN", true, "true"),
#ifndef PRODUCT
_debug("debug", "print tracker statistics. Debug only, not thread safe", \
_statistics("statistics", "print tracker statistics for tuning purpose.", \
"BOOLEAN", false, "false"),
#endif
_scale("scale", "Memory usage in which scale, KB, MB or GB",
"STRING", false, "KB") {
_dcmdparser.add_dcmd_option(&_summary);
@ -64,25 +61,30 @@ NMTDCmd::NMTDCmd(outputStream* output,
_dcmdparser.add_dcmd_option(&_summary_diff);
_dcmdparser.add_dcmd_option(&_detail_diff);
_dcmdparser.add_dcmd_option(&_shutdown);
_dcmdparser.add_dcmd_option(&_auto_shutdown);
#ifndef PRODUCT
_dcmdparser.add_dcmd_option(&_debug);
#endif
_dcmdparser.add_dcmd_option(&_statistics);
_dcmdparser.add_dcmd_option(&_scale);
}
size_t NMTDCmd::get_scale(const char* scale) const {
if (scale == NULL) return 0;
return NMTUtil::scale_from_name(scale);
}
void NMTDCmd::execute(DCmdSource source, TRAPS) {
// Check NMT state
// native memory tracking has to be on
if (MemTracker::tracking_level() == NMT_off) {
output()->print_cr("Native memory tracking is not enabled");
return;
} else if (MemTracker::tracking_level() == NMT_minimal) {
output()->print_cr("Native memory tracking has been shutdown");
return;
}
const char* scale_value = _scale.value();
size_t scale_unit;
if (strcmp(scale_value, "KB") == 0 || strcmp(scale_value, "kb") == 0) {
scale_unit = K;
} else if (strcmp(scale_value, "MB") == 0 ||
strcmp(scale_value, "mb") == 0) {
scale_unit = M;
} else if (strcmp(scale_value, "GB") == 0 ||
strcmp(scale_value, "gb") == 0) {
scale_unit = G;
} else {
size_t scale_unit = get_scale(scale_value);
if (scale_unit == 0) {
output()->print_cr("Incorrect scale value: %s", scale_value);
return;
}
@ -94,19 +96,11 @@ void NMTDCmd::execute(DCmdSource source, TRAPS) {
if (_summary_diff.is_set() && _summary_diff.value()) { ++nopt; }
if (_detail_diff.is_set() && _detail_diff.value()) { ++nopt; }
if (_shutdown.is_set() && _shutdown.value()) { ++nopt; }
if (_auto_shutdown.is_set()) { ++nopt; }
#ifndef PRODUCT
if (_debug.is_set() && _debug.value()) { ++nopt; }
#endif
if (_statistics.is_set() && _statistics.value()) { ++nopt; }
if (nopt > 1) {
output()->print_cr("At most one of the following option can be specified: " \
"summary, detail, baseline, summary.diff, detail.diff, shutdown"
#ifndef PRODUCT
", debug"
#endif
);
"summary, detail, baseline, summary.diff, detail.diff, shutdown");
return;
} else if (nopt == 0) {
if (_summary.is_set()) {
@ -117,53 +111,47 @@ void NMTDCmd::execute(DCmdSource source, TRAPS) {
}
}
#ifndef PRODUCT
if (_debug.value()) {
output()->print_cr("debug command is NOT thread-safe, may cause crash");
MemTracker::print_tracker_stats(output());
return;
}
#endif
// native memory tracking has to be on
if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
// if it is not on, what's the reason?
output()->print_cr("%s", MemTracker::reason());
return;
}
// Serialize NMT query
MutexLocker locker(MemTracker::query_lock());
if (_summary.value()) {
BaselineTTYOutputer outputer(output());
MemTracker::print_memory_usage(outputer, scale_unit, true);
report(true, scale_unit);
} else if (_detail.value()) {
BaselineTTYOutputer outputer(output());
MemTracker::print_memory_usage(outputer, scale_unit, false);
if (!check_detail_tracking_level(output())) {
return;
}
report(false, scale_unit);
} else if (_baseline.value()) {
if (MemTracker::baseline()) {
output()->print_cr("Successfully baselined.");
MemBaseline& baseline = MemTracker::get_baseline();
if (!baseline.baseline(MemTracker::tracking_level() != NMT_detail)) {
output()->print_cr("Baseline failed");
} else {
output()->print_cr("Baseline failed.");
output()->print_cr("Baseline succeeded");
}
} else if (_summary_diff.value()) {
if (MemTracker::has_baseline()) {
BaselineTTYOutputer outputer(output());
MemTracker::compare_memory_usage(outputer, scale_unit, true);
MemBaseline& baseline = MemTracker::get_baseline();
if (baseline.baseline_type() >= MemBaseline::Summary_baselined) {
report_diff(true, scale_unit);
} else {
output()->print_cr("No baseline to compare, run 'baseline' command first");
output()->print_cr("No baseline for comparison");
}
} else if (_detail_diff.value()) {
if (MemTracker::has_baseline()) {
BaselineTTYOutputer outputer(output());
MemTracker::compare_memory_usage(outputer, scale_unit, false);
if (!check_detail_tracking_level(output())) {
return;
}
MemBaseline& baseline = MemTracker::get_baseline();
if (baseline.baseline_type() == MemBaseline::Detail_baselined) {
report_diff(false, scale_unit);
} else {
output()->print_cr("No baseline to compare to, run 'baseline' command first");
output()->print_cr("No detail baseline for comparison");
}
} else if (_shutdown.value()) {
MemTracker::shutdown(MemTracker::NMT_shutdown_user);
output()->print_cr("Shutdown is in progress, it will take a few moments to " \
"completely shutdown");
} else if (_auto_shutdown.is_set()) {
MemTracker::set_autoShutdown(_auto_shutdown.value());
MemTracker::shutdown();
output()->print_cr("Native memory tracking has been turned off");
} else if (_statistics.value()) {
if (check_detail_tracking_level(output())) {
MemTracker::tuning_statistics(output());
}
} else {
ShouldNotReachHere();
output()->print_cr("Unknown command");
@ -181,3 +169,46 @@ int NMTDCmd::num_arguments() {
}
}
void NMTDCmd::report(bool summaryOnly, size_t scale_unit) {
MemBaseline baseline;
if (baseline.baseline(summaryOnly)) {
if (summaryOnly) {
MemSummaryReporter rpt(baseline, output(), scale_unit);
rpt.report();
} else {
MemDetailReporter rpt(baseline, output(), scale_unit);
rpt.report();
}
}
}
void NMTDCmd::report_diff(bool summaryOnly, size_t scale_unit) {
MemBaseline& early_baseline = MemTracker::get_baseline();
assert(early_baseline.baseline_type() != MemBaseline::Not_baselined,
"Not yet baselined");
assert(summaryOnly || early_baseline.baseline_type() == MemBaseline::Detail_baselined,
"Not a detail baseline");
MemBaseline baseline;
if (baseline.baseline(summaryOnly)) {
if (summaryOnly) {
MemSummaryDiffReporter rpt(early_baseline, baseline, output(), scale_unit);
rpt.report_diff();
} else {
MemDetailDiffReporter rpt(early_baseline, baseline, output(), scale_unit);
rpt.report_diff();
}
}
}
bool NMTDCmd::check_detail_tracking_level(outputStream* out) {
if (MemTracker::tracking_level() == NMT_detail) {
return true;
} else if (MemTracker::cmdline_tracking_level() == NMT_detail) {
out->print_cr("Tracking level has been downgraded due to lack of resources");
return false;
} else {
out->print_cr("Detail tracking is not enabled");
return false;
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,8 +25,12 @@
#ifndef SHARE_VM_SERVICES_NMT_DCMD_HPP
#define SHARE_VM_SERVICES_NMT_DCMD_HPP
#if INCLUDE_NMT
#include "services/diagnosticArgument.hpp"
#include "services/diagnosticFramework.hpp"
#include "services/memBaseline.hpp"
#include "services/mallocTracker.hpp"
/**
* Native memory tracking DCmd implementation
@ -39,10 +43,7 @@ class NMTDCmd: public DCmdWithParser {
DCmdArgument<bool> _summary_diff;
DCmdArgument<bool> _detail_diff;
DCmdArgument<bool> _shutdown;
DCmdArgument<bool> _auto_shutdown;
#ifndef PRODUCT
DCmdArgument<bool> _debug;
#endif
DCmdArgument<bool> _statistics;
DCmdArgument<char*> _scale;
public:
@ -61,6 +62,17 @@ class NMTDCmd: public DCmdWithParser {
}
static int num_arguments();
virtual void execute(DCmdSource source, TRAPS);
private:
void report(bool summaryOnly, size_t scale);
void report_diff(bool summaryOnly, size_t scale);
size_t get_scale(const char* scale) const;
// check if NMT running at detail tracking level
bool check_detail_tracking_level(outputStream* out);
};
#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_NMT_DCMD_HPP

@ -0,0 +1,448 @@
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/threadCritical.hpp"
#include "services/virtualMemoryTracker.hpp"
size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
void VirtualMemorySummary::initialize() {
assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
// Use placement operator new to initialize static data area.
::new ((void*)_snapshot) VirtualMemorySnapshot();
}
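VirtualMemorySummary keeps its snapshot in a statically sized size_t array (via CALC_OBJ_SIZE_IN_TYPE) and constructs it with placement new, so the counters are available without any heap allocation during early initialization. A minimal sketch of the same idiom with a hypothetical Counters type standing in for VirtualMemorySnapshot (not part of the sources):

#include <new>        // placement new
#include <cstddef>

// Hypothetical stand-in for VirtualMemorySnapshot.
struct Counters {
  size_t reserved;
  size_t committed;
  Counters() : reserved(0), committed(0) { }
};

// Static storage measured in size_t units, large enough to hold Counters
// (a simplified equivalent of CALC_OBJ_SIZE_IN_TYPE).
static size_t _storage[(sizeof(Counters) + sizeof(size_t) - 1) / sizeof(size_t)];

int main() {
  // Construct the object in the pre-reserved static area; no malloc involved.
  Counters* counters = ::new ((void*)_storage) Counters();
  counters->reserved += 4096;
  return counters->committed == 0 ? 0 : 1;
}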
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base> VirtualMemoryTracker::_reserved_regions;
int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
return r1.compare(r2);
}
int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
return r1.compare(r2);
}
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
assert(addr != NULL, "Invalid address");
assert(size > 0, "Invalid size");
assert(contain_region(addr, size), "Not contain this region");
if (all_committed()) return true;
CommittedMemoryRegion committed_rgn(addr, size, stack);
LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.find_node(committed_rgn);
if (node != NULL) {
CommittedMemoryRegion* rgn = node->data();
if (rgn->same_region(addr, size)) {
return true;
}
if (rgn->adjacent_to(addr, size)) {
// check if the next region covers this committed region,
// the regions may not be merged due to different call stacks
LinkedListNode<CommittedMemoryRegion>* next =
node->next();
if (next != NULL && next->data()->contain_region(addr, size)) {
if (next->data()->same_region(addr, size)) {
next->data()->set_call_stack(stack);
}
return true;
}
if (rgn->call_stack()->equals(stack)) {
VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
// the two adjacent regions have the same call stack, merge them
rgn->expand_region(addr, size);
VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
return true;
}
VirtualMemorySummary::record_committed_memory(size, flag());
if (rgn->base() > addr) {
return _committed_regions.insert_before(committed_rgn, node) != NULL;
} else {
return _committed_regions.insert_after(committed_rgn, node) != NULL;
}
}
assert(rgn->contain_region(addr, size), "Must cover this region");
return true;
} else {
// New committed region
VirtualMemorySummary::record_committed_memory(size, flag());
return add_committed_region(committed_rgn);
}
}
void ReservedMemoryRegion::set_all_committed(bool b) {
if (all_committed() != b) {
_all_committed = b;
if (b) {
VirtualMemorySummary::record_committed_memory(size(), flag());
}
}
}
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
address addr, size_t size) {
assert(addr != NULL, "Invalid address");
assert(size > 0, "Invalid size");
CommittedMemoryRegion* rgn = node->data();
assert(rgn->contain_region(addr, size), "Has to be contained");
assert(!rgn->same_region(addr, size), "Can not be the same region");
if (rgn->base() == addr ||
rgn->end() == addr + size) {
rgn->exclude_region(addr, size);
return true;
} else {
// split this region
address top = rgn->end();
// use this region for lower part
size_t exclude_size = rgn->end() - addr;
rgn->exclude_region(addr, exclude_size);
// higher part
address high_base = addr + size;
size_t high_size = top - high_base;
CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
assert(high_node == NULL || node->next() == high_node, "Should be right after");
return (high_node != NULL);
}
return false;
}
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
// uncommit stack guard pages
if (flag() == mtThreadStack && !same_region(addr, sz)) {
return true;
}
assert(addr != NULL, "Invalid address");
assert(sz > 0, "Invalid size");
if (all_committed()) {
assert(_committed_regions.is_empty(), "Sanity check");
assert(contain_region(addr, sz), "Reserved region does not contain this region");
set_all_committed(false);
VirtualMemorySummary::record_uncommitted_memory(sz, flag());
if (same_region(addr, sz)) {
return true;
} else {
CommittedMemoryRegion rgn(base(), size(), *call_stack());
if (rgn.base() == addr || rgn.end() == (addr + sz)) {
rgn.exclude_region(addr, sz);
return add_committed_region(rgn);
} else {
// split this region
// top of the whole region
address top = rgn.end();
// use this region for lower part
size_t exclude_size = rgn.end() - addr;
rgn.exclude_region(addr, exclude_size);
if (add_committed_region(rgn)) {
// higher part
address high_base = addr + sz;
size_t high_size = top - high_base;
CommittedMemoryRegion high_rgn(high_base, high_size, emptyStack);
return add_committed_region(high_rgn);
} else {
return false;
}
}
}
} else {
// we have to walk the whole list to remove the committed regions in
// the specified range
LinkedListNode<CommittedMemoryRegion>* head =
_committed_regions.head();
LinkedListNode<CommittedMemoryRegion>* prev = NULL;
VirtualMemoryRegion uncommitted_rgn(addr, sz);
while (head != NULL && !uncommitted_rgn.is_empty()) {
CommittedMemoryRegion* crgn = head->data();
// this committed region overlaps the region to uncommit
if (crgn->overlap_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
if (crgn->same_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
// found the matching region; removing the node will do
VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
_committed_regions.remove_after(prev);
return true;
} else if (crgn->contain_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
// this committed region contains whole uncommitted region
VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
return remove_uncommitted_region(head, uncommitted_rgn.base(), uncommitted_rgn.size());
} else if (uncommitted_rgn.contain_region(crgn->base(), crgn->size())) {
// this committed region has been uncommitted
size_t exclude_size = crgn->end() - uncommitted_rgn.base();
uncommitted_rgn.exclude_region(uncommitted_rgn.base(), exclude_size);
VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
LinkedListNode<CommittedMemoryRegion>* tmp = head;
head = head->next();
_committed_regions.remove_after(prev);
continue;
} else if (crgn->contain_address(uncommitted_rgn.base())) {
size_t toUncommitted = crgn->end() - uncommitted_rgn.base();
crgn->exclude_region(uncommitted_rgn.base(), toUncommitted);
uncommitted_rgn.exclude_region(uncommitted_rgn.base(), toUncommitted);
VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
} else if (uncommitted_rgn.contain_address(crgn->base())) {
size_t toUncommitted = uncommitted_rgn.end() - crgn->base();
crgn->exclude_region(crgn->base(), toUncommitted);
uncommitted_rgn.exclude_region(uncommitted_rgn.end() - toUncommitted,
toUncommitted);
VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
}
}
prev = head;
head = head->next();
}
}
return true;
}
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
assert(addr != NULL, "Invalid address");
// split committed regions
LinkedListNode<CommittedMemoryRegion>* head =
_committed_regions.head();
LinkedListNode<CommittedMemoryRegion>* prev = NULL;
while (head != NULL) {
if (head->data()->base() >= addr) {
break;
}
prev = head;
head = head->next();
}
if (head != NULL) {
if (prev != NULL) {
prev->set_next(head->next());
} else {
_committed_regions.set_head(NULL);
}
}
rgn._committed_regions.set_head(head);
}
size_t ReservedMemoryRegion::committed_size() const {
if (all_committed()) {
return size();
} else {
size_t committed = 0;
LinkedListNode<CommittedMemoryRegion>* head =
_committed_regions.head();
while (head != NULL) {
committed += head->data()->size();
head = head->next();
}
return committed;
}
}
void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
assert((flag() == mtNone || flag() == f), "Overwrite memory type");
if (flag() != f) {
VirtualMemorySummary::move_reserved_memory(flag(), f, size());
VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
_flag = f;
}
}
bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
if (level >= NMT_summary) {
VirtualMemorySummary::initialize();
}
return true;
}
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
assert(base_addr != NULL, "Invalid address");
assert(size > 0, "Invalid size");
ReservedMemoryRegion rgn(base_addr, size, stack, flag);
ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
LinkedListNode<ReservedMemoryRegion>* node;
if (reserved_rgn == NULL) {
VirtualMemorySummary::record_reserved_memory(size, flag);
node = _reserved_regions.add(rgn);
if (node != NULL) {
node->data()->set_all_committed(all_committed);
return true;
} else {
return false;
}
} else {
if (reserved_rgn->same_region(base_addr, size)) {
reserved_rgn->set_call_stack(stack);
reserved_rgn->set_flag(flag);
return true;
} else if (reserved_rgn->adjacent_to(base_addr, size)) {
VirtualMemorySummary::record_reserved_memory(size, flag);
reserved_rgn->expand_region(base_addr, size);
reserved_rgn->set_call_stack(stack);
return true;
} else {
// Overlapped reservation.
// It can happen when the regions are thread stacks, as a JNI
// thread does not detach from the VM before it exits, which leads to
// leaking the JavaThread object
if (reserved_rgn->flag() == mtThreadStack) {
guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
// Overwrite with new region
// Release old region
VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());
// Add new region
VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);
*reserved_rgn = rgn;
return true;
} else {
ShouldNotReachHere();
return false;
}
}
}
}
void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
assert(addr != NULL, "Invalid address");
ReservedMemoryRegion rgn(addr, 1);
ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
if (reserved_rgn != NULL) {
assert(reserved_rgn->contain_address(addr), "Containment");
if (reserved_rgn->flag() != flag) {
assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
reserved_rgn->set_flag(flag);
}
}
}
bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
const NativeCallStack& stack) {
assert(addr != NULL, "Invalid address");
assert(size > 0, "Invalid size");
ReservedMemoryRegion rgn(addr, size);
ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
assert(reserved_rgn != NULL, "No reserved region");
assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
return reserved_rgn->add_committed_region(addr, size, stack);
}
bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
assert(addr != NULL, "Invalid address");
assert(size > 0, "Invalid size");
ReservedMemoryRegion rgn(addr, size);
ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
assert(reserved_rgn != NULL, "No reserved region");
assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
return reserved_rgn->remove_uncommitted_region(addr, size);
}
bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
assert(addr != NULL, "Invalid address");
assert(size > 0, "Invalid size");
ReservedMemoryRegion rgn(addr, size);
ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
assert(reserved_rgn != NULL, "No reserved region");
// uncommit regions within the released region
if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
return false;
}
VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());
if (reserved_rgn->same_region(addr, size)) {
return _reserved_regions.remove(rgn);
} else {
assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
if (reserved_rgn->base() == addr ||
reserved_rgn->end() == addr + size) {
reserved_rgn->exclude_region(addr, size);
return true;
} else {
address top = reserved_rgn->end();
address high_base = addr + size;
ReservedMemoryRegion high_rgn(high_base, top - high_base,
*reserved_rgn->call_stack(), reserved_rgn->flag());
// use original region for lower region
reserved_rgn->exclude_region(addr, top - addr);
LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions.add(high_rgn);
if (new_rgn == NULL) {
return false;
} else {
reserved_rgn->move_committed_regions(addr, *new_rgn->data());
return true;
}
}
}
}
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
ThreadCritical tc;
LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions.head();
while (head != NULL) {
const ReservedMemoryRegion* rgn = head->peek();
if (!walker->do_allocation_site(rgn)) {
return false;
}
head = head->next();
}
return true;
}
// Transition virtual memory tracking level.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
if (from == NMT_minimal) {
assert(to == NMT_summary || to == NMT_detail, "Just check");
VirtualMemorySummary::reset();
} else if (to == NMT_minimal) {
assert(from == NMT_summary || from == NMT_detail, "Just check");
// Clean up virtual memory tracking data structures.
ThreadCritical tc;
_reserved_regions.clear();
}
return true;
}
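For orientation, a minimal sketch of how the tracker above is driven over the lifetime of a mapping (illustrative only: the base address, sizes and the mtThreadStack tag are made-up values, and the real call sites go through the MemTracker entry points elsewhere in this change, which also check the tracking level and take the required lock):

  NativeCallStack stack(0, true);              // capture the current native call stack
  address base  = (address)0x100000000;        // hypothetical reservation base
  size_t  rsize = 1 * M;                       // reserved size
  size_t  csize = 4 * K;                       // committed size

  VirtualMemoryTracker::add_reserved_region(base, rsize, stack, mtThreadStack);
  VirtualMemoryTracker::add_committed_region(base, csize, stack);
  VirtualMemoryTracker::remove_uncommitted_region(base, csize);
  VirtualMemoryTracker::remove_released_region(base, rsize);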

@ -0,0 +1,437 @@
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
#define SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
#if INCLUDE_NMT
#include "memory/allocation.hpp"
#include "services/allocationSite.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"
/*
* Virtual memory counter
*/
class VirtualMemory VALUE_OBJ_CLASS_SPEC {
private:
size_t _reserved;
size_t _committed;
public:
VirtualMemory() : _reserved(0), _committed(0) { }
inline void reserve_memory(size_t sz) { _reserved += sz; }
inline void commit_memory (size_t sz) {
_committed += sz;
assert(_committed <= _reserved, "Sanity check");
}
inline void release_memory (size_t sz) {
assert(_reserved >= sz, "Negative amount");
_reserved -= sz;
}
inline void uncommit_memory(size_t sz) {
assert(_committed >= sz, "Negative amount");
_committed -= sz;
}
void reset() {
_reserved = 0;
_committed = 0;
}
inline size_t reserved() const { return _reserved; }
inline size_t committed() const { return _committed; }
};
// Virtual memory allocation site; keeps track of where the virtual memory is reserved.
class VirtualMemoryAllocationSite : public AllocationSite<VirtualMemory> {
public:
VirtualMemoryAllocationSite(const NativeCallStack& stack) :
AllocationSite<VirtualMemory>(stack) { }
inline void reserve_memory(size_t sz) { data()->reserve_memory(sz); }
inline void commit_memory (size_t sz) { data()->commit_memory(sz); }
inline void uncommit_memory(size_t sz) { data()->uncommit_memory(sz); }
inline void release_memory(size_t sz) { data()->release_memory(sz); }
inline size_t reserved() const { return peek()->reserved(); }
inline size_t committed() const { return peek()->committed(); }
};
class VirtualMemorySummary;
// This class represents a snapshot of virtual memory at a given time.
// The latest snapshot is saved in a static area.
class VirtualMemorySnapshot : public ResourceObj {
friend class VirtualMemorySummary;
private:
VirtualMemory _virtual_memory[mt_number_of_types];
public:
inline VirtualMemory* by_type(MEMFLAGS flag) {
int index = NMTUtil::flag_to_index(flag);
return &_virtual_memory[index];
}
inline VirtualMemory* by_index(int index) {
assert(index >= 0, "Index out of bound");
assert(index < mt_number_of_types, "Index out of bound");
return &_virtual_memory[index];
}
inline size_t total_reserved() const {
size_t amount = 0;
for (int index = 0; index < mt_number_of_types; index ++) {
amount += _virtual_memory[index].reserved();
}
return amount;
}
inline size_t total_committed() const {
size_t amount = 0;
for (int index = 0; index < mt_number_of_types; index ++) {
amount += _virtual_memory[index].committed();
}
return amount;
}
inline void reset() {
for (int index = 0; index < mt_number_of_types; index ++) {
_virtual_memory[index].reset();
}
}
void copy_to(VirtualMemorySnapshot* s) {
for (int index = 0; index < mt_number_of_types; index ++) {
s->_virtual_memory[index] = _virtual_memory[index];
}
}
};
class VirtualMemorySummary : AllStatic {
public:
static void initialize();
static inline void record_reserved_memory(size_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->reserve_memory(size);
}
static inline void record_committed_memory(size_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->commit_memory(size);
}
static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->uncommit_memory(size);
}
static inline void record_released_memory(size_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->release_memory(size);
}
// Move virtual memory from one memory type to another.
// Virtual memory can be reserved before it is associated with a memory type, in which
// case it is tagged as 'unknown'. Once the memory is tagged, it is moved from the
// 'unknown' type to the specified memory type (see the sketch after this class).
static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
as_snapshot()->by_type(from)->release_memory(size);
as_snapshot()->by_type(to)->reserve_memory(size);
}
static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
as_snapshot()->by_type(from)->uncommit_memory(size);
as_snapshot()->by_type(to)->commit_memory(size);
}
static inline void snapshot(VirtualMemorySnapshot* s) {
as_snapshot()->copy_to(s);
}
static inline void reset() {
as_snapshot()->reset();
}
static VirtualMemorySnapshot* as_snapshot() {
return (VirtualMemorySnapshot*)_snapshot;
}
private:
static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
};
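// A minimal sketch of the counter movement described above (illustrative only;
// the mtNone -> mtThreadStack transition and 'size' are hypothetical):
//
//   VirtualMemorySummary::record_reserved_memory(size, mtNone);   // reserved, type still unknown
//   VirtualMemorySummary::record_committed_memory(size, mtNone);
//   // later, when the region is tagged as a thread stack:
//   VirtualMemorySummary::move_reserved_memory(mtNone, mtThreadStack, size);
//   VirtualMemorySummary::move_committed_memory(mtNone, mtThreadStack, size);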
/*
* A virtual memory region
*/
class VirtualMemoryRegion VALUE_OBJ_CLASS_SPEC {
private:
address _base_address;
size_t _size;
public:
VirtualMemoryRegion(address addr, size_t size) :
_base_address(addr), _size(size) {
assert(addr != NULL, "Invalid address");
assert(size > 0, "Invalid size");
}
inline address base() const { return _base_address; }
inline address end() const { return base() + size(); }
inline size_t size() const { return _size; }
inline bool is_empty() const { return size() == 0; }
inline bool contain_address(address addr) const {
return (addr >= base() && addr < end());
}
inline bool contain_region(address addr, size_t size) const {
return contain_address(addr) && contain_address(addr + size - 1);
}
inline bool same_region(address addr, size_t sz) const {
return (addr == base() && sz == size());
}
inline bool overlap_region(address addr, size_t sz) const {
VirtualMemoryRegion rgn(addr, sz);
return contain_address(addr) ||
contain_address(addr + sz - 1) ||
rgn.contain_address(base()) ||
rgn.contain_address(end() - 1);
}
inline bool adjacent_to(address addr, size_t sz) const {
return (addr == end() || (addr + sz) == base());
}
void exclude_region(address addr, size_t sz) {
assert(contain_region(addr, sz), "Not containment");
assert(addr == base() || addr + sz == end(), "Can not exclude from middle");
size_t new_size = size() - sz;
if (addr == base()) {
set_base(addr + sz);
}
set_size(new_size);
}
void expand_region(address addr, size_t sz) {
assert(adjacent_to(addr, sz), "Not adjacent regions");
if (base() == addr + sz) {
set_base(addr);
}
set_size(size() + sz);
}
protected:
void set_base(address base) {
assert(base != NULL, "Sanity check");
_base_address = base;
}
void set_size(size_t size) {
assert(size > 0, "Sanity check");
_size = size;
}
};
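// A small worked example of the region arithmetic above (addresses are hypothetical):
// for a region covering [0x1000, 0x3000),
//   exclude_region((address)0x1000, 0x800)  shrinks it to [0x1800, 0x3000)  (exclusion only from either end);
//   expand_region ((address)0x3000, 0x1000) then grows it to [0x1800, 0x4000) (expansion only by an adjacent range).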
class CommittedMemoryRegion : public VirtualMemoryRegion {
private:
NativeCallStack _stack;
public:
CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
VirtualMemoryRegion(addr, size), _stack(stack) { }
inline int compare(const CommittedMemoryRegion& rgn) const {
if (overlap_region(rgn.base(), rgn.size()) ||
adjacent_to (rgn.base(), rgn.size())) {
return 0;
} else {
if (base() == rgn.base()) {
return 0;
} else if (base() > rgn.base()) {
return 1;
} else {
return -1;
}
}
}
inline bool equals(const CommittedMemoryRegion& rgn) const {
return compare(rgn) == 0;
}
inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
inline const NativeCallStack* call_stack() const { return &_stack; }
};
typedef LinkedListIterator<CommittedMemoryRegion> CommittedRegionIterator;
int compare_committed_region(const CommittedMemoryRegion&, const CommittedMemoryRegion&);
class ReservedMemoryRegion : public VirtualMemoryRegion {
private:
SortedLinkedList<CommittedMemoryRegion, compare_committed_region>
_committed_regions;
NativeCallStack _stack;
MEMFLAGS _flag;
bool _all_committed;
public:
ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
MEMFLAGS flag = mtNone) :
VirtualMemoryRegion(base, size), _stack(stack), _flag(flag),
_all_committed(false) { }
ReservedMemoryRegion(address base, size_t size) :
VirtualMemoryRegion(base, size), _stack(emptyStack), _flag(mtNone),
_all_committed(false) { }
// Copy constructor
ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
VirtualMemoryRegion(rr.base(), rr.size()) {
*this = rr;
}
inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
inline const NativeCallStack* call_stack() const { return &_stack; }
void set_flag(MEMFLAGS flag);
inline MEMFLAGS flag() const { return _flag; }
inline int compare(const ReservedMemoryRegion& rgn) const {
if (overlap_region(rgn.base(), rgn.size())) {
return 0;
} else {
if (base() == rgn.base()) {
return 0;
} else if (base() > rgn.base()) {
return 1;
} else {
return -1;
}
}
}
inline bool equals(const ReservedMemoryRegion& rgn) const {
return compare(rgn) == 0;
}
bool add_committed_region(address addr, size_t size, const NativeCallStack& stack);
bool remove_uncommitted_region(address addr, size_t size);
size_t committed_size() const;
// Move committed regions that are higher than the specified address to
// the new region.
void move_committed_regions(address addr, ReservedMemoryRegion& rgn);
inline bool all_committed() const { return _all_committed; }
void set_all_committed(bool b);
CommittedRegionIterator iterate_committed_regions() const {
return CommittedRegionIterator(_committed_regions.head());
}
ReservedMemoryRegion& operator= (const ReservedMemoryRegion& other) {
set_base(other.base());
set_size(other.size());
_stack = *other.call_stack();
_flag = other.flag();
_all_committed = other.all_committed();
if (other.all_committed()) {
set_all_committed(true);
} else {
CommittedRegionIterator itr = other.iterate_committed_regions();
const CommittedMemoryRegion* rgn = itr.next();
while (rgn != NULL) {
_committed_regions.add(*rgn);
rgn = itr.next();
}
}
return *this;
}
private:
// The committed region contains the uncommitted region; subtract the uncommitted
// region from this committed region.
bool remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
address addr, size_t sz);
bool add_committed_region(const CommittedMemoryRegion& rgn) {
assert(rgn.base() != NULL, "Invalid base address");
assert(size() > 0, "Invalid size");
return _committed_regions.add(rgn) != NULL;
}
};
int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2);
class VirtualMemoryWalker : public StackObj {
public:
virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
};
// Main class called from MemTracker to track virtual memory allocations, commits and releases.
class VirtualMemoryTracker : AllStatic {
public:
static bool initialize(NMT_TrackingLevel level);
static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack,
MEMFLAGS flag = mtNone, bool all_committed = false);
static bool add_committed_region (address base_addr, size_t size, const NativeCallStack& stack);
static bool remove_uncommitted_region (address base_addr, size_t size);
static bool remove_released_region (address base_addr, size_t size);
static void set_reserved_region_type (address addr, MEMFLAGS flag);
// Walk the virtual memory data structure for creating a baseline, etc.
// (see the walker sketch after this class)
static bool walk_virtual_memory(VirtualMemoryWalker* walker);
static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
private:
static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base> _reserved_regions;
};
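// A minimal walker sketch (illustrative only; 'CommittedSizeWalker' is a hypothetical
// name, and the real walkers live in the baselining/reporting code of this change):
//
//   class CommittedSizeWalker : public VirtualMemoryWalker {
//    private:
//     size_t _total;
//    public:
//     CommittedSizeWalker() : _total(0) { }
//     bool do_allocation_site(const ReservedMemoryRegion* rgn) {
//       _total += rgn->committed_size();
//       return true;                    // keep walking
//     }
//     size_t total() const { return _total; }
//   };
//
//   CommittedSizeWalker walker;
//   VirtualMemoryTracker::walk_virtual_memory(&walker);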
#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP

@ -0,0 +1,114 @@
/*
* Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
/////////////// Unit tests ///////////////
#ifndef PRODUCT
#include "runtime/os.hpp"
#include "utilities/linkedlist.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
class Integer : public StackObj {
private:
int _value;
public:
Integer(int i) : _value(i) { }
int value() const { return _value; }
bool equals(const Integer& i) const {
return _value == i.value();
}
};
int compare_Integer(const Integer& i1, const Integer& i2) {
return i1.value() - i2.value();
}
void check_list_values(const int* expected, const LinkedList<Integer>* list) {
LinkedListNode<Integer>* head = list->head();
int index = 0;
while (head != NULL) {
assert(head->peek()->value() == expected[index], "Unexpected value");
head = head->next();
index ++;
}
}
void Test_linked_list() {
LinkedListImpl<Integer, ResourceObj::C_HEAP, mtTest> ll;
// Test regular linked list
assert(ll.is_empty(), "Start with empty list");
Integer one(1), two(2), three(3), four(4), five(5), six(6);
ll.add(six);
assert(!ll.is_empty(), "Should not be empty");
Integer* i = ll.find(six);
assert(i != NULL, "Should find it");
i = ll.find(three);
assert(i == NULL, "Not in the list");
LinkedListNode<Integer>* node = ll.find_node(six);
assert(node != NULL, "6 is in the list");
ll.insert_after(three, node);
ll.insert_before(one, node);
int expected[3] = {1, 6, 3};
check_list_values(expected, &ll);
ll.add(two);
ll.add(four);
ll.add(five);
// Test sorted linked list
SortedLinkedList<Integer, compare_Integer, ResourceObj::C_HEAP, mtTest> sl;
assert(sl.is_empty(), "Start with empty list");
size_t ll_size = ll.size();
sl.move(&ll);
size_t sl_size = sl.size();
assert(ll_size == sl_size, "Should be the same size");
assert(ll.is_empty(), "No more entires");
// sorted result
int sorted_result[] = {1, 2, 3, 4, 5, 6};
check_list_values(sorted_result, &sl);
node = sl.find_node(four);
assert(node != NULL, "4 is in the list");
sl.remove_before(node);
sl.remove_after(node);
int remains[] = {1, 2, 4, 6};
check_list_values(remains, &sl);
}
#endif // PRODUCT

@ -0,0 +1,416 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_UTILITIES_LINKED_LIST_HPP
#define SHARE_VM_UTILITIES_LINKED_LIST_HPP
#include "memory/allocation.hpp"
/*
* The implementation of a generic linked list, which uses various
* backing storage types, such as C heap, arena and resource area, etc.
*/
// An entry in a linked list. It should use the same backing storage
// as the linked list that contains this entry.
template <class E> class LinkedListNode : public ResourceObj {
private:
E _data; // embedded content
LinkedListNode<E>* _next; // next entry
protected:
LinkedListNode() : _next(NULL) { }
public:
LinkedListNode(const E& e): _data(e), _next(NULL) { }
inline void set_next(LinkedListNode<E>* node) { _next = node; }
inline LinkedListNode<E> * next() const { return _next; }
E* data() { return &_data; }
const E* peek() const { return &_data; }
};
// A linked list interface. It does not specify
// the storage type it uses, so all methods involving
// memory allocation or deallocation are pure virtual.
template <class E> class LinkedList : public ResourceObj {
protected:
LinkedListNode<E>* _head;
public:
LinkedList() : _head(NULL) { }
inline void set_head(LinkedListNode<E>* h) { _head = h; }
inline LinkedListNode<E>* head() const { return _head; }
inline bool is_empty() const { return head() == NULL; }
inline size_t size() const {
LinkedListNode<E>* p;
size_t count = 0;
for (p = head(); p != NULL; count++, p = p->next());
return count;
}
// Move all entries from specified linked list to this one
virtual void move(LinkedList<E>* list) = 0;
// Add an entry to this linked list
virtual LinkedListNode<E>* add(const E& e) = 0;
// Add a pre-allocated node to this linked list
virtual void add(LinkedListNode<E>* node) = 0;
// Add a linked list to this linked list
virtual bool add(const LinkedList<E>* list) = 0;
// Search entry in the linked list
virtual LinkedListNode<E>* find_node(const E& e) = 0;
virtual E* find(const E& e) = 0;
// Insert entry to the linked list
virtual LinkedListNode<E>* insert_before(const E& e, LinkedListNode<E>* ref) = 0;
virtual LinkedListNode<E>* insert_after (const E& e, LinkedListNode<E>* ref) = 0;
// Remove entry from the linked list
virtual bool remove(const E& e) = 0;
virtual bool remove(LinkedListNode<E>* node) = 0;
virtual bool remove_before(LinkedListNode<E>* ref) = 0;
virtual bool remove_after(LinkedListNode<E>* ref) = 0;
LinkedListNode<E>* unlink_head() {
LinkedListNode<E>* h = this->head();
if (h != NULL) {
this->set_head(h->next());
}
return h;
}
DEBUG_ONLY(virtual ResourceObj::allocation_type storage_type() = 0;)
};
// A linked list implementation.
// The linked list can be allocated in various types of memory: C heap, arena, resource area, etc.
// (a storage-selection sketch follows this class).
template <class E, ResourceObj::allocation_type T = ResourceObj::C_HEAP,
MEMFLAGS F = mtNMT, AllocFailType alloc_failmode = AllocFailStrategy::RETURN_NULL>
class LinkedListImpl : public LinkedList<E> {
protected:
Arena* _arena;
public:
LinkedListImpl() : _arena(NULL) { }
LinkedListImpl(Arena* a) : _arena(a) { }
virtual ~LinkedListImpl() {
clear();
}
virtual void clear() {
LinkedListNode<E>* p = this->head();
this->set_head(NULL);
while (p != NULL) {
LinkedListNode<E>* to_delete = p;
p = p->next();
delete_node(to_delete);
}
}
// Add an entry to the linked list
virtual LinkedListNode<E>* add(const E& e) {
LinkedListNode<E>* node = this->new_node(e);
if (node != NULL) {
this->add(node);
}
return node;
}
virtual void add(LinkedListNode<E>* node) {
assert(node != NULL, "NULL pointer");
node->set_next(this->head());
this->set_head(node);
}
// Move a linked list to this linked list; both lists have to be allocated with the same
// storage type.
virtual void move(LinkedList<E>* list) {
assert(list->storage_type() == this->storage_type(), "Different storage type");
LinkedListNode<E>* node = this->head();
while (node != NULL && node->next() != NULL) {
node = node->next();
}
if (node == NULL) {
this->set_head(list->head());
} else {
node->set_next(list->head());
}
// All entries are moved
list->set_head(NULL);
}
virtual bool add(const LinkedList<E>* list) {
LinkedListNode<E>* node = list->head();
while (node != NULL) {
if (this->add(*node->peek()) == NULL) {
return false;
}
node = node->next();
}
return true;
}
virtual LinkedListNode<E>* find_node(const E& e) {
LinkedListNode<E>* p = this->head();
while (p != NULL && !p->peek()->equals(e)) {
p = p->next();
}
return p;
}
E* find(const E& e) {
LinkedListNode<E>* node = find_node(e);
return (node == NULL) ? NULL : node->data();
}
// Add an entry in front of the reference entry
LinkedListNode<E>* insert_before(const E& e, LinkedListNode<E>* ref_node) {
LinkedListNode<E>* node = this->new_node(e);
if (node == NULL) return NULL;
if (ref_node == this->head()) {
node->set_next(ref_node);
this->set_head(node);
} else {
LinkedListNode<E>* p = this->head();
while (p != NULL && p->next() != ref_node) {
p = p->next();
}
assert(p != NULL, "ref_node not in the list");
node->set_next(ref_node);
p->set_next(node);
}
return node;
}
// Add an entry after the reference entry
LinkedListNode<E>* insert_after(const E& e, LinkedListNode<E>* ref_node) {
LinkedListNode<E>* node = this->new_node(e);
if (node == NULL) return NULL;
node->set_next(ref_node->next());
ref_node->set_next(node);
return node;
}
// Remove an entry from the linked list.
// Return true if the entry is successfully removed
virtual bool remove(const E& e) {
LinkedListNode<E>* tmp = this->head();
LinkedListNode<E>* prev = NULL;
while (tmp != NULL) {
if (tmp->peek()->equals(e)) {
return remove_after(prev);
}
prev = tmp;
tmp = tmp->next();
}
return false;
}
// Remove the node after the reference entry
virtual bool remove_after(LinkedListNode<E>* prev) {
LinkedListNode<E>* to_delete;
if (prev == NULL) {
to_delete = this->unlink_head();
} else {
to_delete = prev->next();
if (to_delete != NULL) {
prev->set_next(to_delete->next());
}
}
if (to_delete != NULL) {
delete_node(to_delete);
return true;
}
return false;
}
virtual bool remove(LinkedListNode<E>* node) {
LinkedListNode<E>* p = this->head();
while (p != NULL && p->next() != node) {
p = p->next();
}
if (p != NULL) {
p->set_next(node->next());
delete_node(node);
return true;
} else {
return false;
}
}
virtual bool remove_before(LinkedListNode<E>* ref) {
assert(ref != NULL, "NULL pointer");
LinkedListNode<E>* p = this->head();
LinkedListNode<E>* to_delete = NULL; // to be deleted
LinkedListNode<E>* prev = NULL; // node before the node to be deleted
while (p != NULL && p != ref) {
prev = to_delete;
to_delete = p;
p = p->next();
}
if (p == NULL || to_delete == NULL) return false;
assert(to_delete->next() == ref, "Wrong node to delete");
assert(prev == NULL || prev->next() == to_delete,
"Sanity check");
if (prev == NULL) {
assert(to_delete == this->head(), "Must be head");
this->set_head(to_delete->next());
} else {
prev->set_next(to_delete->next());
}
delete_node(to_delete);
return true;
}
DEBUG_ONLY(ResourceObj::allocation_type storage_type() { return T; })
protected:
// Create a new linked list node object in the specified storage
LinkedListNode<E>* new_node(const E& e) const {
switch(T) {
case ResourceObj::ARENA: {
assert(_arena != NULL, "Arena not set");
return new(_arena) LinkedListNode<E>(e);
}
case ResourceObj::RESOURCE_AREA:
case ResourceObj::C_HEAP: {
if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
return new(std::nothrow, T, F) LinkedListNode<E>(e);
} else {
return new(T, F) LinkedListNode<E>(e);
}
}
default:
ShouldNotReachHere();
}
return NULL;
}
// Delete linked list node object
void delete_node(LinkedListNode<E>* node) {
if (T == ResourceObj::C_HEAP) {
delete node;
}
}
};
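// Storage-selection sketch (illustrative only; 'Integer' is the small test type from
// the linked list unit test in this change, and 'an_arena' is a hypothetical Arena*):
//
//   LinkedListImpl<Integer> heap_list;                                  // C heap, mtNMT by default
//   LinkedListImpl<Integer, ResourceObj::ARENA> arena_list(an_arena);   // arena-backed nodes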
// Sorted linked list. The linked list maintains the sorting order specified by the comparison
// function.
template <class E, int (*FUNC)(const E&, const E&),
ResourceObj::allocation_type T = ResourceObj::C_HEAP,
MEMFLAGS F = mtNMT, AllocFailType alloc_failmode = AllocFailStrategy::RETURN_NULL>
class SortedLinkedList : public LinkedListImpl<E, T, F, alloc_failmode> {
public:
SortedLinkedList() { }
SortedLinkedList(Arena* a) : LinkedListImpl<E, T, F, alloc_failmode>(a) { }
virtual LinkedListNode<E>* add(const E& e) {
return LinkedListImpl<E, T, F, alloc_failmode>::add(e);
}
virtual void move(LinkedList<E>* list) {
assert(list->storage_type() == this->storage_type(), "Different storage type");
LinkedListNode<E>* node;
while ((node = list->unlink_head()) != NULL) {
this->add(node);
}
assert(list->is_empty(), "All entries are moved");
}
virtual void add(LinkedListNode<E>* node) {
assert(node != NULL, "NULL pointer");
LinkedListNode<E>* tmp = this->head();
LinkedListNode<E>* prev = NULL;
int cmp_val;
while (tmp != NULL) {
cmp_val = FUNC(*tmp->peek(), *node->peek());
if (cmp_val >= 0) {
break;
}
prev = tmp;
tmp = tmp->next();
}
if (prev != NULL) {
node->set_next(prev->next());
prev->set_next(node);
} else {
node->set_next(this->head());
this->set_head(node);
}
}
virtual bool add(const LinkedList<E>* list) {
return LinkedListImpl<E, T, F, alloc_failmode>::add(list);
}
virtual LinkedListNode<E>* find_node(const E& e) {
LinkedListNode<E>* p = this->head();
while (p != NULL) {
int comp_val = FUNC(*p->peek(), e);
if (comp_val == 0) {
return p;
} else if (comp_val > 0) {
return NULL;
}
p = p->next();
}
return NULL;
}
};
// Iterates over all entries in the list (see the usage sketch after this class)
template <class E> class LinkedListIterator : public StackObj {
private:
LinkedListNode<E>* _p;
bool _is_empty;
public:
LinkedListIterator(LinkedListNode<E>* head) : _p(head) {
_is_empty = (head == NULL);
}
bool is_empty() const { return _is_empty; }
const E* next() {
if (_p == NULL) return NULL;
const E* e = _p->peek();
_p = _p->next();
return e;
}
};
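// Usage sketch (illustrative only; 'Integer' and 'list' are borrowed from the linked
// list unit test in this change):
//
//   LinkedListIterator<Integer> itr(list.head());
//   const Integer* value;
//   while ((value = itr.next()) != NULL) {
//     // entries are visited in list order; value->value() reads the payload
//   }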
#endif

@ -0,0 +1,118 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/nativeCallStack.hpp"
NativeCallStack::NativeCallStack(int toSkip, bool fillStack) :
_hash_value(0) {
#if !PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
fillStack = false;
#endif
if (fillStack) {
os::get_native_stack(_stack, NMT_TrackingStackDepth, toSkip);
} else {
for (int index = 0; index < NMT_TrackingStackDepth; index ++) {
_stack[index] = NULL;
}
}
}
NativeCallStack::NativeCallStack(address* pc, int frameCount) {
int frameToCopy = (frameCount < NMT_TrackingStackDepth) ?
frameCount : NMT_TrackingStackDepth;
int index;
for (index = 0; index < frameToCopy; index ++) {
_stack[index] = pc[index];
}
for (; index < NMT_TrackingStackDepth; index ++) {
_stack[index] = NULL;
}
}
// number of stack frames captured
int NativeCallStack::frames() const {
int index;
for (index = 0; index < NMT_TrackingStackDepth; index ++) {
if (_stack[index] == NULL) {
break;
}
}
return index;
}
// Hash code. Any better algorithm?
int NativeCallStack::hash() const {
long hash_val = _hash_value;
if (hash_val == 0) {
long pc;
int index;
for (index = 0; index < NMT_TrackingStackDepth; index ++) {
pc = (long)_stack[index];
if (pc == 0) break;
hash_val += pc;
}
NativeCallStack* p = const_cast<NativeCallStack*>(this);
p->_hash_value = (int)(hash_val & 0xFFFFFFFF);
}
return _hash_value;
}
void NativeCallStack::print_on(outputStream* out) const {
print_on(out, 0);
}
// Decode and print this call path
void NativeCallStack::print_on(outputStream* out, int indent) const {
address pc;
char buf[1024];
int offset;
if (is_empty()) {
for (int index = 0; index < indent; index ++) out->print(" ");
#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
out->print("[BOOTSTRAP]");
#else
out->print("[No stack]");
#endif
} else {
for (int frame = 0; frame < NMT_TrackingStackDepth; frame ++) {
pc = get_frame(frame);
if (pc == NULL) break;
// Print indent
for (int index = 0; index < indent; index ++) out->print(" ");
if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
out->print_cr("[" PTR_FORMAT "] %s+0x%x", p2i(pc), buf, offset);
} else {
out->print_cr("[" PTR_FORMAT "]", p2i(pc));
}
}
}
}
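A short usage sketch for the class above (illustrative only; within NMT these stacks are captured by the malloc and virtual memory trackers, which use hash() and equals() to group allocation sites):

  NativeCallStack here(1, true);     // skip one frame before recording (toSkip = 1)
  NativeCallStack other(1, true);

  if (here.equals(other)) {          // hash values are compared first, then frame by frame
    // same native allocation site
  }
  here.print_on(tty, 2);             // decode and print the path, indented by two spaces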

@ -0,0 +1,95 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_UTILITIES_NATIVE_CALL_STACK_HPP
#define SHARE_VM_UTILITIES_NATIVE_CALL_STACK_HPP
#include "memory/allocation.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/ostream.hpp"
/*
* This class represents a native call path (it does not include Java frames).
*
* This class was developed in the context of native memory tracking; it can also
* be a useful tool for debugging purposes.
*
* For example, the following code prints out the native call path:
*
* ....
* NativeCallStack here;
* here.print_on(tty);
* ....
*
* However, there are a couple of restrictions on this class. If the restrictions are
* not strictly followed, it may break native memory tracking badly.
*
* 1. The number of stack frames to capture is defined by native memory tracking.
* This number affects how much memory is used by native memory tracking.
* 2. The class is strictly a stack object; it must not be allocated on the heap or
* in virtual memory.
*/
class NativeCallStack : public StackObj {
private:
address _stack[NMT_TrackingStackDepth];
int _hash_value;
public:
NativeCallStack(int toSkip = 0, bool fillStack = false);
NativeCallStack(address* pc, int frameCount);
// if it is an empty stack
inline bool is_empty() const {
return _stack[0] == NULL;
}
// number of stack frames captured
int frames() const;
inline int compare(const NativeCallStack& other) const {
return memcmp(_stack, other._stack, sizeof(_stack));
}
inline bool equals(const NativeCallStack& other) const {
// compare hash values
if (hash() != other.hash()) return false;
// compare each frame
return compare(other) == 0;
}
inline address get_frame(int index) const {
assert(index >= 0 && index < NMT_TrackingStackDepth, "Index out of bound");
return _stack[index];
}
// Hash code. Any better algorithm?
int hash() const;
void print_on(outputStream* out) const;
void print_on(outputStream* out, int indent) const;
};
#endif

@ -774,6 +774,11 @@ void VMError::report(outputStream* st) {
st->cr();
}
STEP(228, "(Native Memory Tracking)" )
if (_verbose) {
MemTracker::final_report(st);
}
STEP(230, "" )
if (_verbose) {
@ -897,9 +902,6 @@ void VMError::report_and_die() {
static bool log_done = false; // done saving error log
static bool transmit_report_done = false; // done error reporting
// disable NMT to avoid further exceptions
MemTracker::shutdown(MemTracker::NMT_error_reporting);
if (SuppressFatalErrorMessage) {
os::abort();
}

@ -95,7 +95,8 @@ public class WhiteBox {
public native void NMTCommitMemory(long addr, long size);
public native void NMTUncommitMemory(long addr, long size);
public native void NMTReleaseMemory(long addr, long size);
public native boolean NMTWaitForDataMerge();
public native void NMTOverflowHashBucket(long num);
public native long NMTMallocWithPseudoStack(long size, int index);
public native boolean NMTIsDetailSupported();
// Compiler