8068451: Kitchensink fails with assert(_size >= sz) failed: Negative size
Need ThreadCritical lock around chunk pool cleaning and copying for snapshot
Reviewed-by: lfoltan, gtriantafill, hseigel
This commit is contained in:
parent cc1c11c366
commit 8df6ac1794
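
For readers outside HotSpot: the assert in the bug title fires in Native Memory Tracking (NMT) when its snapshot arithmetic produces a negative free-chunk size, which can happen if pooled chunks are freed while the snapshot is being copied. The sketch below is a standalone illustration of the pattern this commit adopts, not HotSpot source: std::mutex stands in for ThreadCritical, and the two counters stand in for NMT's malloc totals.

// Standalone illustration only (not HotSpot source): std::mutex stands in
// for ThreadCritical, and the counters stand in for NMT's malloc totals.
#include <cstddef>
#include <cstdlib>
#include <mutex>

static std::mutex critical;            // plays the role of ThreadCritical

struct Counters {
  size_t chunk_bytes = 0;              // total bytes tracked as mtChunk
  size_t arena_used  = 0;              // bytes of those chunks in use by arenas
};

static Counters live;                  // updated by allocation and free paths

// Chunk-pool cleaning: the free and its accounting happen inside the
// critical section, matching the patched free_all_but() / operator delete.
void free_pooled_chunk(void* chunk, size_t size) {
  std::lock_guard<std::mutex> tc(critical);
  live.chunk_bytes -= size;
  std::free(chunk);
}

// Snapshot copy: taken under the same lock, matching the patched copy_to(),
// so the later "total chunks minus arena-used" adjustment always works on
// values that belong to one consistent moment in time.
Counters snapshot() {
  std::lock_guard<std::mutex> tc(critical);
  return live;
}
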
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -275,31 +275,30 @@ class ChunkPool: public CHeapObj<mtInternal> {
     Chunk* cur = NULL;
     Chunk* next;
-    {
     // if we have more than n chunks, free all of them
     ThreadCritical tc;
     if (_num_chunks > n) {
       // free chunks at end of queue, for better locality
       cur = _first;
       for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();
 
       if (cur != NULL) {
         next = cur->next();
         cur->set_next(NULL);
         cur = next;
 
-      _num_chunks = n;
+        // Free all remaining chunks while in ThreadCritical lock
+        // so NMT adjustment is stable.
+        while(cur != NULL) {
+          next = cur->next();
+          os::free(cur);
+          _num_chunks--;
+          cur = next;
+        }
       }
     }
-    }
-
-    // Free all remaining chunks, outside of ThreadCritical
-    // to avoid deadlock with NMT
-    while(cur != NULL) {
-      next = cur->next();
-      os::free(cur);
-      cur = next;
-    }
   }
 
   // Accessors to preallocated pool's
   static ChunkPool* large_pool() { assert(_large_pool != NULL, "must be initialized"); return _large_pool; }
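
The pruning logic itself is unchanged: walk to the n-th cached chunk, cut the list there, and free the tail. What the hunk above moves is where that freeing happens relative to the lock, and it swaps the single `_num_chunks = n` assignment for a per-free decrement so the count stays truthful at every step. A standalone sketch of that list-pruning shape, with a hypothetical ChunkNode type in place of HotSpot's Chunk:

#include <cstddef>
#include <cstdlib>

// Hypothetical node type used for illustration; HotSpot's Chunk also carries
// the payload that arenas allocate from.
struct ChunkNode {
  ChunkNode* next;
};

// Keep the first n nodes (n >= 1), free the rest, return how many were freed.
// Mirrors the shape of ChunkPool::free_all_but(); in the patched code the
// whole body runs under ThreadCritical and the pool's chunk count is
// decremented once per freed node.
size_t prune_all_but(ChunkNode* first, size_t n) {
  // Walk to the last node we intend to keep.
  ChunkNode* cur = first;
  for (size_t i = 0; i + 1 < n && cur != nullptr; i++) cur = cur->next;
  if (cur == nullptr) return 0;          // list already has <= n nodes

  // Detach the tail, then free it node by node.
  ChunkNode* doomed = cur->next;
  cur->next = nullptr;

  size_t freed = 0;
  while (doomed != nullptr) {
    ChunkNode* next = doomed->next;
    std::free(doomed);                   // os::free(cur) in HotSpot
    freed++;
    doomed = next;
  }
  return freed;
}
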
@@ -384,7 +383,9 @@ void Chunk::operator delete(void* p) {
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size: ChunkPool::small_pool()->free(c); break;
   case Chunk::tiny_size: ChunkPool::tiny_pool()->free(c); break;
-  default: os::free(c);
+  default:
+    ThreadCritical tc;  // Free chunks under TC lock so that NMT adjustment is stable.
+    os::free(c);
  }
 }
 
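Only the default branch, which frees an odd-sized chunk directly rather than returning it to a pool, changes here: the guard object is declared immediately before os::free so the memory-tracking bookkeeping triggered by that free happens inside the critical section. The same idiom in standalone form, with std::mutex again standing in for ThreadCritical:

#include <cstdlib>
#include <mutex>

static std::mutex critical;                 // stands in for ThreadCritical

// Illustration of the idiom in the new default branch: the RAII guard is
// declared right before the free, so the free (and the accounting it
// triggers) runs inside the critical section, and the lock is released as
// soon as the enclosing scope ends.
void free_odd_sized_chunk(void* p) {
  std::lock_guard<std::mutex> tc(critical);
  std::free(p);                             // os::free(c) in the real code
}
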
@@ -136,7 +136,7 @@ bool MallocSiteTable::walk(MallocSiteWalker* walker) {
 MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
   size_t* pos_idx) {
   int index = hash_to_index(key.hash());
-  assert(index >= 0, "Negative index");
+  assert(index >= 0, err_msg("Negative index %d", index));
   *bucket_idx = (size_t)index;
   *pos_idx = 0;
 
@@ -52,7 +52,7 @@ size_t MallocMemorySnapshot::total_arena() const {
 }
 
 // Make adjustment by subtracting chunks used by arenas
-// from total chunks to get total free chunck size
+// from total chunks to get total free chunk size
 void MallocMemorySnapshot::make_adjustment() {
   size_t arena_size = total_arena();
   int chunk_idx = NMTUtil::flag_to_index(mtChunk);
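
The comment fixed above describes the subtraction that actually tripped the assert: free chunk space is the total mtChunk bytes minus the chunk bytes the arenas report as used. A toy version of that arithmetic, with made-up numbers, shows how chunks freed mid-snapshot make the unsigned difference wrap:

#include <cassert>
#include <cstddef>
#include <cstdio>

// Toy stand-in for MallocMemorySnapshot::make_adjustment(): subtract the
// chunk bytes used by arenas from the total chunk bytes to get free space.
size_t free_chunk_space(size_t total_chunk_bytes, size_t arena_used_bytes) {
  assert(total_chunk_bytes >= arena_used_bytes && "Negative size");
  return total_chunk_bytes - arena_used_bytes;
}

int main() {
  // Consistent snapshot: 64 KB of pooled chunks, 40 KB handed to arenas.
  std::printf("free = %zu bytes\n", free_chunk_space(64 * 1024, 40 * 1024));

  // Torn snapshot: 32 KB of chunks were freed after the chunk total was
  // copied but before the arena totals caught up -- 32 KB vs 40 KB, and the
  // unsigned subtraction would wrap: the assert(_size >= sz) failure.
  // free_chunk_space(32 * 1024, 40 * 1024);   // would trip the assert
  return 0;
}
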
@@ -29,6 +29,7 @@
 
 #include "memory/allocation.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/threadCritical.hpp"
 #include "services/nmtCommon.hpp"
 #include "utilities/nativeCallStack.hpp"
 
@@ -164,6 +165,10 @@ class MallocMemorySnapshot : public ResourceObj {
   }
 
   void copy_to(MallocMemorySnapshot* s) {
+    // Need to make sure that mtChunks don't get deallocated while the
+    // copy is going on, because their size is adjusted using this
+    // buffer in make_adjustment().
+    ThreadCritical tc;
     s->_tracking_header = _tracking_header;
     for (int index = 0; index < mt_number_of_types; index ++) {
       s->_malloc[index] = _malloc[index];
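
This hunk is the "copying for snapshot" half of the commit message: copy_to now runs under ThreadCritical, so a concurrent chunk free (which holds the same lock in the ChunkPool hunks above) can no longer change the counters halfway through the copy. A minimal standalone sketch of that copy-under-lock pattern, with a hypothetical fixed-size counter array in place of the real per-type counter objects:

#include <cstddef>
#include <mutex>

static std::mutex critical;                 // stands in for ThreadCritical

constexpr int kNumTypes = 4;                // stands in for mt_number_of_types

struct Snapshot {
  size_t malloc_bytes[kNumTypes] = {};      // simplified per-type totals

  // Copy every per-type counter while holding the lock. Because the free
  // paths take the same lock, the destination sees either all of a
  // concurrent free's updates or none of them -- never a torn mixture.
  void copy_to(Snapshot* s) const {
    std::lock_guard<std::mutex> tc(critical);
    for (int index = 0; index < kNumTypes; index++) {
      s->malloc_bytes[index] = malloc_bytes[index];
    }
  }
};
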
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -137,8 +137,8 @@ void NMTDCmd::execute(DCmdSource source, TRAPS) {
     }
   } else if (_detail_diff.value()) {
     if (!check_detail_tracking_level(output())) {
-     return;
-   }
+      return;
+    }
     MemBaseline& baseline = MemTracker::get_baseline();
     if (baseline.baseline_type() == MemBaseline::Detail_baselined) {
       report_diff(false, scale_unit);