Merge
commit a53e8f36c1
@ -300,5 +300,3 @@ HeapRegion* OldGCAllocRegion::release() {
}
return G1AllocRegion::release();
}

@ -23,7 +23,7 @@
*/

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1MarkSweep.hpp"
@ -116,15 +116,85 @@ void G1DefaultAllocator::abandon_gc_alloc_regions() {
G1PLAB::G1PLAB(size_t gclab_word_size) :
PLAB(gclab_word_size), _retired(true) { }

HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
size_t word_sz,
AllocationContext_t context) {
size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
// Return the remaining space in the cur alloc region, but not less than
// the min TLAB size.

// Also, this value can be at most the humongous object threshold,
// since we can't allow tlabs to grow big enough to accommodate
// humongous objects.

HeapRegion* hr = mutator_alloc_region(context)->get();
size_t max_tlab = _g1h->max_tlab_size() * wordSize;
if (hr == NULL) {
return max_tlab;
} else {
return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
}
}
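
The clamp above builds the TLAB size hint from three inputs: the free space left in the current mutator region, the MinTLABSize floor, and a ceiling derived from the humongous-object threshold, so a TLAB can never grow into a humongous allocation. A minimal standalone sketch of that arithmetic follows; it is not HotSpot code and the byte values are made up for illustration:

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Standalone model of the clamp above (not HotSpot code): the TLAB hint is the
// free space of the current mutator region, never below the MinTLABSize floor
// and never above the humongous-object threshold. Byte values are made up.
size_t unsafe_max_tlab_alloc(size_t region_free, size_t min_tlab, size_t max_tlab,
                             bool has_region) {
  if (!has_region) {
    return max_tlab;                       // no active region: report the upper bound
  }
  return std::min(std::max(region_free, min_tlab), max_tlab);
}

int main() {
  const size_t min_tlab = 2 * 1024;        // illustrative MinTLABSize
  const size_t max_tlab = 512 * 1024;      // illustrative humongous threshold in bytes
  std::printf("%zu\n", unsafe_max_tlab_alloc(300 * 1024, min_tlab, max_tlab, true));  // 307200
  std::printf("%zu\n", unsafe_max_tlab_alloc(512, min_tlab, max_tlab, true));         // 2048
  std::printf("%zu\n", unsafe_max_tlab_alloc(0, min_tlab, max_tlab, false));          // 524288
  return 0;
}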

HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
size_t word_size,
AllocationContext_t context) {
switch (dest.value()) {
case InCSetState::Young:
return survivor_attempt_allocation(word_size, context);
case InCSetState::Old:
return old_attempt_allocation(word_size, context);
default:
ShouldNotReachHere();
return NULL; // Keep some compilers happy
}
}

HeapWord* G1Allocator::survivor_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!_g1h->is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");

HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(word_size,
false /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
false /* bot_updates */);
}
if (result != NULL) {
_g1h->dirty_young_block(result, word_size);
}
return result;
}

HeapWord* G1Allocator::old_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!_g1h->is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");

HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(word_size,
true /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
true /* bot_updates */);
}
return result;
}

G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
_g1h(G1CollectedHeap::heap()),
_allocator(allocator),
_survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
}

HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
size_t word_sz,
AllocationContext_t context) {
size_t gclab_word_size = _g1h->desired_plab_sz(dest);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
G1PLAB* alloc_buf = alloc_buffer(dest, context);
alloc_buf->retire();

HeapWord* buf = _g1h->par_allocate_during_gc(dest, gclab_word_size, context);
HeapWord* buf = _allocator->par_allocate_during_gc(dest, gclab_word_size, context);
if (buf == NULL) {
return NULL; // Let caller handle allocation failure.
}
@ -136,14 +206,18 @@ HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
assert(obj != NULL, "buffer was definitely big enough...");
return obj;
} else {
return _g1h->par_allocate_during_gc(dest, word_sz, context);
return _allocator->par_allocate_during_gc(dest, word_sz, context);
}
}
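
The branch above decides between refilling the per-destination PLAB and allocating the object directly in a GC allocation region: only an object smaller than ParallelGCBufferWastePct percent of the desired PLAB size justifies retiring the current buffer and requesting a fresh one. A small standalone model of that threshold, with made-up sizes (the real values come from desired_plab_sz() and the ParallelGCBufferWastePct flag):

#include <cstddef>
#include <cstdio>

// Standalone model of the threshold above (not HotSpot code): an object only
// forces the current PLAB to be retired and refilled if it is smaller than
// ParallelGCBufferWastePct percent of the desired PLAB size; larger objects
// are allocated directly in the GC allocation region. Sizes are made up.
bool should_refill_plab(size_t word_sz, size_t desired_plab_words, unsigned waste_pct) {
  return word_sz * 100 < desired_plab_words * waste_pct;
}

int main() {
  const size_t plab_words = 4096;   // illustrative desired_plab_sz()
  const unsigned waste_pct = 10;    // illustrative ParallelGCBufferWastePct
  std::printf("%d\n", should_refill_plab(100, plab_words, waste_pct));  // 1: refill the PLAB
  std::printf("%d\n", should_refill_plab(800, plab_words, waste_pct));  // 0: allocate directly
  return 0;
}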

G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
G1ParGCAllocator(g1h),
_surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young)),
_tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)) {
void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}

G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
G1PLABAllocator(allocator),
_surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
_tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)) {
for (uint state = 0; state < InCSetState::Num; state++) {
_alloc_buffers[state] = NULL;
}
@ -151,7 +225,7 @@ G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
_alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}

void G1DefaultParGCAllocator::retire_alloc_buffers() {
void G1DefaultPLABAllocator::retire_alloc_buffers() {
for (uint state = 0; state < InCSetState::Num; state++) {
G1PLAB* const buf = _alloc_buffers[state];
if (buf != NULL) {
@ -160,7 +234,7 @@ void G1DefaultParGCAllocator::retire_alloc_buffers() {
}
}
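
retire_alloc_buffers() and the waste() method that follows exist to feed the per-thread termination statistics elsewhere in this commit ("waste (KiB) total alloc undo"). A toy model of the two counters, with made-up sizes and not the HotSpot PLAB class:

#include <cstddef>
#include <cstdio>

// Toy PLAB waste accounting (illustrative only): whatever is left unused when a
// buffer is retired counts as allocation waste, and space handed back through
// undo_allocation counts as undo waste.
struct ToyPlab {
  size_t size_words;
  size_t used_words;
  size_t undo_wasted_words;

  bool allocate(size_t words) {
    if (used_words + words > size_words) {
      return false;
    }
    used_words += words;
    return true;
  }
  void undo_allocation(size_t words) {     // an allocation that turned out to be unneeded
    undo_wasted_words += words;            // the space stays dead in this buffer
  }
  size_t retire() {                        // unused tail is counted as allocation waste
    return size_words - used_words;
  }
};

int main() {
  ToyPlab plab{256, 0, 0};
  plab.allocate(100);
  plab.allocate(60);
  plab.undo_allocation(60);                // e.g. the copy lost the forwarding race
  std::printf("alloc waste: %zu words, undo waste: %zu words\n",
              plab.retire(), plab.undo_wasted_words);    // 96 and 60
  return 0;
}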

void G1DefaultParGCAllocator::waste(size_t& wasted, size_t& undo_wasted) {
void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
wasted = 0;
undo_wasted = 0;
for (uint state = 0; state < InCSetState::Num; state++) {

@ -33,17 +33,36 @@

class EvacuationInfo;

// Base class for G1 allocators.
// Interface to keep track of which regions G1 is currently allocating into. Provides
// some accessors (e.g. allocating into them, or getting their occupancy).
// Also keeps track of retained regions across GCs.
class G1Allocator : public CHeapObj<mtGC> {
friend class VMStructs;
protected:
G1CollectedHeap* _g1h;

virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;

// Accessors to the allocation regions.
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;

// Allocation attempt during GC for a survivor object / PLAB.
inline HeapWord* survivor_attempt_allocation(size_t word_size,
AllocationContext_t context);
// Allocation attempt during GC for an old object / PLAB.
inline HeapWord* old_attempt_allocation(size_t word_size,
AllocationContext_t context);
public:
G1Allocator(G1CollectedHeap* heap) : _g1h(heap) { }
virtual ~G1Allocator() { }

static G1Allocator* create_allocator(G1CollectedHeap* g1h);

#ifdef ASSERT
// Do we currently have an active mutator region to allocate into?
bool has_mutator_alloc_region(AllocationContext_t context) { return mutator_alloc_region(context)->get() != NULL; }
#endif
virtual void init_mutator_alloc_region() = 0;
virtual void release_mutator_alloc_region() = 0;

@ -51,24 +70,35 @@ public:
virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
virtual void abandon_gc_alloc_regions() = 0;

virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;
virtual size_t used_in_alloc_regions() = 0;
virtual bool is_retained_old_region(HeapRegion* hr) = 0;
// Management of retained regions.

void reuse_retained_old_region(EvacuationInfo& evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained);
virtual bool is_retained_old_region(HeapRegion* hr) = 0;
void reuse_retained_old_region(EvacuationInfo& evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained);

virtual HeapRegion* new_heap_region(uint hrs_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr) {
return new HeapRegion(hrs_index, sharedOffsetArray, mr);
}
// Allocate blocks of memory during mutator time.

inline HeapWord* attempt_allocation(size_t word_size, AllocationContext_t context);
inline HeapWord* attempt_allocation_locked(size_t word_size, AllocationContext_t context);
inline HeapWord* attempt_allocation_force(size_t word_size, AllocationContext_t context);

size_t unsafe_max_tlab_alloc(AllocationContext_t context);

// Allocate blocks of memory during garbage collection. Will ensure an
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be a humongous - it must fit into a single heap region.
HeapWord* par_allocate_during_gc(InCSetState dest,
size_t word_size,
AllocationContext_t context);

virtual size_t used_in_alloc_regions() = 0;
};
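
The comments above describe the split this commit introduces: G1Allocator owns the allocation regions (mutator, survivor, old) and serves both mutator-time and GC-time block allocation, while the PLAB handling that used to live in G1ParGCAllocator moves into a G1PLABAllocator that merely delegates region-level allocation to a G1Allocator. A rough standalone sketch of that division of labour follows; the types are simplified stand-ins, not the HotSpot classes:

#include <cstddef>
#include <cstdio>

// Region-level allocator hands out blocks from per-destination spaces; the PLAB
// layer on top serves small objects from per-thread buffers and delegates to
// the region allocator only for refills and for objects too big for a PLAB.
enum class Dest { Young, Old };

class RegionAllocator {                                   // role of G1Allocator
public:
  size_t* allocate_during_gc(Dest dest, size_t words) {
    Space& s = (dest == Dest::Young) ? _survivor : _old;
    if (s.top + words > kSpaceWords) {
      return nullptr;                                     // space exhausted
    }
    size_t* result = s.data + s.top;
    s.top += words;
    return result;
  }
private:
  static const size_t kSpaceWords = 1024;
  struct Space { size_t data[kSpaceWords]; size_t top = 0; };
  Space _survivor, _old;
};

class PlabAllocator {                                     // role of G1PLABAllocator
public:
  explicit PlabAllocator(RegionAllocator* allocator) : _allocator(allocator) {}

  size_t* allocate(Dest dest, size_t words) {
    if (words > kPlabWords) {                             // too big for a PLAB
      return _allocator->allocate_during_gc(dest, words); // allocate directly
    }
    Plab& plab = (dest == Dest::Young) ? _young_plab : _old_plab;
    if (plab.top + words > plab.size) {                   // current PLAB exhausted
      plab.base = _allocator->allocate_during_gc(dest, kPlabWords);
      plab.size = (plab.base != nullptr) ? kPlabWords : 0;
      plab.top = 0;
      if (plab.base == nullptr) {
        return nullptr;                                   // let the caller handle failure
      }
    }
    size_t* result = plab.base + plab.top;                // bump pointer inside the PLAB
    plab.top += words;
    return result;
  }
private:
  static const size_t kPlabWords = 64;
  struct Plab { size_t* base = nullptr; size_t size = 0; size_t top = 0; };
  RegionAllocator* _allocator;
  Plab _young_plab, _old_plab;
};

int main() {
  RegionAllocator regions;
  PlabAllocator plabs(&regions);                          // one of these per GC worker
  size_t* a = plabs.allocate(Dest::Young, 8);             // triggers the first PLAB refill
  size_t* b = plabs.allocate(Dest::Young, 8);             // carved from the same PLAB
  std::printf("%td\n", b - a);                            // prints 8
  return 0;
}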

// The default allocator for G1.
// The default allocation region manager for G1. Provides a single mutator, survivor
// and old generation allocation region.
// Can retain the (single) old generation allocation region across GCs.
class G1DefaultAllocator : public G1Allocator {
protected:
// Alloc region used to satisfy mutator allocation requests.
@ -152,10 +182,14 @@ public:
}
};

class G1ParGCAllocator : public CHeapObj<mtGC> {
// Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
// Needs to handle multiple contexts, extra alignment in any "survivor" area and some
// statistics.
class G1PLABAllocator : public CHeapObj<mtGC> {
friend class G1ParScanThreadState;
protected:
G1CollectedHeap* _g1h;
G1Allocator* _allocator;

// The survivor alignment in effect in bytes.
// == 0 : don't align survivors
@ -182,11 +216,10 @@ protected:
}

public:
G1ParGCAllocator(G1CollectedHeap* g1h) :
_g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()) { }
virtual ~G1ParGCAllocator() { }
G1PLABAllocator(G1Allocator* allocator);
virtual ~G1PLABAllocator() { }

static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
static G1PLABAllocator* create_allocator(G1Allocator* allocator);

virtual void waste(size_t& wasted, size_t& undo_wasted) = 0;

@ -219,18 +252,18 @@ public:
return allocate_direct_or_new_plab(dest, word_sz, context);
}

void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}
void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context);
};
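
The _survivor_alignment_bytes field above ("== 0 : don't align survivors") hints at a detail of PLAB allocation that is elided from this hunk: when survivor alignment is in effect, survivor-bound allocations are padded up to that boundary. A minimal, illustrative align-up helper under that assumption (not the HotSpot code, values made up):

#include <cassert>
#include <cstddef>
#include <cstdio>

// Illustrative align-up helper: rounds an offset (in bytes) up to the survivor
// alignment, assuming the alignment is a power of two.
size_t align_up(size_t offset, size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return (offset + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t survivor_alignment = 32;                      // e.g. a 32-byte survivor alignment
  std::printf("%zu\n", align_up(40, survivor_alignment));    // 64: padded up to the boundary
  std::printf("%zu\n", align_up(64, survivor_alignment));    // 64: already aligned
  return 0;
}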

class G1DefaultParGCAllocator : public G1ParGCAllocator {
// The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor
// and old generation allocation.
class G1DefaultPLABAllocator : public G1PLABAllocator {
G1PLAB _surviving_alloc_buffer;
G1PLAB _tenured_alloc_buffer;
G1PLAB* _alloc_buffers[InCSetState::Num];

public:
G1DefaultParGCAllocator(G1CollectedHeap* g1h);
G1DefaultPLABAllocator(G1Allocator* _allocator);

virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
assert(dest.is_valid(),

hotspot/src/share/vm/gc/g1/g1Allocator.inline.hpp (new file, 46 lines)
@ -0,0 +1,46 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_VM_GC_G1_G1ALLOCATOR_INLINE_HPP
#define SHARE_VM_GC_G1_G1ALLOCATOR_INLINE_HPP

#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"

HeapWord* G1Allocator::attempt_allocation(size_t word_size, AllocationContext_t context) {
return mutator_alloc_region(context)->attempt_allocation(word_size, false /* bot_updates */);
}

HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size, AllocationContext_t context) {
HeapWord* result = mutator_alloc_region(context)->attempt_allocation_locked(word_size, false /* bot_updates */);
assert(result != NULL || mutator_alloc_region(context)->get() == NULL,
err_msg("Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT, p2i(mutator_alloc_region(context)->get())));
return result;
}

HeapWord* G1Allocator::attempt_allocation_force(size_t word_size, AllocationContext_t context) {
return mutator_alloc_region(context)->attempt_allocation_force(word_size, false /* bot_updates */);
}

#endif // SHARE_VM_GC_G1_G1ALLOCATOR_HPP
@ -30,6 +30,6 @@ G1Allocator* G1Allocator::create_allocator(G1CollectedHeap* g1h) {
|
||||
return new G1DefaultAllocator(g1h);
|
||||
}
|
||||
|
||||
G1ParGCAllocator* G1ParGCAllocator::create_allocator(G1CollectedHeap* g1h) {
|
||||
return new G1DefaultParGCAllocator(g1h);
|
||||
G1PLABAllocator* G1PLABAllocator::create_allocator(G1Allocator* allocator) {
|
||||
return new G1DefaultPLABAllocator(allocator);
|
||||
}
|
||||
|
@ -31,7 +31,7 @@
|
||||
#include "gc/g1/concurrentG1Refine.hpp"
|
||||
#include "gc/g1/concurrentG1RefineThread.hpp"
|
||||
#include "gc/g1/concurrentMarkThread.inline.hpp"
|
||||
#include "gc/g1/g1AllocRegion.inline.hpp"
|
||||
#include "gc/g1/g1Allocator.inline.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1CollectorPolicy.hpp"
|
||||
#include "gc/g1/g1CollectorState.hpp"
|
||||
@ -815,22 +815,16 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
|
||||
|
||||
{
|
||||
MutexLockerEx x(Heap_lock);
|
||||
result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
|
||||
false /* bot_updates */);
|
||||
result = _allocator->attempt_allocation_locked(word_size, context);
|
||||
if (result != NULL) {
|
||||
return result;
|
||||
}
|
||||
|
||||
// If we reach here, attempt_allocation_locked() above failed to
|
||||
// allocate a new region. So the mutator alloc region should be NULL.
|
||||
assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
|
||||
|
||||
if (GC_locker::is_active_and_needs_gc()) {
|
||||
if (g1_policy()->can_expand_young_list()) {
|
||||
// No need for an ergo verbose message here,
|
||||
// can_expand_young_list() does this when it returns true.
|
||||
result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
|
||||
false /* bot_updates */);
|
||||
result = _allocator->attempt_allocation_force(word_size, context);
|
||||
if (result != NULL) {
|
||||
return result;
|
||||
}
|
||||
@ -890,8 +884,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
|
||||
// first attempt (without holding the Heap_lock) here and the
|
||||
// follow-on attempt will be at the start of the next loop
|
||||
// iteration (after taking the Heap_lock).
|
||||
result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
|
||||
false /* bot_updates */);
|
||||
result = _allocator->attempt_allocation(word_size, context);
|
||||
if (result != NULL) {
|
||||
return result;
|
||||
}
|
||||
@ -1109,6 +1102,29 @@ void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
|
||||
}
|
||||
}
|
||||
|
||||
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
|
||||
uint* gc_count_before_ret,
|
||||
uint* gclocker_retry_count_ret) {
|
||||
assert_heap_not_locked_and_not_at_safepoint();
|
||||
assert(!is_humongous(word_size), "attempt_allocation() should not "
|
||||
"be called for humongous allocation requests");
|
||||
|
||||
AllocationContext_t context = AllocationContext::current();
|
||||
HeapWord* result = _allocator->attempt_allocation(word_size, context);
|
||||
|
||||
if (result == NULL) {
|
||||
result = attempt_allocation_slow(word_size,
|
||||
context,
|
||||
gc_count_before_ret,
|
||||
gclocker_retry_count_ret);
|
||||
}
|
||||
assert_heap_not_locked();
|
||||
if (result != NULL) {
|
||||
dirty_young_block(result, word_size);
|
||||
}
|
||||
return result;
|
||||
}
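
The function above shows the mutator allocation pattern this commit routes through G1Allocator: a lock-free attempt in the current region first, and only on failure the slow path that takes Heap_lock (and may retire the region, expand the heap, or schedule a collection, as the earlier hunk shows). A toy model of that two-level pattern, not HotSpot code and with arbitrary sizes:

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <mutex>

// Toy model: a lock-free bump-pointer attempt in the current "region", then a
// locked slow path that re-checks and installs a fresh region.
class BumpAllocator {
public:
  long* attempt_allocation(size_t words) {               // fast path, no lock
    size_t old_top = _top.load(std::memory_order_relaxed);
    while (old_top + words <= kRegionWords) {
      if (_top.compare_exchange_weak(old_top, old_top + words)) {
        return _region + old_top;
      }
    }
    return nullptr;                                       // current region is full
  }

  long* attempt_allocation_slow(size_t words) {           // slow path, takes the lock
    std::lock_guard<std::mutex> guard(_heap_lock);
    long* result = attempt_allocation(words);             // re-try under the lock
    if (result != nullptr) {
      return result;                                      // another thread refilled already
    }
    _top.store(0);   // toy refill: the real code retires the region and gets a new one
    return attempt_allocation(words);
  }

private:
  static const size_t kRegionWords = 1024;
  long _region[kRegionWords];
  std::atomic<size_t> _top{0};
  std::mutex _heap_lock;
};

int main() {
  BumpAllocator heap;
  long* a = heap.attempt_allocation(100);                 // fast path succeeds
  long* b = heap.attempt_allocation(50);
  std::printf("second block starts %td words after the first\n", b - a);   // prints 100
  return 0;
}
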
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
|
||||
uint* gc_count_before_ret,
|
||||
uint* gclocker_retry_count_ret) {
|
||||
@ -1231,13 +1247,11 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
|
||||
AllocationContext_t context,
|
||||
bool expect_null_mutator_alloc_region) {
|
||||
assert_at_safepoint(true /* should_be_vm_thread */);
|
||||
assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
|
||||
!expect_null_mutator_alloc_region,
|
||||
assert(!_allocator->has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
|
||||
"the current alloc region was unexpectedly found to be non-NULL");
|
||||
|
||||
if (!is_humongous(word_size)) {
|
||||
return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
|
||||
false /* bot_updates */);
|
||||
return _allocator->attempt_allocation_locked(word_size, context);
|
||||
} else {
|
||||
HeapWord* result = humongous_obj_allocate(word_size, context);
|
||||
if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
|
||||
@ -2373,7 +2387,6 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
|
||||
assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
|
||||
}
|
||||
|
||||
|
||||
// Computes the sum of the storage used by the various regions.
|
||||
size_t G1CollectedHeap::used() const {
|
||||
size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
|
||||
@ -2632,6 +2645,11 @@ bool G1CollectedHeap::is_in_exact(const void* p) const {
|
||||
}
|
||||
#endif
|
||||
|
||||
bool G1CollectedHeap::obj_in_cs(oop obj) {
|
||||
HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
|
||||
return r != NULL && r->in_collection_set();
|
||||
}
|
||||
|
||||
// Iteration functions.
|
||||
|
||||
// Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
|
||||
@ -2833,20 +2851,8 @@ size_t G1CollectedHeap::max_tlab_size() const {
|
||||
}
|
||||
|
||||
size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
|
||||
// Return the remaining space in the cur alloc region, but not less than
|
||||
// the min TLAB size.
|
||||
|
||||
// Also, this value can be at most the humongous object threshold,
|
||||
// since we can't allow tlabs to grow big enough to accommodate
|
||||
// humongous objects.
|
||||
|
||||
HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();
|
||||
size_t max_tlab = max_tlab_size() * wordSize;
|
||||
if (hr == NULL) {
|
||||
return max_tlab;
|
||||
} else {
|
||||
return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
|
||||
}
|
||||
AllocationContext_t context = AllocationContext::current();
|
||||
return _allocator->unsafe_max_tlab_alloc(context);
|
||||
}
|
||||
|
||||
size_t G1CollectedHeap::max_capacity() const {
|
||||
@ -4279,18 +4285,18 @@ void G1CollectedHeap::remove_self_forwarding_pointers() {
|
||||
g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
|
||||
}
|
||||
|
||||
void G1CollectedHeap::preserve_mark_during_evac_failure(uint queue_num, oop obj, markOop m) {
|
||||
void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {
|
||||
if (!_evacuation_failed) {
|
||||
_evacuation_failed = true;
|
||||
}
|
||||
|
||||
_evacuation_failed_info_array[queue_num].register_copy_failure(obj->size());
|
||||
_evacuation_failed_info_array[worker_id].register_copy_failure(obj->size());
|
||||
|
||||
// We want to call the "for_promotion_failure" version only in the
|
||||
// case of a promotion failure.
|
||||
if (m->must_be_preserved_for_promotion_failure(obj)) {
|
||||
OopAndMarkOop elem(obj, m);
|
||||
_preserved_objs[queue_num].push(elem);
|
||||
_preserved_objs[worker_id].push(elem);
|
||||
}
|
||||
}
|
||||
|
||||
@ -4334,7 +4340,7 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
|
||||
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
|
||||
assert(_worker_id == _par_scan_state->queue_num(), "sanity");
|
||||
assert(_worker_id == _par_scan_state->worker_id(), "sanity");
|
||||
|
||||
const InCSetState state = _g1->in_cset_state(obj);
|
||||
if (state.is_in_cset()) {
|
||||
@ -4443,9 +4449,6 @@ protected:
|
||||
ParallelTaskTerminator _terminator;
|
||||
uint _n_workers;
|
||||
|
||||
Mutex _stats_lock;
|
||||
Mutex* stats_lock() { return &_stats_lock; }
|
||||
|
||||
public:
|
||||
G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
|
||||
: AbstractGangTask("G1 collection"),
|
||||
@ -4453,8 +4456,7 @@ public:
|
||||
_queues(task_queues),
|
||||
_root_processor(root_processor),
|
||||
_terminator(n_workers, _queues),
|
||||
_n_workers(n_workers),
|
||||
_stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
|
||||
_n_workers(n_workers)
|
||||
{}
|
||||
|
||||
RefToScanQueueSet* queues() { return _queues; }
|
||||
@ -4581,8 +4583,8 @@ public:
|
||||
_g1h->update_surviving_young_words(pss.surviving_young_words()+1);
|
||||
|
||||
if (PrintTerminationStats) {
|
||||
MutexLocker x(stats_lock());
|
||||
pss.print_termination_stats(worker_id);
|
||||
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
|
||||
pss.print_termination_stats();
|
||||
}
|
||||
|
||||
assert(pss.queue_is_empty(), "should be empty");
|
||||
@ -5009,7 +5011,7 @@ public:
|
||||
bool G1STWIsAliveClosure::do_object_b(oop p) {
|
||||
// An object is reachable if it is outside the collection set,
|
||||
// or is inside and copied.
|
||||
return !_g1->obj_in_cs(p) || p->is_forwarded();
|
||||
return !_g1->is_in_cset(p) || p->is_forwarded();
|
||||
}
|
||||
|
||||
// Non Copying Keep Alive closure
|
||||
@ -5498,7 +5500,9 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
|
||||
}
|
||||
|
||||
// The individual threads will set their evac-failure closures.
|
||||
if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
|
||||
if (PrintTerminationStats) {
|
||||
G1ParScanThreadState::print_termination_stats_hdr();
|
||||
}
|
||||
|
||||
workers()->run_task(&g1_par_task);
|
||||
end_par_time_sec = os::elapsedTime();
|
||||
|
@ -27,7 +27,6 @@
|
||||
|
||||
#include "gc/g1/concurrentMark.hpp"
|
||||
#include "gc/g1/evacuationInfo.hpp"
|
||||
#include "gc/g1/g1AllocRegion.hpp"
|
||||
#include "gc/g1/g1AllocationContext.hpp"
|
||||
#include "gc/g1/g1Allocator.hpp"
|
||||
#include "gc/g1/g1BiasedArray.hpp"
|
||||
@ -193,7 +192,7 @@ class G1CollectedHeap : public CollectedHeap {
|
||||
// Closures used in implementation.
|
||||
friend class G1ParScanThreadState;
|
||||
friend class G1ParTask;
|
||||
friend class G1ParGCAllocator;
|
||||
friend class G1PLABAllocator;
|
||||
friend class G1PrepareCompactClosure;
|
||||
|
||||
// Other related classes.
|
||||
@ -248,7 +247,7 @@ private:
|
||||
// The sequence of all heap regions in the heap.
|
||||
HeapRegionManager _hrm;
|
||||
|
||||
// Class that handles the different kinds of allocations.
|
||||
// Handles non-humongous allocations in the G1CollectedHeap.
|
||||
G1Allocator* _allocator;
|
||||
|
||||
// Outside of GC pauses, the number of bytes used in all regions other
|
||||
@ -280,22 +279,6 @@ private:
|
||||
// start of each GC.
|
||||
bool _expand_heap_after_alloc_failure;
|
||||
|
||||
// It resets the mutator alloc region before new allocations can take place.
|
||||
void init_mutator_alloc_region();
|
||||
|
||||
// It releases the mutator alloc region.
|
||||
void release_mutator_alloc_region();
|
||||
|
||||
// It initializes the GC alloc regions at the start of a GC.
|
||||
void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
|
||||
|
||||
// It releases the GC alloc regions at the end of a GC.
|
||||
void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
|
||||
|
||||
// It does any cleanup that needs to be done on the GC alloc regions
|
||||
// before a Full GC.
|
||||
void abandon_gc_alloc_regions();
|
||||
|
||||
// Helper for monitoring and management support.
|
||||
G1MonitoringSupport* _g1mm;
|
||||
|
||||
@ -557,25 +540,6 @@ protected:
|
||||
// belongs to a young region.
|
||||
inline void dirty_young_block(HeapWord* start, size_t word_size);
|
||||
|
||||
// Allocate blocks during garbage collection. Will ensure an
|
||||
// allocation region, either by picking one or expanding the
|
||||
// heap, and then allocate a block of the given size. The block
|
||||
// may not be a humongous - it must fit into a single heap region.
|
||||
inline HeapWord* par_allocate_during_gc(InCSetState dest,
|
||||
size_t word_size,
|
||||
AllocationContext_t context);
|
||||
// Ensure that no further allocations can happen in "r", bearing in mind
|
||||
// that parallel threads might be attempting allocations.
|
||||
void par_allocate_remaining_space(HeapRegion* r);
|
||||
|
||||
// Allocation attempt during GC for a survivor object / PLAB.
|
||||
inline HeapWord* survivor_attempt_allocation(size_t word_size,
|
||||
AllocationContext_t context);
|
||||
|
||||
// Allocation attempt during GC for an old object / PLAB.
|
||||
inline HeapWord* old_attempt_allocation(size_t word_size,
|
||||
AllocationContext_t context);
|
||||
|
||||
// These methods are the "callbacks" from the G1AllocRegion class.
|
||||
|
||||
// For mutator alloc regions.
|
||||
@ -725,6 +689,9 @@ public:
|
||||
|
||||
G1HRPrinter* hr_printer() { return &_hr_printer; }
|
||||
|
||||
// Allocates a new heap region instance.
|
||||
HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
|
||||
|
||||
// Frees a non-humongous region by initializing its contents and
|
||||
// adding it to the free list that's passed as a parameter (this is
|
||||
// usually a local list which will be appended to the master free
|
||||
@ -887,7 +854,7 @@ protected:
|
||||
|
||||
// Preserve the mark of "obj", if necessary, in preparation for its mark
|
||||
// word being overwritten with a self-forwarding-pointer.
|
||||
void preserve_mark_during_evac_failure(uint queue, oop obj, markOop m);
|
||||
void preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m);
|
||||
|
||||
#ifndef PRODUCT
|
||||
// Support for forcing evacuation failures. Analogous to
|
||||
@ -1263,7 +1230,7 @@ public:
|
||||
|
||||
// Return "TRUE" iff the given object address is within the collection
|
||||
// set. Slow implementation.
|
||||
inline bool obj_in_cs(oop obj);
|
||||
bool obj_in_cs(oop obj);
|
||||
|
||||
inline bool is_in_cset(const HeapRegion *hr);
|
||||
inline bool is_in_cset(oop obj);
|
||||
|
@ -26,7 +26,6 @@
|
||||
#define SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
|
||||
|
||||
#include "gc/g1/concurrentMark.hpp"
|
||||
#include "gc/g1/g1AllocRegion.inline.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.hpp"
|
||||
#include "gc/g1/g1CollectorPolicy.hpp"
|
||||
#include "gc/g1/g1CollectorState.hpp"
|
||||
@ -57,20 +56,6 @@ size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
|
||||
return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
|
||||
}
|
||||
|
||||
HeapWord* G1CollectedHeap::par_allocate_during_gc(InCSetState dest,
|
||||
size_t word_size,
|
||||
AllocationContext_t context) {
|
||||
switch (dest.value()) {
|
||||
case InCSetState::Young:
|
||||
return survivor_attempt_allocation(word_size, context);
|
||||
case InCSetState::Old:
|
||||
return old_attempt_allocation(word_size, context);
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
return NULL; // Keep some compilers happy
|
||||
}
|
||||
}
|
||||
|
||||
// Inline functions for G1CollectedHeap
|
||||
|
||||
inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
|
||||
@ -126,67 +111,6 @@ inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
|
||||
_old_set.remove(hr);
|
||||
}
|
||||
|
||||
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
|
||||
HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
|
||||
return r != NULL && r->in_collection_set();
|
||||
}
|
||||
|
||||
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
|
||||
uint* gc_count_before_ret,
|
||||
uint* gclocker_retry_count_ret) {
|
||||
assert_heap_not_locked_and_not_at_safepoint();
|
||||
assert(!is_humongous(word_size), "attempt_allocation() should not "
|
||||
"be called for humongous allocation requests");
|
||||
|
||||
AllocationContext_t context = AllocationContext::current();
|
||||
HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
|
||||
false /* bot_updates */);
|
||||
if (result == NULL) {
|
||||
result = attempt_allocation_slow(word_size,
|
||||
context,
|
||||
gc_count_before_ret,
|
||||
gclocker_retry_count_ret);
|
||||
}
|
||||
assert_heap_not_locked();
|
||||
if (result != NULL) {
|
||||
dirty_young_block(result, word_size);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
|
||||
AllocationContext_t context) {
|
||||
assert(!is_humongous(word_size),
|
||||
"we should not be seeing humongous-size allocations in this path");
|
||||
|
||||
HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
|
||||
false /* bot_updates */);
|
||||
if (result == NULL) {
|
||||
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
|
||||
result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
|
||||
false /* bot_updates */);
|
||||
}
|
||||
if (result != NULL) {
|
||||
dirty_young_block(result, word_size);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
|
||||
AllocationContext_t context) {
|
||||
assert(!is_humongous(word_size),
|
||||
"we should not be seeing humongous-size allocations in this path");
|
||||
|
||||
HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
|
||||
true /* bot_updates */);
|
||||
if (result == NULL) {
|
||||
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
|
||||
result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
|
||||
true /* bot_updates */);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// It dirties the cards that cover the block so that the post
|
||||
// write barrier never queues anything when updating objects on this
|
||||
// block. It is assumed (and in fact we assert) that the block
|
||||
|
@ -24,6 +24,7 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.hpp"
|
||||
#include "gc/g1/heapRegion.inline.hpp"
|
||||
|
||||
bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
|
||||
jlong* totals,
|
||||
@ -31,3 +32,8 @@ bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
|
||||
jint len) {
|
||||
return false;
|
||||
}
|
||||
|
||||
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
|
||||
MemRegion mr) {
|
||||
return new HeapRegion(hrs_index, bot_shared(), mr);
|
||||
}
|
||||
|
@ -31,6 +31,7 @@
|
||||
#include "gc/g1/g1ErgoVerbose.hpp"
|
||||
#include "gc/g1/g1GCPhaseTimes.hpp"
|
||||
#include "gc/g1/g1Log.hpp"
|
||||
#include "gc/g1/heapRegion.inline.hpp"
|
||||
#include "gc/g1/heapRegionRemSet.hpp"
|
||||
#include "gc/shared/gcPolicyCounters.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
|
@ -48,7 +48,7 @@ void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan
|
||||
assert(par_scan_state != NULL, "Must set par_scan_state to non-NULL.");
|
||||
|
||||
_par_scan_state = par_scan_state;
|
||||
_worker_id = par_scan_state->queue_num();
|
||||
_worker_id = par_scan_state->worker_id();
|
||||
|
||||
assert(_worker_id < ParallelGCThreads,
|
||||
err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, ParallelGCThreads));
|
||||
|
@ -31,6 +31,7 @@
|
||||
#include "gc/g1/g1ParScanThreadState.inline.hpp"
|
||||
#include "gc/g1/g1RemSet.hpp"
|
||||
#include "gc/g1/g1RemSet.inline.hpp"
|
||||
#include "gc/g1/heapRegion.inline.hpp"
|
||||
#include "gc/g1/heapRegionRemSet.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
#include "runtime/prefetch.inline.hpp"
|
||||
|
@ -31,13 +31,13 @@
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/prefetch.inline.hpp"
|
||||
|
||||
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
|
||||
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, ReferenceProcessor* rp)
|
||||
: _g1h(g1h),
|
||||
_refs(g1h->task_queue(queue_num)),
|
||||
_refs(g1h->task_queue(worker_id)),
|
||||
_dcq(&g1h->dirty_card_queue_set()),
|
||||
_ct_bs(g1h->g1_barrier_set()),
|
||||
_g1_rem(g1h->g1_rem_set()),
|
||||
_hash_seed(17), _queue_num(queue_num),
|
||||
_hash_seed(17), _worker_id(worker_id),
|
||||
_term_attempts(0),
|
||||
_tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
|
||||
_age_table(false), _scanner(g1h, rp),
|
||||
@ -59,7 +59,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
|
||||
_surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
|
||||
memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
|
||||
|
||||
_g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
|
||||
_plab_allocator = G1PLABAllocator::create_allocator(_g1h->allocator());
|
||||
|
||||
_dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
|
||||
// The dest for Young is used when the objects are aged enough to
|
||||
@ -71,37 +71,29 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
|
||||
}
|
||||
|
||||
G1ParScanThreadState::~G1ParScanThreadState() {
|
||||
_g1_par_allocator->retire_alloc_buffers();
|
||||
delete _g1_par_allocator;
|
||||
_plab_allocator->retire_alloc_buffers();
|
||||
delete _plab_allocator;
|
||||
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
|
||||
}
|
||||
|
||||
void
|
||||
G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
|
||||
{
|
||||
void G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) {
|
||||
st->print_raw_cr("GC Termination Stats");
|
||||
st->print_raw_cr(" elapsed --strong roots-- -------termination-------"
|
||||
" ------waste (KiB)------");
|
||||
st->print_raw_cr("thr ms ms % ms % attempts"
|
||||
" total alloc undo");
|
||||
st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
|
||||
" ------- ------- -------");
|
||||
st->print_raw_cr(" elapsed --strong roots-- -------termination------- ------waste (KiB)------");
|
||||
st->print_raw_cr("thr ms ms % ms % attempts total alloc undo");
|
||||
st->print_raw_cr("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
|
||||
}
|
||||
|
||||
void
|
||||
G1ParScanThreadState::print_termination_stats(int i,
|
||||
outputStream* const st) const
|
||||
{
|
||||
void G1ParScanThreadState::print_termination_stats(outputStream* const st) const {
|
||||
const double elapsed_ms = elapsed_time() * 1000.0;
|
||||
const double s_roots_ms = strong_roots_time() * 1000.0;
|
||||
const double term_ms = term_time() * 1000.0;
|
||||
size_t alloc_buffer_waste = 0;
|
||||
size_t undo_waste = 0;
|
||||
_g1_par_allocator->waste(alloc_buffer_waste, undo_waste);
|
||||
st->print_cr("%3d %9.2f %9.2f %6.2f "
|
||||
_plab_allocator->waste(alloc_buffer_waste, undo_waste);
|
||||
st->print_cr("%3u %9.2f %9.2f %6.2f "
|
||||
"%9.2f %6.2f " SIZE_FORMAT_W(8) " "
|
||||
SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
|
||||
i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
|
||||
_worker_id, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
|
||||
term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
|
||||
(alloc_buffer_waste + undo_waste) * HeapWordSize / K,
|
||||
alloc_buffer_waste * HeapWordSize / K,
|
||||
@ -167,8 +159,9 @@ HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
|
||||
// Right now we only have two types of regions (young / old) so
|
||||
// let's keep the logic here simple. We can generalize it when necessary.
|
||||
if (dest->is_young()) {
|
||||
HeapWord* const obj_ptr = _g1_par_allocator->allocate(InCSetState::Old,
|
||||
word_sz, context);
|
||||
HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
|
||||
word_sz,
|
||||
context);
|
||||
if (obj_ptr == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
@ -209,12 +202,12 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
|
||||
|
||||
uint age = 0;
|
||||
InCSetState dest_state = next_state(state, old_mark, age);
|
||||
HeapWord* obj_ptr = _g1_par_allocator->plab_allocate(dest_state, word_sz, context);
|
||||
HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);
|
||||
|
||||
// PLAB allocations should succeed most of the time, so we'll
|
||||
// normally check against NULL once and that's it.
|
||||
if (obj_ptr == NULL) {
|
||||
obj_ptr = _g1_par_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
|
||||
obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
|
||||
if (obj_ptr == NULL) {
|
||||
obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context);
|
||||
if (obj_ptr == NULL) {
|
||||
@ -233,7 +226,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
|
||||
if (_g1h->evacuation_should_fail()) {
|
||||
// Doing this after all the allocation attempts also tests the
|
||||
// undo_allocation() method too.
|
||||
_g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
|
||||
_plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
|
||||
return handle_evacuation_failure_par(old, old_mark);
|
||||
}
|
||||
#endif // !PRODUCT
|
||||
@ -274,7 +267,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
|
||||
"sanity");
|
||||
G1StringDedup::enqueue_from_evacuation(is_from_young,
|
||||
is_to_young,
|
||||
queue_num(),
|
||||
_worker_id,
|
||||
obj);
|
||||
}
|
||||
|
||||
@ -295,7 +288,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
|
||||
}
|
||||
return obj;
|
||||
} else {
|
||||
_g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
|
||||
_plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
|
||||
return forward_ptr;
|
||||
}
|
||||
}
|
||||
@ -314,7 +307,7 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
|
||||
_g1h->hr_printer()->evac_failure(r);
|
||||
}
|
||||
|
||||
_g1h->preserve_mark_during_evac_failure(_queue_num, old, m);
|
||||
_g1h->preserve_mark_during_evac_failure(_worker_id, old, m);
|
||||
|
||||
_scanner.set_region(r);
|
||||
old->oop_iterate_backwards(&_scanner);
|
||||
|
@ -46,7 +46,7 @@ class G1ParScanThreadState : public StackObj {
|
||||
G1SATBCardTableModRefBS* _ct_bs;
|
||||
G1RemSet* _g1_rem;
|
||||
|
||||
G1ParGCAllocator* _g1_par_allocator;
|
||||
G1PLABAllocator* _plab_allocator;
|
||||
|
||||
ageTable _age_table;
|
||||
InCSetState _dest[InCSetState::Num];
|
||||
@ -55,7 +55,7 @@ class G1ParScanThreadState : public StackObj {
|
||||
G1ParScanClosure _scanner;
|
||||
|
||||
int _hash_seed;
|
||||
uint _queue_num;
|
||||
uint _worker_id;
|
||||
|
||||
size_t _term_attempts;
|
||||
|
||||
@ -85,7 +85,7 @@ class G1ParScanThreadState : public StackObj {
|
||||
}
|
||||
|
||||
public:
|
||||
G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
|
||||
G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, ReferenceProcessor* rp);
|
||||
~G1ParScanThreadState();
|
||||
|
||||
ageTable* age_table() { return &_age_table; }
|
||||
@ -112,8 +112,7 @@ class G1ParScanThreadState : public StackObj {
|
||||
}
|
||||
}
|
||||
|
||||
int* hash_seed() { return &_hash_seed; }
|
||||
uint queue_num() { return _queue_num; }
|
||||
uint worker_id() { return _worker_id; }
|
||||
|
||||
size_t term_attempts() const { return _term_attempts; }
|
||||
void note_term_attempt() { _term_attempts++; }
|
||||
@ -139,8 +138,11 @@ class G1ParScanThreadState : public StackObj {
|
||||
return os::elapsedTime() - _start;
|
||||
}
|
||||
|
||||
// Print the header for the per-thread termination statistics.
|
||||
static void print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
|
||||
void print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
|
||||
|
||||
// Print actual per-thread termination statistics.
|
||||
void print_termination_stats(outputStream* const st = gclog_or_tty) const;
|
||||
|
||||
size_t* surviving_young_words() {
|
||||
// We add on to hide entry 0 which accumulates surviving words for
|
||||
|
@ -56,7 +56,7 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from
|
||||
}
|
||||
|
||||
assert(obj != NULL, "Must be");
|
||||
update_rs(from, p, queue_num());
|
||||
update_rs(from, p, _worker_id);
|
||||
}
|
||||
|
||||
template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
|
||||
@ -136,7 +136,7 @@ inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
|
||||
|
||||
void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
|
||||
StarTask stolen_task;
|
||||
while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
|
||||
while (task_queues->steal(_worker_id, &_hash_seed, stolen_task)) {
|
||||
assert(verify_task(stolen_task), "sanity");
|
||||
dispatch_reference(stolen_task);
|
||||
|
||||
|
@ -34,6 +34,7 @@
|
||||
#include "gc/g1/g1GCPhaseTimes.hpp"
|
||||
#include "gc/g1/g1RemSet.inline.hpp"
|
||||
#include "gc/g1/g1RootProcessor.hpp"
|
||||
#include "gc/g1/heapRegion.inline.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "runtime/fprofiler.hpp"
|
||||
#include "runtime/mutex.hpp"
|
||||
|
@ -497,20 +497,10 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
return _rem_set;
|
||||
}
|
||||
|
||||
bool in_collection_set() const;
|
||||
inline bool in_collection_set() const;
|
||||
|
||||
HeapRegion* next_in_collection_set() {
|
||||
assert(in_collection_set(), "should only invoke on member of CS.");
|
||||
assert(_next_in_special_set == NULL ||
|
||||
_next_in_special_set->in_collection_set(),
|
||||
"Malformed CS.");
|
||||
return _next_in_special_set;
|
||||
}
|
||||
void set_next_in_collection_set(HeapRegion* r) {
|
||||
assert(in_collection_set(), "should only invoke on member of CS.");
|
||||
assert(r == NULL || r->in_collection_set(), "Malformed CS.");
|
||||
_next_in_special_set = r;
|
||||
}
|
||||
inline HeapRegion* next_in_collection_set() const;
|
||||
inline void set_next_in_collection_set(HeapRegion* r);
|
||||
|
||||
void set_allocation_context(AllocationContext_t context) {
|
||||
_allocation_context = context;
|
||||
|
@ -26,7 +26,7 @@
|
||||
#define SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP
|
||||
|
||||
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
#include "gc/shared/space.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
@ -200,4 +200,18 @@ inline bool HeapRegion::in_collection_set() const {
|
||||
return G1CollectedHeap::heap()->is_in_cset(this);
|
||||
}
|
||||
|
||||
inline HeapRegion* HeapRegion::next_in_collection_set() const {
|
||||
assert(in_collection_set(), "should only invoke on member of CS.");
|
||||
assert(_next_in_special_set == NULL ||
|
||||
_next_in_special_set->in_collection_set(),
|
||||
"Malformed CS.");
|
||||
return _next_in_special_set;
|
||||
}
|
||||
|
||||
void HeapRegion::set_next_in_collection_set(HeapRegion* r) {
|
||||
assert(in_collection_set(), "should only invoke on member of CS.");
|
||||
assert(r == NULL || r->in_collection_set(), "Malformed CS.");
|
||||
_next_in_special_set = r;
|
||||
}
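
These accessors, moved inline here, maintain the collection set as an intrusive singly linked list threaded through the regions themselves via _next_in_special_set. A minimal illustrative model of that chaining (simplified types, not HotSpot code):

#include <cassert>
#include <cstdio>

// Regions in the collection set are linked through a next pointer kept in the
// region itself, so walking the collection set needs no extra storage.
struct Region {
  int id;
  bool in_collection_set;
  Region* next_in_collection_set;
};

void link_into_collection_set(Region* r, Region*& head) {
  r->in_collection_set = true;
  r->next_in_collection_set = head;   // push onto the intrusive list
  head = r;
}

int main() {
  Region regions[3] = {{0, false, nullptr}, {1, false, nullptr}, {2, false, nullptr}};
  Region* cset_head = nullptr;
  link_into_collection_set(&regions[2], cset_head);
  link_into_collection_set(&regions[0], cset_head);

  // Walk the collection set the way next_in_collection_set() allows.
  for (Region* r = cset_head; r != nullptr; r = r->next_in_collection_set) {
    assert(r->in_collection_set);
    std::printf("region %d is in the collection set\n", r->id);
  }
  return 0;
}
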
#endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP
|
||||
|
@ -70,7 +70,7 @@ HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
|
||||
HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
|
||||
MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
|
||||
assert(reserved().contains(mr), "invariant");
|
||||
return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr);
|
||||
return g1h->new_heap_region(hrm_index, mr);
|
||||
}
|
||||
|
||||
void HeapRegionManager::commit_regions(uint index, size_t num_regions) {
|
||||
|
@ -94,8 +94,9 @@ void VM_G1IncCollectionPause::doit() {
|
||||
|
||||
if (_word_size > 0) {
|
||||
// An allocation has been requested. So, try to do that first.
|
||||
_result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
|
||||
false /* expect_null_cur_alloc_region */);
|
||||
_result = g1h->attempt_allocation_at_safepoint(_word_size,
|
||||
allocation_context(),
|
||||
false /* expect_null_cur_alloc_region */);
|
||||
if (_result != NULL) {
|
||||
// If we can successfully allocate before we actually do the
|
||||
// pause then we will consider this pause successful.
|
||||
@ -147,8 +148,9 @@ void VM_G1IncCollectionPause::doit() {
|
||||
g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
|
||||
if (_pause_succeeded && _word_size > 0) {
|
||||
// An allocation had been requested.
|
||||
_result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
|
||||
true /* expect_null_cur_alloc_region */);
|
||||
_result = g1h->attempt_allocation_at_safepoint(_word_size,
|
||||
allocation_context(),
|
||||
true /* expect_null_cur_alloc_region */);
|
||||
} else {
|
||||
assert(_result == NULL, "invariant");
|
||||
if (!_pause_succeeded) {