/*
 * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1ALLOCATOR_HPP
#define SHARE_VM_GC_G1_G1ALLOCATOR_HPP

#include "gc/g1/g1AllocRegion.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/plab.hpp"

class EvacuationInfo;

// Interface to keep track of which regions G1 is currently allocating into. Provides
// accessors to the allocation regions (e.g. for allocating into them or getting their
// occupancy). Also keeps track of retained regions across GCs.
class G1Allocator : public CHeapObj<mtGC> {
  friend class VMStructs;

protected:
  G1CollectedHeap* _g1h;
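
  // Accessor to the mutator allocation region; the concrete subclass provides
  // the region instance.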
  virtual MutatorAllocRegion* mutator_alloc_region() = 0;
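
  // Whether the survivor/old area ran out of regions during the current GC; used
  // to avoid repeated futile attempts to allocate a new region for that space.
  // The set_* methods below record that condition.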
  virtual bool survivor_is_full() const = 0;
  virtual bool old_is_full() const = 0;

  virtual void set_survivor_full() = 0;
  virtual void set_old_full() = 0;

  // Accessors to the allocation regions.
  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region() = 0;
  virtual OldGCAllocRegion* old_gc_alloc_region() = 0;
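
  // The following allocation attempts return at least min_word_size words, preferably
  // desired_word_size, and report the size actually allocated in actual_word_size;
  // they return NULL if not even min_word_size words are available.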

  // Allocation attempt during GC for a survivor object / PLAB.
  inline HeapWord* survivor_attempt_allocation(size_t min_word_size,
                                               size_t desired_word_size,
                                               size_t* actual_word_size);

  // Allocation attempt during GC for an old object / PLAB.
  inline HeapWord* old_attempt_allocation(size_t min_word_size,
                                          size_t desired_word_size,
                                          size_t* actual_word_size);

public:
  G1Allocator(G1CollectedHeap* heap) : _g1h(heap) { }
  virtual ~G1Allocator() { }

#ifdef ASSERT
  // Do we currently have an active mutator region to allocate into?
  bool has_mutator_alloc_region() { return mutator_alloc_region()->get() != NULL; }
#endif
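
  // Set up and tear down the mutator and GC allocation regions at the start and
  // end of a mutator phase / garbage collection, respectively.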
  virtual void init_mutator_alloc_region() = 0;
  virtual void release_mutator_alloc_region() = 0;

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
  virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
  virtual void abandon_gc_alloc_regions() = 0;

  // Management of retained regions.
  virtual bool is_retained_old_region(HeapRegion* hr) = 0;
  void reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                 OldGCAllocRegion* old,
                                 HeapRegion** retained);

  // Allocate blocks of memory during mutator time.
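  // attempt_allocation() tries to allocate lock-free within the current mutator
  // region; attempt_allocation_locked() may additionally retire that region and
  // install a new one (callers are expected to hold the Heap_lock), while
  // attempt_allocation_force() forces allocation of a fresh region.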
  inline HeapWord* attempt_allocation(size_t word_size);
  inline HeapWord* attempt_allocation_locked(size_t word_size);
  inline HeapWord* attempt_allocation_force(size_t word_size);
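
  // An estimate (racy, hence "unsafe") of the largest TLAB that can currently be
  // allocated without triggering a collection; see CollectedHeap::unsafe_max_tlab_alloc.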
  size_t unsafe_max_tlab_alloc();

  // Allocate blocks of memory during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be humongous - it must fit into a single heap region.
  HeapWord* par_allocate_during_gc(InCSetState dest,
                                   size_t word_size);
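
  // As above, but attempts an allocation of between min_word_size and
  // desired_word_size words, reporting the size actually allocated in
  // actual_word_size.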
  HeapWord* par_allocate_during_gc(InCSetState dest,
                                   size_t min_word_size,
                                   size_t desired_word_size,
                                   size_t* actual_word_size);

  virtual size_t used_in_alloc_regions() = 0;
};

// The default allocation region manager for G1. Provides a single mutator, survivor
// and old generation allocation region.
// Can retain the (single) old generation allocation region across GCs.
class G1DefaultAllocator : public G1Allocator {
private:
  bool _survivor_is_full;
  bool _old_is_full;

protected:
  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;
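
  // Old generation region retained at the end of the last GC for reuse as the
  // initial old allocation region of the next GC, or NULL if none was kept.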
  HeapRegion* _retained_old_gc_alloc_region;

public:
  G1DefaultAllocator(G1CollectedHeap* heap);

  virtual bool survivor_is_full() const;
  virtual bool old_is_full() const;

  virtual void set_survivor_full();
  virtual void set_old_full();

  virtual void init_mutator_alloc_region();
  virtual void release_mutator_alloc_region();

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void abandon_gc_alloc_regions();

  virtual bool is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }

  virtual MutatorAllocRegion* mutator_alloc_region() {
    return &_mutator_alloc_region;
  }

  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region() {
    return &_survivor_gc_alloc_region;
  }

  virtual OldGCAllocRegion* old_gc_alloc_region() {
    return &_old_gc_alloc_region;
  }

  virtual size_t used_in_alloc_regions() {
    assert(Heap_lock->owner() != NULL,
           "Should be owned on this thread's behalf.");
    size_t result = 0;

    // Read only once in case it is set to NULL concurrently
    HeapRegion* hr = mutator_alloc_region()->get();
    if (hr != NULL) {
      result += hr->used();
    }
    return result;
  }
};

// Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
// Needs to handle multiple contexts, extra alignment in any "survivor" area and some
// statistics.
class G1PLABAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
protected:
  G1CollectedHeap* _g1h;
  G1Allocator* _allocator;

  // The survivor alignment in effect in bytes.
  // == 0 : don't align survivors
  // != 0 : align survivors to that alignment
  // These values were chosen to favor the non-alignment case since some
  // architectures have a special compare-against-zero instruction.
  const uint _survivor_alignment_bytes;

  // Number of words allocated directly (not counting PLAB allocation).
  size_t _direct_allocated[InCSetState::Num];

  virtual void flush_and_retire_stats() = 0;
  virtual PLAB* alloc_buffer(InCSetState dest) = 0;

  // Calculate the survivor space object alignment in bytes. Returns that or 0 if
  // there are no restrictions on survivor alignment.
  static uint calc_survivor_alignment_bytes() {
    assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
    if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
      // No need to align objects in the survivors differently, return 0
      // which means "survivor alignment is not used".
      return 0;
    } else {
      assert(SurvivorAlignmentInBytes > 0, "sanity");
      return SurvivorAlignmentInBytes;
    }
  }
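
  // For example, with -XX:SurvivorAlignmentInBytes=32 and the default
  // -XX:ObjectAlignmentInBytes=8 this returns 32; with both equal to 8 it
  // returns 0 and survivor alignment is skipped entirely.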
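
  // Obtain a new PLAB for allocating word_sz words into dest; returns NULL if a
  // new buffer could not be obtained.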
  HeapWord* allocate_new_plab(InCSetState dest,
                              size_t word_sz);
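
  // Whether the current PLAB, failing to satisfy an allocation of
  // allocation_word_sz words, may be retired and thrown away, i.e. whether the
  // space that would be wasted is small enough relative to buffer_size.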
  bool may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const;

public:
  G1PLABAllocator(G1Allocator* allocator);
  virtual ~G1PLABAllocator() { }

  virtual void waste(size_t& wasted, size_t& undo_wasted) = 0;

  // Allocate word_sz words in dest, either directly into the regions or by
  // allocating a new PLAB. Returns the address of the allocated memory, NULL if
  // not successful. plab_refill_failed indicates whether an attempt to refill the
  // PLAB failed or not.
  HeapWord* allocate_direct_or_new_plab(InCSetState dest,
                                        size_t word_sz,
                                        bool* plab_refill_failed);

  // Allocate word_sz words in the PLAB of dest. Returns the address of the
  // allocated memory, NULL if not successful.
  inline HeapWord* plab_allocate(InCSetState dest,
                                 size_t word_sz);
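
  // Fast path: first try to bump-allocate within the current PLAB for dest, and
  // fall back to refilling the PLAB or allocating directly only when that fails.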
  HeapWord* allocate(InCSetState dest,
                     size_t word_sz,
                     bool* refill_failed) {
    HeapWord* const obj = plab_allocate(dest, word_sz);
    if (obj != NULL) {
      return obj;
    }
    return allocate_direct_or_new_plab(dest, word_sz, refill_failed);
  }
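
  // Undo the most recent allocation of word_sz words at obj for the given dest.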
  void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz);
};

// The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor
// and old generation allocation.
class G1DefaultPLABAllocator : public G1PLABAllocator {
  PLAB  _surviving_alloc_buffer;
  PLAB  _tenured_alloc_buffer;
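  // Mapping from in-cset state (young/old) to the PLAB currently used for it.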
  PLAB* _alloc_buffers[InCSetState::Num];

public:
  G1DefaultPLABAllocator(G1Allocator* _allocator);

  virtual PLAB* alloc_buffer(InCSetState dest) {
    assert(dest.is_valid(),
           "Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value());
    assert(_alloc_buffers[dest.value()] != NULL,
           "Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value());
    return _alloc_buffers[dest.value()];
  }

  virtual void flush_and_retire_stats();

  virtual void waste(size_t& wasted, size_t& undo_wasted);
};

// G1ArchiveRegionMap is a boolean array used to mark G1 regions as
// archive regions. This allows a quick check for whether an object
// should not be marked because it is in an archive region.
class G1ArchiveRegionMap : public G1BiasedMappedArray<bool> {
protected:
  bool default_value() const { return false; }
};

// G1ArchiveAllocator is used to allocate memory in archive
// regions. Such regions are not scavenged nor compacted by GC.
// There are two types of archive regions, which differ in the kind
// of references allowed for the contained objects:
//
// - 'Closed' archive regions contain no references outside of other
//   closed archive regions. The regions are immutable to GC. GC does
//   not mark object headers in 'closed' archive regions.
// - An 'open' archive region may contain references to any other region,
//   including closed archive, open archive and other java heap regions.
//   GC can adjust pointers and mark object headers in 'open' archive regions.
class G1ArchiveAllocator : public CHeapObj<mtGC> {
protected:
  bool _open; // Indicates whether the region is an 'open' archive.
  G1CollectedHeap* _g1h;

  // The current allocation region
  HeapRegion* _allocation_region;

  // Regions allocated for the current archive range.
  GrowableArray<HeapRegion*> _allocated_regions;

  // The number of bytes used in the current range.
  size_t _summary_bytes_used;

  // Current allocation window within the current region.
  HeapWord* _bottom;
  HeapWord* _top;
  HeapWord* _max;

  // Allocate a new region for this archive allocator.
  // Allocation is from the top of the reserved heap downward.
  bool alloc_new_region();

public:
  G1ArchiveAllocator(G1CollectedHeap* g1h, bool open) :
    _open(open),
    _g1h(g1h),
    _allocation_region(NULL),
    _allocated_regions((ResourceObj::set_allocation_type((address) &_allocated_regions,
                                                         ResourceObj::C_HEAP),
                        2), true /* C_Heap */),
    _summary_bytes_used(0),
    _bottom(NULL),
    _top(NULL),
    _max(NULL) { }

  virtual ~G1ArchiveAllocator() {
    assert(_allocation_region == NULL, "_allocation_region not NULL");
  }

  static G1ArchiveAllocator* create_allocator(G1CollectedHeap* g1h, bool open);

  // Allocate memory for an individual object.
  HeapWord* archive_mem_allocate(size_t word_size);

  // Return the memory ranges used in the current archive, after
  // aligning to the requested alignment.
  void complete_archive(GrowableArray<MemRegion>* ranges,
                        size_t end_alignment_in_bytes);

  // The number of bytes allocated by this allocator.
  size_t used() {
    return _summary_bytes_used;
  }

  // Clear the count of bytes allocated in prior G1 regions. This
  // must be done when recalculate_used is used to reset the counter
  // for the generic allocator, since it counts bytes in all G1
  // regions, including those still associated with this allocator.
  void clear_used() {
    _summary_bytes_used = 0;
  }

  // Create the archive region maps used to identify archive objects.
  static inline void enable_archive_object_check();

  // Set the regions containing the specified address range as archive/non-archive.
  static inline void set_range_archive(MemRegion range, bool open);

  // Check if the object is in a closed archive region.
  static inline bool is_closed_archive_object(oop object);
  // Check if the object is in an open archive region.
  static inline bool is_open_archive_object(oop object);
  // Check if the object is in either a closed or an open archive region.
  static inline bool is_archive_object(oop object);

private:
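  // True once enable_archive_object_check() has initialized the region maps below.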
  static bool _archive_check_enabled;
  static G1ArchiveRegionMap  _closed_archive_region_map;
  static G1ArchiveRegionMap  _open_archive_region_map;

  // Check if an object is in a closed archive region using the _closed_archive_region_map.
  static inline bool in_closed_archive_range(oop object);
  // Check if an object is in an open archive region using the _open_archive_region_map.
  static inline bool in_open_archive_range(oop object);

  // Check if archive object checking is enabled, to avoid calling in_open/closed_archive_range
  // unnecessarily.
  static inline bool archive_check_enabled();
};

#endif // SHARE_VM_GC_G1_G1ALLOCATOR_HPP