/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "runtime/orderAccess.hpp"

G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
  return _policy->phase_times();
}

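// Returns the PLAB allocation statistics for the given destination region type.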
G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return &_survivor_evac_stats;
    case G1HeapRegionAttr::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

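// Computes the desired PLAB size (in words) for allocations into the given
// destination, based on the statistics gathered so far and the current number
// of active workers.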
size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using similar paths as oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm->at(index); }

// Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm->at_or_null(index); }

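// Returns the next region of the humongous object that hr belongs to, or NULL
// if hr is its last region.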
inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
  return _hrm->next_region_in_humongous(hr);
}

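// Returns the index of the region containing the given address, which must be
// within the reserved heap.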
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end()));
  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

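// Returns the bottom address of the region with the given index.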
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm->reserved().start() + index * HeapRegion::GrainWords;
}

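// Returns the region containing the given address. The address must be
// non-NULL and within the reserved heap.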
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  return _hrm->addr_to_region((HeapWord*) addr);
}

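// As heap_region_containing(), but returns NULL if the corresponding region
// is unmapped.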
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  uint const region_idx = addr_to_region(addr);
  return region_at_or_null(region_idx);
}

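// Add or remove regions from the old and archive region sets.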
inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
  _archive_set.add(hr);
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->is_humongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  card_table()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

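// Returns whether the given object is marked on the next (in-progress)
// concurrent marking bitmap.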
inline bool G1CollectedHeap::is_marked_next(oop obj) const {
  return _cm->next_mark_bitmap()->is_marked((HeapWord*)obj);
}

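// Fast collection set membership tests, backed by the per-region attribute
// table.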
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  return is_in_cset((HeapWord*)obj);
}

inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
  return _region_attr.is_in_cset(addr);
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _region_attr.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _region_attr.is_in_cset_or_humongous((HeapWord*)obj);
}

G1HeapRegionAttr G1CollectedHeap::region_attr(const oop obj) {
  return _region_attr.at((HeapWord*)obj);
}

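// Record per-region attributes (region type and whether the remembered set is
// tracked) used by the evacuation fast path.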
void G1CollectedHeap::register_humongous_region_with_region_attr(uint index) {
  _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
}

void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
}

void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
}

void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                     bool during_initial_mark,
                                                     bool mark_or_rebuild_in_progress) {
  bool res = false;
  if (mark_or_rebuild_in_progress) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (for_young_gc) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool in_young_only_phase = collector_state()->in_young_only_phase();
    const bool in_initial_mark_gc = collector_state()->in_initial_mark_gc();
    const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(in_young_only_phase,
                                          in_initial_mark_gc,
                                          mark_or_rebuild_in_progress);
  }
}

inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif // #ifndef PRODUCT

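// The following predicates treat a NULL object as neither young nor dead.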
inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

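// During a full GC an object is considered dead if it is not marked and does
// not reside in an archive region.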
inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
  return !is_marked_next(obj) && !hr->is_archive();
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
  return is_obj_dead_full(obj, heap_region_containing(obj));
}

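// Mark or query whether the humongous object starting at the given region
// index is a candidate for eager reclamation.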
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // Clear the flag in the humongous_reclaim_candidates table. Also
  // reset the entry in the _in_cset_fast_test table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may at the same time enter here, but this
  // is benign.
  // During collection we only ever clear the "candidate" flag, and only ever clear the
  // entry in the in_cset_fast_table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _region_attr.clear_humongous(region);
  }
}

#endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP