/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
|
|
|
|
|
2019-01-10 15:13:51 -05:00
|
|
|
#ifndef SHARE_GC_G1_HEAPREGIONMANAGER_HPP
|
|
|
|
#define SHARE_GC_G1_HEAPREGIONMANAGER_HPP
|
2010-11-23 13:22:55 -08:00
|
|
|
|
2015-05-13 15:16:06 +02:00
|
|
|
#include "gc/g1/g1BiasedArray.hpp"
|
|
|
|
#include "gc/g1/g1RegionToSpaceMapper.hpp"
|
|
|
|
#include "gc/g1/heapRegionSet.hpp"
|
2020-04-24 14:39:20 +02:00
|
|
|
#include "memory/allocation.hpp"
|
2015-03-16 17:51:28 +03:00
|
|
|
#include "services/memoryUsage.hpp"
|
2013-09-25 13:25:24 +02:00
|
|
|
|
2008-06-05 15:57:56 -07:00
|
|
|
class HeapRegion;
|
|
|
|
class HeapRegionClosure;
|
2014-10-07 14:54:53 +02:00
|
|
|
class HeapRegionClaimer;
|
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
|
|
|
class FreeRegionList;
|
2016-09-16 11:33:47 +02:00
|
|
|
class WorkGang;
|
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
|
|
|
|
2013-09-25 13:25:24 +02:00
|
|
|
// Biased array mapping heap addresses to their HeapRegion*. Slots for which
// no HeapRegion instance exists yet hold the default value below.
class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
protected:
  // Value reported for table slots that have no associated HeapRegion.
  virtual HeapRegion* default_value() const { return NULL; }
};
|
|
|
|
|
2020-04-24 14:39:20 +02:00
|
|
|
// Helper class to define a range [start, end) of regions.
|
|
|
|
class HeapRegionRange : public StackObj {
|
|
|
|
// Inclusive start of the range.
|
|
|
|
uint _start;
|
|
|
|
// Exclusive end of the range.
|
|
|
|
uint _end;
|
|
|
|
public:
|
|
|
|
HeapRegionRange(uint start, uint end);
|
|
|
|
|
|
|
|
uint start() const { return _start; }
|
|
|
|
uint end() const { return _end; }
|
|
|
|
uint length() const { return _end - _start; }
|
|
|
|
};
|
|
|
|
|
2014-08-19 14:09:10 +02:00
|
|
|
// This class keeps track of the actual heap memory, auxiliary data
|
|
|
|
// and its metadata (i.e., HeapRegion instances) and the list of free regions.
|
|
|
|
//
|
|
|
|
// This allows maximum flexibility for deciding what to commit or uncommit given
|
|
|
|
// a request from outside.
|
|
|
|
//
|
|
|
|
// HeapRegions are kept in the _regions array in address order. A region's
|
|
|
|
// index in the array corresponds to its index in the heap (i.e., 0 is the
|
|
|
|
// region at the bottom of the heap, 1 is the one after it, etc.). Two
|
|
|
|
// regions that are consecutive in the array should also be adjacent in the
|
|
|
|
// address space (i.e., region(i).end() == region(i+1).bottom().
|
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
|
|
|
//
|
|
|
|
// We create a HeapRegion when we commit the region's address space
|
|
|
|
// for the first time. When we uncommit the address space of a
|
|
|
|
// region we retain the HeapRegion to be able to re-use it in the
|
|
|
|
// future (in case we recommit it).
|
|
|
|
//
|
|
|
|
// We keep track of three lengths:
|
|
|
|
//
|
2014-08-19 14:09:10 +02:00
|
|
|
// * _num_committed (returned by length()) is the number of currently
|
|
|
|
// committed regions. These may not be contiguous.
|
|
|
|
// * _allocated_heapregions_length (not exposed outside this class) is the
|
|
|
|
// number of regions+1 for which we have HeapRegions.
|
2013-09-25 13:25:24 +02:00
|
|
|
// * max_length() returns the maximum number of regions the heap can have.
|
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
|
|
|
//
|
2008-06-05 15:57:56 -07:00
|
|
|
|
2014-08-26 09:36:53 +02:00
|
|
|
class HeapRegionManager: public CHeapObj<mtGC> {
|
2011-09-20 09:59:59 -04:00
|
|
|
friend class VMStructs;
|
2014-10-07 14:54:53 +02:00
|
|
|
friend class HeapRegionClaimer;
|
2008-06-05 15:57:56 -07:00
|
|
|
|
2014-08-19 14:09:10 +02:00
|
|
|
G1RegionToSpaceMapper* _bot_mapper;
|
|
|
|
G1RegionToSpaceMapper* _cardtable_mapper;
|
|
|
|
G1RegionToSpaceMapper* _card_counts_mapper;
|
2008-06-05 15:57:56 -07:00
|
|
|
|
2014-08-19 14:09:10 +02:00
|
|
|
// Each bit in this bitmap indicates that the corresponding region is available
|
|
|
|
// for allocation.
|
2016-05-03 22:45:27 +02:00
|
|
|
CHeapBitMap _available_map;
|
2014-08-19 14:09:10 +02:00
|
|
|
|
|
|
|
// The number of regions committed in the heap.
|
2014-08-18 16:10:44 +02:00
|
|
|
uint _num_committed;
|
2013-05-06 21:30:34 +02:00
|
|
|
|
2014-08-18 16:10:44 +02:00
|
|
|
// Internal only. The highest heap region +1 we allocated a HeapRegion instance for.
|
|
|
|
uint _allocated_heapregions_length;
|
2008-06-05 15:57:56 -07:00
|
|
|
|
2013-09-25 13:25:24 +02:00
|
|
|
HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
|
|
|
|
HeapWord* heap_end() const {return _regions.end_address_mapped(); }
|
|
|
|
|
2014-08-18 16:10:44 +02:00
|
|
|
// Pass down commit calls to the VirtualSpace.
|
2016-09-16 11:33:47 +02:00
|
|
|
void commit_regions(uint index, size_t num_regions = 1, WorkGang* pretouch_gang = NULL);
|
2014-08-18 16:10:44 +02:00
|
|
|
|
|
|
|
// Notify other data structures about change in the heap layout.
|
|
|
|
void update_committed_space(HeapWord* old_end, HeapWord* new_end);
|
|
|
|
|
2020-04-08 18:38:31 +02:00
|
|
|
// Find a contiguous set of empty or uncommitted regions of length num_regions and return
|
2014-08-26 09:36:53 +02:00
|
|
|
// the index of the first region or G1_NO_HRM_INDEX if the search was unsuccessful.
|
2020-04-08 18:38:31 +02:00
|
|
|
// Start and end defines the range to seek in, policy is first-fit.
|
|
|
|
uint find_contiguous_in_range(uint start, uint end, uint num_regions);
|
|
|
|
// Find a contiguous set of empty regions of length num_regions. Returns the start index
|
|
|
|
// of that set, or G1_NO_HRM_INDEX.
|
|
|
|
uint find_contiguous_in_free_list(uint num_regions);
|
|
|
|
// Find a contiguous set of empty or unavailable regions of length num_regions. Returns the
|
|
|
|
// start index of that set, or G1_NO_HRM_INDEX.
|
|
|
|
uint find_contiguous_allow_expand(uint num_regions);
|
|
|
|
|
2020-04-14 15:18:22 +02:00
|
|
|
void assert_contiguous_range(uint start, uint num_regions) NOT_DEBUG_RETURN;
|
2020-04-08 18:38:31 +02:00
|
|
|
|
2020-04-24 14:39:20 +02:00
|
|
|
// Finds the next sequence of unavailable regions starting at the given index. Returns the
|
|
|
|
// sequence found as a HeapRegionRange. If no regions can be found, both start and end of
|
|
|
|
// the returned range is equal to max_regions().
|
|
|
|
HeapRegionRange find_unavailable_from_idx(uint index) const;
|
2014-08-18 16:10:44 +02:00
|
|
|
// Finds the next sequence of empty regions starting from start_idx, going backwards in
|
|
|
|
// the heap. Returns the length of the sequence found. If this value is zero, no
|
|
|
|
// sequence could be found, otherwise res_idx contains the start index of this range.
|
|
|
|
uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
|
2018-12-21 08:18:59 -08:00
|
|
|
|
2019-11-13 10:49:12 -08:00
|
|
|
// Checks the G1MemoryNodeManager to see if this region is on the preferred node.
|
|
|
|
bool is_on_preferred_index(uint region_index, uint preferred_node_index);
|
|
|
|
|
2018-12-21 08:18:59 -08:00
|
|
|
protected:
|
|
|
|
G1HeapRegionTable _regions;
|
|
|
|
G1RegionToSpaceMapper* _heap_mapper;
|
|
|
|
G1RegionToSpaceMapper* _prev_bitmap_mapper;
|
|
|
|
G1RegionToSpaceMapper* _next_bitmap_mapper;
|
|
|
|
FreeRegionList _free_list;
|
|
|
|
|
|
|
|
void make_regions_available(uint index, uint num_regions = 1, WorkGang* pretouch_gang = NULL);
|
|
|
|
void uncommit_regions(uint index, size_t num_regions = 1);
|
2014-08-19 14:09:10 +02:00
|
|
|
// Allocate a new HeapRegion for the given index.
|
2014-08-26 09:36:53 +02:00
|
|
|
HeapRegion* new_heap_region(uint hrm_index);
|
2020-04-08 18:38:31 +02:00
|
|
|
|
|
|
|
// Humongous allocation helpers
|
|
|
|
virtual HeapRegion* allocate_humongous_from_free_list(uint num_regions);
|
|
|
|
virtual HeapRegion* allocate_humongous_allow_expand(uint num_regions);
|
|
|
|
|
|
|
|
// Expand helper for cases when the regions to expand are well defined.
|
|
|
|
void expand_exact(uint start, uint num_regions, WorkGang* pretouch_workers);
|
|
|
|
|
2014-08-18 16:10:44 +02:00
|
|
|
#ifdef ASSERT
|
|
|
|
public:
|
|
|
|
bool is_free(HeapRegion* hr) const;
|
|
|
|
#endif
|
2018-10-31 13:43:57 +01:00
|
|
|
public:
|
2014-01-23 14:47:23 +01:00
|
|
|
// Empty constructor, we'll initialize it with the initialize() method.
|
2018-08-22 20:37:07 +02:00
|
|
|
HeapRegionManager();
|
2014-08-18 16:10:44 +02:00
|
|
|
|
2019-04-15 11:47:46 +02:00
|
|
|
static HeapRegionManager* create_manager(G1CollectedHeap* heap);
|
2018-12-21 08:18:59 -08:00
|
|
|
|
|
|
|
virtual void initialize(G1RegionToSpaceMapper* heap_storage,
|
|
|
|
G1RegionToSpaceMapper* prev_bitmap,
|
|
|
|
G1RegionToSpaceMapper* next_bitmap,
|
|
|
|
G1RegionToSpaceMapper* bot,
|
|
|
|
G1RegionToSpaceMapper* cardtable,
|
|
|
|
G1RegionToSpaceMapper* card_counts);
|
|
|
|
|
|
|
|
// Prepare heap regions before and after full collection.
|
|
|
|
// Nothing to be done in this class.
|
|
|
|
virtual void prepare_for_full_collection_start() {}
|
|
|
|
virtual void prepare_for_full_collection_end() {}
|
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
|
|
|
|
2014-08-18 16:10:44 +02:00
|
|
|
// Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
|
|
|
|
// new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
|
|
|
|
// the heap from the lowest address, this region (and its associated data
|
|
|
|
// structures) are available and we do not need to check further.
|
2018-12-21 08:18:59 -08:00
|
|
|
virtual HeapRegion* get_dummy_region() { return new_heap_region(0); }
|
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
|
|
|
|
|
|
|
// Return the HeapRegion at the given index. Assume that the index
|
|
|
|
// is valid.
|
2012-04-18 07:21:15 -04:00
|
|
|
inline HeapRegion* at(uint index) const;
|
2008-06-05 15:57:56 -07:00
|
|
|
|
2018-10-31 13:43:57 +01:00
|
|
|
// Return the HeapRegion at the given index, NULL if the index
|
|
|
|
// is for an unavailable region.
|
|
|
|
inline HeapRegion* at_or_null(uint index) const;
|
|
|
|
|
|
|
|
// Returns whether the given region is available for allocation.
|
|
|
|
bool is_available(uint region) const;
|
|
|
|
|
2015-11-09 09:19:39 +01:00
|
|
|
// Return the next region (by index) that is part of the same
|
|
|
|
// humongous object that hr is part of.
|
|
|
|
inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
|
|
|
|
|
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
|
|
|
// If addr is within the committed space return its corresponding
|
|
|
|
// HeapRegion, otherwise return NULL.
|
|
|
|
inline HeapRegion* addr_to_region(HeapWord* addr) const;
|
2008-06-05 15:57:56 -07:00
|
|
|
|
2014-08-18 16:10:44 +02:00
|
|
|
// Insert the given region into the free region list.
|
|
|
|
inline void insert_into_free_list(HeapRegion* hr);
|
|
|
|
|
2019-12-09 10:26:41 +01:00
|
|
|
// Rebuild the free region list from scratch.
|
|
|
|
void rebuild_free_list(WorkGang* workers);
|
|
|
|
|
2014-08-18 16:10:44 +02:00
|
|
|
// Insert the given region list into the global free region list.
|
|
|
|
void insert_list_into_free_list(FreeRegionList* list) {
|
|
|
|
_free_list.add_ordered(list);
|
|
|
|
}
|
|
|
|
|
2019-11-13 10:49:12 -08:00
|
|
|
// Allocate a free region with specific node index. If fails allocate with next node index.
|
|
|
|
virtual HeapRegion* allocate_free_region(HeapRegionType type, uint requested_node_index);
|
2014-08-18 16:10:44 +02:00
|
|
|
|
2020-04-08 18:38:31 +02:00
|
|
|
// Allocate a humongous object from the free list
|
|
|
|
HeapRegion* allocate_humongous(uint num_regions);
|
|
|
|
|
|
|
|
// Allocate a humongous object by expanding the heap
|
|
|
|
HeapRegion* expand_and_allocate_humongous(uint num_regions);
|
|
|
|
|
|
|
|
inline HeapRegion* allocate_free_regions_starting_at(uint first, uint num_regions);
|
2014-08-18 16:10:44 +02:00
|
|
|
|
|
|
|
// Remove all regions from the free list.
|
|
|
|
void remove_all_free_regions() {
|
|
|
|
_free_list.remove_all();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Return the number of committed free regions in the heap.
|
|
|
|
uint num_free_regions() const {
|
|
|
|
return _free_list.length();
|
|
|
|
}
|
|
|
|
|
2019-11-13 10:51:41 -08:00
|
|
|
uint num_free_regions(uint node_index) const {
|
|
|
|
return _free_list.length(node_index);
|
|
|
|
}
|
|
|
|
|
2017-11-14 11:33:23 +01:00
|
|
|
size_t total_free_bytes() const {
|
2014-08-18 16:10:44 +02:00
|
|
|
return num_free_regions() * HeapRegion::GrainBytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Return the number of available (uncommitted) regions.
|
|
|
|
uint available() const { return max_length() - length(); }
|
|
|
|
|
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
|
|
|
// Return the number of regions that have been committed in the heap.
|
2014-08-18 16:10:44 +02:00
|
|
|
uint length() const { return _num_committed; }
|
2008-06-05 15:57:56 -07:00
|
|
|
|
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
|
|
|
// Return the maximum number of regions in the heap.
|
2013-09-25 13:25:24 +02:00
|
|
|
uint max_length() const { return (uint)_regions.length(); }
|
2008-06-05 15:57:56 -07:00
|
|
|
|
2018-12-21 08:18:59 -08:00
|
|
|
// Return maximum number of regions that heap can expand to.
|
|
|
|
virtual uint max_expandable_length() const { return (uint)_regions.length(); }
|
|
|
|
|
2015-03-16 17:51:28 +03:00
|
|
|
MemoryUsage get_auxiliary_data_memory_usage() const;
|
|
|
|
|
2014-08-18 16:10:44 +02:00
|
|
|
  // The whole reserved address range of the heap (committed or not), taken
  // from the bounds of the biased region table.
  MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
|
|
|
|
|
|
|
|
// Expand the sequence to reflect that the heap has grown. Either create new
|
|
|
|
// HeapRegions, or re-use existing ones. Returns the number of regions the
|
|
|
|
// sequence was expanded by. If a HeapRegion allocation fails, the resulting
|
|
|
|
// number of regions might be smaller than what's desired.
|
2018-12-21 08:18:59 -08:00
|
|
|
virtual uint expand_by(uint num_regions, WorkGang* pretouch_workers);
|
2008-06-05 15:57:56 -07:00
|
|
|
|
2014-08-18 16:10:44 +02:00
|
|
|
// Makes sure that the regions from start to start+num_regions-1 are available
|
|
|
|
// for allocation. Returns the number of regions that were committed to achieve
|
|
|
|
// this.
|
2018-12-21 08:18:59 -08:00
|
|
|
virtual uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
|
2008-06-05 15:57:56 -07:00
|
|
|
|
2019-11-13 10:49:12 -08:00
|
|
|
// Try to expand on the given node index.
|
|
|
|
virtual uint expand_on_preferred_node(uint node_index);
|
|
|
|
|
2014-08-18 16:10:44 +02:00
|
|
|
HeapRegion* next_region_in_heap(const HeapRegion* r) const;
|
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
|
|
|
|
2015-06-12 19:49:54 -04:00
|
|
|
// Find the highest free or uncommitted region in the reserved heap,
|
|
|
|
// and if uncommitted, commit it. If none are available, return G1_NO_HRM_INDEX.
|
|
|
|
// Set the 'expanded' boolean true if a new region was committed.
|
2018-12-21 08:18:59 -08:00
|
|
|
virtual uint find_highest_free(bool* expanded);
|
2015-06-12 19:49:54 -04:00
|
|
|
|
|
|
|
// Allocate the regions that contain the address range specified, committing the
|
|
|
|
// regions if necessary. Return false if any of the regions is already committed
// and not free, and return the number of regions newly committed in commit_count.
// pretouch_workers, if non-NULL, is used to pre-touch the newly committed pages.
bool allocate_containing_regions(MemRegion range, size_t* commit_count, WorkGang* pretouch_workers);

// Apply blk->do_heap_region() on all committed regions in address order,
// terminating the iteration early if do_heap_region() returns true.
void iterate(HeapRegionClosure* blk) const;

// Parallel version of iterate(): apply blk->do_heap_region() to committed
// regions, using hrclaimer so that each region is processed by exactly one
// worker. start_index is the worker's starting offset into the region array
// (NOTE(review): presumably the iteration wraps around — confirm in the
// implementation).
void par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* hrclaimer, const uint start_index) const;

// Uncommit up to num_regions_to_remove regions that are completely free.
// Return the actual number of uncommitted regions.
virtual uint shrink_by(uint num_regions_to_remove);

// Uncommit a number of regions starting at the specified index, which must be available,
// empty, and free.
void shrink_at(uint index, size_t num_regions);

// Exhaustive verification of the manager's bookkeeping; see the
// implementation for the exact invariants checked. Virtual so that
// specialized managers can extend the checks.
virtual void verify();

// Do some sanity checking. Expands to a no-op in product builds
// (PRODUCT_RETURN).
void verify_optional() PRODUCT_RETURN;
|
2008-06-05 15:57:56 -07:00
|
|
|
};
|
2010-11-23 13:22:55 -08:00
|
|
|
|
2014-10-07 14:54:53 +02:00
|
|
|
// The HeapRegionClaimer is used during parallel iteration over heap regions,
// allowing workers to claim heap regions, gaining exclusive rights to these regions.
class HeapRegionClaimer : public StackObj {
  uint _n_workers;          // Number of parallel workers sharing this claimer.
  uint _n_regions;          // Number of claimable regions (one claim slot each).
  volatile uint* _claims;   // Per-region claim flags; each entry is Unclaimed or Claimed.

  // Values stored in the _claims array.
  static const uint Unclaimed = 0;
  static const uint Claimed = 1;

 public:
  HeapRegionClaimer(uint n_workers);
  ~HeapRegionClaimer();

  // Number of regions this claimer covers.
  inline uint n_regions() const {
    return _n_regions;
  }

  // Return a start offset given a worker id.
  uint offset_for_worker(uint worker_id) const;

  // Check if region has been claimed with this HRClaimer.
  bool is_region_claimed(uint region_index) const;

  // Claim the given region, returns true if successfully claimed.
  bool claim_region(uint region_index);
};
|
2019-01-10 15:13:51 -05:00
|
|
|
#endif // SHARE_GC_G1_HEAPREGIONMANAGER_HPP
|