8220312: Implementation: NUMA-Aware Memory Allocation for G1, Logging (3/3)

Reviewed-by: kbarrett, sjohanss, tschatzl
Sangheon Kim 2019-11-13 10:51:41 -08:00
parent eaa6355cb0
commit 46c3d43f83
25 changed files with 918 additions and 57 deletions
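All of the new output in this patch is gated behind Unified Logging tag sets that appear in the diff below: gc+heap+numa (Info for the placement hit-rate summary, Debug for the mutator allocation table and the per-node heap transition breakdown, Trace for the per-request memory touch messages) and gc+heap+verify at Trace for the NUMA region verification closure. A hypothetical invocation that would surface this output on a NUMA machine (MyApp is a placeholder, not part of the patch):

java -XX:+UseNUMA -Xlog:gc+heap+numa=debug,gc+heap+verify=trace MyApp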

View File

@ -3008,17 +3008,13 @@ int os::numa_get_group_id() {
}
int os::numa_get_group_id_for_address(const void* address) {
-#ifndef MPOL_F_NODE
-#define MPOL_F_NODE (1<<0) // Return next IL mode instead of node mask
-#endif
-#ifndef MPOL_F_ADDR
-#define MPOL_F_ADDR (1<<1) // Look up VMA using address
-#endif
-int id = 0;
-if (syscall(SYS_get_mempolicy, &id, NULL, 0, const_cast<void*>(address), MPOL_F_NODE | MPOL_F_ADDR) == -1) {
+void** pages = const_cast<void**>(&address);
+int id = -1;
+if (os::Linux::numa_move_pages(0, 1, pages, NULL, &id, 0) == -1) {
return -1;
}
+if (id < 0) {
+return -1;
+}
return id;
@ -3152,6 +3148,8 @@ bool os::Linux::libnuma_init() {
libnuma_v2_dlsym(handle, "numa_get_membind")));
set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t,
libnuma_v2_dlsym(handle, "numa_get_interleave_mask")));
set_numa_move_pages(CAST_TO_FN_PTR(numa_move_pages_func_t,
libnuma_dlsym(handle, "numa_move_pages")));
if (numa_available() != -1) {
set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
@ -3286,6 +3284,7 @@ os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
os::Linux::numa_distance_func_t os::Linux::_numa_distance;
os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;
os::Linux::numa_move_pages_func_t os::Linux::_numa_move_pages;
os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy;
unsigned long* os::Linux::_numa_all_nodes;
struct bitmask* os::Linux::_numa_all_nodes_ptr;
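The new numa_get_group_id_for_address above relies on a documented property of move_pages(2): when the nodes array is NULL, the kernel does not migrate anything and instead writes each page's current node into status (or a negative errno, e.g. -ENOENT for a page that has not been touched yet). Below is a minimal standalone sketch of the same lookup outside HotSpot, assuming libnuma is installed; the names are illustrative and not part of the patch.

// Query the NUMA node of a page via libnuma's move_pages() wrapper.
#include <numaif.h>   // move_pages(); link with -lnuma
#include <cstdio>

static int node_of_address(const void* address) {
  void* pages[1] = { const_cast<void*>(address) };
  int status[1] = { -1 };
  // pid 0 = calling process, count = 1 page, nodes = NULL = query-only mode.
  if (move_pages(0, 1, pages, NULL, status, 0) == -1) {
    return -1;                            // the call itself failed
  }
  return status[0] < 0 ? -1 : status[0];  // per-page failure is a negative errno
}

int main() {
  int probe = 42;  // writing to the variable ensures its page is backed
  std::printf("page of 'probe' is on NUMA node %d\n", node_of_address(&probe));
  return 0;
}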

View File

@ -216,6 +216,7 @@ class Linux {
typedef void (*numa_interleave_memory_v2_func_t)(void *start, size_t size, struct bitmask* mask);
typedef struct bitmask* (*numa_get_membind_func_t)(void);
typedef struct bitmask* (*numa_get_interleave_mask_func_t)(void);
typedef long (*numa_move_pages_func_t)(int pid, unsigned long count, void **pages, const int *nodes, int *status, int flags);
typedef void (*numa_set_bind_policy_func_t)(int policy);
typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n);
@ -234,6 +235,7 @@ class Linux {
static numa_distance_func_t _numa_distance;
static numa_get_membind_func_t _numa_get_membind;
static numa_get_interleave_mask_func_t _numa_get_interleave_mask;
static numa_move_pages_func_t _numa_move_pages;
static unsigned long* _numa_all_nodes;
static struct bitmask* _numa_all_nodes_ptr;
static struct bitmask* _numa_nodes_ptr;
@ -253,6 +255,7 @@ class Linux {
static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; }
static void set_numa_get_membind(numa_get_membind_func_t func) { _numa_get_membind = func; }
static void set_numa_get_interleave_mask(numa_get_interleave_mask_func_t func) { _numa_get_interleave_mask = func; }
static void set_numa_move_pages(numa_move_pages_func_t func) { _numa_move_pages = func; }
static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
static void set_numa_all_nodes_ptr(struct bitmask **ptr) { _numa_all_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
static void set_numa_nodes_ptr(struct bitmask **ptr) { _numa_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
@ -318,6 +321,9 @@ class Linux {
static int numa_distance(int node1, int node2) {
return _numa_distance != NULL ? _numa_distance(node1, node2) : -1;
}
static long numa_move_pages(int pid, unsigned long count, void **pages, const int *nodes, int *status, int flags) {
return _numa_move_pages != NULL ? _numa_move_pages(pid, count, pages, nodes, status, flags) : -1;
}
static int get_node_by_cpu(int cpu_id);
static int get_existing_num_nodes();
// Check if numa node is configured (non-zero memory node).

View File

@ -2389,6 +2389,15 @@ void G1CollectedHeap::print_on(outputStream* st) const {
st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
(size_t) survivor_regions * HeapRegion::GrainBytes / K);
st->cr();
if (_numa->is_enabled()) {
uint num_nodes = _numa->num_active_nodes();
st->print(" remaining free region(s) on each NUMA node: ");
const int* node_ids = _numa->node_ids();
for (uint node_index = 0; node_index < num_nodes; node_index++) {
st->print("%d=%u ", node_ids[node_index], _hrm->num_free_regions(node_index));
}
st->cr();
}
MetaspaceUtils::print_on(st);
}
@ -2578,6 +2587,20 @@ void G1CollectedHeap::gc_epilogue(bool full) {
// We have just completed a GC. Update the soft reference
// policy with the new heap occupancy
Universe::update_heap_info_at_gc();
// Print NUMA statistics.
_numa->print_statistics();
}
void G1CollectedHeap::verify_numa_regions(const char* desc) {
LogTarget(Trace, gc, heap, verify) lt;
if (lt.is_enabled()) {
LogStream ls(lt);
// Iterate all heap regions to print matching between preferred numa id and actual numa id.
G1NodeIndexCheckClosure cl(desc, _numa, &ls);
heap_region_iterate(&cl);
}
}
HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
@ -2887,6 +2910,7 @@ void G1CollectedHeap::verify_before_young_collection(G1HeapVerifier::G1VerifyTyp
}
_verifier->verify_before_gc(type);
_verifier->check_bitmaps("GC Start");
verify_numa_regions("GC Start");
}
void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType type) {
@ -2897,6 +2921,7 @@ void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType
}
_verifier->verify_after_gc(type);
_verifier->check_bitmaps("GC End");
verify_numa_regions("GC End");
}
void G1CollectedHeap::expand_heap_after_young_collection(){

View File

@ -530,6 +530,9 @@ private:
// Merges the information gathered on a per-thread basis for all worker threads
// during GC into global variables.
void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
void verify_numa_regions(const char* desc);
public:
G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
@ -1284,7 +1287,9 @@ public:
const G1SurvivorRegions* survivor() const { return &_survivor; }
uint eden_regions_count() const { return _eden.length(); }
uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
uint survivor_regions_count() const { return _survivor.length(); }
uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
uint young_regions_count() const { return _eden.length() + _survivor.length(); }

View File

@ -25,6 +25,7 @@
#ifndef SHARE_GC_G1_G1EDENREGIONS_HPP
#define SHARE_GC_G1_G1EDENREGIONS_HPP
#include "gc/g1/g1RegionsOnNodes.hpp"
#include "gc/g1/heapRegion.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
@ -35,18 +36,25 @@ private:
// Sum of used bytes from all retired eden regions.
// I.e. updated when mutator regions are retired.
volatile size_t _used_bytes;
G1RegionsOnNodes _regions_on_node;
public:
G1EdenRegions() : _length(0), _used_bytes(0) { }
G1EdenRegions() : _length(0), _used_bytes(0), _regions_on_node() { }
void add(HeapRegion* hr) {
virtual uint add(HeapRegion* hr) {
assert(!hr->is_eden(), "should not already be set");
_length++;
return _regions_on_node.add(hr);
}
void clear() { _length = 0; _used_bytes = 0; }
void clear() {
_length = 0;
_used_bytes = 0;
_regions_on_node.clear();
}
uint length() const { return _length; }
uint regions_on_node(uint node_index) const { return _regions_on_node.count(node_index); }
size_t used_bytes() const { return _used_bytes; }

View File

@ -26,15 +26,38 @@
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1Policy.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace.hpp"
G1HeapTransition::Data::Data(G1CollectedHeap* g1_heap) {
_eden_length = g1_heap->eden_regions_count();
_survivor_length = g1_heap->survivor_regions_count();
_old_length = g1_heap->old_regions_count();
_archive_length = g1_heap->archive_regions_count();
_humongous_length = g1_heap->humongous_regions_count();
G1HeapTransition::Data::Data(G1CollectedHeap* g1_heap) :
_eden_length(g1_heap->eden_regions_count()),
_survivor_length(g1_heap->survivor_regions_count()),
_old_length(g1_heap->old_regions_count()),
_archive_length(g1_heap->archive_regions_count()),
_humongous_length(g1_heap->humongous_regions_count()),
_eden_length_per_node(NULL),
_survivor_length_per_node(NULL) {
uint node_count = G1NUMA::numa()->num_active_nodes();
if (node_count > 1) {
LogTarget(Debug, gc, heap, numa) lt;
if (lt.is_enabled()) {
_eden_length_per_node = NEW_C_HEAP_ARRAY(uint, node_count, mtGC);
_survivor_length_per_node = NEW_C_HEAP_ARRAY(uint, node_count, mtGC);
for (uint i = 0; i < node_count; i++) {
_eden_length_per_node[i] = g1_heap->eden_regions_count(i);
_survivor_length_per_node[i] = g1_heap->survivor_regions_count(i);
}
}
}
}
G1HeapTransition::Data::~Data() {
FREE_C_HEAP_ARRAY(uint, _eden_length_per_node);
FREE_C_HEAP_ARRAY(uint, _survivor_length_per_node);
}
G1HeapTransition::G1HeapTransition(G1CollectedHeap* g1_heap) : _g1_heap(g1_heap), _before(g1_heap) { }
@ -84,6 +107,34 @@ public:
}
};
static void log_regions(const char* msg, size_t before_length, size_t after_length, size_t capacity,
uint* before_per_node_length, uint* after_per_node_length) {
LogTarget(Info, gc, heap) lt;
if (lt.is_enabled()) {
LogStream ls(lt);
ls.print("%s regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")",
msg, before_length, after_length, capacity);
// Not NULL only if gc+heap+numa at Debug level is enabled.
if (before_per_node_length != NULL && after_per_node_length != NULL) {
G1NUMA* numa = G1NUMA::numa();
uint num_nodes = numa->num_active_nodes();
const int* node_ids = numa->node_ids();
ls.print(" (");
for (uint i = 0; i < num_nodes; i++) {
ls.print("%d: %u->%u", node_ids[i], before_per_node_length[i], after_per_node_length[i]);
// Skip adding below if it is the last one.
if (i != num_nodes - 1) {
ls.print(", ");
}
}
ls.print(")");
}
ls.print_cr("");
}
}
void G1HeapTransition::print() {
Data after(_g1_heap);
@ -106,12 +157,12 @@ void G1HeapTransition::print() {
after._humongous_length, usage._humongous_region_count);
}
log_info(gc, heap)("Eden regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")",
_before._eden_length, after._eden_length, eden_capacity_length_after_gc);
log_regions("Eden", _before._eden_length, after._eden_length, eden_capacity_length_after_gc,
_before._eden_length_per_node, after._eden_length_per_node);
log_trace(gc, heap)(" Used: 0K, Waste: 0K");
log_info(gc, heap)("Survivor regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")",
_before._survivor_length, after._survivor_length, survivor_capacity_length_before_gc);
log_regions("Survivor", _before._survivor_length, after._survivor_length, survivor_capacity_length_before_gc,
_before._survivor_length_per_node, after._survivor_length_per_node);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._survivor_used / K, ((after._survivor_length * HeapRegion::GrainBytes) - usage._survivor_used) / K);
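The per-node arrays above are populated only when gc+heap+numa is enabled at Debug level; log_regions() then appends a node-by-node breakdown to the existing gc+heap Info lines. For illustration only, with invented values on a hypothetical two-node machine (ids 0 and 1), the format strings above would produce output such as:

Eden regions: 24->0(26) (0: 12->0, 1: 12->0)
Survivor regions: 2->3(3) (0: 1->2, 1: 1->1)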

View File

@ -39,7 +39,13 @@ class G1HeapTransition {
size_t _humongous_length;
const metaspace::MetaspaceSizesSnapshot _meta_sizes;
// Only includes current eden regions.
uint* _eden_length_per_node;
// Only includes current survivor regions.
uint* _survivor_length_per_node;
Data(G1CollectedHeap* g1_heap);
~Data();
};
G1CollectedHeap* _g1_heap;

View File

@ -24,8 +24,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1NUMA.hpp"
#include "gc/g1/heapRegion.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
@ -74,7 +73,7 @@ uint G1NUMA::index_of_node_id(int node_id) const {
G1NUMA::G1NUMA() :
_node_id_to_index_map(NULL), _len_node_id_to_index_map(0),
_node_ids(NULL), _num_active_node_ids(0),
_region_size(0), _page_size(0) {
_region_size(0), _page_size(0), _stats(NULL) {
}
void G1NUMA::initialize_without_numa() {
@ -119,9 +118,12 @@ void G1NUMA::initialize(bool use_numa) {
for (uint i = 0; i < _num_active_node_ids; i++) {
_node_id_to_index_map[_node_ids[i]] = i;
}
_stats = new G1NUMAStats(_node_ids, _num_active_node_ids);
}
G1NUMA::~G1NUMA() {
delete _stats;
FREE_C_HEAP_ARRAY(int, _node_id_to_index_map);
FREE_C_HEAP_ARRAY(int, _node_ids);
}
@ -215,7 +217,7 @@ void G1NUMA::request_memory_on_node(void* aligned_address, size_t size_in_bytes,
assert(is_aligned(aligned_address, page_size()), "Given address (" PTR_FORMAT ") should be aligned.", p2i(aligned_address));
assert(is_aligned(size_in_bytes, page_size()), "Given size (" SIZE_FORMAT ") should be aligned.", size_in_bytes);
log_debug(gc, heap, numa)("Request memory [" PTR_FORMAT ", " PTR_FORMAT ") to be numa id (%d).",
log_trace(gc, heap, numa)("Request memory [" PTR_FORMAT ", " PTR_FORMAT ") to be NUMA id (%d)",
p2i(aligned_address), p2i((char*)aligned_address + size_in_bytes), _node_ids[node_index]);
os::numa_make_local((char*)aligned_address, size_in_bytes, _node_ids[node_index]);
}
@ -225,3 +227,79 @@ uint G1NUMA::max_search_depth() const {
// In some cases a single page may span multiple HeapRegions.
return 3 * MAX2((uint)(page_size() / region_size()), (uint)1) * num_active_nodes();
}
void G1NUMA::update_statistics(G1NUMAStats::NodeDataItems phase,
uint requested_node_index,
uint allocated_node_index) {
if (_stats == NULL) {
return;
}
uint converted_req_index;
if (requested_node_index < _num_active_node_ids) {
converted_req_index = requested_node_index;
} else {
assert(requested_node_index == AnyNodeIndex,
"Requested node index %u should be AnyNodeIndex.", requested_node_index);
converted_req_index = _num_active_node_ids;
}
_stats->update(phase, converted_req_index, allocated_node_index);
}
void G1NUMA::copy_statistics(G1NUMAStats::NodeDataItems phase,
uint requested_node_index,
size_t* allocated_stat) {
if (_stats == NULL) {
return;
}
_stats->copy(phase, requested_node_index, allocated_stat);
}
void G1NUMA::print_statistics() const {
if (_stats == NULL) {
return;
}
_stats->print_statistics();
}
G1NodeIndexCheckClosure::G1NodeIndexCheckClosure(const char* desc, G1NUMA* numa, LogStream* ls) :
_desc(desc), _numa(numa), _ls(ls) {
uint num_nodes = _numa->num_active_nodes();
_matched = NEW_C_HEAP_ARRAY(uint, num_nodes, mtGC);
_mismatched = NEW_C_HEAP_ARRAY(uint, num_nodes, mtGC);
_total = NEW_C_HEAP_ARRAY(uint, num_nodes, mtGC);
memset(_matched, 0, sizeof(uint) * num_nodes);
memset(_mismatched, 0, sizeof(uint) * num_nodes);
memset(_total, 0, sizeof(uint) * num_nodes);
}
G1NodeIndexCheckClosure::~G1NodeIndexCheckClosure() {
_ls->print("%s: NUMA region verification (id: matched/mismatched/total): ", _desc);
const int* numa_ids = _numa->node_ids();
for (uint i = 0; i < _numa->num_active_nodes(); i++) {
_ls->print("%d: %u/%u/%u ", numa_ids[i], _matched[i], _mismatched[i], _total[i]);
}
FREE_C_HEAP_ARRAY(uint, _matched);
FREE_C_HEAP_ARRAY(uint, _mismatched);
FREE_C_HEAP_ARRAY(uint, _total);
}
bool G1NodeIndexCheckClosure::do_heap_region(HeapRegion* hr) {
// The preferred node index is always a valid node index.
uint preferred_node_index = _numa->preferred_node_index_for_index(hr->hrm_index());
// Active node index may have UnknownNodeIndex.
uint active_node_index = _numa->index_of_address(hr->bottom());
if (preferred_node_index == active_node_index) {
_matched[preferred_node_index]++;
} else if (active_node_index != G1NUMA::UnknownNodeIndex) {
_mismatched[preferred_node_index]++;
}
_total[preferred_node_index]++;
return false;
}
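When gc+heap+verify logging is enabled at Trace level, verify_numa_regions() (see g1CollectedHeap.cpp above) runs this closure at GC start and end, and the destructor emits one matched/mismatched/total triple per node. An illustrative line with invented counts for a two-node heap, following the format strings in the destructor:

GC Start: NUMA region verification (id: matched/mismatched/total): 0: 120/3/128 1: 118/5/128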

View File

@ -25,10 +25,12 @@
#ifndef SHARE_VM_GC_G1_NUMA_HPP
#define SHARE_VM_GC_G1_NUMA_HPP
#include "gc/g1/g1NUMAStats.hpp"
#include "gc/g1/heapRegion.hpp"
#include "memory/allocation.hpp"
#include "runtime/os.hpp"
class HeapRegion;
class LogStream;
class G1NUMA: public CHeapObj<mtGC> {
// Mapping of available node ids to 0-based index which can be used for
@ -49,6 +51,9 @@ class G1NUMA: public CHeapObj<mtGC> {
// Necessary when touching memory.
size_t _page_size;
// Stores statistic data.
G1NUMAStats* _stats;
size_t region_size() const;
size_t page_size() const;
@ -113,6 +118,35 @@ public:
// Returns maximum search depth which is used to limit heap region search iterations.
// The number of active nodes, page size and heap region size are considered.
uint max_search_depth() const;
// Update the given phase of requested and allocated node index.
void update_statistics(G1NUMAStats::NodeDataItems phase, uint requested_node_index, uint allocated_node_index);
// Copy all allocated statistics of the given phase and requested node.
// Precondition: allocated_stat should have the same length as the number of active nodes.
void copy_statistics(G1NUMAStats::NodeDataItems phase, uint requested_node_index, size_t* allocated_stat);
// Print all statistics.
void print_statistics() const;
};
class G1NodeIndexCheckClosure : public HeapRegionClosure {
const char* _desc;
G1NUMA* _numa;
// Records matched count of each node.
uint* _matched;
// Records mismatched count of each node.
uint* _mismatched;
// Records total count of each node.
// Total = matched + mismatched + unknown.
uint* _total;
LogStream* _ls;
public:
G1NodeIndexCheckClosure(const char* desc, G1NUMA* numa, LogStream* ls);
~G1NodeIndexCheckClosure();
bool do_heap_region(HeapRegion* hr);
};
#endif // SHARE_VM_GC_G1_NUMA_HPP

View File

@ -0,0 +1,232 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1NUMAStats.hpp"
#include "logging/logStream.hpp"
double G1NUMAStats::Stat::rate() const {
return _requested == 0 ? 0 : (double)_hit / _requested * 100;
}
G1NUMAStats::NodeDataArray::NodeDataArray(uint num_nodes) {
guarantee(num_nodes > 1, "Number of nodes (%u) should be set", num_nodes);
// The row represents the number of nodes.
_num_column = num_nodes;
// +1 for G1MemoryNodeManager::AnyNodeIndex.
_num_row = num_nodes + 1;
_data = NEW_C_HEAP_ARRAY(size_t*, _num_row, mtGC);
for (uint row = 0; row < _num_row; row++) {
_data[row] = NEW_C_HEAP_ARRAY(size_t, _num_column, mtGC);
}
clear();
}
G1NUMAStats::NodeDataArray::~NodeDataArray() {
for (uint row = 0; row < _num_row; row++) {
FREE_C_HEAP_ARRAY(size_t, _data[row]);
}
FREE_C_HEAP_ARRAY(size_t*, _data);
}
void G1NUMAStats::NodeDataArray::create_hit_rate(Stat* result) const {
size_t requested = 0;
size_t hit = 0;
for (size_t row = 0; row < _num_row; row++) {
for (size_t column = 0; column < _num_column; column++) {
requested += _data[row][column];
if (row == column) {
hit += _data[row][column];
}
}
}
assert(result != NULL, "Invariant");
result->_hit = hit;
result->_requested = requested;
}
void G1NUMAStats::NodeDataArray::create_hit_rate(Stat* result, uint req_index) const {
size_t requested = 0;
size_t hit = _data[req_index][req_index];
for (size_t column = 0; column < _num_column; column++) {
requested += _data[req_index][column];
}
assert(result != NULL, "Invariant");
result->_hit = hit;
result->_requested = requested;
}
size_t G1NUMAStats::NodeDataArray::sum(uint req_index) const {
size_t sum = 0;
for (size_t column = 0; column < _num_column; column++) {
sum += _data[req_index][column];
}
return sum;
}
void G1NUMAStats::NodeDataArray::increase(uint req_index, uint alloc_index) {
assert(req_index < _num_row,
"Requested index %u should be less than the row size %u",
req_index, _num_row);
assert(alloc_index < _num_column,
"Allocated index %u should be less than the column size %u",
alloc_index, _num_column);
_data[req_index][alloc_index] += 1;
}
void G1NUMAStats::NodeDataArray::clear() {
for (uint row = 0; row < _num_row; row++) {
memset((void*)_data[row], 0, sizeof(size_t) * _num_column);
}
}
size_t G1NUMAStats::NodeDataArray::get(uint req_index, uint alloc_index) {
return _data[req_index][alloc_index];
}
void G1NUMAStats::NodeDataArray::copy(uint req_index, size_t* stat) {
assert(stat != NULL, "Invariant");
for (uint column = 0; column < _num_column; column++) {
_data[req_index][column] += stat[column];
}
}
G1NUMAStats::G1NUMAStats(const int* node_ids, uint num_node_ids) :
_node_ids(node_ids), _num_node_ids(num_node_ids), _node_data() {
assert(_num_node_ids > 1, "Should have more than one active memory nodes %u", _num_node_ids);
for (int i = 0; i < NodeDataItemsSentinel; i++) {
_node_data[i] = new NodeDataArray(_num_node_ids);
}
}
G1NUMAStats::~G1NUMAStats() {
for (int i = 0; i < NodeDataItemsSentinel; i++) {
delete _node_data[i];
}
}
void G1NUMAStats::clear(G1NUMAStats::NodeDataItems phase) {
_node_data[phase]->clear();
}
void G1NUMAStats::update(G1NUMAStats::NodeDataItems phase,
uint requested_node_index,
uint allocated_node_index) {
_node_data[phase]->increase(requested_node_index, allocated_node_index);
}
void G1NUMAStats::copy(G1NUMAStats::NodeDataItems phase,
uint requested_node_index,
size_t* allocated_stat) {
_node_data[phase]->copy(requested_node_index, allocated_stat);
}
static const char* phase_to_explanatory_string(G1NUMAStats::NodeDataItems phase) {
switch(phase) {
case G1NUMAStats::NewRegionAlloc:
return "Placement match ratio";
case G1NUMAStats::LocalObjProcessAtCopyToSurv:
return "Worker task locality match ratio";
default:
return "";
}
}
#define RATE_TOTAL_FORMAT "%0.0f%% " SIZE_FORMAT "/" SIZE_FORMAT
void G1NUMAStats::print_info(G1NUMAStats::NodeDataItems phase) {
LogTarget(Info, gc, heap, numa) lt;
if (lt.is_enabled()) {
LogStream ls(lt);
Stat result;
size_t array_width = _num_node_ids;
_node_data[phase]->create_hit_rate(&result);
ls.print("%s: " RATE_TOTAL_FORMAT " (",
phase_to_explanatory_string(phase), result.rate(), result._hit, result._requested);
for (uint i = 0; i < array_width; i++) {
if (i != 0) {
ls.print(", ");
}
_node_data[phase]->create_hit_rate(&result, i);
ls.print("%d: " RATE_TOTAL_FORMAT,
_node_ids[i], result.rate(), result._hit, result._requested);
}
ls.print_cr(")");
}
}
void G1NUMAStats::print_mutator_alloc_stat_debug() {
LogTarget(Debug, gc, heap, numa) lt;
if (lt.is_enabled()) {
LogStream ls(lt);
uint array_width = _num_node_ids;
ls.print("Allocated NUMA ids ");
for (uint i = 0; i < array_width; i++) {
ls.print("%8d", _node_ids[i]);
}
ls.print_cr(" Total");
ls.print("Requested NUMA id ");
for (uint req = 0; req < array_width; req++) {
ls.print("%3d ", _node_ids[req]);
for (uint alloc = 0; alloc < array_width; alloc++) {
ls.print(SIZE_FORMAT_W(8), _node_data[NewRegionAlloc]->get(req, alloc));
}
ls.print(SIZE_FORMAT_W(8), _node_data[NewRegionAlloc]->sum(req));
ls.print_cr("");
// Add padding to align with the string 'Requested NUMA id'.
ls.print(" ");
}
ls.print("Any ");
for (uint alloc = 0; alloc < array_width; alloc++) {
ls.print(SIZE_FORMAT_W(8), _node_data[NewRegionAlloc]->get(array_width, alloc));
}
ls.print(SIZE_FORMAT_W(8), _node_data[NewRegionAlloc]->sum(array_width));
ls.print_cr("");
}
}
void G1NUMAStats::print_statistics() {
print_info(NewRegionAlloc);
print_mutator_alloc_stat_debug();
print_info(LocalObjProcessAtCopyToSurv);
}

View File

@ -0,0 +1,119 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_NODE_TIMES_HPP
#define SHARE_VM_GC_G1_NODE_TIMES_HPP
#include "memory/allocation.hpp"
// Manages statistics for multiple nodes.
class G1NUMAStats : public CHeapObj<mtGC> {
struct Stat {
// Hit count: requests whose returned id equals the requested id.
size_t _hit;
// Total request count
size_t _requested;
// Hit count / total request count
double rate() const;
};
// Holds a data array sized (node count + 1) * (node count), representing
// requested node * allocated node. The requested node includes the 'any node' case.
// All operations are NOT thread-safe.
// The row index indicates a requested node index while the column index
// indicates an allocated node index. The last row is for the 'any node index' request.
// E.g. (req, alloc) = (0,0) (1,0) (2,0) (0,1) (Any, 3) (0,2) (0,3) (0,3) (3,3)
// Allocated node index 0 1 2 3 Total
// Requested node index 0 1 1 1 2 5
// 1 1 0 0 0 1
// 2 1 0 0 0 1
// 3 0 0 0 1 1
// Any 0 0 0 1 1
class NodeDataArray : public CHeapObj<mtGC> {
// The number of nodes.
uint _num_column;
// The number of nodes + 1 (for any node request)
uint _num_row;
// 2-dimension array that holds count of allocated / requested node index.
size_t** _data;
public:
NodeDataArray(uint num_nodes);
~NodeDataArray();
// Create Stat result of hit count, requested count and hit rate.
// The result is copied to the given result parameter.
void create_hit_rate(Stat* result) const;
// Create Stat result of hit count, requested count and hit rate of the given index.
// The result is copied to the given result parameter.
void create_hit_rate(Stat* result, uint req_index) const;
// Return sum of the given index.
size_t sum(uint req_index) const;
// Increase at the request / allocated index.
void increase(uint req_index, uint alloc_index);
// Clear all data.
void clear();
// Return current value of the given request / allocated index.
size_t get(uint req_index, uint alloc_index);
// Copy values of the given request index.
void copy(uint req_index, size_t* stat);
};
public:
enum NodeDataItems {
// Statistics of a new region allocation.
NewRegionAlloc,
// Statistics of object processing during copy to survivor region.
LocalObjProcessAtCopyToSurv,
NodeDataItemsSentinel
};
private:
const int* _node_ids;
uint _num_node_ids;
NodeDataArray* _node_data[NodeDataItemsSentinel];
void print_info(G1NUMAStats::NodeDataItems phase);
void print_mutator_alloc_stat_debug();
public:
G1NUMAStats(const int* node_ids, uint num_node_ids);
~G1NUMAStats();
void clear(G1NUMAStats::NodeDataItems phase);
// Update the given phase of requested and allocated node index.
void update(G1NUMAStats::NodeDataItems phase, uint requested_node_index, uint allocated_node_index);
// Copy all allocated statistics of the given phase and requested node.
// Precondition: allocated_stat should have the same length as the number of active nodes.
void copy(G1NUMAStats::NodeDataItems phase, uint requested_node_index, size_t* allocated_stat);
void print_statistics();
};
#endif // SHARE_VM_GC_G1_NODE_TIMES_HPP

View File

@ -57,7 +57,9 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
_stack_trim_lower_threshold(GCDrainStackTargetSize),
_trim_ticks(),
_old_gen_is_full(false),
_num_optional_regions(optional_cset_length)
_num_optional_regions(optional_cset_length),
_numa(g1h->numa()),
_obj_alloc_stat(NULL)
{
// We allocate number of young gen regions in the collection set plus one
// entries, since entry 0 keeps track of surviving bytes for non-young regions.
@ -79,6 +81,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
_closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);
_oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions];
initialize_numa_stats();
}
// Pass locally gathered statistics to global state.
@ -92,6 +96,7 @@ void G1ParScanThreadState::flush(size_t* surviving_young_words) {
for (uint i = 0; i < length; i++) {
surviving_young_words[i] += _surviving_young_words[i];
}
flush_numa_stats();
}
G1ParScanThreadState::~G1ParScanThreadState() {
@ -99,6 +104,7 @@ G1ParScanThreadState::~G1ParScanThreadState() {
delete _closures;
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
delete[] _oops_into_optional_regions;
FREE_C_HEAP_ARRAY(size_t, _obj_alloc_stat);
}
size_t G1ParScanThreadState::lab_waste_words() const {
@ -248,6 +254,8 @@ oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_a
return handle_evacuation_failure_par(old, old_mark);
}
}
update_numa_stats(node_index);
if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
// The events are checked individually as part of the actual commit
report_promotion_event(dest_attr, old, word_sz, age, obj_ptr, node_index);

View File

@ -95,6 +95,13 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
size_t _num_optional_regions;
G1OopStarChunkedList* _oops_into_optional_regions;
G1NUMA* _numa;
// Records how many object allocations happened at each node during copy to survivor.
// Recording only happens when gc+heap+numa logging is enabled; the gathered data is
// transferred to G1NUMA when flushed.
size_t* _obj_alloc_stat;
public:
G1ParScanThreadState(G1CollectedHeap* g1h,
G1RedirtyCardsQueueSet* rdcqs,
@ -207,6 +214,12 @@ private:
inline bool is_partially_trimmed() const;
inline void trim_queue_to_threshold(uint threshold);
// NUMA statistics related methods.
inline void initialize_numa_stats();
inline void flush_numa_stats();
inline void update_numa_stats(uint node_index);
public:
oop copy_to_survivor_space(G1HeapRegionAttr const region_attr, oop const obj, markWord const old_mark);

View File

@ -230,4 +230,30 @@ G1OopStarChunkedList* G1ParScanThreadState::oops_into_optional_region(const Heap
return &_oops_into_optional_regions[hr->index_in_opt_cset()];
}
void G1ParScanThreadState::initialize_numa_stats() {
if (_numa->is_enabled()) {
LogTarget(Info, gc, heap, numa) lt;
if (lt.is_enabled()) {
uint num_nodes = _numa->num_active_nodes();
// Record only if there are multiple active nodes.
_obj_alloc_stat = NEW_C_HEAP_ARRAY(size_t, num_nodes, mtGC);
memset(_obj_alloc_stat, 0, sizeof(size_t) * num_nodes);
}
}
}
void G1ParScanThreadState::flush_numa_stats() {
if (_obj_alloc_stat != NULL) {
uint node_index = _numa->index_of_current_thread();
_numa->copy_statistics(G1NUMAStats::LocalObjProcessAtCopyToSurv, node_index, _obj_alloc_stat);
}
}
void G1ParScanThreadState::update_numa_stats(uint node_index) {
if (_obj_alloc_stat != NULL) {
_obj_alloc_stat[node_index]++;
}
}
#endif // SHARE_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1NUMA.hpp"
#include "gc/g1/g1RegionsOnNodes.hpp"
#include "gc/g1/heapRegion.hpp"
G1RegionsOnNodes::G1RegionsOnNodes() : _count_per_node(NULL), _numa(G1NUMA::numa()) {
_count_per_node = NEW_C_HEAP_ARRAY(uint, _numa->num_active_nodes(), mtGC);
clear();
}
G1RegionsOnNodes::~G1RegionsOnNodes() {
FREE_C_HEAP_ARRAY(uint, _count_per_node);
}
uint G1RegionsOnNodes::add(HeapRegion* hr) {
uint node_index = hr->node_index();
// Update only if the node index is valid.
if (node_index < _numa->num_active_nodes()) {
*(_count_per_node + node_index) += 1;
return node_index;
}
return G1NUMA::UnknownNodeIndex;
}
void G1RegionsOnNodes::clear() {
for (uint i = 0; i < _numa->num_active_nodes(); i++) {
_count_per_node[i] = 0;
}
}
uint G1RegionsOnNodes::count(uint node_index) const {
return _count_per_node[node_index];
}

View File

@ -0,0 +1,51 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1REGIONS_HPP
#define SHARE_VM_GC_G1_G1REGIONS_HPP
#include "memory/allocation.hpp"
class G1NUMA;
class HeapRegion;
// Contains per node index region count
class G1RegionsOnNodes : public StackObj {
volatile uint* _count_per_node;
G1NUMA* _numa;
public:
G1RegionsOnNodes();
~G1RegionsOnNodes();
// Increases _count_per_node for the node of the given heap region and returns the node index.
uint add(HeapRegion* hr);
void clear();
uint count(uint node_index) const;
};
#endif // SHARE_VM_GC_G1_G1REGIONS_HPP

View File

@ -30,17 +30,23 @@
G1SurvivorRegions::G1SurvivorRegions() :
_regions(new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(8, true, mtGC)),
_used_bytes(0) {}
_used_bytes(0),
_regions_on_node() {}
void G1SurvivorRegions::add(HeapRegion* hr) {
uint G1SurvivorRegions::add(HeapRegion* hr) {
assert(hr->is_survivor(), "should be flagged as survivor region");
_regions->append(hr);
return _regions_on_node.add(hr);
}
uint G1SurvivorRegions::length() const {
return (uint)_regions->length();
}
uint G1SurvivorRegions::regions_on_node(uint node_index) const {
return _regions_on_node.count(node_index);
}
void G1SurvivorRegions::convert_to_eden() {
for (GrowableArrayIterator<HeapRegion*> it = _regions->begin();
it != _regions->end();
@ -54,6 +60,7 @@ void G1SurvivorRegions::convert_to_eden() {
void G1SurvivorRegions::clear() {
_regions->clear();
_used_bytes = 0;
_regions_on_node.clear();
}
void G1SurvivorRegions::add_used_bytes(size_t used_bytes) {

View File

@ -25,6 +25,7 @@
#ifndef SHARE_GC_G1_G1SURVIVORREGIONS_HPP
#define SHARE_GC_G1_G1SURVIVORREGIONS_HPP
#include "gc/g1/g1RegionsOnNodes.hpp"
#include "runtime/globals.hpp"
template <typename T>
@ -35,17 +36,19 @@ class G1SurvivorRegions {
private:
GrowableArray<HeapRegion*>* _regions;
volatile size_t _used_bytes;
G1RegionsOnNodes _regions_on_node;
public:
G1SurvivorRegions();
void add(HeapRegion* hr);
virtual uint add(HeapRegion* hr);
void convert_to_eden();
void clear();
uint length() const;
uint regions_on_node(uint node_index) const;
const GrowableArray<HeapRegion*>* regions() const {
return _regions;

View File

@ -477,9 +477,9 @@ void HeapRegion::print_on(outputStream* st) const {
if (UseNUMA) {
G1NUMA* numa = G1NUMA::numa();
if (node_index() < numa->num_active_nodes()) {
st->print("|%02d", numa->numa_id(node_index()));
st->print("|%d", numa->numa_id(node_index()));
} else {
st->print("|--");
st->print("|-");
}
}
st->print_cr("");

View File

@ -26,6 +26,7 @@
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1NUMAStats.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
@ -107,10 +108,11 @@ bool HeapRegionManager::is_available(uint region) const {
HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
HeapRegion* hr = NULL;
bool from_head = !type.is_young();
G1NUMA* numa = G1NUMA::numa();
if (requested_node_index != G1NUMA::AnyNodeIndex && G1NUMA::numa()->is_enabled()) {
if (requested_node_index != G1NUMA::AnyNodeIndex && numa->is_enabled()) {
// Try to allocate with requested node index.
hr = _free_list.remove_region_with_node_index(from_head, requested_node_index, NULL);
hr = _free_list.remove_region_with_node_index(from_head, requested_node_index);
}
if (hr == NULL) {
@ -122,6 +124,10 @@ HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint re
if (hr != NULL) {
assert(hr->next() == NULL, "Single region should not have next");
assert(is_available(hr->hrm_index()), "Must be committed");
if (numa->is_enabled() && hr->node_index() < numa->num_active_nodes()) {
numa->update_statistics(G1NUMAStats::NewRegionAlloc, requested_node_index, hr->node_index());
}
}
return hr;

View File

@ -192,6 +192,10 @@ public:
return _free_list.length();
}
uint num_free_regions(uint node_index) const {
return _free_list.length(node_index);
}
size_t total_free_bytes() const {
return num_free_regions() * HeapRegion::GrainBytes;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1NUMA.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
@ -101,6 +102,9 @@ void FreeRegionList::remove_all() {
curr->set_next(NULL);
curr->set_prev(NULL);
curr->set_containing_set(NULL);
decrease_length(curr->node_index());
curr = next;
}
clear();
@ -119,6 +123,10 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) {
return;
}
if (_node_info != NULL && from_list->_node_info != NULL) {
_node_info->add(from_list->_node_info);
}
#ifdef ASSERT
FreeRegionListIterator iter(from_list);
while (iter.more_available()) {
@ -220,6 +228,9 @@ void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) {
remove(curr);
count++;
decrease_length(curr->node_index());
curr = next;
}
@ -267,6 +278,10 @@ void FreeRegionList::clear() {
_head = NULL;
_tail = NULL;
_last = NULL;
if (_node_info != NULL) {
_node_info->clear();
}
}
void FreeRegionList::verify_list() {
@ -303,3 +318,41 @@ void FreeRegionList::verify_list() {
guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next");
guarantee(length() == count, "%s count mismatch. Expected %u, actual %u.", name(), length(), count);
}
FreeRegionList::FreeRegionList(const char* name, HeapRegionSetChecker* checker):
HeapRegionSetBase(name, checker),
_node_info(G1NUMA::numa()->is_enabled() ? new NodeInfo() : NULL) {
clear();
}
FreeRegionList::~FreeRegionList() {
if (_node_info != NULL) {
delete _node_info;
}
}
FreeRegionList::NodeInfo::NodeInfo() : _numa(G1NUMA::numa()), _length_of_node(NULL),
_num_nodes(_numa->num_active_nodes()) {
assert(UseNUMA, "Invariant");
_length_of_node = NEW_C_HEAP_ARRAY(uint, _num_nodes, mtGC);
}
FreeRegionList::NodeInfo::~NodeInfo() {
FREE_C_HEAP_ARRAY(uint, _length_of_node);
}
void FreeRegionList::NodeInfo::clear() {
for (uint i = 0; i < _num_nodes; ++i) {
_length_of_node[i] = 0;
}
}
void FreeRegionList::NodeInfo::add(NodeInfo* info) {
for (uint i = 0; i < _num_nodes; ++i) {
_length_of_node[i] += info->_length_of_node[i];
}
}

View File

@ -136,11 +136,33 @@ public:
// add / remove one region at a time or concatenate two lists.
class FreeRegionListIterator;
class G1NUMA;
class FreeRegionList : public HeapRegionSetBase {
friend class FreeRegionListIterator;
private:
// This class is only initialized if there are multiple active nodes.
class NodeInfo : public CHeapObj<mtGC> {
G1NUMA* _numa;
uint* _length_of_node;
uint _num_nodes;
public:
NodeInfo();
~NodeInfo();
inline void increase_length(uint node_index);
inline void decrease_length(uint node_index);
inline uint length(uint index) const;
void clear();
void add(NodeInfo* info);
};
HeapRegion* _head;
HeapRegion* _tail;
@ -148,20 +170,23 @@ private:
// time. It helps to improve performance when adding several ordered items in a row.
HeapRegion* _last;
NodeInfo* _node_info;
static uint _unrealistically_long_length;
inline HeapRegion* remove_from_head_impl();
inline HeapRegion* remove_from_tail_impl();
inline void increase_length(uint node_index);
inline void decrease_length(uint node_index);
protected:
// See the comment for HeapRegionSetBase::clear()
virtual void clear();
public:
FreeRegionList(const char* name, HeapRegionSetChecker* checker = NULL):
HeapRegionSetBase(name, checker) {
clear();
}
FreeRegionList(const char* name, HeapRegionSetChecker* checker = NULL);
~FreeRegionList();
void verify_list();
@ -182,8 +207,7 @@ public:
HeapRegion* remove_region(bool from_head);
HeapRegion* remove_region_with_node_index(bool from_head,
const uint requested_node_index,
uint* region_node_index);
uint requested_node_index);
// Merge two ordered lists. The result is also ordered. The order is
// determined by hrm_index.
@ -200,6 +224,9 @@ public:
virtual void verify();
uint num_of_regions_in_range(uint start, uint end) const;
using HeapRegionSetBase::length;
uint length(uint node_index) const;
};
// Iterator class that provides a convenient way to iterate over the

View File

@ -95,6 +95,8 @@ inline void FreeRegionList::add_ordered(HeapRegion* hr) {
_head = hr;
}
_last = hr;
increase_length(hr->node_index());
}
inline HeapRegion* FreeRegionList::remove_from_head_impl() {
@ -145,12 +147,14 @@ inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
// remove() will verify the region and check mt safety.
remove(hr);
decrease_length(hr->node_index());
return hr;
}
inline HeapRegion* FreeRegionList::remove_region_with_node_index(bool from_head,
const uint requested_node_index,
uint* allocated_node_index) {
uint requested_node_index) {
assert(UseNUMA, "Invariant");
const uint max_search_depth = G1NUMA::numa()->max_search_depth();
@ -202,11 +206,48 @@ inline HeapRegion* FreeRegionList::remove_region_with_node_index(bool from_head,
}
remove(cur);
if (allocated_node_index != NULL) {
*allocated_node_index = cur->node_index();
}
decrease_length(cur->node_index());
return cur;
}
inline void FreeRegionList::NodeInfo::increase_length(uint node_index) {
if (node_index < _num_nodes) {
_length_of_node[node_index] += 1;
}
}
inline void FreeRegionList::NodeInfo::decrease_length(uint node_index) {
if (node_index < _num_nodes) {
assert(_length_of_node[node_index] > 0,
"Current length %u should be greater than zero for node %u",
_length_of_node[node_index], node_index);
_length_of_node[node_index] -= 1;
}
}
inline uint FreeRegionList::NodeInfo::length(uint node_index) const {
return _length_of_node[node_index];
}
inline void FreeRegionList::increase_length(uint node_index) {
if (_node_info != NULL) {
return _node_info->increase_length(node_index);
}
}
inline void FreeRegionList::decrease_length(uint node_index) {
if (_node_info != NULL) {
return _node_info->decrease_length(node_index);
}
}
inline uint FreeRegionList::length(uint node_index) const {
if (_node_info != NULL) {
return _node_info->length(node_index);
} else {
return 0;
}
}
#endif // SHARE_GC_G1_HEAPREGIONSET_INLINE_HPP

View File

@ -111,8 +111,8 @@ public class TestG1NUMATouchRegions {
// Each 'int' represents the numa id of a single HeapRegion (bottom page).
// e.g. 1MB heap region, 2MB page size and 2 NUMA nodes system
// Check the first set(2 regions)
// 0| ...omitted..| 00
// 1| ...omitted..| 01
// 0| ...omitted..| 0
// 1| ...omitted..| 1
static void checkCase1Pattern(OutputAnalyzer output, int index, long g1HeapRegionSize, long actualPageSize, int[] memoryNodeIds) throws Exception {
StringBuilder sb = new StringBuilder();
@ -121,7 +121,7 @@ public class TestG1NUMATouchRegions {
sb.append("| .* | ");
// Append page node id.
sb.append(String.format("%02d", memoryNodeIds[index]));
sb.append(memoryNodeIds[index]);
output.shouldMatch(sb.toString());
}
@ -132,10 +132,10 @@ public class TestG1NUMATouchRegions {
// printed multiple times for same numa id.
// e.g. 1MB heap region, 2MB page size and 2 NUMA nodes system
// Check the first set(4 regions)
// 0| ...omitted..| 00
// 1| ...omitted..| 00
// 2| ...omitted..| 01
// 3| ...omitted..| 01
// 0| ...omitted..| 0
// 1| ...omitted..| 0
// 2| ...omitted..| 1
// 3| ...omitted..| 1
static void checkCase2Pattern(OutputAnalyzer output, int index, long g1HeapRegionSize, long actualPageSize, int[] memoryNodeIds) throws Exception {
StringBuilder sb = new StringBuilder();
@ -147,7 +147,7 @@ public class TestG1NUMATouchRegions {
sb.append("| .* | ");
// Append page node id.
sb.append(String.format("%02d", memoryNodeIds[index]));
sb.append(memoryNodeIds[index]);
output.shouldMatch(sb.toString());
sb.setLength(0);