Jon Masamitsu 2014-05-19 08:09:35 -07:00
commit 92baa3214c
8 changed files with 81 additions and 72 deletions

View File

@@ -819,7 +819,7 @@ void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurren
   // false before we start remark. At this point we should also be
   // in a STW phase.
   assert(!concurrent_marking_in_progress(), "invariant");
-  assert(_finger == _heap_end,
+  assert(out_of_regions(),
          err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                  p2i(_finger), p2i(_heap_end)));
   update_g1_committed(true);
@@ -978,7 +978,9 @@ void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
   if (concurrent()) {
     SuspendibleThreadSet::leave();
   }
-  _first_overflow_barrier_sync.enter();
+
+  bool barrier_aborted = !_first_overflow_barrier_sync.enter();
+
   if (concurrent()) {
     SuspendibleThreadSet::join();
   }
@@ -986,8 +988,18 @@ void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
   // more work
 
   if (verbose_low()) {
+    if (barrier_aborted) {
+      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
+    } else {
       gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
+    }
   }
+
+  if (barrier_aborted) {
+    // If the barrier aborted we ignore the overflow condition and
+    // just abort the whole marking phase as quickly as possible.
+    return;
+  }
 
   // If we're executing the concurrent phase of marking, reset the marking
   // state; otherwise the marking state is reset after reference processing,
@@ -1026,16 +1038,22 @@ void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
   if (concurrent()) {
     SuspendibleThreadSet::leave();
   }
-  _second_overflow_barrier_sync.enter();
+
+  bool barrier_aborted = !_second_overflow_barrier_sync.enter();
+
   if (concurrent()) {
     SuspendibleThreadSet::join();
   }
   // at this point everything should be re-initialized and ready to go
 
   if (verbose_low()) {
+    if (barrier_aborted) {
+      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
+    } else {
       gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
+    }
   }
 }
 
 #ifndef PRODUCT
 void ForceOverflowSettings::init() {
@@ -3240,6 +3258,8 @@ void ConcurrentMark::abort() {
   for (uint i = 0; i < _max_worker_id; ++i) {
     _tasks[i]->clear_region_fields();
   }
+  _first_overflow_barrier_sync.abort();
+  _second_overflow_barrier_sync.abort();
   _has_aborted = true;
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
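
Taken together, these hunks change the worker-side protocol around the overflow barriers: enter() now reports whether the barrier was aborted, and ConcurrentMark::abort() aborts both barrier syncs so that workers parked in them wake up instead of hanging. The following is only a condensed sketch of the resulting control flow, with the method bodies heavily trimmed; it is not the full HotSpot code.

// Condensed from the hunks above; "..." marks elided code.
void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  // ... leave the suspendible thread set while we block ...
  bool barrier_aborted = !_first_overflow_barrier_sync.enter();
  // ... rejoin the suspendible thread set ...
  if (barrier_aborted) {
    // abort() was called while we were waiting: skip the overflow
    // handling and let the caller wind down the marking phase.
    return;
  }
  // ... reset marking state for the restart ...
}

void ConcurrentMark::abort() {
  // ... clear per-task region fields ...
  _first_overflow_barrier_sync.abort();   // wake workers blocked in barrier 1
  _second_overflow_barrier_sync.abort();  // wake workers blocked in barrier 2
  _has_aborted = true;
  // ...
}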

View File

@@ -542,8 +542,12 @@ protected:
   // frequently.
   HeapRegion* claim_region(uint worker_id);
 
-  // It determines whether we've run out of regions to scan
-  bool out_of_regions() { return _finger == _heap_end; }
+  // It determines whether we've run out of regions to scan. Note that
+  // the finger can point past the heap end in case the heap was expanded
+  // to satisfy an allocation without doing a GC. This is fine, because all
+  // objects in those regions will be considered live anyway because of
+  // SATB guarantees (i.e. their TAMS will be equal to bottom).
+  bool out_of_regions() { return _finger >= _heap_end; }
 
   // Returns the task with the given id
   CMTask* task(int id) {
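
The relaxed comparison is also what lets the assert in set_concurrency_and_phase() (first hunk above) use out_of_regions() instead of an exact equality. A tiny self-contained illustration with made-up addresses, not HotSpot code:

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical addresses, for illustration only.
  uintptr_t heap_end = 0x740000000;  // heap end as seen by marking
  uintptr_t finger   = 0x748000000;  // global finger after a GC-less heap
                                     // expansion let it move past that end
  // finger == heap_end would be false here even though marking has no
  // regions left to scan; the relaxed check still reports "out of regions".
  assert(finger >= heap_end);
  return 0;
}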

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@@ -194,23 +194,16 @@ bool RSHashTable::add_card(RegionIdx_t region_ind, CardIdx_t card_index) {
 }
 
 bool RSHashTable::get_cards(RegionIdx_t region_ind, CardIdx_t* cards) {
-  int ind = (int) (region_ind & capacity_mask());
-  int cur_ind = _buckets[ind];
-  SparsePRTEntry* cur;
-  while (cur_ind != NullEntry &&
-         (cur = entry(cur_ind))->r_ind() != region_ind) {
-    cur_ind = cur->next_index();
+  SparsePRTEntry* entry = get_entry(region_ind);
+  if (entry == NULL) {
+    return false;
   }
-
-  if (cur_ind == NullEntry) return false;
   // Otherwise...
-  assert(cur->r_ind() == region_ind, "Postcondition of loop + test above.");
-  assert(cur->num_valid_cards() > 0, "Inv");
-  cur->copy_cards(cards);
+  entry->copy_cards(cards);
   return true;
 }
 
-SparsePRTEntry* RSHashTable::get_entry(RegionIdx_t region_ind) {
+SparsePRTEntry* RSHashTable::get_entry(RegionIdx_t region_ind) const {
   int ind = (int) (region_ind & capacity_mask());
   int cur_ind = _buckets[ind];
   SparsePRTEntry* cur;
@@ -246,28 +239,9 @@ bool RSHashTable::delete_entry(RegionIdx_t region_ind) {
   return true;
 }
 
-SparsePRTEntry*
-RSHashTable::entry_for_region_ind(RegionIdx_t region_ind) const {
-  assert(occupied_entries() < capacity(), "Precondition");
-  int ind = (int) (region_ind & capacity_mask());
-  int cur_ind = _buckets[ind];
-  SparsePRTEntry* cur;
-  while (cur_ind != NullEntry &&
-         (cur = entry(cur_ind))->r_ind() != region_ind) {
-    cur_ind = cur->next_index();
-  }
-
-  if (cur_ind != NullEntry) {
-    assert(cur->r_ind() == region_ind, "Loop postcondition + test");
-    return cur;
-  } else {
-    return NULL;
-  }
-}
-
 SparsePRTEntry*
 RSHashTable::entry_for_region_ind_create(RegionIdx_t region_ind) {
-  SparsePRTEntry* res = entry_for_region_ind(region_ind);
+  SparsePRTEntry* res = get_entry(region_ind);
   if (res == NULL) {
     int new_ind = alloc_entry();
     assert(0 <= new_ind && (size_t)new_ind < capacity(), "There should be room.");
@@ -365,7 +339,7 @@ bool RSHashTableIter::has_next(size_t& card_index) {
 }
 
 bool RSHashTable::contains_card(RegionIdx_t region_index, CardIdx_t card_index) const {
-  SparsePRTEntry* e = entry_for_region_ind(region_index);
+  SparsePRTEntry* e = get_entry(region_index);
   return (e != NULL && e->contains_card(card_index));
 }
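
With the duplicate helper gone, get_entry() is the single lookup path used by get_cards(), entry_for_region_ind_create() and contains_card(). The diff only shows the first three lines of get_entry() as context; the sketch below reconstructs the rest of the lookup from the deleted entry_for_region_ind() body, which implemented the same bucket-chain walk, so details may differ from the actual method.

// Reconstruction/sketch only, based on the deleted entry_for_region_ind() above.
SparsePRTEntry* RSHashTable::get_entry(RegionIdx_t region_ind) const {
  int ind = (int) (region_ind & capacity_mask());  // hash region index to a bucket
  int cur_ind = _buckets[ind];                     // head of that bucket's chain
  SparsePRTEntry* cur;
  while (cur_ind != NullEntry &&
         (cur = entry(cur_ind))->r_ind() != region_ind) {
    cur_ind = cur->next_index();                   // follow the intrusive chain
  }
  if (cur_ind == NullEntry) {
    return NULL;                                   // no entry for this region
  }
  return cur;
}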

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -119,12 +119,6 @@ class RSHashTable : public CHeapObj<mtGC> {
   int _free_region;
   int _free_list;
 
-  // Requires that the caller hold a lock preventing parallel modifying
-  // operations, and that the the table be less than completely full. If
-  // an entry for "region_ind" is already in the table, finds it and
-  // returns its address; otherwise returns "NULL."
-  SparsePRTEntry* entry_for_region_ind(RegionIdx_t region_ind) const;
-
   // Requires that the caller hold a lock preventing parallel modifying
   // operations, and that the the table be less than completely full. If
   // an entry for "region_ind" is already in the table, finds it and
@@ -158,7 +152,7 @@ public:
 
   void add_entry(SparsePRTEntry* e);
 
-  SparsePRTEntry* get_entry(RegionIdx_t region_id);
+  SparsePRTEntry* get_entry(RegionIdx_t region_id) const;
 
   void clear();

View File

@@ -1433,10 +1433,9 @@ size_t MetaspaceGC::allowed_expansion() {
   }
 
   size_t capacity_until_gc = capacity_until_GC();
-
-  if (capacity_until_gc <= committed_bytes) {
-    return 0;
-  }
+  assert(capacity_until_gc >= committed_bytes,
+         err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
+                 capacity_until_gc, committed_bytes));
 
   size_t left_until_GC = capacity_until_gc - committed_bytes;
   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
@@ -1449,7 +1448,15 @@ void MetaspaceGC::compute_new_size() {
   uint current_shrink_factor = _shrink_factor;
   _shrink_factor = 0;
 
-  const size_t used_after_gc = MetaspaceAux::capacity_bytes();
+  // Using committed_bytes() for used_after_gc is an overestimation, since the
+  // chunk free lists are included in committed_bytes() and the memory in an
+  // un-fragmented chunk free list is available for future allocations.
+  // However, if the chunk free lists becomes fragmented, then the memory may
+  // not be available for future allocations and the memory is therefore "in use".
+  // Including the chunk free lists in the definition of "in use" is therefore
+  // necessary. Not including the chunk free lists can cause capacity_until_GC to
+  // shrink below committed_bytes() and this has caused serious bugs in the past.
+  const size_t used_after_gc = MetaspaceAux::committed_bytes();
   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
 
   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
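
The comment above is the heart of the change. A back-of-the-envelope sketch with made-up numbers shows why deriving used_after_gc from committed_bytes() keeps capacity_until_GC() from dropping below the committed size, which is the invariant the new assert in allowed_expansion() relies on. The real compute_new_size() logic has more cases; the free-ratio formula below is only its general shape.

#include <cstdio>

int main() {
  // Hypothetical numbers, illustration only.
  const double committed_mb = 120.0;  // committed metaspace, incl. chunk free lists
  const double capacity_mb  = 10.0;   // capacity_bytes(): free lists excluded
  const double max_free     = 0.70;   // stand-in for MaxMetaspaceFreeRatio / 100

  // Shrink target of roughly used / (1 - max_free): allow at most 70% free.
  double target_old = capacity_mb  / (1.0 - max_free);  // ~33 MB, below committed
  double target_new = committed_mb / (1.0 - max_free);  // 400 MB, never below committed

  printf("capacity_bytes()-based target:  %6.0f MB (< %.0f MB committed)\n",
         target_old, committed_mb);
  printf("committed_bytes()-based target: %6.0f MB (>= committed by construction)\n",
         target_new);
  return 0;
}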

View File

@@ -378,21 +378,22 @@ const char* AbstractGangTask::name() const {
 
 WorkGangBarrierSync::WorkGangBarrierSync()
   : _monitor(Mutex::safepoint, "work gang barrier sync", true),
-    _n_workers(0), _n_completed(0), _should_reset(false) {
+    _n_workers(0), _n_completed(0), _should_reset(false), _aborted(false) {
 }
 
 WorkGangBarrierSync::WorkGangBarrierSync(uint n_workers, const char* name)
   : _monitor(Mutex::safepoint, name, true),
-    _n_workers(n_workers), _n_completed(0), _should_reset(false) {
+    _n_workers(n_workers), _n_completed(0), _should_reset(false), _aborted(false) {
 }
 
 void WorkGangBarrierSync::set_n_workers(uint n_workers) {
   _n_workers = n_workers;
   _n_completed = 0;
   _should_reset = false;
+  _aborted = false;
 }
 
-void WorkGangBarrierSync::enter() {
+bool WorkGangBarrierSync::enter() {
   MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag);
   if (should_reset()) {
     // The should_reset() was set and we are the first worker to enter
@@ -415,10 +416,17 @@ void WorkGangBarrierSync::enter() {
     set_should_reset(true);
     monitor()->notify_all();
   } else {
-    while (n_completed() != n_workers()) {
+    while (n_completed() != n_workers() && !aborted()) {
       monitor()->wait(/* no_safepoint_check */ true);
     }
   }
+  return !aborted();
+}
+
+void WorkGangBarrierSync::abort() {
+  MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag);
+  set_aborted();
+  monitor()->notify_all();
 }
 
 // SubTasksDone functions.
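
For readers who want the barrier semantics in isolation, below is a minimal standalone sketch of an abortable barrier using C++11 primitives instead of HotSpot's Monitor and MutexLockerEx. It mirrors the enter()/abort() contract added above but omits the should_reset()/reuse handling, so it is an illustration, not the HotSpot implementation.

#include <condition_variable>
#include <mutex>

class AbortableBarrier {
  std::mutex              _mutex;
  std::condition_variable _cv;
  unsigned                _n_workers;
  unsigned                _n_completed;
  bool                    _aborted;

 public:
  explicit AbortableBarrier(unsigned n_workers)
    : _n_workers(n_workers), _n_completed(0), _aborted(false) {}

  // Returns false if the barrier was aborted while this thread was waiting.
  bool enter() {
    std::unique_lock<std::mutex> lock(_mutex);
    if (++_n_completed == _n_workers) {
      _cv.notify_all();                 // last worker releases everyone
    } else {
      _cv.wait(lock, [this] {
        return _n_completed == _n_workers || _aborted;
      });
    }
    return !_aborted;
  }

  // Wakes up all waiters; they then report failure from enter().
  void abort() {
    std::lock_guard<std::mutex> lock(_mutex);
    _aborted = true;
    _cv.notify_all();
  }
};

A worker calls enter() and bails out of its phase when it returns false, which is the same barrier_aborted pattern used in the concurrentMark.cpp hunks earlier in this change.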

View File

@@ -362,15 +362,17 @@ protected:
   uint _n_workers;
   uint _n_completed;
   bool _should_reset;
+  bool _aborted;
 
   Monitor* monitor() { return &_monitor; }
   uint n_workers() { return _n_workers; }
   uint n_completed() { return _n_completed; }
   bool should_reset() { return _should_reset; }
+  bool aborted() { return _aborted; }
 
   void zero_completed() { _n_completed = 0; }
   void inc_completed() { _n_completed++; }
-
+  void set_aborted() { _aborted = true; }
   void set_should_reset(bool v) { _should_reset = v; }
 
 public:
public: public:
@@ -383,8 +385,14 @@ public:
 
   // Enter the barrier. A worker that enters the barrier will
   // not be allowed to leave until all other threads have
-  // also entered the barrier.
-  void enter();
+  // also entered the barrier or the barrier is aborted.
+  // Returns false if the barrier was aborted.
+  bool enter();
+
+  // Aborts the barrier and wakes up any threads waiting for
+  // the barrier to complete. The barrier will remain in the
+  // aborted state until the next call to set_n_workers().
+  void abort();
 };
 
 // A class to manage claiming of subtasks within a group of tasks. The

View File

@@ -22,7 +22,7 @@
  */
 
 /*
- * @test TestPrintGCDetails
+ * @test TestGCLogMessages
  * @bug 8035406 8027295 8035398 8019342
  * @summary Ensure that the PrintGCDetails output for a minor GC with G1
  * includes the expected necessary messages.
@@ -90,12 +90,6 @@ public class TestGCLogMessages {
     output.shouldContain("[String Dedup Fixup");
     output.shouldContain("[Young Free CSet");
     output.shouldContain("[Non-Young Free CSet");
-
-    // also check evacuation failure messages once
-    output.shouldNotContain("[Evacuation Failure");
-    output.shouldNotContain("[Recalculate Used");
-    output.shouldNotContain("[Remove Self Forwards");
-    output.shouldNotContain("[Restore RemSet");
 
     output.shouldHaveExitValue(0);
   }