Keith McGuigan 2011-05-06 11:25:16 -04:00
commit cb6e2c12f4
21 changed files with 295 additions and 148 deletions

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,7 @@
// Defines Linux-specific default values. The flags are available on all // Defines Linux-specific default values. The flags are available on all
// platforms, but they may have different default values on other platforms. // platforms, but they may have different default values on other platforms.
// //
define_pd_global(bool, UseLargePages, false); define_pd_global(bool, UseLargePages, true);
define_pd_global(bool, UseLargePagesIndividualAllocation, false); define_pd_global(bool, UseLargePagesIndividualAllocation, false);
define_pd_global(bool, UseOSErrorReporting, false); define_pd_global(bool, UseOSErrorReporting, false);
define_pd_global(bool, UseThreadPriorities, true) ; define_pd_global(bool, UseThreadPriorities, true) ;

View File

@ -2914,16 +2914,21 @@ static void set_coredump_filter(void) {
static size_t _large_page_size = 0; static size_t _large_page_size = 0;
bool os::large_page_init() { void os::large_page_init() {
if (!UseLargePages) { if (!UseLargePages) {
UseHugeTLBFS = false; UseHugeTLBFS = false;
UseSHM = false; UseSHM = false;
return false; return;
} }
if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) { if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
// Our user has not expressed a preference, so we'll try both. // If UseLargePages is specified on the command line try both methods,
UseHugeTLBFS = UseSHM = true; // if it's default, then try only HugeTLBFS.
if (FLAG_IS_DEFAULT(UseLargePages)) {
UseHugeTLBFS = true;
} else {
UseHugeTLBFS = UseSHM = true;
}
} }
if (LargePageSizeInBytes) { if (LargePageSizeInBytes) {
@ -2978,7 +2983,6 @@ bool os::large_page_init() {
_page_sizes[1] = default_page_size; _page_sizes[1] = default_page_size;
_page_sizes[2] = 0; _page_sizes[2] = 0;
} }
UseHugeTLBFS = UseHugeTLBFS && UseHugeTLBFS = UseHugeTLBFS &&
Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size); Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
@ -2988,12 +2992,6 @@ bool os::large_page_init() {
UseLargePages = UseHugeTLBFS || UseSHM; UseLargePages = UseHugeTLBFS || UseSHM;
set_coredump_filter(); set_coredump_filter();
// Large page support is available on 2.6 or newer kernel, some vendors
// (e.g. Redhat) have backported it to their 2.4 based distributions.
// We optimistically assume the support is available. If later it turns out
// not true, VM will automatically switch to use regular page size.
return true;
} }
#ifndef SHM_HUGETLB #ifndef SHM_HUGETLB
@ -4118,7 +4116,7 @@ jint os::init_2(void)
#endif #endif
} }
FLAG_SET_DEFAULT(UseLargePages, os::large_page_init()); os::large_page_init();
// initialize suspend/resume support - must do this before signal_sets_init() // initialize suspend/resume support - must do this before signal_sets_init()
if (SR_initialize() != 0) { if (SR_initialize() != 0) {
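Taken together, these hunks change large_page_init() from returning a bool (which init_2() previously fed into FLAG_SET_DEFAULT) into a void function that adjusts the flags in place. A condensed behavior sketch, paraphrasing the diff rather than quoting the source:

// Paraphrased Linux behavior after this change:
//   -XX:-UseLargePages            -> UseHugeTLBFS = UseSHM = false; nothing tried
//   all flags left at defaults    -> only UseHugeTLBFS is tried
//   -XX:+UseLargePages explicitly -> both UseHugeTLBFS and UseSHM are tried
// After the sanity checks, UseLargePages = UseHugeTLBFS || UseSHM, so
// callers now read the flag itself rather than a return value.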

View File

@ -3336,11 +3336,11 @@ bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) {
return true; return true;
} }
bool os::large_page_init() { void os::large_page_init() {
if (!UseLargePages) { if (!UseLargePages) {
UseISM = false; UseISM = false;
UseMPSS = false; UseMPSS = false;
return false; return;
} }
// print a warning if any large page related flag is specified on command line // print a warning if any large page related flag is specified on command line
@ -3361,7 +3361,6 @@ bool os::large_page_init() {
Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size); Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
UseLargePages = UseISM || UseMPSS; UseLargePages = UseISM || UseMPSS;
return UseLargePages;
} }
bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) { bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
@ -4992,7 +4991,7 @@ jint os::init_2(void) {
#endif #endif
} }
FLAG_SET_DEFAULT(UseLargePages, os::large_page_init()); os::large_page_init();
// Check minimum allowable stack size for thread creation and to initialize // Check minimum allowable stack size for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page // the java system classes, including StackOverflowError - depends on page

View File

@ -2762,8 +2762,8 @@ static void cleanup_after_large_page_init() {
_hToken = NULL; _hToken = NULL;
} }
bool os::large_page_init() { void os::large_page_init() {
if (!UseLargePages) return false; if (!UseLargePages) return;
// print a warning if any large page related flag is specified on command line // print a warning if any large page related flag is specified on command line
bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
@ -2808,7 +2808,7 @@ bool os::large_page_init() {
} }
cleanup_after_large_page_init(); cleanup_after_large_page_init();
return success; UseLargePages = success;
} }
// On win32, one cannot release just a part of reserved memory, it's an // On win32, one cannot release just a part of reserved memory, it's an
@ -3561,7 +3561,7 @@ jint os::init_2(void) {
#endif #endif
} }
FLAG_SET_DEFAULT(UseLargePages, os::large_page_init()); os::large_page_init();
// Setup Windows Exceptions // Setup Windows Exceptions

View File

@ -826,6 +826,14 @@ public:
void ConcurrentMark::checkpointRootsInitialPost() { void ConcurrentMark::checkpointRootsInitialPost() {
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
// If we force an overflow during remark, the remark operation will
// actually abort and we'll restart concurrent marking. If we always
// force an overflow during remark we'll never actually complete the
// marking phase. So, we initialize this here, at the start of the
// cycle, so that the remaining overflow count decreases at every
// remark and we'll eventually not need to cause one.
force_overflow_stw()->init();
// For each region note start of marking. // For each region note start of marking.
NoteStartOfMarkHRClosure startcl; NoteStartOfMarkHRClosure startcl;
g1h->heap_region_iterate(&startcl); g1h->heap_region_iterate(&startcl);
@ -893,27 +901,37 @@ void ConcurrentMark::checkpointRootsInitial() {
} }
/* /*
Notice that in the next two methods, we actually leave the STS * Notice that in the next two methods, we actually leave the STS
during the barrier sync and join it immediately afterwards. If we * during the barrier sync and join it immediately afterwards. If we
do not do this, this then the following deadlock can occur: one * do not do this, the following deadlock can occur: one thread could
thread could be in the barrier sync code, waiting for the other * be in the barrier sync code, waiting for the other thread to also
thread to also sync up, whereas another one could be trying to * sync up, whereas another one could be trying to yield, while also
yield, while also waiting for the other threads to sync up too. * waiting for the other threads to sync up too.
*
Because the thread that does the sync barrier has left the STS, it * Note, however, that this code is also used during remark and in
is possible to be suspended for a Full GC or an evacuation pause * this case we should not attempt to leave / enter the STS, otherwise
could occur. This is actually safe, since the entering the sync * we'll either hit an assert (debug / fastdebug) or deadlock
barrier is one of the last things do_marking_step() does, and it * (product). So we should only leave / enter the STS if we are
doesn't manipulate any data structures afterwards. * operating concurrently.
*/ *
* Because the thread that does the sync barrier has left the STS, it
* is possible to be suspended for a Full GC or an evacuation pause
* could occur. This is actually safe, since entering the sync
* barrier is one of the last things do_marking_step() does, and it
* doesn't manipulate any data structures afterwards.
*/
void ConcurrentMark::enter_first_sync_barrier(int task_num) { void ConcurrentMark::enter_first_sync_barrier(int task_num) {
if (verbose_low()) if (verbose_low())
gclog_or_tty->print_cr("[%d] entering first barrier", task_num); gclog_or_tty->print_cr("[%d] entering first barrier", task_num);
ConcurrentGCThread::stsLeave(); if (concurrent()) {
ConcurrentGCThread::stsLeave();
}
_first_overflow_barrier_sync.enter(); _first_overflow_barrier_sync.enter();
ConcurrentGCThread::stsJoin(); if (concurrent()) {
ConcurrentGCThread::stsJoin();
}
// at this point everyone should have synced up and not be doing any // at this point everyone should have synced up and not be doing any
// more work // more work
@ -923,7 +941,12 @@ void ConcurrentMark::enter_first_sync_barrier(int task_num) {
// let task 0 do this // let task 0 do this
if (task_num == 0) { if (task_num == 0) {
// task 0 is responsible for clearing the global data structures // task 0 is responsible for clearing the global data structures
clear_marking_state(); // We should be here because of an overflow. During STW we should
// not clear the overflow flag since we rely on it being true when
// we exit this method to abort the pause and restart concurrent
// marking.
clear_marking_state(concurrent() /* clear_overflow */);
force_overflow()->update();
if (PrintGC) { if (PrintGC) {
gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->date_stamp(PrintGCDateStamps);
@ -940,15 +963,45 @@ void ConcurrentMark::enter_second_sync_barrier(int task_num) {
if (verbose_low()) if (verbose_low())
gclog_or_tty->print_cr("[%d] entering second barrier", task_num); gclog_or_tty->print_cr("[%d] entering second barrier", task_num);
ConcurrentGCThread::stsLeave(); if (concurrent()) {
ConcurrentGCThread::stsLeave();
}
_second_overflow_barrier_sync.enter(); _second_overflow_barrier_sync.enter();
ConcurrentGCThread::stsJoin(); if (concurrent()) {
ConcurrentGCThread::stsJoin();
}
// at this point everything should be re-initialised and ready to go // at this point everything should be re-initialised and ready to go
if (verbose_low()) if (verbose_low())
gclog_or_tty->print_cr("[%d] leaving second barrier", task_num); gclog_or_tty->print_cr("[%d] leaving second barrier", task_num);
} }
#ifndef PRODUCT
void ForceOverflowSettings::init() {
_num_remaining = G1ConcMarkForceOverflow;
_force = false;
update();
}
void ForceOverflowSettings::update() {
if (_num_remaining > 0) {
_num_remaining -= 1;
_force = true;
} else {
_force = false;
}
}
bool ForceOverflowSettings::should_force() {
if (_force) {
_force = false;
return true;
} else {
return false;
}
}
#endif // !PRODUCT
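A usage note on this hook: G1ConcMarkForceOverflow is declared as a develop flag further down in this commit, so it is only settable in debug/fastdebug builds; in product builds the three methods above compile away via their PRODUCT_RETURN declarations. A hypothetical fastdebug invocation to force a few overflow/restart cycles during concurrent marking (app.jar is a placeholder):

java -XX:+UseG1GC -XX:G1ConcMarkForceOverflow=3 -jar app.jar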
void ConcurrentMark::grayRoot(oop p) { void ConcurrentMark::grayRoot(oop p) {
HeapWord* addr = (HeapWord*) p; HeapWord* addr = (HeapWord*) p;
// We can't really check against _heap_start and _heap_end, since it // We can't really check against _heap_start and _heap_end, since it
@ -1117,6 +1170,7 @@ void ConcurrentMark::markFromRoots() {
_restart_for_overflow = false; _restart_for_overflow = false;
size_t active_workers = MAX2((size_t) 1, parallel_marking_threads()); size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
force_overflow_conc()->init();
set_phase(active_workers, true /* concurrent */); set_phase(active_workers, true /* concurrent */);
CMConcurrentMarkingTask markingTask(this, cmThread()); CMConcurrentMarkingTask markingTask(this, cmThread());
@ -1845,7 +1899,7 @@ void ConcurrentMark::completeCleanup() {
while (!_cleanup_list.is_empty()) { while (!_cleanup_list.is_empty()) {
HeapRegion* hr = _cleanup_list.remove_head(); HeapRegion* hr = _cleanup_list.remove_head();
assert(hr != NULL, "the list was not empty"); assert(hr != NULL, "the list was not empty");
hr->rem_set()->clear(); hr->par_clear();
tmp_free_list.add_as_tail(hr); tmp_free_list.add_as_tail(hr);
// Instead of adding one region at a time to the secondary_free_list, // Instead of adding one region at a time to the secondary_free_list,
@ -2703,12 +2757,16 @@ void ConcurrentMark::oops_do(OopClosure* cl) {
} }
void ConcurrentMark::clear_marking_state() { void ConcurrentMark::clear_marking_state(bool clear_overflow) {
_markStack.setEmpty(); _markStack.setEmpty();
_markStack.clear_overflow(); _markStack.clear_overflow();
_regionStack.setEmpty(); _regionStack.setEmpty();
_regionStack.clear_overflow(); _regionStack.clear_overflow();
clear_has_overflown(); if (clear_overflow) {
clear_has_overflown();
} else {
assert(has_overflown(), "pre-condition");
}
_finger = _heap_start; _finger = _heap_start;
for (int i = 0; i < (int)_max_task_num; ++i) { for (int i = 0; i < (int)_max_task_num; ++i) {
@ -4279,6 +4337,15 @@ void CMTask::do_marking_step(double time_target_ms,
} }
} }
// If we are about to wrap up and go into termination, check if we
// should raise the overflow flag.
if (do_termination && !has_aborted()) {
if (_cm->force_overflow()->should_force()) {
_cm->set_has_overflown();
regular_clock_call();
}
}
// We still haven't aborted. Now, let's try to get into the // We still haven't aborted. Now, let's try to get into the
// termination protocol. // termination protocol.
if (do_termination && !has_aborted()) { if (do_termination && !has_aborted()) {

View File

@ -316,6 +316,19 @@ public:
void setEmpty() { _index = 0; clear_overflow(); } void setEmpty() { _index = 0; clear_overflow(); }
}; };
class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
private:
#ifndef PRODUCT
uintx _num_remaining;
bool _force;
#endif // !defined(PRODUCT)
public:
void init() PRODUCT_RETURN;
void update() PRODUCT_RETURN;
bool should_force() PRODUCT_RETURN_( return false; );
};
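The PRODUCT_RETURN macros let these declarations double as empty inline definitions in product builds, while the real bodies live in the .cpp under #ifndef PRODUCT. A simplified sketch of how HotSpot defines them (paraphrased from utilities/macros.hpp; check the actual header):

#ifdef PRODUCT
#define PRODUCT_RETURN        {}          // empty inline body in product
#define PRODUCT_RETURN_(code) { code }    // e.g. { return false; } above
#else
#define PRODUCT_RETURN        /* next token must be ; */
#define PRODUCT_RETURN_(code) /* next token must be ; */
#endif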
// this will enable a variety of different statistics per GC task // this will enable a variety of different statistics per GC task
#define _MARKING_STATS_ 0 #define _MARKING_STATS_ 0
// this will enable the higher verbose levels // this will enable the higher verbose levels
@ -462,6 +475,9 @@ protected:
WorkGang* _parallel_workers; WorkGang* _parallel_workers;
ForceOverflowSettings _force_overflow_conc;
ForceOverflowSettings _force_overflow_stw;
void weakRefsWork(bool clear_all_soft_refs); void weakRefsWork(bool clear_all_soft_refs);
void swapMarkBitMaps(); void swapMarkBitMaps();
@ -470,7 +486,7 @@ protected:
// task local ones; should be called during initial mark. // task local ones; should be called during initial mark.
void reset(); void reset();
// It resets all the marking data structures. // It resets all the marking data structures.
void clear_marking_state(); void clear_marking_state(bool clear_overflow = true);
// It should be called to indicate which phase we're in (concurrent // It should be called to indicate which phase we're in (concurrent
// mark or remark) and how many threads are currently active. // mark or remark) and how many threads are currently active.
@ -547,6 +563,22 @@ protected:
void enter_first_sync_barrier(int task_num); void enter_first_sync_barrier(int task_num);
void enter_second_sync_barrier(int task_num); void enter_second_sync_barrier(int task_num);
ForceOverflowSettings* force_overflow_conc() {
return &_force_overflow_conc;
}
ForceOverflowSettings* force_overflow_stw() {
return &_force_overflow_stw;
}
ForceOverflowSettings* force_overflow() {
if (concurrent()) {
return force_overflow_conc();
} else {
return force_overflow_stw();
}
}
public: public:
// Manipulation of the global mark stack. // Manipulation of the global mark stack.
// Notice that the first mark_stack_push is CAS-based, whereas the // Notice that the first mark_stack_push is CAS-based, whereas the

View File

@ -3975,6 +3975,9 @@ void G1CollectedHeap::drain_evac_failure_scan_stack() {
oop oop
G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
oop old) { oop old) {
assert(obj_in_cs(old),
err_msg("obj: "PTR_FORMAT" should still be in the CSet",
(HeapWord*) old));
markOop m = old->mark(); markOop m = old->mark();
oop forward_ptr = old->forward_to_atomic(old); oop forward_ptr = old->forward_to_atomic(old);
if (forward_ptr == NULL) { if (forward_ptr == NULL) {
@ -3997,7 +4000,13 @@ G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
} }
return old; return old;
} else { } else {
// Someone else had a place to copy it. // Forward-to-self failed. Either someone else managed to allocate
// space for this object (old != forward_ptr) or they beat us in
// self-forwarding it (old == forward_ptr).
assert(old == forward_ptr || !obj_in_cs(forward_ptr),
err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
"should not be in the CSet",
(HeapWord*) old, (HeapWord*) forward_ptr));
return forward_ptr; return forward_ptr;
} }
} }
@ -4308,11 +4317,10 @@ template <class T> void G1ParCopyHelper::mark_forwardee(T* p) {
T heap_oop = oopDesc::load_heap_oop(p); T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) { if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop(heap_oop); oop obj = oopDesc::decode_heap_oop(heap_oop);
assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)),
"shouldn't still be in the CSet if evacuation didn't fail.");
HeapWord* addr = (HeapWord*)obj; HeapWord* addr = (HeapWord*)obj;
if (_g1->is_in_g1_reserved(addr)) if (_g1->is_in_g1_reserved(addr)) {
_cm->grayRoot(oop(addr)); _cm->grayRoot(oop(addr));
}
} }
} }
@ -4961,36 +4969,45 @@ public:
#ifndef PRODUCT #ifndef PRODUCT
class G1VerifyCardTableCleanup: public HeapRegionClosure { class G1VerifyCardTableCleanup: public HeapRegionClosure {
G1CollectedHeap* _g1h;
CardTableModRefBS* _ct_bs; CardTableModRefBS* _ct_bs;
public: public:
G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
: _ct_bs(ct_bs) { } : _g1h(g1h), _ct_bs(ct_bs) { }
virtual bool doHeapRegion(HeapRegion* r) { virtual bool doHeapRegion(HeapRegion* r) {
MemRegion mr(r->bottom(), r->end());
if (r->is_survivor()) { if (r->is_survivor()) {
_ct_bs->verify_dirty_region(mr); _g1h->verify_dirty_region(r);
} else { } else {
_ct_bs->verify_clean_region(mr); _g1h->verify_not_dirty_region(r);
} }
return false; return false;
} }
}; };
void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
// All of the region should be clean.
CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
MemRegion mr(hr->bottom(), hr->end());
ct_bs->verify_not_dirty_region(mr);
}
void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
// We cannot guarantee that [bottom(),end()] is dirty. Threads
// dirty allocated blocks as they allocate them. The thread that
// retires each region and replaces it with a new one will do a
// maximal allocation to fill in [pre_dummy_top(),end()] but will
// not dirty that area (one less thing to have to do while holding
// a lock). So we can only verify that [bottom(),pre_dummy_top()]
// is dirty.
CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
MemRegion mr(hr->bottom(), hr->pre_dummy_top());
ct_bs->verify_dirty_region(mr);
}
void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) { void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) { for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
// We cannot guarantee that [bottom(),end()] is dirty. Threads verify_dirty_region(hr);
// dirty allocated blocks as they allocate them. The thread that
// retires each region and replaces it with a new one will do a
// maximal allocation to fill in [pre_dummy_top(),end()] but will
// not dirty that area (one less thing to have to do while holding
// a lock). So we can only verify that [bottom(),pre_dummy_top()]
// is dirty. Also note that verify_dirty_region() requires
// mr.start() and mr.end() to be card aligned and pre_dummy_top()
// is not guaranteed to be.
MemRegion mr(hr->bottom(),
ct_bs->align_to_card_boundary(hr->pre_dummy_top()));
ct_bs->verify_dirty_region(mr);
} }
} }
@ -5033,7 +5050,7 @@ void G1CollectedHeap::cleanUpCardTable() {
g1_policy()->record_clear_ct_time( elapsed * 1000.0); g1_policy()->record_clear_ct_time( elapsed * 1000.0);
#ifndef PRODUCT #ifndef PRODUCT
if (G1VerifyCTCleanup || VerifyAfterGC) { if (G1VerifyCTCleanup || VerifyAfterGC) {
G1VerifyCardTableCleanup cleanup_verifier(ct_bs); G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
heap_region_iterate(&cleanup_verifier); heap_region_iterate(&cleanup_verifier);
} }
#endif #endif

View File

@ -970,6 +970,8 @@ public:
// The number of regions available for "regular" expansion. // The number of regions available for "regular" expansion.
size_t expansion_regions() { return _expansion_regions; } size_t expansion_regions() { return _expansion_regions; }
void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN; void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
void verify_dirty_young_regions() PRODUCT_RETURN; void verify_dirty_young_regions() PRODUCT_RETURN;

View File

@ -157,7 +157,6 @@ public:
void set_try_claimed() { _try_claimed = true; } void set_try_claimed() { _try_claimed = true; }
void scanCard(size_t index, HeapRegion *r) { void scanCard(size_t index, HeapRegion *r) {
_cards_done++;
DirtyCardToOopClosure* cl = DirtyCardToOopClosure* cl =
r->new_dcto_closure(_oc, r->new_dcto_closure(_oc,
CardTableModRefBS::Precise, CardTableModRefBS::Precise,
@ -168,17 +167,14 @@ public:
HeapWord* card_start = _bot_shared->address_for_index(index); HeapWord* card_start = _bot_shared->address_for_index(index);
HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words; HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
Space *sp = SharedHeap::heap()->space_containing(card_start); Space *sp = SharedHeap::heap()->space_containing(card_start);
MemRegion sm_region; MemRegion sm_region = sp->used_region_at_save_marks();
if (ParallelGCThreads > 0) {
// first find the used area
sm_region = sp->used_region_at_save_marks();
} else {
// The closure is not idempotent. We shouldn't look at objects
// allocated during the GC.
sm_region = sp->used_region_at_save_marks();
}
MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end)); MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
if (!mr.is_empty()) { if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
// We mark the card as "claimed" lazily (so races are possible
// but they're benign), which reduces the number of duplicate
// scans (the rsets of the regions in the cset can intersect).
_ct_bs->set_card_claimed(index);
_cards_done++;
cl->do_MemRegion(mr); cl->do_MemRegion(mr);
} }
} }
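The claiming above is deliberately non-atomic: is_card_claimed() and set_card_claimed() are plain reads and writes, so two workers can both see a card as unclaimed and both scan it, a race the added comment calls benign because the claim bit only exists to trim redundant work. An illustrative sketch of the pattern (not the HotSpot card-table API):

// Illustrative only: a benign check-then-act race, no atomics needed.
// Losing the race costs one extra, harmless scan of the card.
bool lazily_claim(volatile jbyte* card, jbyte claimed_val) {
  if (*card == claimed_val) return false;  // usually filters duplicates
  *card = claimed_val;                     // plain store, racy on purpose
  return true;
}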
@ -199,6 +195,9 @@ public:
HeapRegionRemSet* hrrs = r->rem_set(); HeapRegionRemSet* hrrs = r->rem_set();
if (hrrs->iter_is_complete()) return false; // All done. if (hrrs->iter_is_complete()) return false; // All done.
if (!_try_claimed && !hrrs->claim_iter()) return false; if (!_try_claimed && !hrrs->claim_iter()) return false;
// If we ever free the collection set concurrently, we should also
// clear the card table concurrently; therefore we won't need to
// add regions of the collection set to the dirty cards region.
_g1h->push_dirty_cards_region(r); _g1h->push_dirty_cards_region(r);
// If we didn't return above, then // If we didn't return above, then
// _try_claimed || r->claim_iter() // _try_claimed || r->claim_iter()
@ -230,15 +229,10 @@ public:
_g1h->push_dirty_cards_region(card_region); _g1h->push_dirty_cards_region(card_region);
} }
// If the card is dirty, then we will scan it during updateRS. // If the card is dirty, then we will scan it during updateRS.
if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) { if (!card_region->in_collection_set() &&
// We make the card as "claimed" lazily (so races are possible but they're benign), !_ct_bs->is_card_dirty(card_index)) {
// which reduces the number of duplicate scans (the rsets of the regions in the cset scanCard(card_index, card_region);
// can intersect).
if (!_ct_bs->is_card_claimed(card_index)) {
_ct_bs->set_card_claimed(card_index);
scanCard(card_index, card_region);
}
} }
} }
if (!_try_claimed) { if (!_try_claimed) {
@ -246,8 +240,6 @@ public:
} }
return false; return false;
} }
// Set all cards back to clean.
void cleanup() {_g1h->cleanUpCardTable();}
size_t cards_done() { return _cards_done;} size_t cards_done() { return _cards_done;}
size_t cards_looked_up() { return _cards;} size_t cards_looked_up() { return _cards;}
}; };
@ -566,8 +558,9 @@ public:
update_rs_cl.set_region(r); update_rs_cl.set_region(r);
HeapWord* stop_point = HeapWord* stop_point =
r->oops_on_card_seq_iterate_careful(scanRegion, r->oops_on_card_seq_iterate_careful(scanRegion,
&filter_then_update_rs_cset_oop_cl, &filter_then_update_rs_cset_oop_cl,
false /* filter_young */); false /* filter_young */,
NULL /* card_ptr */);
// Since this is performed in the event of an evacuation failure, we // Since this is performed in the event of an evacuation failure, we
// shouldn't see a non-null stop point // shouldn't see a non-null stop point
@ -735,12 +728,6 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
(OopClosure*)&mux : (OopClosure*)&mux :
(OopClosure*)&update_rs_oop_cl)); (OopClosure*)&update_rs_oop_cl));
// Undirty the card.
*card_ptr = CardTableModRefBS::clean_card_val();
// We must complete this write before we do any of the reads below.
OrderAccess::storeload();
// And process it, being careful of unallocated portions of TLAB's.
// The region for the current card may be a young region. The // The region for the current card may be a young region. The
// current card may have been a card that was evicted from the // current card may have been a card that was evicted from the
// card cache. When the card was inserted into the cache, we had // card cache. When the card was inserted into the cache, we had
@ -749,7 +736,7 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
// and tagged as young. // and tagged as young.
// //
// We wish to filter out cards for such a region but the current // We wish to filter out cards for such a region but the current
// thread, if we're running conucrrently, may "see" the young type // thread, if we're running concurrently, may "see" the young type
// change at any time (so an earlier "is_young" check may pass or // change at any time (so an earlier "is_young" check may pass or
// fail arbitrarily). We tell the iteration code to perform this // fail arbitrarily). We tell the iteration code to perform this
// filtering when it has been determined that there has been an actual // filtering when it has been determined that there has been an actual
@ -759,7 +746,8 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
HeapWord* stop_point = HeapWord* stop_point =
r->oops_on_card_seq_iterate_careful(dirtyRegion, r->oops_on_card_seq_iterate_careful(dirtyRegion,
&filter_then_update_rs_oop_cl, &filter_then_update_rs_oop_cl,
filter_young); filter_young,
card_ptr);
// If stop_point is non-null, then we encountered an unallocated region // If stop_point is non-null, then we encountered an unallocated region
// (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the

View File

@ -311,7 +311,11 @@
\ \
develop(bool, G1ExitOnExpansionFailure, false, \ develop(bool, G1ExitOnExpansionFailure, false, \
"Raise a fatal VM exit out of memory failure in the event " \ "Raise a fatal VM exit out of memory failure in the event " \
" that heap expansion fails due to running out of swap.") " that heap expansion fails due to running out of swap.") \
\
develop(uintx, G1ConcMarkForceOverflow, 0, \
"The number of times we'll force an overflow during " \
"concurrent marking")
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG) G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)

View File

@ -376,6 +376,17 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
if (clear_space) clear(SpaceDecorator::Mangle); if (clear_space) clear(SpaceDecorator::Mangle);
} }
void HeapRegion::par_clear() {
assert(used() == 0, "the region should have been already cleared");
assert(capacity() == (size_t) HeapRegion::GrainBytes,
"should be back to normal");
HeapRegionRemSet* hrrs = rem_set();
hrrs->clear();
CardTableModRefBS* ct_bs =
(CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
ct_bs->clear(MemRegion(bottom(), end()));
}
// <PREDICTION> // <PREDICTION>
void HeapRegion::calc_gc_efficiency() { void HeapRegion::calc_gc_efficiency() {
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
@ -600,7 +611,15 @@ HeapWord*
HeapRegion:: HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr, oops_on_card_seq_iterate_careful(MemRegion mr,
FilterOutOfRegionClosure* cl, FilterOutOfRegionClosure* cl,
bool filter_young) { bool filter_young,
jbyte* card_ptr) {
// Currently, we should only have to clean the card if filter_young
// is true and vice versa.
if (filter_young) {
assert(card_ptr != NULL, "pre-condition");
} else {
assert(card_ptr == NULL, "pre-condition");
}
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
// If we're within a stop-world GC, then we might look at a card in a // If we're within a stop-world GC, then we might look at a card in a
@ -626,6 +645,15 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
assert(!is_young(), "check value of filter_young"); assert(!is_young(), "check value of filter_young");
// We can only clean the card here, after we make the decision that
// the card is not young. And we only clean the card if we have been
// asked to (i.e., card_ptr != NULL).
if (card_ptr != NULL) {
*card_ptr = CardTableModRefBS::clean_card_val();
// We must complete this write before we do any of the reads below.
OrderAccess::storeload();
}
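The StoreLoad fence is why the cleaning could move here: the clean store must reach memory before the scan performs any reads, so a mutator racing with the scan re-dirties the card after the clean and the card gets re-enqueued instead of being missed. Schematically (illustrative; dirty_card_for stands in for the post-write barrier):

// Refinement thread                    // Mutator thread
*card_ptr = clean_card_val();           obj->field = new_ref;  // ref store
OrderAccess::storeload();               dirty_card_for(obj);   // card store
// ... read objects on the card ...
// Any reference store the scan misses dirties a card that is already
// clean again, so the card is queued for another refinement pass.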
// We used to use "block_start_careful" here. But we're actually happy // We used to use "block_start_careful" here. But we're actually happy
// to update the BOT while we do this... // to update the BOT while we do this...
HeapWord* cur = block_start(mr.start()); HeapWord* cur = block_start(mr.start());

View File

@ -584,6 +584,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
// Reset HR stuff to default values. // Reset HR stuff to default values.
void hr_clear(bool par, bool clear_space); void hr_clear(bool par, bool clear_space);
void par_clear();
void initialize(MemRegion mr, bool clear_space, bool mangle_space); void initialize(MemRegion mr, bool clear_space, bool mangle_space);
@ -802,12 +803,16 @@ class HeapRegion: public G1OffsetTableContigSpace {
HeapWord* HeapWord*
object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl); object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
// In this version - if filter_young is true and the region // filter_young: if true and the region is a young region then we
// is a young region then we skip the iteration. // skip the iteration.
// card_ptr: if not NULL, and we decide that the card is not young
// and we iterate over it, we'll clean the card before we start the
// iteration.
HeapWord* HeapWord*
oops_on_card_seq_iterate_careful(MemRegion mr, oops_on_card_seq_iterate_careful(MemRegion mr,
FilterOutOfRegionClosure* cl, FilterOutOfRegionClosure* cl,
bool filter_young); bool filter_young,
jbyte* card_ptr);
// A version of block start that is guaranteed to find *some* block // A version of block start that is guaranteed to find *some* block
// boundary at or before "p", but does not object iteration, and may // boundary at or before "p", but does not object iteration, and may

View File

@ -224,6 +224,12 @@ void PSOldGen::expand(size_t bytes) {
const size_t alignment = virtual_space()->alignment(); const size_t alignment = virtual_space()->alignment();
size_t aligned_bytes = align_size_up(bytes, alignment); size_t aligned_bytes = align_size_up(bytes, alignment);
size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment); size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);
if (UseNUMA) {
// With NUMA we use round-robin page allocation for the old gen. Expand
// by enough to provide at least one page per lgroup. The alignment is
// greater than or equal to the page size.
aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
}
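A worked example of the new floor (illustrative numbers): with a 64K virtual-space alignment and four lgroups, the request becomes at least MAX2(aligned_expand_bytes, 4 * 64K) = 256K, enough for the round-robin allocator to hand each lgroup one aligned chunk.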
if (aligned_bytes == 0){ if (aligned_bytes == 0){
// The alignment caused the number of bytes to wrap. An expand_by(0) will // The alignment caused the number of bytes to wrap. An expand_by(0) will
// return true with the implication that an expansion was done when it // return true with the implication that an expansion was done when it

View File

@ -327,6 +327,7 @@ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle res
// 1. check if klass is not interface // 1. check if klass is not interface
if (resolved_klass->is_interface()) { if (resolved_klass->is_interface()) {
ResourceMark rm(THREAD);
char buf[200]; char buf[200];
jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected", Klass::cast(resolved_klass())->external_name()); jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected", Klass::cast(resolved_klass())->external_name());
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
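This and the later hunks in this file all add the same guard: a ResourceMark ahead of building the exception message, since external_name(), name_and_sig_as_C_string(), and as_C_string() return buffers allocated in the current thread's resource area, which needs a live mark to reclaim them. A minimal sketch of the idiom (hypothetical surrounding code; buf and THREAD as in the hunks):

{
  ResourceMark rm(THREAD);                     // open a resource-area frame
  const char* name = klass->external_name();   // allocated in that frame
  jio_snprintf(buf, sizeof(buf),
               "Found interface %s, but class was expected", name);
  // THROW_MSG copies the message; the frame is released when rm unwinds
}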
@ -413,6 +414,7 @@ void LinkResolver::resolve_interface_method(methodHandle& resolved_method,
// check if klass is interface // check if klass is interface
if (!resolved_klass->is_interface()) { if (!resolved_klass->is_interface()) {
ResourceMark rm(THREAD);
char buf[200]; char buf[200];
jio_snprintf(buf, sizeof(buf), "Found class %s, but interface was expected", Klass::cast(resolved_klass())->external_name()); jio_snprintf(buf, sizeof(buf), "Found class %s, but interface was expected", Klass::cast(resolved_klass())->external_name());
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
@ -534,6 +536,7 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo
// check for errors // check for errors
if (is_static != fd.is_static()) { if (is_static != fd.is_static()) {
ResourceMark rm(THREAD);
char msg[200]; char msg[200];
jio_snprintf(msg, sizeof(msg), "Expected %s field %s.%s", is_static ? "static" : "non-static", Klass::cast(resolved_klass())->external_name(), fd.name()->as_C_string()); jio_snprintf(msg, sizeof(msg), "Expected %s field %s.%s", is_static ? "static" : "non-static", Klass::cast(resolved_klass())->external_name(), fd.name()->as_C_string());
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), msg); THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), msg);
@ -631,6 +634,7 @@ void LinkResolver::linktime_resolve_static_method(methodHandle& resolved_method,
// check if static // check if static
if (!resolved_method->is_static()) { if (!resolved_method->is_static()) {
ResourceMark rm(THREAD);
char buf[200]; char buf[200];
jio_snprintf(buf, sizeof(buf), "Expected static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), jio_snprintf(buf, sizeof(buf), "Expected static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
resolved_method->name(), resolved_method->name(),
@ -671,6 +675,7 @@ void LinkResolver::linktime_resolve_special_method(methodHandle& resolved_method
// check if not static // check if not static
if (resolved_method->is_static()) { if (resolved_method->is_static()) {
ResourceMark rm(THREAD);
char buf[200]; char buf[200];
jio_snprintf(buf, sizeof(buf), jio_snprintf(buf, sizeof(buf),
"Expecting non-static method %s", "Expecting non-static method %s",
@ -717,6 +722,7 @@ void LinkResolver::runtime_resolve_special_method(CallInfo& result, methodHandle
// check if not static // check if not static
if (sel_method->is_static()) { if (sel_method->is_static()) {
ResourceMark rm(THREAD);
char buf[200]; char buf[200];
jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
resolved_method->name(), resolved_method->name(),
@ -757,6 +763,7 @@ void LinkResolver::linktime_resolve_virtual_method(methodHandle &resolved_method
// check if not static // check if not static
if (resolved_method->is_static()) { if (resolved_method->is_static()) {
ResourceMark rm(THREAD);
char buf[200]; char buf[200];
jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
resolved_method->name(), resolved_method->name(),
@ -873,6 +880,7 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
// check if receiver klass implements the resolved interface // check if receiver klass implements the resolved interface
if (!recv_klass->is_subtype_of(resolved_klass())) { if (!recv_klass->is_subtype_of(resolved_klass())) {
ResourceMark rm(THREAD);
char buf[200]; char buf[200];
jio_snprintf(buf, sizeof(buf), "Class %s does not implement the requested interface %s", jio_snprintf(buf, sizeof(buf), "Class %s does not implement the requested interface %s",
(Klass::cast(recv_klass()))->external_name(), (Klass::cast(recv_klass()))->external_name(),

View File

@ -652,43 +652,37 @@ void CardTableModRefBS::verify() {
} }
#ifndef PRODUCT #ifndef PRODUCT
class GuaranteeNotModClosure: public MemRegionClosure { void CardTableModRefBS::verify_region(MemRegion mr,
CardTableModRefBS* _ct; jbyte val, bool val_equals) {
public: jbyte* start = byte_for(mr.start());
GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {} jbyte* end = byte_for(mr.last());
void do_MemRegion(MemRegion mr) { bool failures = false;
jbyte* entry = _ct->byte_for(mr.start()); for (jbyte* curr = start; curr <= end; ++curr) {
guarantee(*entry != CardTableModRefBS::clean_card, jbyte curr_val = *curr;
"Dirty card in region that should be clean"); bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
if (failed) {
if (!failures) {
tty->cr();
tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]");
tty->print_cr("== %sexpecting value: %d",
(val_equals) ? "" : "not ", val);
failures = true;
}
tty->print_cr("== card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
"val: %d", curr, addr_for(curr),
(HeapWord*) (((size_t) addr_for(curr)) + card_size),
(int) curr_val);
}
} }
}; guarantee(!failures, "there should not have been any failures");
void CardTableModRefBS::verify_clean_region(MemRegion mr) {
GuaranteeNotModClosure blk(this);
non_clean_card_iterate_serial(mr, &blk);
} }
// To verify a MemRegion is entirely dirty this closure is passed to void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
// dirty_card_iterate. If the region is dirty do_MemRegion will be verify_region(mr, dirty_card, false /* val_equals */);
// invoked only once with a MemRegion equal to the one being }
// verified.
class GuaranteeDirtyClosure: public MemRegionClosure {
CardTableModRefBS* _ct;
MemRegion _mr;
bool _result;
public:
GuaranteeDirtyClosure(CardTableModRefBS* ct, MemRegion mr)
: _ct(ct), _mr(mr), _result(false) {}
void do_MemRegion(MemRegion mr) {
_result = _mr.equals(mr);
}
bool result() const { return _result; }
};
void CardTableModRefBS::verify_dirty_region(MemRegion mr) { void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
GuaranteeDirtyClosure blk(this, mr); verify_region(mr, dirty_card, true /* val_equals */);
dirty_card_iterate(mr, &blk);
guarantee(blk.result(), "Non-dirty cards in region that should be dirty");
} }
#endif #endif

View File

@ -475,7 +475,10 @@ public:
void verify(); void verify();
void verify_guard(); void verify_guard();
void verify_clean_region(MemRegion mr) PRODUCT_RETURN; // val_equals -> it will check that all cards covered by mr equal val
// !val_equals -> it will check that all cards covered by mr do not equal val
void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
void verify_dirty_region(MemRegion mr) PRODUCT_RETURN; void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
static size_t par_chunk_heapword_alignment() { static size_t par_chunk_heapword_alignment() {

View File

@ -265,8 +265,6 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
always_do_update_barrier = UseConcMarkSweepGC; always_do_update_barrier = UseConcMarkSweepGC;
BlockOffsetArrayUseUnallocatedBlock =
BlockOffsetArrayUseUnallocatedBlock || ParallelGCThreads > 0;
// Check validity of heap flags // Check validity of heap flags
assert(OldSize % min_alignment() == 0, "old space alignment"); assert(OldSize % min_alignment() == 0, "old space alignment");

View File

@ -100,12 +100,6 @@ public:
// Pass along the argument to the superclass. // Pass along the argument to the superclass.
ModRefBarrierSet(int max_covered_regions) : ModRefBarrierSet(int max_covered_regions) :
BarrierSet(max_covered_regions) {} BarrierSet(max_covered_regions) {}
#ifndef PRODUCT
// Verifies that the given region contains no modified references.
virtual void verify_clean_region(MemRegion mr) = 0;
#endif
}; };
#endif // SHARE_VM_MEMORY_MODREFBARRIERSET_HPP #endif // SHARE_VM_MEMORY_MODREFBARRIERSET_HPP

View File

@ -1423,6 +1423,11 @@ void Arguments::set_parallel_gc_flags() {
} }
} }
} }
if (UseNUMA) {
if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
}
}
} }
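The practical effect: with -XX:+UseNUMA and no explicit MinHeapDeltaBytes, heap resizes now proceed in 64M steps, complementing the old-gen expansion floor added earlier in this commit so each lgroup's share of a resize stays meaningful. A hypothetical invocation (App is a placeholder):

java -XX:+UseParallelGC -XX:+UseNUMA -Xms512m -Xmx4g App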
void Arguments::set_g1_gc_flags() { void Arguments::set_g1_gc_flags() {
@ -2376,7 +2381,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
_gc_log_filename = strdup(tail); _gc_log_filename = strdup(tail);
FLAG_SET_CMDLINE(bool, PrintGC, true); FLAG_SET_CMDLINE(bool, PrintGC, true);
FLAG_SET_CMDLINE(bool, PrintGCTimeStamps, true); FLAG_SET_CMDLINE(bool, PrintGCTimeStamps, true);
FLAG_SET_CMDLINE(bool, TraceClassUnloading, true);
// JNI hooks // JNI hooks
} else if (match_option(option, "-Xcheck", &tail)) { } else if (match_option(option, "-Xcheck", &tail)) {

View File

@ -1827,7 +1827,7 @@ class CommandLineFlags {
develop(bool, VerifyBlockOffsetArray, false, \ develop(bool, VerifyBlockOffsetArray, false, \
"Do (expensive!) block offset array verification") \ "Do (expensive!) block offset array verification") \
\ \
product(bool, BlockOffsetArrayUseUnallocatedBlock, false, \ diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \
"Maintain _unallocated_block in BlockOffsetArray" \ "Maintain _unallocated_block in BlockOffsetArray" \
" (currently applicable only to CMS collector)") \ " (currently applicable only to CMS collector)") \
\ \

View File

@ -274,7 +274,7 @@ class os: AllStatic {
static char* reserve_memory_special(size_t size, char* addr = NULL, static char* reserve_memory_special(size_t size, char* addr = NULL,
bool executable = false); bool executable = false);
static bool release_memory_special(char* addr, size_t bytes); static bool release_memory_special(char* addr, size_t bytes);
static bool large_page_init(); static void large_page_init();
static size_t large_page_size(); static size_t large_page_size();
static bool can_commit_large_page_memory(); static bool can_commit_large_page_memory();
static bool can_execute_large_page_memory(); static bool can_execute_large_page_memory();