Merge
commit 40f65439e2
@@ -112,18 +112,37 @@ public:
 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
  private:
-  size_t _num_processed;
+  size_t _num_dirtied;
+  G1CollectedHeap* _g1h;
+  G1SATBCardTableLoggingModRefBS* _g1_bs;
+
+  HeapRegion* region_for_card(jbyte* card_ptr) const {
+    return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
+  }
+
+  bool will_become_free(HeapRegion* hr) const {
+    // A region will be freed by free_collection_set if the region is in the
+    // collection set and has not had an evacuation failure.
+    return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
+  }
 
  public:
-  RedirtyLoggedCardTableEntryClosure() : CardTableEntryClosure(), _num_processed(0) { }
+  RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
+    _num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }
 
   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
-    *card_ptr = CardTableModRefBS::dirty_card_val();
-    _num_processed++;
+    HeapRegion* hr = region_for_card(card_ptr);
+
+    // Should only dirty cards in regions that won't be freed.
+    if (!will_become_free(hr)) {
+      *card_ptr = CardTableModRefBS::dirty_card_val();
+      _num_dirtied++;
+    }
 
     return true;
   }
 
-  size_t num_processed() const { return _num_processed; }
+  size_t num_dirtied() const { return _num_dirtied; }
 };
 
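Note: the hunk above changes the redirtying closure to skip cards whose region is about to be freed by free_collection_set, and to report cards actually dirtied instead of cards merely processed. A minimal standalone model of that filter (simplified stand-in types, not HotSpot code):

#include <cstdint>
#include <iostream>
#include <vector>

struct Region {              // stand-in for HeapRegion
  bool in_cset;              // in the collection set this pause?
  bool evac_failed;          // did evacuation fail for it?
};

// Mirrors RedirtyLoggedCardTableEntryClosure::will_become_free().
static bool will_become_free(const Region& r) {
  return r.in_cset && !r.evac_failed;
}

int main() {
  const uint8_t kDirty = 0;                 // stand-in for dirty_card_val()
  std::vector<uint8_t> cards(4, 0xFF);      // one card per region, all clean
  Region regions[4] = {{false, false}, {true, false}, {true, true}, {false, true}};

  size_t num_dirtied = 0;
  for (size_t i = 0; i < 4; i++) {
    if (!will_become_free(regions[i])) {    // skip regions about to be freed
      cards[i] = kDirty;
      num_dirtied++;
    }
  }
  std::cout << "dirtied " << num_dirtied << " of 4 cards\n";  // prints: dirtied 3 of 4 cards
  return 0;
}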
@@ -2268,15 +2287,21 @@ size_t G1CollectedHeap::recalculate_used() const {
   return blk.result();
 }
 
+bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
+  switch (cause) {
+    case GCCause::_java_lang_system_gc:                 return ExplicitGCInvokesConcurrent;
+    case GCCause::_dcmd_gc_run:                         return ExplicitGCInvokesConcurrent;
+    case GCCause::_update_allocation_context_stats_inc: return true;
+    case GCCause::_wb_conc_mark:                        return true;
+    default :                                           return false;
+  }
+}
+
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   switch (cause) {
     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
-    case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
-    case GCCause::_dcmd_gc_run:             return ExplicitGCInvokesConcurrent;
     case GCCause::_g1_humongous_allocation: return true;
-    case GCCause::_update_allocation_context_stats_inc: return true;
-    case GCCause::_wb_conc_mark:            return true;
-    default:                                return false;
+    default:                                return is_user_requested_concurrent_full_gc(cause);
   }
 }
 
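Note: the point of the hunk above is that should_do_concurrent_full_gc now handles only its own cases and forwards every remaining cause to the new is_user_requested_concurrent_full_gc, so the two predicates share one source of truth. A compilable sketch of that delegation shape (hypothetical enum and flag values, not the real GCCause or globals):

#include <iostream>

enum class Cause { gc_locker, java_lang_system_gc, dcmd_gc_run,
                   g1_humongous_allocation, wb_conc_mark, allocation_failure };

// Example flag values for the demo; the real globals default differently.
const bool GCLockerInvokesConcurrent   = false;
const bool ExplicitGCInvokesConcurrent = true;

bool is_user_requested_concurrent_full_gc(Cause c) {
  switch (c) {
    case Cause::java_lang_system_gc: return ExplicitGCInvokesConcurrent;
    case Cause::dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
    case Cause::wb_conc_mark:        return true;
    default:                         return false;
  }
}

bool should_do_concurrent_full_gc(Cause c) {
  switch (c) {
    case Cause::gc_locker:               return GCLockerInvokesConcurrent;
    case Cause::g1_humongous_allocation: return true;
    default: return is_user_requested_concurrent_full_gc(c);  // shared tail
  }
}

int main() {
  std::cout << should_do_concurrent_full_gc(Cause::java_lang_system_gc)   // 1
            << should_do_concurrent_full_gc(Cause::allocation_failure)    // 0
            << "\n";
  return 0;
}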
@@ -4619,24 +4644,26 @@ void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive
 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
  private:
   DirtyCardQueueSet* _queue;
+  G1CollectedHeap* _g1h;
  public:
-  G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
+  G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
+    _queue(queue), _g1h(g1h) { }
 
   virtual void work(uint worker_id) {
-    G1GCPhaseTimes* phase_times = G1CollectedHeap::heap()->g1_policy()->phase_times();
+    G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
 
-    RedirtyLoggedCardTableEntryClosure cl;
+    RedirtyLoggedCardTableEntryClosure cl(_g1h);
     _queue->par_apply_closure_to_all_completed_buffers(&cl);
 
-    phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_processed());
+    phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
   }
 };
 
 void G1CollectedHeap::redirty_logged_cards() {
   double redirty_logged_cards_start = os::elapsedTime();
 
-  G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
+  G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
   dirty_card_queue_set().reset_for_par_iteration();
   workers()->run_task(&redirty_task);
 
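Note: a small illustration of the wiring change above (hypothetical types, not HotSpot code): the task now receives the heap at construction, mirroring `this` being passed in redirty_logged_cards(), instead of fetching the G1CollectedHeap::heap() singleton inside work().

#include <iostream>

struct Heap {                        // stand-in for G1CollectedHeap
  const char* name() const { return "g1"; }
};

class RedirtyTask {                  // stand-in for G1RedirtyLoggedCardsTask
  Heap* _heap;                       // injected, like _g1h in the patch
 public:
  explicit RedirtyTask(Heap* h) : _heap(h) { }
  void work() { std::cout << "redirtying on " << _heap->name() << "\n"; }
};

int main() {
  Heap heap;
  RedirtyTask task(&heap);           // mirrors passing `this` at the call site
  task.work();
  return 0;
}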
@@ -245,9 +245,11 @@ private:
   // instead of doing a STW GC. Currently, a concurrent cycle is
   // explicitly started if:
   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
-  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
-  // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
-  // (d) cause == _g1_humongous_allocation
+  // (b) cause == _g1_humongous_allocation
+  // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
+  // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
+  // (e) cause == _update_allocation_context_stats_inc
+  // (f) cause == _wb_conc_mark
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
   // indicates whether we are in young or mixed GC mode
@@ -579,6 +581,8 @@ public:
     _in_cset_fast_test.clear();
   }
 
+  bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
+
   // This is called at the start of either a concurrent cycle or a Full
   // GC to update the number of old marking cycles started.
   void increment_old_marking_cycles_started();
 
@@ -291,7 +291,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
   // for the first time during initialization.
   _reserve_regions = 0;
 
-  _collectionSetChooser = new CollectionSetChooser();
+  _cset_chooser = new CollectionSetChooser();
 }
 
 G1CollectorPolicy::~G1CollectorPolicy() {
@@ -854,7 +854,7 @@ void G1CollectorPolicy::record_full_collection_end() {
   _survivor_surv_rate_group->reset();
   update_young_list_max_and_target_length();
   update_rs_lengths_prediction();
-  _collectionSetChooser->clear();
+  cset_chooser()->clear();
 
   _bytes_allocated_in_old_since_last_gc = 0;
 
@@ -1237,7 +1237,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
                               phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
                               update_rs_time_goal_ms);
 
-  _collectionSetChooser->verify();
+  cset_chooser()->verify();
 }
 
 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
@@ -1710,6 +1710,11 @@ bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_ca
   }
 }
 
+void G1CollectorPolicy::initiate_conc_mark() {
+  collector_state()->set_during_initial_mark_pause(true);
+  collector_state()->set_initiate_conc_mark_if_possible(false);
+}
+
 void G1CollectorPolicy::decide_on_conc_mark_initiation() {
   // We are about to decide on whether this pause will be an
   // initial-mark pause.
@@ -1726,17 +1731,22 @@ void G1CollectorPolicy::decide_on_conc_mark_initiation() {
     // concurrent marking cycle. So we might initiate one.
 
     if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
-      // Initiate a new initial mark only if there is no marking or reclamation going
-      // on.
-
-      collector_state()->set_during_initial_mark_pause(true);
-      // And we can now clear initiate_conc_mark_if_possible() as
-      // we've already acted on it.
-      collector_state()->set_initiate_conc_mark_if_possible(false);
-
+      // Initiate a new initial mark if there is no marking or reclamation going on.
+      initiate_conc_mark();
       ergo_verbose0(ErgoConcCycles,
-                    "initiate concurrent cycle",
-                    ergo_format_reason("concurrent cycle initiation requested"));
+                    "initiate concurrent cycle",
+                    ergo_format_reason("concurrent cycle initiation requested"));
+    } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
+      // Initiate a user requested initial mark. An initial mark must be young only
+      // GC, so the collector state must be updated to reflect this.
+      collector_state()->set_gcs_are_young(true);
+      collector_state()->set_last_young_gc(false);
+
+      abort_time_to_mixed_tracking();
+      initiate_conc_mark();
+      ergo_verbose0(ErgoConcCycles,
+                    "initiate concurrent cycle",
+                    ergo_format_reason("user requested concurrent cycle"));
     } else {
       // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
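Note: a compressed model of the state changes above (hypothetical struct, not the real CollectorState): initiate_conc_mark() centralizes the two flag updates, and the new user-requested path first forces young-only mode because an initial-mark pause is always a young GC.

#include <cassert>

struct State {                       // stand-in for CollectorState
  bool gcs_are_young;
  bool last_young_gc;
  bool during_initial_mark_pause;
  bool initiate_conc_mark_if_possible;
};

// Shared tail used by both initiation paths, as in the patch.
void initiate_conc_mark(State& s) {
  s.during_initial_mark_pause = true;
  s.initiate_conc_mark_if_possible = false;   // request has been acted on
}

void user_requested_initial_mark(State& s) {
  s.gcs_are_young = true;            // initial mark must be a young-only GC
  s.last_young_gc = false;
  initiate_conc_mark(s);
}

int main() {
  State s = {false, true, false, true};       // mid mixed phase, request pending
  user_requested_initial_mark(s);
  assert(s.gcs_are_young && s.during_initial_mark_pause);
  assert(!s.initiate_conc_mark_if_possible);
  return 0;
}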
@@ -1807,18 +1817,18 @@ uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint
 }
 
 void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
-  _collectionSetChooser->clear();
+  cset_chooser()->clear();
 
   WorkGang* workers = _g1->workers();
   uint n_workers = workers->active_workers();
 
   uint n_regions = _g1->num_regions();
   uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
-  _collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
-  ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
+  cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
+  ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers);
   workers->run_task(&par_known_garbage_task);
 
-  _collectionSetChooser->sort_regions();
+  cset_chooser()->sort_regions();
 
   double end_sec = os::elapsedTime();
   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
@@ -2097,8 +2107,7 @@ void G1CollectorPolicy::abort_time_to_mixed_tracking() {
 
 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                 const char* false_action_str) const {
-  CollectionSetChooser* cset_chooser = _collectionSetChooser;
-  if (cset_chooser->is_empty()) {
+  if (cset_chooser()->is_empty()) {
     ergo_verbose0(ErgoMixedGCs,
                   false_action_str,
                   ergo_format_reason("candidate old regions not available"));
@@ -2106,7 +2115,7 @@ bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
   }
 
   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
-  size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
+  size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
   double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
   double threshold = (double) G1HeapWastePercent;
   if (reclaimable_perc <= threshold) {
@@ -2116,7 +2125,7 @@ bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                   ergo_format_region("candidate old regions")
                   ergo_format_byte_perc("reclaimable")
                   ergo_format_perc("threshold"),
-                  cset_chooser->remaining_regions(),
+                  cset_chooser()->remaining_regions(),
                   reclaimable_bytes,
                   reclaimable_perc, threshold);
     return false;
@@ -2128,7 +2137,7 @@ bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                   ergo_format_region("candidate old regions")
                   ergo_format_byte_perc("reclaimable")
                   ergo_format_perc("threshold"),
-                  cset_chooser->remaining_regions(),
+                  cset_chooser()->remaining_regions(),
                   reclaimable_bytes,
                   reclaimable_perc, threshold);
     return true;
@@ -2145,7 +2154,7 @@ uint G1CollectorPolicy::calc_min_old_cset_length() const {
   // to the CSet chooser in the first place, not how many remain, so
   // that the result is the same during all mixed GCs that follow a cycle.
 
-  const size_t region_num = (size_t) _collectionSetChooser->length();
+  const size_t region_num = (size_t) cset_chooser()->length();
   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
   size_t result = region_num / gc_num;
   // emulate ceiling
@@ -2254,15 +2263,14 @@ void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
 
 
   if (!collector_state()->gcs_are_young()) {
-    CollectionSetChooser* cset_chooser = _collectionSetChooser;
-    cset_chooser->verify();
+    cset_chooser()->verify();
     const uint min_old_cset_length = calc_min_old_cset_length();
     const uint max_old_cset_length = calc_max_old_cset_length();
 
     uint expensive_region_num = 0;
     bool check_time_remaining = adaptive_young_list_length();
 
-    HeapRegion* hr = cset_chooser->peek();
+    HeapRegion* hr = cset_chooser()->peek();
     while (hr != NULL) {
       if (old_cset_region_length() >= max_old_cset_length) {
         // Added maximum number of old regions to the CSet.
@@ -2278,7 +2286,7 @@ void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
 
       // Stop adding regions if the remaining reclaimable space is
       // not above G1HeapWastePercent.
-      size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
+      size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
       double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
       double threshold = (double) G1HeapWastePercent;
       if (reclaimable_perc <= threshold) {
@@ -2340,11 +2348,11 @@ void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
       // We will add this region to the CSet.
       time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
       predicted_old_time_ms += predicted_time_ms;
-      cset_chooser->pop(); // already have region via peek()
+      cset_chooser()->pop(); // already have region via peek()
       _g1->old_set_remove(hr);
       add_old_region_to_cset(hr);
 
-      hr = cset_chooser->peek();
+      hr = cset_chooser()->peek();
     }
     if (hr == NULL) {
       ergo_verbose0(ErgoCSetConstruction,
@@ -2369,7 +2377,7 @@ void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
                     time_remaining_ms);
     }
 
-    cset_chooser->verify();
+    cset_chooser()->verify();
   }
 
   stop_incremental_cset_building();
 
@@ -191,7 +191,7 @@ class G1CollectorPolicy: public CollectorPolicy {
   void initialize_alignments();
   void initialize_flags();
 
-  CollectionSetChooser* _collectionSetChooser;
+  CollectionSetChooser* _cset_chooser;
 
   double _full_collection_start_sec;
 
@@ -405,6 +405,10 @@ protected:
   double non_young_other_time_ms() const;
   double constant_other_time_ms(double pause_time_ms) const;
 
+  CollectionSetChooser* cset_chooser() const {
+    return _cset_chooser;
+  }
+
 private:
   // Statistics kept per GC stoppage, pause or full.
   TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
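Note: a trivial sketch of the shape the rename settles on (hypothetical class name, not HotSpot code): one backing field behind a protected const accessor, so member code and subclasses share a single access path.

class CollectionSetChooser;                // pointer use only; no definition needed

class PolicyLike {                         // stand-in for G1CollectorPolicy
  CollectionSetChooser* _cset_chooser;     // the single backing field
 protected:
  CollectionSetChooser* cset_chooser() const { return _cset_chooser; }
 public:
  PolicyLike() : _cset_chooser(nullptr) { }
};

int main() {
  PolicyLike p;                            // all reads would go through cset_chooser()
  (void)p;
  return 0;
}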
@@ -725,6 +729,11 @@ private:
   // (should not be called directly).
   void add_region_to_incremental_cset_common(HeapRegion* hr);
 
+  // Set the state to start a concurrent marking cycle and clear
+  // _initiate_conc_mark_if_possible because it has now been
+  // acted on.
+  void initiate_conc_mark();
+
 public:
   // Add hr to the LHS of the incremental collection set.
   void add_region_to_incremental_cset_lhs(HeapRegion* hr);
 
@@ -47,8 +47,9 @@ public:
   virtual void do_oop( oop* p) { do_oop_work(p); }
   template <class T> void do_oop_work(T* p) {
     assert(_from->is_in_reserved(p), "paranoia");
-    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
-        !_from->is_survivor()) {
+    assert(!_from->is_survivor(), "Unexpected evac failure in survivor region");
+
+    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p))) {
       size_t card_index = _ct_bs->index_for(p);
       if (_ct_bs->mark_card_deferred(card_index)) {
         _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
 
@@ -141,6 +141,7 @@ void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
   assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads");
   _active_gc_threads = active_gc_threads;
   _cur_expand_heap_time_ms = 0.0;
+  _external_accounted_time_ms = 0.0;
 
   for (int i = 0; i < GCParPhasesSentinel; i++) {
     _gc_par_phases[i]->reset();
@@ -185,9 +186,12 @@ void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint
 }
 
 double G1GCPhaseTimes::accounted_time_ms() {
+    // First subtract any externally accounted time
+    double misc_time_ms = _external_accounted_time_ms;
+
     // Subtract the root region scanning wait time. It's initialized to
     // zero at the start of the pause.
-    double misc_time_ms = _root_region_scan_wait_time_ms;
+    misc_time_ms += _root_region_scan_wait_time_ms;
 
     misc_time_ms += _cur_collection_par_time_ms;
 
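Note: a minimal model of the new bookkeeping above (hypothetical names and timings, not HotSpot code): externally accounted time is reset in note_gc_start(), accumulated via inc_external_accounted_time_ms(), and folded into accounted_time_ms() so it is no longer absorbed into the unexplained remainder of the pause.

#include <iostream>

struct PhaseTimes {
  double external_accounted_ms    = 0.0;
  double root_region_scan_wait_ms = 2.5;    // example values
  double collection_par_ms        = 40.0;

  void note_gc_start() { external_accounted_ms = 0.0; }     // reset per pause
  void inc_external_accounted_time_ms(double ms) { external_accounted_ms += ms; }

  double accounted_time_ms() const {
    double misc = external_accounted_ms;    // first, externally accounted time
    misc += root_region_scan_wait_ms;       // then the usual components
    misc += collection_par_ms;
    return misc;
  }
};

int main() {
  PhaseTimes pt;
  pt.note_gc_start();
  pt.inc_external_accounted_time_ms(1.25);  // e.g. work done on the pause's behalf
  std::cout << pt.accounted_time_ms() << " ms\n";   // prints: 43.75 ms
  return 0;
}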
@@ -99,6 +99,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
   double _cur_collection_start_sec;
   double _root_region_scan_wait_time_ms;
 
+  double _external_accounted_time_ms;
+
   double _recorded_young_cset_choice_time_ms;
   double _recorded_non_young_cset_choice_time_ms;
 
@@ -244,6 +246,10 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
     _cur_verify_after_time_ms = time_ms;
   }
 
+  void inc_external_accounted_time_ms(double time_ms) {
+    _external_accounted_time_ms += time_ms;
+  }
+
   double accounted_time_ms();
 
   double cur_collection_start_sec() {
@@ -91,7 +91,7 @@ inline void G1ParScanClosure::do_oop_nv(T* p) {
       if (state.is_humongous()) {
         _g1->set_humongous_is_live(obj);
       }
-      _par_scan_state->update_rs(_from, p);
+      _par_scan_state->update_rs(_from, p, obj);
     }
   }
 }
@@ -98,10 +98,10 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
 
   template <class T> void push_on_queue(T* ref);
 
-  template <class T> void update_rs(HeapRegion* from, T* p) {
+  template <class T> void update_rs(HeapRegion* from, T* p, oop o) {
     // If the new value of the field points to the same region or
     // is the to-space, we don't need to include it in the Rset updates.
-    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
+    if (!HeapRegion::is_in_same_region(p, o) && !from->is_young()) {
       size_t card_index = ctbs()->index_for(p);
       // If the card hasn't been added to the buffer, do it.
       if (ctbs()->mark_card_deferred(card_index)) {
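Note: a sketch of why update_rs grows an oop parameter (simplified model types, not HotSpot code): callers such as do_oop_evac already hold the decoded object, so passing it along lets update_rs test the regions directly instead of decoding the compressed reference a second time.

#include <cstdint>
#include <iostream>

using Oop       = std::uintptr_t;   // decoded object address (model)
using NarrowRef = std::uint32_t;    // compressed field contents (model)

Oop decode(NarrowRef r) { return (Oop)r << 3; }   // pretend decompression is costly

// Old shape: re-decodes the field to find the referenced object.
void update_rs_old(NarrowRef* p) {
  Oop o = decode(*p);
  std::cout << "old: " << o << "\n";
}

// New shape: the caller hands over the oop it already decoded.
void update_rs_new(NarrowRef* p, Oop o) {
  (void)p;
  std::cout << "new: " << o << "\n";
}

int main() {
  NarrowRef field = 42;
  Oop obj = decode(field);          // the caller needed the oop anyway
  update_rs_old(&field);            // decodes a second time internally
  update_rs_new(&field, obj);       // reuses the existing value
  return 0;
}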
@@ -56,7 +56,7 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from
   }
 
   assert(obj != NULL, "Must be");
-  update_rs(from, p);
+  update_rs(from, p, obj);
 }
 
 template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
@@ -85,11 +85,6 @@ bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
     return false;
   }
 
-  if (val == g1_young_gen) {
-    // the card is for a young gen region. We don't need to keep track of all pointers into young
-    return false;
-  }
-
   // Cached bit can be installed either on a clean card or on a claimed card.
   jbyte new_val = val;
   if (val == clean_card_val()) {
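Note: the young-gen early-out above can go away because the caller-side filter (the update_rs change earlier in this commit) now skips young regions before any card is examined. A simplified model of the remaining deferred-marking logic (illustrative card values, not the real G1 encoding):

#include <cassert>

// Illustrative card values; the real encoding differs.
enum CardVal : signed char { clean_card = -1, dirty_card = 0, deferred_card = 16 };

// Young-region cards no longer reach this point: callers filter them first,
// so the function keeps only the "already deferred?" check.
bool mark_card_deferred(signed char& card) {
  if (card == deferred_card) {
    return false;                   // already queued for deferred processing
  }
  card = deferred_card;
  return true;
}

int main() {
  signed char c = clean_card;
  assert(mark_card_deferred(c));    // first marking succeeds
  assert(!mark_card_deferred(c));   // second is a no-op
  return 0;
}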
@@ -351,6 +351,15 @@ class HeapRegion: public G1OffsetTableContigSpace {
            ~((1 << (size_t) LogOfHRGrainBytes) - 1);
   }
 
+
+  // Returns whether a field is in the same region as the obj it points to.
+  template <typename T>
+  static bool is_in_same_region(T* p, oop obj) {
+    assert(p != NULL, "p can't be NULL");
+    assert(obj != NULL, "obj can't be NULL");
+    return (((uintptr_t) p ^ cast_from_oop<uintptr_t>(obj)) >> LogOfHRGrainBytes) == 0;
+  }
+
   static size_t max_region_size();
   static size_t min_region_size_in_words();
 
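Note: a standalone check of the XOR trick in is_in_same_region above: two addresses lie in the same 2^k-byte aligned region exactly when their bits above bit k agree, i.e. (a ^ b) >> k == 0. The addresses below are made up for the demo.

#include <cassert>
#include <cstdint>

int main() {
  const unsigned kLogRegionBytes = 20;                 // 1 MB regions, for example
  std::uintptr_t region_base = 0x40000000;             // some 1 MB aligned address
  std::uintptr_t p     = region_base + 0x1234;         // field address
  std::uintptr_t obj   = region_base + 0xFFFF0;        // object in the same region
  std::uintptr_t other = region_base + (1u << kLogRegionBytes) + 8;  // next region

  assert(((p ^ obj)   >> kLogRegionBytes) == 0);       // same region
  assert(((p ^ other) >> kLogRegionBytes) != 0);       // different regions
  return 0;
}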
@@ -163,9 +163,9 @@ public class IgnoreUnrecognizedVMOptions {
       -IgnoreUnrecognizedVMOptions ERR   ERR
       +IgnoreUnrecognizedVMOptions ERR   ERR
     */
-    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:-UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:-UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
-    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:-UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:-UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
-    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:+UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:+UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
-    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:+UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:+UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
   }
 }