Jesper Wilhelmsson 2015-03-16 18:13:35 +01:00
commit e7a218b4b6
41 changed files with 855 additions and 637 deletions

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -186,7 +186,7 @@ HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
cp->space->set_compaction_top(compact_top);
cp->space = cp->space->next_compaction_space();
if (cp->space == NULL) {
cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
cp->gen = GenCollectedHeap::heap()->young_gen();
assert(cp->gen != NULL, "compaction must succeed");
cp->space = cp->gen->first_compaction_space();
assert(cp->space != NULL, "generation must have a first compaction space");
@ -900,7 +900,6 @@ void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
}
}
// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a

View File

@ -396,6 +396,10 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Resizing support
void set_end(HeapWord* value); // override
// Never mangle CompactibleFreeListSpace
void mangle_unused_area() {}
void mangle_unused_area_complete() {}
// Mutual exclusion support
Mutex* freelistLock() const { return &_freelistLock; }

View File

@ -369,7 +369,7 @@ void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
double CMSStats::time_until_cms_gen_full() const {
size_t cms_free = _cms_gen->cmsSpace()->free();
GenCollectedHeap* gch = GenCollectedHeap::heap();
size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
(size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
if (cms_free > expected_promotion) {
// Start a cms collection if there isn't enough space to promote
@ -626,8 +626,8 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
// Support for parallelizing young gen rescan
GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(gch->prev_gen(_cmsGen)->kind() == Generation::ParNew, "CMS can only be used with ParNew");
_young_gen = (ParNewGeneration*)gch->prev_gen(_cmsGen);
assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
_young_gen = (ParNewGeneration*)gch->young_gen();
if (gch->supports_inline_contig_alloc()) {
_top_addr = gch->top_addr();
_end_addr = gch->end_addr();
@ -869,7 +869,7 @@ void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
if (prev_level >= 0) {
size_t prev_size = 0;
GenCollectedHeap* gch = GenCollectedHeap::heap();
Generation* prev_gen = gch->get_gen(prev_level);
Generation* prev_gen = gch->young_gen();
prev_size = prev_gen->capacity();
gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
prev_size/1000);
@ -1049,11 +1049,8 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
// expand and retry
size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
// Since there's currently no next generation, we don't try to promote
// Since this is the old generation, we don't try to promote
// into a more senior generation.
assert(next_gen() == NULL, "assumption, based upon which no attempt "
"is made to pass on a possibly failing "
"promotion to next generation");
res = _cmsSpace->promote(obj, obj_size);
}
if (res != NULL) {
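
A recurring theme in this changeset (here in the CMS code, and again in the ParNew changes further down) is that callers stop navigating generations by level via get_gen()/prev_gen()/next_gen() and instead ask the heap for young_gen() or old_gen() directly. The following is a minimal standalone sketch of that accessor idea, not HotSpot code; TwoGenHeap, Generation and the method bodies are simplified stand-ins invented for illustration.

// Standalone sketch (not part of this changeset): a heap that exposes its two
// generations by name instead of by level index.
#include <cassert>
#include <cstdio>

struct Generation {
  const char* _name;
  explicit Generation(const char* name) : _name(name) {}
  const char* name() const { return _name; }
};

class TwoGenHeap {
  Generation _young;
  Generation _old;
public:
  TwoGenHeap() : _young("young"), _old("old") {}
  Generation* young_gen() { return &_young; }
  Generation* old_gen()   { return &_old; }
};

int main() {
  TwoGenHeap heap;
  // A collector that previously asked for "the next generation by level"
  // now names the generation it wants explicitly.
  Generation* promotion_target = heap.old_gen();
  assert(promotion_target != nullptr);
  std::printf("promote into: %s\n", promotion_target->name());
  return 0;
}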

View File

@ -2206,11 +2206,11 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
int n_completed_buffers = 0;
size_t n_completed_buffers = 0;
while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
n_completed_buffers++;
}
g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
dcqs.clear_n_completed_buffers();
assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}
@ -3751,9 +3751,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
int active_workers = workers()->active_workers();
uint active_workers = workers()->active_workers();
double pause_start_sec = os::elapsedTime();
g1_policy()->phase_times()->note_gc_start(active_workers);
g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
log_gc_header();
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
@ -4486,8 +4486,7 @@ public:
void work(uint worker_id) {
if (worker_id >= _n_workers) return; // no work needed this round
double start_time_ms = os::elapsedTime() * 1000.0;
_g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
_g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, os::elapsedTime());
{
ResourceMark rm;
@ -4567,10 +4566,11 @@ public:
double start = os::elapsedTime();
G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
evac.do_void();
double elapsed_ms = (os::elapsedTime()-start)*1000.0;
double term_ms = pss.term_time()*1000.0;
_g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
_g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
double elapsed_sec = os::elapsedTime() - start;
double term_sec = pss.term_time();
_g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
_g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
_g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, pss.term_attempts());
}
_g1h->g1_policy()->record_thread_age_table(pss.age_table());
_g1h->update_surviving_young_words(pss.surviving_young_words()+1);
@ -4586,9 +4586,7 @@ public:
// destructors are executed here and are included as part of the
// "GC Worker Time".
}
double end_time_ms = os::elapsedTime() * 1000.0;
_g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
_g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
}
};
@ -4650,27 +4648,20 @@ g1_process_roots(OopClosure* scan_non_heap_roots,
double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
+ buf_scan_non_heap_weak_roots.closure_app_seconds();
g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, obj_copy_time_sec);
double ext_root_time_ms =
((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
double ext_root_time_sec = os::elapsedTime() - ext_roots_start - obj_copy_time_sec;
g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::ExtRootScan, worker_i, ext_root_time_sec);
// During conc marking we have to filter the per-thread SATB buffers
// to make sure we remove any oops into the CSet (which will show up
// as implicitly live).
double satb_filtering_ms = 0.0;
if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
if (mark_in_progress()) {
double satb_filter_start = os::elapsedTime();
{
G1GCParPhaseTimesTracker x(g1_policy()->phase_times(), G1GCPhaseTimes::SATBFiltering, worker_i);
if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers) && mark_in_progress()) {
JavaThread::satb_mark_queue_set().filter_thread_buffers();
satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
}
}
g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
// Now scan the complement of the collection set.
G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
@ -5073,14 +5064,13 @@ class G1RedirtyLoggedCardsTask : public AbstractGangTask {
G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
virtual void work(uint worker_id) {
double start_time = os::elapsedTime();
G1GCPhaseTimes* phase_times = G1CollectedHeap::heap()->g1_policy()->phase_times();
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
RedirtyLoggedCardTableEntryClosure cl;
_queue->par_apply_closure_to_all_completed_buffers(&cl);
G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_processed());
}
};
@ -5658,12 +5648,14 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
// reported parallel time.
}
G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
g1_policy()->phase_times()->record_par_time(par_time_ms);
phase_times->record_par_time(par_time_ms);
double code_root_fixup_time_ms =
(os::elapsedTime() - end_par_time_sec) * 1000.0;
g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
set_par_threads(0);
@ -5675,9 +5667,14 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
process_discovered_references(n_workers);
if (G1StringDedup::is_enabled()) {
double fixup_start = os::elapsedTime();
G1STWIsAliveClosure is_alive(this);
G1KeepAliveClosure keep_alive(this);
G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, phase_times);
double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
phase_times->record_string_dedup_fixup_time(fixup_time_ms);
}
_allocator->release_gc_alloc_regions(n_workers, evacuation_info);

View File

@ -1118,6 +1118,10 @@ public:
// The number of regions that are completely free.
uint num_free_regions() const { return _hrm.num_free_regions(); }
MemoryUsage get_auxiliary_data_memory_usage() const {
return _hrm.get_auxiliary_data_memory_usage();
}
// The number of regions that are not completely free.
uint num_used_regions() const { return num_regions() - num_free_regions(); }

View File

@ -1073,7 +1073,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
if (update_stats) {
double cost_per_card_ms = 0.0;
if (_pending_cards > 0) {
cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
cost_per_card_ms = phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) / (double) _pending_cards;
_cost_per_card_ms_seq->add(cost_per_card_ms);
}
@ -1081,7 +1081,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
double cost_per_entry_ms = 0.0;
if (cards_scanned > 10) {
cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
cost_per_entry_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
if (_last_gc_was_young) {
_cost_per_entry_ms_seq->add(cost_per_entry_ms);
} else {
@ -1123,7 +1123,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
double cost_per_byte_ms = 0.0;
if (copied_bytes > 0) {
cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
cost_per_byte_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
if (_in_marking_window) {
_cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
} else {
@ -1132,8 +1132,8 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
}
double all_other_time_ms = pause_time_ms -
(phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
+ phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());
(phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) + phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) +
phase_times()->average_time_ms(G1GCPhaseTimes::ObjCopy) + phase_times()->average_time_ms(G1GCPhaseTimes::Termination));
double young_other_time_ms = 0.0;
if (young_cset_region_length() > 0) {
@ -1174,8 +1174,8 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
// Note that _mmu_tracker->max_gc_time() returns the time in seconds.
double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);
adjust_concurrent_refinement(phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS),
phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), update_rs_time_goal_ms);
_collectionSetChooser->verify();
}
@ -2114,19 +2114,19 @@ void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhas
_other.add(pause_time_ms - phase_times->accounted_time_ms());
_root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
_parallel.add(phase_times->cur_collection_par_time_ms());
_ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
_satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
_update_rs.add(phase_times->average_last_update_rs_time());
_scan_rs.add(phase_times->average_last_scan_rs_time());
_obj_copy.add(phase_times->average_last_obj_copy_time());
_termination.add(phase_times->average_last_termination_time());
_ext_root_scan.add(phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan));
_satb_filtering.add(phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering));
_update_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS));
_scan_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::ScanRS));
_obj_copy.add(phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy));
_termination.add(phase_times->average_time_ms(G1GCPhaseTimes::Termination));
double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
phase_times->average_last_satb_filtering_times_ms() +
phase_times->average_last_update_rs_time() +
phase_times->average_last_scan_rs_time() +
phase_times->average_last_obj_copy_time() +
+ phase_times->average_last_termination_time();
double parallel_known_time = phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan) +
phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering) +
phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS) +
phase_times->average_time_ms(G1GCPhaseTimes::ScanRS) +
phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy) +
phase_times->average_time_ms(G1GCPhaseTimes::Termination);
double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
_parallel_other.add(parallel_other_time);

View File

@ -22,13 +22,13 @@
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "runtime/atomic.inline.hpp"
#include "memory/allocation.hpp"
#include "runtime/os.hpp"
// Helper class for avoiding interleaved logging
class LineBuffer: public StackObj {
@ -71,184 +71,243 @@ public:
va_end(ap);
}
void print_cr() {
gclog_or_tty->print_cr("%s", _buffer);
_cur = _indent_level * INDENT_CHARS;
}
void append_and_print_cr(const char* format, ...) ATTRIBUTE_PRINTF(2, 3) {
va_list ap;
va_start(ap, format);
vappend(format, ap);
va_end(ap);
gclog_or_tty->print_cr("%s", _buffer);
_cur = _indent_level * INDENT_CHARS;
print_cr();
}
};
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
template <class T>
void WorkerDataArray<T>::print(int level, const char* title) {
if (_length == 1) {
// No need for min, max, average and sum for only one worker
LineBuffer buf(level);
buf.append("[%s: ", title);
buf.append(_print_format, _data[0]);
buf.append_and_print_cr("]");
return;
class WorkerDataArray : public CHeapObj<mtGC> {
friend class G1GCParPhasePrinter;
T* _data;
uint _length;
const char* _title;
bool _print_sum;
int _log_level;
uint _indent_level;
bool _enabled;
WorkerDataArray<size_t>* _thread_work_items;
NOT_PRODUCT(T uninitialized();)
// We are caching the sum and average to only have to calculate them once.
// This is not done in an MT-safe way. It is intended to allow single
// threaded code to call sum() and average() multiple times in any order
// without having to worry about the cost.
bool _has_new_data;
T _sum;
T _min;
T _max;
double _average;
public:
WorkerDataArray(uint length, const char* title, bool print_sum, int log_level, uint indent_level) :
_title(title), _length(0), _print_sum(print_sum), _log_level(log_level), _indent_level(indent_level),
_has_new_data(true), _thread_work_items(NULL), _enabled(true) {
assert(length > 0, "Must have some workers to store data for");
_length = length;
_data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
}
T min = _data[0];
T max = _data[0];
T sum = 0;
~WorkerDataArray() {
FREE_C_HEAP_ARRAY(T, _data);
}
LineBuffer buf(level);
buf.append("[%s:", title);
for (uint i = 0; i < _length; ++i) {
T val = _data[i];
min = MIN2(val, min);
max = MAX2(val, max);
sum += val;
if (G1Log::finest()) {
buf.append(" ");
buf.append(_print_format, val);
void link_thread_work_items(WorkerDataArray<size_t>* thread_work_items) {
_thread_work_items = thread_work_items;
}
WorkerDataArray<size_t>* thread_work_items() { return _thread_work_items; }
void set(uint worker_i, T value) {
assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
assert(_data[worker_i] == WorkerDataArray<T>::uninitialized(), err_msg("Overwriting data for worker %d in %s", worker_i, _title));
_data[worker_i] = value;
_has_new_data = true;
}
void set_thread_work_item(uint worker_i, size_t value) {
assert(_thread_work_items != NULL, "No sub count");
_thread_work_items->set(worker_i, value);
}
T get(uint worker_i) {
assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data added for worker %d", worker_i));
return _data[worker_i];
}
void add(uint worker_i, T value) {
assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data to add to for worker %d", worker_i));
_data[worker_i] += value;
_has_new_data = true;
}
double average(){
calculate_totals();
return _average;
}
T sum() {
calculate_totals();
return _sum;
}
T minimum() {
calculate_totals();
return _min;
}
T maximum() {
calculate_totals();
return _max;
}
void reset() PRODUCT_RETURN;
void verify() PRODUCT_RETURN;
void set_enabled(bool enabled) { _enabled = enabled; }
int log_level() { return _log_level; }
private:
void calculate_totals(){
if (!_has_new_data) {
return;
}
}
if (G1Log::finest()) {
buf.append_and_print_cr("%s", "");
_sum = (T)0;
_min = _data[0];
_max = _min;
for (uint i = 0; i < _length; ++i) {
T val = _data[i];
_sum += val;
_min = MIN2(_min, val);
_max = MAX2(_max, val);
}
_average = (double)_sum / (double)_length;
_has_new_data = false;
}
};
double avg = (double)sum / (double)_length;
buf.append(" Min: ");
buf.append(_print_format, min);
buf.append(", Avg: ");
buf.append("%.1lf", avg); // Always print average as a double
buf.append(", Max: ");
buf.append(_print_format, max);
buf.append(", Diff: ");
buf.append(_print_format, max - min);
if (_print_sum) {
// for things like the start and end times the sum is not
// that relevant
buf.append(", Sum: ");
buf.append(_print_format, sum);
}
buf.append_and_print_cr("]");
}
PRAGMA_DIAG_POP
#ifndef PRODUCT
template <> const int WorkerDataArray<int>::_uninitialized = -1;
template <> const double WorkerDataArray<double>::_uninitialized = -1.0;
template <> const size_t WorkerDataArray<size_t>::_uninitialized = (size_t)-1;
template <>
size_t WorkerDataArray<size_t>::uninitialized() {
return (size_t)-1;
}
template <>
double WorkerDataArray<double>::uninitialized() {
return -1.0;
}
template <class T>
void WorkerDataArray<T>::reset() {
for (uint i = 0; i < _length; i++) {
_data[i] = (T)_uninitialized;
_data[i] = WorkerDataArray<T>::uninitialized();
}
if (_thread_work_items != NULL) {
_thread_work_items->reset();
}
}
template <class T>
void WorkerDataArray<T>::verify() {
if (!_enabled) {
return;
}
for (uint i = 0; i < _length; i++) {
assert(_data[i] != _uninitialized,
err_msg("Invalid data for worker %u, data: %lf, uninitialized: %lf",
i, (double)_data[i], (double)_uninitialized));
assert(_data[i] != WorkerDataArray<T>::uninitialized(),
err_msg("Invalid data for worker %u in '%s'", i, _title));
}
if (_thread_work_items != NULL) {
_thread_work_items->verify();
}
}
#endif
G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_max_gc_threads(max_gc_threads),
_last_gc_worker_start_times_ms(_max_gc_threads, "%.1lf", false),
_last_ext_root_scan_times_ms(_max_gc_threads, "%.1lf"),
_last_satb_filtering_times_ms(_max_gc_threads, "%.1lf"),
_last_update_rs_times_ms(_max_gc_threads, "%.1lf"),
_last_update_rs_processed_buffers(_max_gc_threads, "%d"),
_last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
_last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"),
_last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
_last_termination_times_ms(_max_gc_threads, "%.1lf"),
_last_termination_attempts(_max_gc_threads, SIZE_FORMAT),
_last_gc_worker_end_times_ms(_max_gc_threads, "%.1lf", false),
_last_gc_worker_times_ms(_max_gc_threads, "%.1lf"),
_last_gc_worker_other_times_ms(_max_gc_threads, "%.1lf"),
_last_redirty_logged_cards_time_ms(_max_gc_threads, "%.1lf"),
_last_redirty_logged_cards_processed_cards(_max_gc_threads, SIZE_FORMAT),
_cur_string_dedup_queue_fixup_worker_times_ms(_max_gc_threads, "%.1lf"),
_cur_string_dedup_table_fixup_worker_times_ms(_max_gc_threads, "%.1lf")
_max_gc_threads(max_gc_threads)
{
assert(max_gc_threads > 0, "Must have some GC threads");
_gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms)", false, G1Log::LevelFiner, 2);
_gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms)", true, G1Log::LevelFiner, 2);
_gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering (ms)", true, G1Log::LevelFiner, 2);
_gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms)", true, G1Log::LevelFiner, 2);
_gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms)", true, G1Log::LevelFiner, 2);
_gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms)", true, G1Log::LevelFiner, 2);
_gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms)", true, G1Log::LevelFiner, 2);
_gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms)", true, G1Log::LevelFiner, 2);
_gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms)", true, G1Log::LevelFiner, 2);
_gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms)", false, G1Log::LevelFiner, 2);
_gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms)", true, G1Log::LevelFiner, 2);
_update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers", true, G1Log::LevelFiner, 3);
_gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_processed_buffers);
_termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts", true, G1Log::LevelFinest, 3);
_gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);
_gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms)", true, G1Log::LevelFiner, 2);
_gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms)", true, G1Log::LevelFiner, 2);
_gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty", true, G1Log::LevelFinest, 3);
_redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards", true, G1Log::LevelFinest, 3);
_gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);
}
void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
void G1GCPhaseTimes::note_gc_start(uint active_gc_threads, bool mark_in_progress) {
assert(active_gc_threads > 0, "The number of threads must be > 0");
assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max nubmer of threads");
assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads");
_active_gc_threads = active_gc_threads;
_last_gc_worker_start_times_ms.reset();
_last_ext_root_scan_times_ms.reset();
_last_satb_filtering_times_ms.reset();
_last_update_rs_times_ms.reset();
_last_update_rs_processed_buffers.reset();
_last_scan_rs_times_ms.reset();
_last_strong_code_root_scan_times_ms.reset();
_last_obj_copy_times_ms.reset();
_last_termination_times_ms.reset();
_last_termination_attempts.reset();
_last_gc_worker_end_times_ms.reset();
_last_gc_worker_times_ms.reset();
_last_gc_worker_other_times_ms.reset();
for (int i = 0; i < GCParPhasesSentinel; i++) {
_gc_par_phases[i]->reset();
}
_last_redirty_logged_cards_time_ms.reset();
_last_redirty_logged_cards_processed_cards.reset();
_gc_par_phases[SATBFiltering]->set_enabled(mark_in_progress);
_gc_par_phases[StringDedupQueueFixup]->set_enabled(G1StringDedup::is_enabled());
_gc_par_phases[StringDedupTableFixup]->set_enabled(G1StringDedup::is_enabled());
}
void G1GCPhaseTimes::note_gc_end() {
_last_gc_worker_start_times_ms.verify();
_last_ext_root_scan_times_ms.verify();
_last_satb_filtering_times_ms.verify();
_last_update_rs_times_ms.verify();
_last_update_rs_processed_buffers.verify();
_last_scan_rs_times_ms.verify();
_last_strong_code_root_scan_times_ms.verify();
_last_obj_copy_times_ms.verify();
_last_termination_times_ms.verify();
_last_termination_attempts.verify();
_last_gc_worker_end_times_ms.verify();
for (uint i = 0; i < _active_gc_threads; i++) {
double worker_time = _last_gc_worker_end_times_ms.get(i) - _last_gc_worker_start_times_ms.get(i);
_last_gc_worker_times_ms.set(i, worker_time);
double worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i);
record_time_secs(GCWorkerTotal, i , worker_time);
double worker_known_time = _last_ext_root_scan_times_ms.get(i) +
_last_satb_filtering_times_ms.get(i) +
_last_update_rs_times_ms.get(i) +
_last_scan_rs_times_ms.get(i) +
_last_strong_code_root_scan_times_ms.get(i) +
_last_obj_copy_times_ms.get(i) +
_last_termination_times_ms.get(i);
double worker_known_time =
_gc_par_phases[ExtRootScan]->get(i) +
_gc_par_phases[SATBFiltering]->get(i) +
_gc_par_phases[UpdateRS]->get(i) +
_gc_par_phases[ScanRS]->get(i) +
_gc_par_phases[CodeRoots]->get(i) +
_gc_par_phases[ObjCopy]->get(i) +
_gc_par_phases[Termination]->get(i);
double worker_other_time = worker_time - worker_known_time;
_last_gc_worker_other_times_ms.set(i, worker_other_time);
record_time_secs(Other, i, worker_time - worker_known_time);
}
_last_gc_worker_times_ms.verify();
_last_gc_worker_other_times_ms.verify();
_last_redirty_logged_cards_time_ms.verify();
_last_redirty_logged_cards_processed_cards.verify();
}
void G1GCPhaseTimes::note_string_dedup_fixup_start() {
_cur_string_dedup_queue_fixup_worker_times_ms.reset();
_cur_string_dedup_table_fixup_worker_times_ms.reset();
}
void G1GCPhaseTimes::note_string_dedup_fixup_end() {
_cur_string_dedup_queue_fixup_worker_times_ms.verify();
_cur_string_dedup_table_fixup_worker_times_ms.verify();
for (int i = 0; i < GCParPhasesSentinel; i++) {
_gc_par_phases[i]->verify();
}
}
void G1GCPhaseTimes::print_stats(int level, const char* str, double value) {
@ -288,35 +347,172 @@ double G1GCPhaseTimes::accounted_time_ms() {
return misc_time_ms;
}
// record the time a phase took in seconds
void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_i, double secs) {
_gc_par_phases[phase]->set(worker_i, secs);
}
// add a number of seconds to a phase
void G1GCPhaseTimes::add_time_secs(GCParPhases phase, uint worker_i, double secs) {
_gc_par_phases[phase]->add(worker_i, secs);
}
void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_i, size_t count) {
_gc_par_phases[phase]->set_thread_work_item(worker_i, count);
}
// return the average time for a phase in milliseconds
double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
return _gc_par_phases[phase]->average() * 1000.0;
}
double G1GCPhaseTimes::get_time_ms(GCParPhases phase, uint worker_i) {
return _gc_par_phases[phase]->get(worker_i) * 1000.0;
}
double G1GCPhaseTimes::sum_time_ms(GCParPhases phase) {
return _gc_par_phases[phase]->sum() * 1000.0;
}
double G1GCPhaseTimes::min_time_ms(GCParPhases phase) {
return _gc_par_phases[phase]->minimum() * 1000.0;
}
double G1GCPhaseTimes::max_time_ms(GCParPhases phase) {
return _gc_par_phases[phase]->maximum() * 1000.0;
}
size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_i) {
assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
return _gc_par_phases[phase]->thread_work_items()->get(worker_i);
}
size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase) {
assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
return _gc_par_phases[phase]->thread_work_items()->sum();
}
double G1GCPhaseTimes::average_thread_work_items(GCParPhases phase) {
assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
return _gc_par_phases[phase]->thread_work_items()->average();
}
size_t G1GCPhaseTimes::min_thread_work_items(GCParPhases phase) {
assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
return _gc_par_phases[phase]->thread_work_items()->minimum();
}
size_t G1GCPhaseTimes::max_thread_work_items(GCParPhases phase) {
assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
return _gc_par_phases[phase]->thread_work_items()->maximum();
}
class G1GCParPhasePrinter : public StackObj {
G1GCPhaseTimes* _phase_times;
public:
G1GCParPhasePrinter(G1GCPhaseTimes* phase_times) : _phase_times(phase_times) {}
void print(G1GCPhaseTimes::GCParPhases phase_id) {
WorkerDataArray<double>* phase = _phase_times->_gc_par_phases[phase_id];
if (phase->_log_level > G1Log::level() || !phase->_enabled) {
return;
}
if (phase->_length == 1) {
print_single_length(phase_id, phase);
} else {
print_multi_length(phase_id, phase);
}
}
private:
void print_single_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
// No need for min, max, average and sum for only one worker
LineBuffer buf(phase->_indent_level);
buf.append_and_print_cr("[%s: %.1lf]", phase->_title, _phase_times->get_time_ms(phase_id, 0));
if (phase->_thread_work_items != NULL) {
LineBuffer buf2(phase->_thread_work_items->_indent_level);
buf2.append_and_print_cr("[%s: "SIZE_FORMAT"]", phase->_thread_work_items->_title, _phase_times->sum_thread_work_items(phase_id));
}
}
void print_time_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
for (uint i = 0; i < phase->_length; ++i) {
buf.append(" %.1lf", _phase_times->get_time_ms(phase_id, i));
}
buf.print_cr();
}
void print_count_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
for (uint i = 0; i < thread_work_items->_length; ++i) {
buf.append(" " SIZE_FORMAT, _phase_times->get_thread_work_item(phase_id, i));
}
buf.print_cr();
}
void print_thread_work_items(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
LineBuffer buf(thread_work_items->_indent_level);
buf.append("[%s:", thread_work_items->_title);
if (G1Log::finest()) {
print_count_values(buf, phase_id, thread_work_items);
}
assert(thread_work_items->_print_sum, err_msg("%s does not have print sum true even though it is a count", thread_work_items->_title));
buf.append_and_print_cr(" Min: " SIZE_FORMAT ", Avg: %.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT ", Sum: " SIZE_FORMAT "]",
_phase_times->min_thread_work_items(phase_id), _phase_times->average_thread_work_items(phase_id), _phase_times->max_thread_work_items(phase_id),
_phase_times->max_thread_work_items(phase_id) - _phase_times->min_thread_work_items(phase_id), _phase_times->sum_thread_work_items(phase_id));
}
void print_multi_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
LineBuffer buf(phase->_indent_level);
buf.append("[%s:", phase->_title);
if (G1Log::finest()) {
print_time_values(buf, phase_id, phase);
}
buf.append(" Min: %.1lf, Avg: %.1lf, Max: %.1lf, Diff: %.1lf",
_phase_times->min_time_ms(phase_id), _phase_times->average_time_ms(phase_id), _phase_times->max_time_ms(phase_id),
_phase_times->max_time_ms(phase_id) - _phase_times->min_time_ms(phase_id));
if (phase->_print_sum) {
// for things like the start and end times the sum is not
// that relevant
buf.append(", Sum: %.1lf", _phase_times->sum_time_ms(phase_id));
}
buf.append_and_print_cr("]");
if (phase->_thread_work_items != NULL) {
print_thread_work_items(phase_id, phase->_thread_work_items);
}
}
};
void G1GCPhaseTimes::print(double pause_time_sec) {
G1GCParPhasePrinter par_phase_printer(this);
if (_root_region_scan_wait_time_ms > 0.0) {
print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
}
print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);
_last_gc_worker_start_times_ms.print(2, "GC Worker Start (ms)");
_last_ext_root_scan_times_ms.print(2, "Ext Root Scanning (ms)");
if (_last_satb_filtering_times_ms.sum() > 0.0) {
_last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
for (int i = 0; i <= GCMainParPhasesLast; i++) {
par_phase_printer.print((GCParPhases) i);
}
_last_update_rs_times_ms.print(2, "Update RS (ms)");
_last_update_rs_processed_buffers.print(3, "Processed Buffers");
_last_scan_rs_times_ms.print(2, "Scan RS (ms)");
_last_strong_code_root_scan_times_ms.print(2, "Code Root Scanning (ms)");
_last_obj_copy_times_ms.print(2, "Object Copy (ms)");
_last_termination_times_ms.print(2, "Termination (ms)");
if (G1Log::finest()) {
_last_termination_attempts.print(3, "Termination Attempts");
}
_last_gc_worker_other_times_ms.print(2, "GC Worker Other (ms)");
_last_gc_worker_times_ms.print(2, "GC Worker Total (ms)");
_last_gc_worker_end_times_ms.print(2, "GC Worker End (ms)");
print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);
if (G1StringDedup::is_enabled()) {
print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);
_cur_string_dedup_queue_fixup_worker_times_ms.print(2, "Queue Fixup (ms)");
_cur_string_dedup_table_fixup_worker_times_ms.print(2, "Table Fixup (ms)");
for (int i = StringDedupPhasesFirst; i <= StringDedupPhasesLast; i++) {
par_phase_printer.print((GCParPhases) i);
}
}
print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
@ -340,10 +536,7 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
if (G1Log::finest()) {
_last_redirty_logged_cards_time_ms.print(3, "Parallel Redirty");
_last_redirty_logged_cards_processed_cards.print(3, "Redirtied Cards");
}
par_phase_printer.print(RedirtyCards);
if (G1EagerReclaimHumongousObjects) {
print_stats(2, "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
if (G1Log::finest()) {
@ -366,3 +559,17 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
print_stats(2, "Verify After", _cur_verify_after_time_ms);
}
}
G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id) :
_phase_times(phase_times), _phase(phase), _worker_id(worker_id) {
if (_phase_times != NULL) {
_start_time = os::elapsedTime();
}
}
G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() {
if (_phase_times != NULL) {
_phase_times->record_time_secs(_phase, _worker_id, os::elapsedTime() - _start_time);
}
}
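
The constructor/destructor pair above replaces the manual start-time/end-time bookkeeping that the earlier hunks delete from the worker tasks: entering a scope starts the clock, leaving it records the elapsed seconds for that phase and worker. Below is a minimal standalone sketch of this scoped-timer pattern, assuming simplified stand-in types; PhaseTimes, ScopedPhaseTracker and the phase enum subset are illustrative names, not the HotSpot classes, and std::chrono stands in for os::elapsedTime().

// Standalone sketch (not part of this changeset): an RAII tracker that records
// the wall-clock time of one phase for one worker into a per-phase table.
#include <chrono>
#include <cstdio>
#include <vector>

enum Phase { ExtRootScan, UpdateRS, ObjCopy, NumPhases };

class PhaseTimes {
  std::vector<std::vector<double>> _secs;  // [phase][worker]
public:
  explicit PhaseTimes(unsigned workers)
    : _secs(NumPhases, std::vector<double>(workers, 0.0)) {}
  void record_time_secs(Phase p, unsigned worker, double s) { _secs[p][worker] = s; }
  double get_time_ms(Phase p, unsigned worker) const { return _secs[p][worker] * 1000.0; }
};

class ScopedPhaseTracker {
  PhaseTimes* _times;
  Phase _phase;
  unsigned _worker;
  std::chrono::steady_clock::time_point _start;
public:
  ScopedPhaseTracker(PhaseTimes* times, Phase phase, unsigned worker)
    : _times(times), _phase(phase), _worker(worker),
      _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTracker() {
    if (_times != nullptr) {
      std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - _start;
      _times->record_time_secs(_phase, _worker, elapsed.count());
    }
  }
};

int main() {
  PhaseTimes times(1);
  {
    ScopedPhaseTracker t(&times, UpdateRS, 0);  // the phase is timed for worker 0
    // ... phase work would run here ...
  }
  std::printf("UpdateRS: %.3f ms\n", times.get_time_ms(UpdateRS, 0));
  return 0;
}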

View File

@ -26,106 +26,46 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP
#include "memory/allocation.hpp"
#include "gc_interface/gcCause.hpp"
template <class T>
class WorkerDataArray : public CHeapObj<mtGC> {
T* _data;
uint _length;
const char* _print_format;
bool _print_sum;
class LineBuffer;
NOT_PRODUCT(static const T _uninitialized;)
// We are caching the sum and average to only have to calculate them once.
// This is not done in an MT-safe way. It is intended to allow single
// threaded code to call sum() and average() multiple times in any order
// without having to worry about the cost.
bool _has_new_data;
T _sum;
double _average;
public:
WorkerDataArray(uint length, const char* print_format, bool print_sum = true) :
_length(length), _print_format(print_format), _print_sum(print_sum), _has_new_data(true) {
assert(length > 0, "Must have some workers to store data for");
_data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
}
~WorkerDataArray() {
FREE_C_HEAP_ARRAY(T, _data);
}
void set(uint worker_i, T value) {
assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
assert(_data[worker_i] == (T)-1, err_msg("Overwriting data for worker %d", worker_i));
_data[worker_i] = value;
_has_new_data = true;
}
T get(uint worker_i) {
assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
assert(_data[worker_i] != (T)-1, err_msg("No data to add to for worker %d", worker_i));
return _data[worker_i];
}
void add(uint worker_i, T value) {
assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
assert(_data[worker_i] != (T)-1, err_msg("No data to add to for worker %d", worker_i));
_data[worker_i] += value;
_has_new_data = true;
}
double average(){
if (_has_new_data) {
calculate_totals();
}
return _average;
}
T sum() {
if (_has_new_data) {
calculate_totals();
}
return _sum;
}
void print(int level, const char* title);
void reset() PRODUCT_RETURN;
void verify() PRODUCT_RETURN;
private:
void calculate_totals(){
_sum = (T)0;
for (uint i = 0; i < _length; ++i) {
_sum += _data[i];
}
_average = (double)_sum / (double)_length;
_has_new_data = false;
}
};
template <class T> class WorkerDataArray;
class G1GCPhaseTimes : public CHeapObj<mtGC> {
friend class G1GCParPhasePrinter;
private:
uint _active_gc_threads;
uint _max_gc_threads;
WorkerDataArray<double> _last_gc_worker_start_times_ms;
WorkerDataArray<double> _last_ext_root_scan_times_ms;
WorkerDataArray<double> _last_satb_filtering_times_ms;
WorkerDataArray<double> _last_update_rs_times_ms;
WorkerDataArray<int> _last_update_rs_processed_buffers;
WorkerDataArray<double> _last_scan_rs_times_ms;
WorkerDataArray<double> _last_strong_code_root_scan_times_ms;
WorkerDataArray<double> _last_obj_copy_times_ms;
WorkerDataArray<double> _last_termination_times_ms;
WorkerDataArray<size_t> _last_termination_attempts;
WorkerDataArray<double> _last_gc_worker_end_times_ms;
WorkerDataArray<double> _last_gc_worker_times_ms;
WorkerDataArray<double> _last_gc_worker_other_times_ms;
public:
enum GCParPhases {
GCWorkerStart,
ExtRootScan,
SATBFiltering,
UpdateRS,
ScanRS,
CodeRoots,
ObjCopy,
Termination,
Other,
GCWorkerTotal,
GCWorkerEnd,
StringDedupQueueFixup,
StringDedupTableFixup,
RedirtyCards,
GCParPhasesSentinel
};
private:
// Markers for grouping the phases in the GCPhases enum above
static const int GCMainParPhasesLast = GCWorkerEnd;
static const int StringDedupPhasesFirst = StringDedupQueueFixup;
static const int StringDedupPhasesLast = StringDedupTableFixup;
WorkerDataArray<double>* _gc_par_phases[GCParPhasesSentinel];
WorkerDataArray<size_t>* _update_rs_processed_buffers;
WorkerDataArray<size_t>* _termination_attempts;
WorkerDataArray<size_t>* _redirtied_cards;
double _cur_collection_par_time_ms;
double _cur_collection_code_root_fixup_time_ms;
@ -135,9 +75,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _cur_evac_fail_restore_remsets;
double _cur_evac_fail_remove_self_forwards;
double _cur_string_dedup_fixup_time_ms;
WorkerDataArray<double> _cur_string_dedup_queue_fixup_worker_times_ms;
WorkerDataArray<double> _cur_string_dedup_table_fixup_worker_times_ms;
double _cur_string_dedup_fixup_time_ms;
double _cur_clear_ct_time_ms;
double _cur_ref_proc_time_ms;
@ -149,8 +87,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _recorded_young_cset_choice_time_ms;
double _recorded_non_young_cset_choice_time_ms;
WorkerDataArray<double> _last_redirty_logged_cards_time_ms;
WorkerDataArray<size_t> _last_redirty_logged_cards_processed_cards;
double _recorded_redirty_logged_cards_time_ms;
double _recorded_young_free_cset_time_ms;
@ -172,54 +108,34 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
public:
G1GCPhaseTimes(uint max_gc_threads);
void note_gc_start(uint active_gc_threads);
void note_gc_start(uint active_gc_threads, bool mark_in_progress);
void note_gc_end();
void print(double pause_time_sec);
void record_gc_worker_start_time(uint worker_i, double ms) {
_last_gc_worker_start_times_ms.set(worker_i, ms);
}
// record the time a phase took in seconds
void record_time_secs(GCParPhases phase, uint worker_i, double secs);
void record_ext_root_scan_time(uint worker_i, double ms) {
_last_ext_root_scan_times_ms.set(worker_i, ms);
}
// add a number of seconds to a phase
void add_time_secs(GCParPhases phase, uint worker_i, double secs);
void record_satb_filtering_time(uint worker_i, double ms) {
_last_satb_filtering_times_ms.set(worker_i, ms);
}
void record_thread_work_item(GCParPhases phase, uint worker_i, size_t count);
void record_update_rs_time(uint worker_i, double ms) {
_last_update_rs_times_ms.set(worker_i, ms);
}
// return the average time for a phase in milliseconds
double average_time_ms(GCParPhases phase);
void record_update_rs_processed_buffers(uint worker_i, int processed_buffers) {
_last_update_rs_processed_buffers.set(worker_i, processed_buffers);
}
size_t sum_thread_work_items(GCParPhases phase);
void record_scan_rs_time(uint worker_i, double ms) {
_last_scan_rs_times_ms.set(worker_i, ms);
}
private:
double get_time_ms(GCParPhases phase, uint worker_i);
double sum_time_ms(GCParPhases phase);
double min_time_ms(GCParPhases phase);
double max_time_ms(GCParPhases phase);
size_t get_thread_work_item(GCParPhases phase, uint worker_i);
double average_thread_work_items(GCParPhases phase);
size_t min_thread_work_items(GCParPhases phase);
size_t max_thread_work_items(GCParPhases phase);
void record_strong_code_root_scan_time(uint worker_i, double ms) {
_last_strong_code_root_scan_times_ms.set(worker_i, ms);
}
void record_obj_copy_time(uint worker_i, double ms) {
_last_obj_copy_times_ms.set(worker_i, ms);
}
void add_obj_copy_time(uint worker_i, double ms) {
_last_obj_copy_times_ms.add(worker_i, ms);
}
void record_termination(uint worker_i, double ms, size_t attempts) {
_last_termination_times_ms.set(worker_i, ms);
_last_termination_attempts.set(worker_i, attempts);
}
void record_gc_worker_end_time(uint worker_i, double ms) {
_last_gc_worker_end_times_ms.set(worker_i, ms);
}
public:
void record_clear_ct_time(double ms) {
_cur_clear_ct_time_ms = ms;
@ -249,21 +165,10 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_cur_evac_fail_remove_self_forwards = ms;
}
void note_string_dedup_fixup_start();
void note_string_dedup_fixup_end();
void record_string_dedup_fixup_time(double ms) {
_cur_string_dedup_fixup_time_ms = ms;
}
void record_string_dedup_queue_fixup_worker_time(uint worker_id, double ms) {
_cur_string_dedup_queue_fixup_worker_times_ms.set(worker_id, ms);
}
void record_string_dedup_table_fixup_worker_time(uint worker_id, double ms) {
_cur_string_dedup_table_fixup_worker_times_ms.set(worker_id, ms);
}
void record_ref_proc_time(double ms) {
_cur_ref_proc_time_ms = ms;
}
@ -303,14 +208,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_recorded_non_young_cset_choice_time_ms = time_ms;
}
void record_redirty_logged_cards_time_ms(uint worker_i, double time_ms) {
_last_redirty_logged_cards_time_ms.set(worker_i, time_ms);
}
void record_redirty_logged_cards_processed_cards(uint worker_i, size_t processed_buffers) {
_last_redirty_logged_cards_processed_cards.set(worker_i, processed_buffers);
}
void record_redirty_logged_cards_time_ms(double time_ms) {
_recorded_redirty_logged_cards_time_ms = time_ms;
}
@ -364,38 +261,16 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double fast_reclaim_humongous_time_ms() {
return _cur_fast_reclaim_humongous_time_ms;
}
};
double average_last_update_rs_time() {
return _last_update_rs_times_ms.average();
}
int sum_last_update_rs_processed_buffers() {
return _last_update_rs_processed_buffers.sum();
}
double average_last_scan_rs_time(){
return _last_scan_rs_times_ms.average();
}
double average_last_strong_code_root_scan_time(){
return _last_strong_code_root_scan_times_ms.average();
}
double average_last_obj_copy_time() {
return _last_obj_copy_times_ms.average();
}
double average_last_termination_time() {
return _last_termination_times_ms.average();
}
double average_last_ext_root_scan_time() {
return _last_ext_root_scan_times_ms.average();
}
double average_last_satb_filtering_times_ms() {
return _last_satb_filtering_times_ms.average();
}
class G1GCParPhaseTimesTracker : public StackObj {
double _start_time;
G1GCPhaseTimes::GCParPhases _phase;
G1GCPhaseTimes* _phase_times;
uint _worker_id;
public:
G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id);
~G1GCParPhaseTimesTracker();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP
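
The header above collapses the long list of per-phase member arrays into one WorkerDataArray per GCParPhases entry, optionally linked to a second array of per-thread work items (processed buffers, termination attempts, redirtied cards), which the printer then summarizes as Min/Avg/Max/Sum. The following is a minimal standalone sketch of that per-worker aggregation idea under simplified assumptions; WorkerValues and the sample numbers are invented for illustration and std::vector replaces the HotSpot heap-array macros.

// Standalone sketch (not part of this changeset): one value slot per GC worker,
// summarized for the log line, with a second array playing the role of the
// linked "thread work items".
#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

template <class T>
class WorkerValues {
  std::vector<T> _data;
public:
  explicit WorkerValues(unsigned length) : _data(length, T(0)) {}
  void set(unsigned worker, T value) { _data[worker] = value; }
  T sum() const { return std::accumulate(_data.begin(), _data.end(), T(0)); }
  T minimum() const { return *std::min_element(_data.begin(), _data.end()); }
  T maximum() const { return *std::max_element(_data.begin(), _data.end()); }
  double average() const { return (double)sum() / (double)_data.size(); }
};

int main() {
  WorkerValues<double> update_rs_ms(4);       // phase times, one per worker
  WorkerValues<size_t> processed_buffers(4);  // linked per-thread work items
  double times[4] = {1.2, 0.9, 1.4, 1.1};
  size_t bufs[4]  = {10, 7, 12, 9};
  for (unsigned i = 0; i < 4; i++) {
    update_rs_ms.set(i, times[i]);
    processed_buffers.set(i, bufs[i]);
  }
  std::printf("[Update RS (ms): Min: %.1f, Avg: %.1f, Max: %.1f, Sum: %.1f]\n",
              update_rs_ms.minimum(), update_rs_ms.average(),
              update_rs_ms.maximum(), update_rs_ms.sum());
  std::printf("   [Processed Buffers: Sum: %zu]\n", processed_buffers.sum());
  return 0;
}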

View File

@ -28,6 +28,7 @@
#include "memory/allocation.hpp"
class G1Log : public AllStatic {
public:
typedef enum {
LevelNone,
LevelFine,
@ -35,6 +36,7 @@ class G1Log : public AllStatic {
LevelFinest
} LogLevel;
private:
static LogLevel _level;
public:
@ -50,6 +52,10 @@ class G1Log : public AllStatic {
return _level == LevelFinest;
}
static LogLevel level() {
return _level;
}
static void init();
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,6 +57,9 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
public:
MemRegion reserved() { return _storage.reserved(); }
size_t reserved_size() { return _storage.reserved_size(); }
size_t committed_size() { return _storage.committed_size(); }
void set_mapping_changed_listener(G1MappingChangedListener* listener) { _listener = listener; }
virtual ~G1RegionToSpaceMapper() {

View File

@ -248,9 +248,8 @@ void G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
assert(_cards_scanned != NULL, "invariant");
_cards_scanned[worker_i] = scanRScl.cards_done();
_g1p->phase_times()->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
_g1p->phase_times()->record_strong_code_root_scan_time(worker_i,
scanRScl.strong_code_root_scan_time_sec() * 1000.0);
_g1p->phase_times()->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, scan_rs_time_sec);
_g1p->phase_times()->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, scanRScl.strong_code_root_scan_time_sec());
}
// Closure used for updating RSets and recording references that
@ -287,13 +286,11 @@ public:
};
void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
double start = os::elapsedTime();
G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
// Apply the given closure to all remaining log entries.
RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
_g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
_g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
}
void G1RemSet::cleanupHRRS() {

View File

@ -106,7 +106,7 @@ void G1StringDedup::deduplicate(oop java_string) {
void G1StringDedup::oops_do(OopClosure* keep_alive) {
assert(is_enabled(), "String deduplication not enabled");
unlink_or_oops_do(NULL, keep_alive);
unlink_or_oops_do(NULL, keep_alive, true /* allow_resize_and_rehash */);
}
void G1StringDedup::unlink(BoolObjectClosure* is_alive) {
@ -123,45 +123,39 @@ void G1StringDedup::unlink(BoolObjectClosure* is_alive) {
class G1StringDedupUnlinkOrOopsDoTask : public AbstractGangTask {
private:
G1StringDedupUnlinkOrOopsDoClosure _cl;
G1GCPhaseTimes* _phase_times;
public:
G1StringDedupUnlinkOrOopsDoTask(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool allow_resize_and_rehash) :
bool allow_resize_and_rehash,
G1GCPhaseTimes* phase_times) :
AbstractGangTask("G1StringDedupUnlinkOrOopsDoTask"),
_cl(is_alive, keep_alive, allow_resize_and_rehash) {
}
_cl(is_alive, keep_alive, allow_resize_and_rehash), _phase_times(phase_times) { }
virtual void work(uint worker_id) {
double queue_fixup_start = os::elapsedTime();
G1StringDedupQueue::unlink_or_oops_do(&_cl);
double table_fixup_start = os::elapsedTime();
G1StringDedupTable::unlink_or_oops_do(&_cl, worker_id);
double queue_fixup_time_ms = (table_fixup_start - queue_fixup_start) * 1000.0;
double table_fixup_time_ms = (os::elapsedTime() - table_fixup_start) * 1000.0;
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
g1p->phase_times()->record_string_dedup_queue_fixup_worker_time(worker_id, queue_fixup_time_ms);
g1p->phase_times()->record_string_dedup_table_fixup_worker_time(worker_id, table_fixup_time_ms);
{
G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupQueueFixup, worker_id);
G1StringDedupQueue::unlink_or_oops_do(&_cl);
}
{
G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupTableFixup, worker_id);
G1StringDedupTable::unlink_or_oops_do(&_cl, worker_id);
}
}
};
void G1StringDedup::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, bool allow_resize_and_rehash) {
void G1StringDedup::unlink_or_oops_do(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool allow_resize_and_rehash,
G1GCPhaseTimes* phase_times) {
assert(is_enabled(), "String deduplication not enabled");
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
g1p->phase_times()->note_string_dedup_fixup_start();
double fixup_start = os::elapsedTime();
G1StringDedupUnlinkOrOopsDoTask task(is_alive, keep_alive, allow_resize_and_rehash);
G1StringDedupUnlinkOrOopsDoTask task(is_alive, keep_alive, allow_resize_and_rehash, phase_times);
G1CollectedHeap* g1h = G1CollectedHeap::heap();
g1h->set_par_threads();
g1h->workers()->run_task(&task);
g1h->set_par_threads(0);
double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
g1p->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
g1p->phase_times()->note_string_dedup_fixup_end();
}
void G1StringDedup::threads_do(ThreadClosure* tc) {

View File

@ -91,6 +91,7 @@ class BoolObjectClosure;
class ThreadClosure;
class outputStream;
class G1StringDedupTable;
class G1GCPhaseTimes;
//
// Main interface for interacting with string deduplication.
@ -131,7 +132,7 @@ public:
static void oops_do(OopClosure* keep_alive);
static void unlink(BoolObjectClosure* is_alive);
static void unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive,
bool allow_resize_and_rehash = true);
bool allow_resize_and_rehash, G1GCPhaseTimes* phase_times = NULL);
static void threads_do(ThreadClosure* tc);
static void print_worker_threads_on(outputStream* st);

View File

@ -934,6 +934,16 @@ void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
_offsets.resize(new_end - bottom());
}
#ifndef PRODUCT
void G1OffsetTableContigSpace::mangle_unused_area() {
mangle_unused_area_complete();
}
void G1OffsetTableContigSpace::mangle_unused_area_complete() {
SpaceMangler::mangle_region(MemRegion(top(), end()));
}
#endif
void G1OffsetTableContigSpace::print() const {
print_short();
gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "

View File

@ -155,6 +155,9 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
void set_bottom(HeapWord* value);
void set_end(HeapWord* value);
void mangle_unused_area() PRODUCT_RETURN;
void mangle_unused_area_complete() PRODUCT_RETURN;
HeapWord* scan_top() const;
void record_timestamp();
void reset_gc_time_stamp() { _gc_time_stamp = 0; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -145,6 +145,24 @@ void HeapRegionManager::make_regions_available(uint start, uint num_regions) {
}
}
MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
size_t used_sz =
_prev_bitmap_mapper->committed_size() +
_next_bitmap_mapper->committed_size() +
_bot_mapper->committed_size() +
_cardtable_mapper->committed_size() +
_card_counts_mapper->committed_size();
size_t committed_sz =
_prev_bitmap_mapper->reserved_size() +
_next_bitmap_mapper->reserved_size() +
_bot_mapper->reserved_size() +
_cardtable_mapper->reserved_size() +
_card_counts_mapper->reserved_size();
return MemoryUsage(0, used_sz, committed_sz, committed_sz);
}
uint HeapRegionManager::expand_by(uint num_regions) {
return expand_at(0, num_regions);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "services/memoryUsage.hpp"
class HeapRegion;
class HeapRegionClosure;
@ -196,6 +197,8 @@ public:
// Return the maximum number of regions in the heap.
uint max_length() const { return (uint)_regions.length(); }
MemoryUsage get_auxiliary_data_memory_usage() const;
MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
// Expand the sequence to reflect that the heap has grown. Either create new

View File

@ -325,7 +325,7 @@ public:
private:
ParallelTaskTerminator& _term;
ParNewGeneration& _gen;
Generation& _next_gen;
Generation& _old_gen;
public:
bool is_valid(int id) const { return id < length(); }
ParallelTaskTerminator* terminator() { return &_term; }
@ -338,7 +338,7 @@ ParScanThreadStateSet::ParScanThreadStateSet(
Stack<oop, mtGC>* overflow_stacks,
size_t desired_plab_sz, ParallelTaskTerminator& term)
: ResourceArray(sizeof(ParScanThreadState), num_threads),
_gen(gen), _next_gen(old_gen), _term(term)
_gen(gen), _old_gen(old_gen), _term(term)
{
assert(num_threads > 0, "sanity check!");
assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
@ -471,8 +471,8 @@ void ParScanThreadStateSet::flush()
_gen.age_table()->merge(local_table);
// Inform old gen that we're done.
_next_gen.par_promote_alloc_done(i);
_next_gen.par_oop_since_save_marks_iterate_done(i);
_old_gen.par_promote_alloc_done(i);
_old_gen.par_oop_since_save_marks_iterate_done(i);
}
if (UseConcMarkSweepGC) {
@ -574,10 +574,10 @@ void ParEvacuateFollowersClosure::do_void() {
par_scan_state()->end_term_time();
}
ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
AbstractGangTask("ParNewGeneration collection"),
_gen(gen), _next_gen(next_gen),
_gen(gen), _old_gen(old_gen),
_young_old_boundary(young_old_boundary),
_state_set(state_set)
{}
@ -601,8 +601,6 @@ void ParNewGenTask::work(uint worker_id) {
// We would need multiple old-gen queues otherwise.
assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");
Generation* old_gen = gch->next_gen(_gen);
ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
assert(_state_set->is_valid(worker_id), "Should not have been called");
@ -763,8 +761,9 @@ void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier
class ParNewRefProcTaskProxy: public AbstractGangTask {
typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
Generation& next_gen,
ParNewRefProcTaskProxy(ProcessTask& task,
ParNewGeneration& gen,
Generation& old_gen,
HeapWord* young_old_boundary,
ParScanThreadStateSet& state_set);
@ -776,20 +775,20 @@ private:
private:
ParNewGeneration& _gen;
ProcessTask& _task;
Generation& _next_gen;
Generation& _old_gen;
HeapWord* _young_old_boundary;
ParScanThreadStateSet& _state_set;
};
ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
ProcessTask& task, ParNewGeneration& gen,
Generation& next_gen,
HeapWord* young_old_boundary,
ParScanThreadStateSet& state_set)
ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
ParNewGeneration& gen,
Generation& old_gen,
HeapWord* young_old_boundary,
ParScanThreadStateSet& state_set)
: AbstractGangTask("ParNewGeneration parallel reference processing"),
_gen(gen),
_task(task),
_next_gen(next_gen),
_old_gen(old_gen),
_young_old_boundary(young_old_boundary),
_state_set(state_set)
{
@ -893,7 +892,7 @@ void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThr
from()->set_next_compaction_space(to());
gch->set_incremental_collection_failed();
// Inform the next generation that a promotion failure occurred.
_next_gen->promotion_failure_occurred();
_old_gen->promotion_failure_occurred();
// Trace promotion failure in the parallel GC threads
thread_state_set.trace_promotion_failed(gc_tracer());
@ -927,7 +926,7 @@ void ParNewGeneration::collect(bool full,
workers->set_active_workers(active_workers);
assert(gch->n_gens() == 2,
"Par collection currently only works with single older gen.");
_next_gen = gch->next_gen(this);
_old_gen = gch->old_gen();
// If the next generation is too full to accommodate worst-case promotion
// from this generation, pass on collection; let the next generation
@ -968,10 +967,10 @@ void ParNewGeneration::collect(bool full,
// because only those workers go through the termination protocol.
ParallelTaskTerminator _term(n_workers, task_queues());
ParScanThreadStateSet thread_state_set(workers->active_workers(),
*to(), *this, *_next_gen, *task_queues(),
*to(), *this, *_old_gen, *task_queues(),
_overflow_stacks, desired_plab_sz(), _term);
ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set);
gch->set_par_threads(n_workers);
gch->rem_set()->prepare_for_younger_refs_iterate(true);
// It turns out that even when we're using 1 thread, doing the work in a
@ -1191,8 +1190,8 @@ oop ParNewGeneration::copy_to_survivor_space(
}
if (!_promotion_failed) {
new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
old, m, sz);
new_obj = _old_gen->par_promote(par_scan_state->thread_num(),
old, m, sz);
}
if (new_obj == NULL) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -233,13 +233,13 @@ class ParScanThreadState {
class ParNewGenTask: public AbstractGangTask {
private:
ParNewGeneration* _gen;
Generation* _next_gen;
Generation* _old_gen;
HeapWord* _young_old_boundary;
class ParScanThreadStateSet* _state_set;
public:
ParNewGenTask(ParNewGeneration* gen,
Generation* next_gen,
Generation* old_gen,
HeapWord* young_old_boundary,
ParScanThreadStateSet* state_set);

View File

@ -601,7 +601,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
HandleMark hm; // Discard any handles allocated in each iteration.
// First allocation attempt is lock-free.
Generation *young = gch->get_gen(0);
Generation *young = gch->young_gen();
assert(young->supports_inline_contig_alloc(),
"Otherwise, must do alloc within heap lock");
if (young->should_allocate(size, is_tlab)) {
@ -615,8 +615,8 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
{
MutexLocker ml(Heap_lock);
if (PrintGC && Verbose) {
gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
" attempting locked slow path allocation");
gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:"
" attempting locked slow path allocation");
}
// Note that only large objects get a shot at being
// allocated in later generations.
@ -705,7 +705,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
// Give a warning if we seem to be looping forever.
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
warning("GenCollectorPolicy::mem_allocate_work retries %d times \n\t"
" size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
}
}
@ -715,10 +715,14 @@ HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
bool is_tlab) {
GenCollectedHeap *gch = GenCollectedHeap::heap();
HeapWord* result = NULL;
for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
Generation *gen = gch->get_gen(i);
if (gen->should_allocate(size, is_tlab)) {
result = gen->expand_and_allocate(size, is_tlab);
Generation *old = gch->old_gen();
if (old->should_allocate(size, is_tlab)) {
result = old->expand_and_allocate(size, is_tlab);
}
if (result == NULL) {
Generation *young = gch->young_gen();
if (young->should_allocate(size, is_tlab)) {
result = young->expand_and_allocate(size, is_tlab);
}
}
assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
@ -891,7 +895,7 @@ MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
bool GenCollectorPolicy::should_try_older_generation_allocation(
size_t word_size) const {
GenCollectedHeap* gch = GenCollectedHeap::heap();
size_t young_capacity = gch->get_gen(0)->capacity_before_gc();
size_t young_capacity = gch->young_gen()->capacity_before_gc();
return (word_size > heap_word_size(young_capacity))
|| GC_locker::is_active_and_needs_gc()
|| gch->incremental_collection_failed();

View File

@ -226,7 +226,7 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
update_counters();
_next_gen = NULL;
_old_gen = NULL;
_tenuring_threshold = MaxTenuringThreshold;
_pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
@ -383,8 +383,8 @@ void DefNewGeneration::compute_new_size() {
assert(next_level < gch->_n_gens,
"DefNewGeneration cannot be an oldest gen");
Generation* next_gen = gch->get_gen(next_level);
size_t old_size = next_gen->capacity();
Generation* old_gen = gch->old_gen();
size_t old_size = old_gen->capacity();
size_t new_size_before = _virtual_space.committed_size();
size_t min_new_size = spec()->init_size();
size_t max_new_size = reserved().byte_size();
@ -568,7 +568,7 @@ void DefNewGeneration::collect(bool full,
DefNewTracer gc_tracer;
gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
_next_gen = gch->next_gen(this);
_old_gen = gch->old_gen();
// If the next generation is too full to accommodate promotion
// from this generation, pass on collection; let the next generation
@ -688,7 +688,7 @@ void DefNewGeneration::collect(bool full,
gch->set_incremental_collection_failed();
// Inform the next generation that a promotion failure occurred.
_next_gen->promotion_failure_occurred();
_old_gen->promotion_failure_occurred();
gc_tracer.report_promotion_failed(_promotion_failed_info);
// Reset the PromotionFailureALot counters.
@ -793,7 +793,7 @@ oop DefNewGeneration::copy_to_survivor_space(oop old) {
// Otherwise try allocating obj tenured
if (obj == NULL) {
obj = _next_gen->promote(old, s);
obj = _old_gen->promote(old, s);
if (obj == NULL) {
handle_promotion_failure(old);
return old;
@ -898,11 +898,11 @@ bool DefNewGeneration::collection_attempt_is_safe() {
}
return false;
}
if (_next_gen == NULL) {
if (_old_gen == NULL) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
_next_gen = gch->next_gen(this);
_old_gen = gch->old_gen();
}
return _next_gen->promotion_attempt_is_safe(used());
return _old_gen->promotion_attempt_is_safe(used());
}
void DefNewGeneration::gc_epilogue(bool full) {
@ -1022,8 +1022,7 @@ CompactibleSpace* DefNewGeneration::first_compaction_space() const {
return eden();
}
HeapWord* DefNewGeneration::allocate(size_t word_size,
bool is_tlab) {
HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
// This is the slow-path allocation for the DefNewGeneration.
// Most allocations are fast-path in compiled code.
// We try to allocate from the eden. If that works, we are happy.
@ -1031,8 +1030,8 @@ HeapWord* DefNewGeneration::allocate(size_t word_size,
// have to use it here, as well.
HeapWord* result = eden()->par_allocate(word_size);
if (result != NULL) {
if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
_next_gen->sample_eden_chunk();
if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
_old_gen->sample_eden_chunk();
}
} else {
// If the eden is full and the last collection bailed out, we are running
@ -1047,8 +1046,8 @@ HeapWord* DefNewGeneration::allocate(size_t word_size,
HeapWord* DefNewGeneration::par_allocate(size_t word_size,
bool is_tlab) {
HeapWord* res = eden()->par_allocate(word_size);
if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
_next_gen->sample_eden_chunk();
if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
_old_gen->sample_eden_chunk();
}
return res;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,7 @@ class DefNewGeneration: public Generation {
friend class VMStructs;
protected:
Generation* _next_gen;
Generation* _old_gen;
uint _tenuring_threshold; // Tenuring threshold for next collection.
ageTable _age_table;
// Size of object to pretenure in words; command line provides bytes

View File

@ -177,18 +177,17 @@ void GenCollectedHeap::post_initialize() {
SharedHeap::post_initialize();
GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
guarantee(policy->is_generation_policy(), "Illegal policy type");
assert((get_gen(0)->kind() == Generation::DefNew) ||
(get_gen(0)->kind() == Generation::ParNew),
assert((_young_gen->kind() == Generation::DefNew) ||
(_young_gen->kind() == Generation::ParNew),
"Wrong youngest generation type");
DefNewGeneration* def_new_gen = (DefNewGeneration*)get_gen(0);
DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
Generation* old_gen = get_gen(1);
assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
old_gen->kind() == Generation::MarkSweepCompact,
assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
_old_gen->kind() == Generation::MarkSweepCompact,
"Wrong generation kind");
policy->initialize_size_policy(def_new_gen->eden()->capacity(),
old_gen->capacity(),
_old_gen->capacity(),
def_new_gen->from()->capacity());
policy->initialize_gc_policy_counters();
}
@ -1113,10 +1112,10 @@ void GenCollectedHeap::print_on_error(outputStream* st) const {
void GenCollectedHeap::print_tracing_info() const {
if (TraceYoungGenTime) {
get_gen(0)->print_summary_info();
_young_gen->print_summary_info();
}
if (TraceOldGenTime) {
get_gen(1)->print_summary_info();
_old_gen->print_summary_info();
}
}

View File

@ -373,27 +373,6 @@ public:
// collection.
virtual bool is_maximal_no_gc() const;
// Return the generation before "gen".
Generation* prev_gen(Generation* gen) const {
guarantee(gen->level() == 1, "Out of bounds");
return _young_gen;
}
// Return the generation after "gen".
Generation* next_gen(Generation* gen) const {
guarantee(gen->level() == 0, "Out of bounds");
return _old_gen;
}
Generation* get_gen(int i) const {
guarantee(i == 0 || i == 1, "Out of bounds");
if (i == 0) {
return _young_gen;
} else {
return _old_gen;
}
}
int n_gens() const {
assert(_n_gens == gen_policy()->number_of_generations(), "Sanity");
return _n_gens;
@ -486,7 +465,7 @@ public:
assert(heap()->collector_policy()->is_generation_policy(),
"the following definition may not be suitable for an n(>2)-generation system");
return incremental_collection_failed() ||
(consult_young && !get_gen(0)->collection_attempt_is_safe());
(consult_young && !_young_gen->collection_attempt_is_safe());
}
// If a generation bails out of an incremental collection,

View File

@ -109,20 +109,16 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool c
deallocate_stacks();
// If compaction completely evacuated all generations younger than this
// one, then we can clear the card table. Otherwise, we must invalidate
// If compaction completely evacuated the young generation then we
// can clear the card table. Otherwise, we must invalidate
// it (consider all cards dirty). In the future, we might consider doing
// compaction within generations only, and doing card-table sliding.
bool all_empty = true;
for (int i = 0; all_empty && i < level; i++) {
Generation* g = gch->get_gen(i);
all_empty = all_empty && gch->get_gen(i)->used() == 0;
}
GenRemSet* rs = gch->rem_set();
Generation* old_gen = gch->get_gen(level);
Generation* old_gen = gch->old_gen();
// Clear/invalidate below make use of the "prev_used_regions" saved earlier.
if (all_empty) {
// We've evacuated all generations below us.
if (gch->young_gen()->used() == 0) {
// We've evacuated the young generation.
rs->clear_into_younger(old_gen);
} else {
// Invalidate the cards corresponding to the currently used
@ -157,9 +153,8 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool c
void GenMarkSweep::allocate_stacks() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
// Scratch request on behalf of oldest generation; will do no
// allocation.
ScratchBlock* scratch = gch->gather_scratch(gch->get_gen(gch->_n_gens-1), 0);
// Scratch request on behalf of old generation; will do no allocation.
ScratchBlock* scratch = gch->gather_scratch(gch->old_gen(), 0);
// $$$ To cut a corner, we'll only use the first scratch block, and then
// revert to malloc.
@ -188,7 +183,7 @@ void GenMarkSweep::deallocate_stacks() {
}
void GenMarkSweep::mark_sweep_phase1(int level,
bool clear_all_softrefs) {
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
trace(" 1");
@ -199,7 +194,8 @@ void GenMarkSweep::mark_sweep_phase1(int level,
// use OopsInGenClosure constructor which takes a generation,
// as the Universe has not been created when the static constructors
// are run.
follow_root_closure.set_orig_generation(gch->get_gen(level));
assert(level == 1, "We don't use mark-sweep on young generations");
follow_root_closure.set_orig_generation(gch->old_gen());
// Need new claim bits before marking starts.
ClassLoaderDataGraph::clear_claimed_marks();
@ -287,7 +283,8 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
// use OopsInGenClosure constructor which takes a generation,
// as the Universe has not been created when the static constructors
// are run.
adjust_pointer_closure.set_orig_generation(gch->get_gen(level));
assert(level == 1, "We don't use mark-sweep on young generations.");
adjust_pointer_closure.set_orig_generation(gch->old_gen());
gch->gen_process_roots(level,
false, // Younger gens are not roots.

View File

@ -153,9 +153,8 @@ bool Generation::is_in(const void* p) const {
Generation* Generation::next_gen() const {
GenCollectedHeap* gch = GenCollectedHeap::heap();
int next = level() + 1;
if (next < gch->_n_gens) {
return gch->get_gen(next);
if (level() == 0) {
return gch->old_gen();
} else {
return NULL;
}

View File

@ -353,15 +353,6 @@ void ContiguousSpace::mangle_unused_area() {
void ContiguousSpace::mangle_unused_area_complete() {
mangler()->mangle_unused_area_complete();
}
void ContiguousSpace::mangle_region(MemRegion mr) {
// Although this method uses SpaceMangler::mangle_region() which
// is not specific to a space, the when the ContiguousSpace version
// is called, it is always with regard to a space and this
// bounds checking is appropriate.
MemRegion space_mr(bottom(), end());
assert(space_mr.contains(mr), "Mangling outside space");
SpaceMangler::mangle_region(mr);
}
#endif // NOT_PRODUCT
void CompactibleSpace::initialize(MemRegion mr,
@ -388,7 +379,7 @@ HeapWord* CompactibleSpace::forward(oop q, size_t size,
cp->space->set_compaction_top(compact_top);
cp->space = cp->space->next_compaction_space();
if (cp->space == NULL) {
cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
cp->gen = GenCollectedHeap::heap()->young_gen();
assert(cp->gen != NULL, "compaction must succeed");
cp->space = cp->gen->first_compaction_space();
assert(cp->space != NULL, "generation must have a first compaction space");

View File

@ -128,11 +128,10 @@ class Space: public CHeapObj<mtGC> {
// For detecting GC bugs. Should only be called at GC boundaries, since
// some unused space may be used as scratch space during GC's.
// Default implementation does nothing. We also call this when expanding
// a space to satisfy an allocation request. See bug #4668531
virtual void mangle_unused_area() {}
virtual void mangle_unused_area_complete() {}
virtual void mangle_region(MemRegion mr) {}
// We also call this when expanding a space to satisfy an allocation
// request. See bug #4668531
virtual void mangle_unused_area() = 0;
virtual void mangle_unused_area_complete() = 0;
// Testers
bool is_empty() const { return used() == 0; }
@ -559,8 +558,6 @@ class ContiguousSpace: public CompactibleSpace {
void mangle_unused_area() PRODUCT_RETURN;
// Mangle [top, end)
void mangle_unused_area_complete() PRODUCT_RETURN;
// Mangle the given MemRegion.
void mangle_region(MemRegion mr) PRODUCT_RETURN;
// Do some sparse checking on the area that should have been mangled.
void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;

View File

@ -295,6 +295,12 @@ WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
return hr->is_humongous();
WB_END
WB_ENTRY(jlong, WB_G1NumMaxRegions(JNIEnv* env, jobject o))
G1CollectedHeap* g1 = G1CollectedHeap::heap();
size_t nr = g1->max_regions();
return (jlong)nr;
WB_END
WB_ENTRY(jlong, WB_G1NumFreeRegions(JNIEnv* env, jobject o))
G1CollectedHeap* g1 = G1CollectedHeap::heap();
size_t nr = g1->num_free_regions();
@ -318,6 +324,14 @@ WB_END
WB_ENTRY(jint, WB_G1RegionSize(JNIEnv* env, jobject o))
return (jint)HeapRegion::GrainBytes;
WB_END
WB_ENTRY(jobject, WB_G1AuxiliaryMemoryUsage(JNIEnv* env))
ResourceMark rm(THREAD);
G1CollectedHeap* g1h = G1CollectedHeap::heap();
MemoryUsage usage = g1h->get_auxiliary_data_memory_usage();
Handle h = MemoryService::create_MemoryUsage_obj(usage, CHECK_NULL);
return JNIHandles::make_local(env, h());
WB_END
#endif // INCLUDE_ALL_GCS
#if INCLUDE_NMT
@ -1309,9 +1323,12 @@ static JNINativeMethod methods[] = {
#if INCLUDE_ALL_GCS
{CC"g1InConcurrentMark", CC"()Z", (void*)&WB_G1InConcurrentMark},
{CC"g1IsHumongous", CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous },
{CC"g1NumMaxRegions", CC"()J", (void*)&WB_G1NumMaxRegions },
{CC"g1NumFreeRegions", CC"()J", (void*)&WB_G1NumFreeRegions },
{CC"g1RegionSize", CC"()I", (void*)&WB_G1RegionSize },
{CC"g1StartConcMarkCycle", CC"()Z", (void*)&WB_G1StartMarkCycle },
{CC"g1AuxiliaryMemoryUsage", CC"()Ljava/lang/management/MemoryUsage;",
(void*)&WB_G1AuxiliaryMemoryUsage },
#endif // INCLUDE_ALL_GCS
#if INCLUDE_NMT
{CC"NMTMalloc", CC"(J)J", (void*)&WB_NMTMalloc },

View File

@ -536,7 +536,7 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
nonstatic_field(ContiguousSpace, _concurrent_iteration_safe_limit, HeapWord*) \
nonstatic_field(ContiguousSpace, _saved_mark_word, HeapWord*) \
\
nonstatic_field(DefNewGeneration, _next_gen, Generation*) \
nonstatic_field(DefNewGeneration, _old_gen, Generation*) \
nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \
nonstatic_field(DefNewGeneration, _age_table, ageTable) \
nonstatic_field(DefNewGeneration, _eden_space, ContiguousSpace*) \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -160,8 +160,8 @@ void MemoryService::add_gen_collected_heap_info(GenCollectedHeap* heap) {
_managers_list->append(_minor_gc_manager);
_managers_list->append(_major_gc_manager);
add_generation_memory_pool(heap->get_gen(minor), _major_gc_manager, _minor_gc_manager);
add_generation_memory_pool(heap->get_gen(major), _major_gc_manager);
add_generation_memory_pool(heap->young_gen(), _major_gc_manager, _minor_gc_manager);
add_generation_memory_pool(heap->old_gen(), _major_gc_manager);
}
#if INCLUDE_ALL_GCS

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,13 +57,6 @@ private:
init_code_heap_pools_size = 9
};
// index for minor and major generations
enum {
minor = 0,
major = 1,
n_gens = 2
};
static GrowableArray<MemoryPool*>* _pools_list;
static GrowableArray<MemoryManager*>* _managers_list;

View File

@ -0,0 +1,54 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestDisableExplicitGC
* @requires vm.opt.DisableExplicitGC == null
* @summary Verify GC behavior with DisableExplicitGC flag.
* @library /testlibrary
* @run main/othervm -XX:+PrintGCDetails TestDisableExplicitGC
* @run main/othervm/fail -XX:+DisableExplicitGC -XX:+PrintGCDetails TestDisableExplicitGC
* @run main/othervm -XX:-DisableExplicitGC -XX:+PrintGCDetails TestDisableExplicitGC
*/
import java.lang.management.GarbageCollectorMXBean;
import java.util.List;
import static com.oracle.java.testlibrary.Asserts.*;
public class TestDisableExplicitGC {
public static void main(String[] args) throws InterruptedException {
List<GarbageCollectorMXBean> list = java.lang.management.ManagementFactory.getGarbageCollectorMXBeans();
long collectionCountBefore = getCollectionCount(list);
System.gc();
long collectionCountAfter = getCollectionCount(list);
assertLT(collectionCountBefore, collectionCountAfter);
}
private static long getCollectionCount(List<GarbageCollectorMXBean> list) {
int collectionCount = 0;
for (GarbageCollectorMXBean gcMXBean : list) {
collectionCount += gcMXBean.getCollectionCount();
}
return collectionCount;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -21,7 +21,7 @@
* questions.
*/
import static com.oracle.java.testlibrary.Asserts.assertLessThanOrEqual;
import com.oracle.java.testlibrary.Asserts;
import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.Platform;
import com.oracle.java.testlibrary.ProcessTools;
@ -36,23 +36,29 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import sun.misc.Unsafe;
import sun.misc.Unsafe; // for ADDRESS_SIZE
import sun.hotspot.WhiteBox;
public class TestShrinkAuxiliaryData {
private static final int REGION_SIZE = 1024 * 1024;
private final static String[] initialOpts = new String[]{
"-XX:MinHeapFreeRatio=10",
"-XX:MaxHeapFreeRatio=11",
"-XX:+UseG1GC",
"-XX:G1HeapRegionSize=1m",
"-XX:G1HeapRegionSize=" + REGION_SIZE,
"-XX:-ExplicitGCInvokesConcurrent",
"-XX:+PrintGCDetails"
"-XX:+PrintGCDetails",
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"-Xbootclasspath/a:.",
};
private final int RSetCacheSize;
private final int hotCardTableSize;
protected TestShrinkAuxiliaryData(int RSetCacheSize) {
this.RSetCacheSize = RSetCacheSize;
protected TestShrinkAuxiliaryData(int hotCardTableSize) {
this.hotCardTableSize = hotCardTableSize;
}
protected void test() throws Exception {
@ -60,16 +66,16 @@ public class TestShrinkAuxiliaryData {
Collections.addAll(vmOpts, initialOpts);
int maxCacheSize = Math.max(0, Math.min(31, getMaxCacheSize()));
if (maxCacheSize < RSetCacheSize) {
if (maxCacheSize < hotCardTableSize) {
System.out.format("Skiping test for %d cache size due max cache size %d",
RSetCacheSize, maxCacheSize
hotCardTableSize, maxCacheSize
);
return;
}
printTestInfo(maxCacheSize);
vmOpts.add("-XX:G1ConcRSLogCacheSize=" + RSetCacheSize);
vmOpts.add("-XX:G1ConcRSLogCacheSize=" + hotCardTableSize);
vmOpts.addAll(Arrays.asList(Utils.getTestJavaOpts()));
// for 32 bits ObjectAlignmentInBytes is not a option
@ -92,11 +98,13 @@ public class TestShrinkAuxiliaryData {
private void performTest(List<String> opts) throws Exception {
ProcessBuilder pb
= ProcessTools.createJavaProcessBuilder(
opts.toArray(new String[opts.size()])
);
= ProcessTools.createJavaProcessBuilder(
opts.toArray(new String[opts.size()])
);
OutputAnalyzer output = new OutputAnalyzer(pb.start());
System.out.println(output.getStdout());
System.err.println(output.getStderr());
output.shouldHaveExitValue(0);
}
@ -107,12 +115,13 @@ public class TestShrinkAuxiliaryData {
formatSymbols.setGroupingSeparator(' ');
grouped.setDecimalFormatSymbols(formatSymbols);
System.out.format("Test will use %s bytes of memory of %s available%n"
System.out.format(
"Test will use %s bytes of memory of %s available%n"
+ "Available memory is %s with %d bytes pointer size - can save %s pointers%n"
+ "Max cache size: 2^%d = %s elements%n",
grouped.format(ShrinkAuxiliaryDataTest.getMemoryUsedByTest()),
grouped.format(Runtime.getRuntime().freeMemory()),
grouped.format(Runtime.getRuntime().freeMemory()
grouped.format(Runtime.getRuntime().maxMemory()),
grouped.format(Runtime.getRuntime().maxMemory()
- ShrinkAuxiliaryDataTest.getMemoryUsedByTest()),
Unsafe.ADDRESS_SIZE,
grouped.format((Runtime.getRuntime().freeMemory()
@ -135,6 +144,7 @@ public class TestShrinkAuxiliaryData {
if (availableMemory <= 0) {
return 0;
}
long availablePointersCount = availableMemory / Unsafe.ADDRESS_SIZE;
return (63 - (int) Long.numberOfLeadingZeros(availablePointersCount));
}
@ -142,17 +152,48 @@ public class TestShrinkAuxiliaryData {
static class ShrinkAuxiliaryDataTest {
public static void main(String[] args) throws IOException {
int iterateCount = DEFAULT_ITERATION_COUNT;
if (args.length > 0) {
try {
iterateCount = Integer.parseInt(args[0]);
} catch (NumberFormatException e) {
//num_iterate remains default
}
ShrinkAuxiliaryDataTest testCase = new ShrinkAuxiliaryDataTest();
if (!testCase.checkEnvApplicability()) {
return;
}
new ShrinkAuxiliaryDataTest().test(iterateCount);
testCase.test();
}
/**
* Checks whether this environment is suitable for running this test:
* - there is enough memory to decommit (the page size is not too big)
* - the RSet cache size is not too big
*
* @return true if the test can run, false if it should be skipped
*/
protected boolean checkEnvApplicability() {
int pageSize = WhiteBox.getWhiteBox().getVMPageSize();
System.out.println( "Page size = " + pageSize
+ " region size = " + REGION_SIZE
+ " aux data ~= " + (REGION_SIZE * 3 / 100));
// If the aux data size is less than the page size, it won't be decommitted.
// Auxiliary data size is about 3.6% of the heap size.
if (pageSize >= REGION_SIZE * 3 / 100) {
System.out.format("Skipping test for too large page size = %d",
pageSize
);
return false;
}
if (REGION_SIZE * REGIONS_TO_ALLOCATE > Runtime.getRuntime().maxMemory()) {
System.out.format("Skipping test for too low available memory. "
+ "Need %d, available %d",
REGION_SIZE * REGIONS_TO_ALLOCATE,
Runtime.getRuntime().maxMemory()
);
return false;
}
return true;
}
class GarbageObject {
@ -177,41 +218,54 @@ public class TestShrinkAuxiliaryData {
private final List<GarbageObject> garbage = new ArrayList();
public void test(int num_iterate) throws IOException {
public void test() throws IOException {
MemoryUsage muFull, muFree, muAuxDataFull, muAuxDataFree;
float auxFull, auxFree;
allocate();
link();
mutate();
muFull = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
long numUsedRegions = WhiteBox.getWhiteBox().g1NumMaxRegions()
- WhiteBox.getWhiteBox().g1NumFreeRegions();
muAuxDataFull = WhiteBox.getWhiteBox().g1AuxiliaryMemoryUsage();
auxFull = (float)muAuxDataFull.getUsed() / numUsedRegions;
System.out.format("Full aux data ratio= %f, regions max= %d, used= %d\n",
auxFull, WhiteBox.getWhiteBox().g1NumMaxRegions(), numUsedRegions
);
deallocate();
MemoryUsage muBeforeHeap
= ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
MemoryUsage muBeforeNonHeap
= ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage();
for (int i = 0; i < num_iterate; i++) {
allocate();
link();
mutate();
deallocate();
}
System.gc();
MemoryUsage muAfterHeap
= ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
MemoryUsage muAfterNonHeap
= ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage();
assertLessThanOrEqual(muAfterHeap.getCommitted(), muBeforeHeap.getCommitted(),
String.format("heap decommit failed - after > before: %d > %d",
muAfterHeap.getCommitted(), muBeforeHeap.getCommitted()
muFree = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
muAuxDataFree = WhiteBox.getWhiteBox().g1AuxiliaryMemoryUsage();
numUsedRegions = WhiteBox.getWhiteBox().g1NumMaxRegions()
- WhiteBox.getWhiteBox().g1NumFreeRegions();
auxFree = (float)muAuxDataFree.getUsed() / numUsedRegions;
System.out.format("Free aux data ratio= %f, regions max= %d, used= %d\n",
auxFree, WhiteBox.getWhiteBox().g1NumMaxRegions(), numUsedRegions
);
Asserts.assertLessThanOrEqual(muFree.getCommitted(), muFull.getCommitted(),
String.format("heap decommit failed - full > free: %d > %d",
muFree.getCommitted(), muFull.getCommitted()
)
);
if (muAfterHeap.getCommitted() < muBeforeHeap.getCommitted()) {
assertLessThanOrEqual(muAfterNonHeap.getCommitted(), muBeforeNonHeap.getCommitted(),
String.format("non-heap decommit failed - after > before: %d > %d",
muAfterNonHeap.getCommitted(), muBeforeNonHeap.getCommitted()
System.out.format("State used committed\n");
System.out.format("Full aux data: %10d %10d\n", muAuxDataFull.getUsed(), muAuxDataFull.getCommitted());
System.out.format("Free aux data: %10d %10d\n", muAuxDataFree.getUsed(), muAuxDataFree.getCommitted());
// if the heap was decommitted, check that the aux data ratio did not increase
if (muFree.getCommitted() < muFull.getCommitted()) {
Asserts.assertLessThanOrEqual(auxFree, auxFull,
String.format("auxiliary data decommit failed - full > free: %f > %f",
auxFree, auxFull
)
);
}
@ -238,8 +292,7 @@ public class TestShrinkAuxiliaryData {
for (int i = 0; i < NUM_LINKS; i++) {
int regionToLink;
do {
regionToLink = (int) (Math.random()
* REGIONS_TO_ALLOCATE);
regionToLink = (int) (Math.random() * REGIONS_TO_ALLOCATE);
} while (regionToLink == regionNumber);
// get random garbage object from random region
@ -265,11 +318,8 @@ public class TestShrinkAuxiliaryData {
return REGIONS_TO_ALLOCATE * REGION_SIZE;
}
private static final int REGION_SIZE = 1024 * 1024;
private static final int DEFAULT_ITERATION_COUNT = 1; // iterate main scenario
private static final int REGIONS_TO_ALLOCATE = 5;
private static final int REGIONS_TO_ALLOCATE = 100;
private static final int NUM_OBJECTS_PER_REGION = 10;
private static final int NUM_LINKS = 20; // how many links create for each object
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,11 +23,15 @@
/**
* @test TestShrinkAuxiliaryData00
* @bug 8038423
* @bug 8038423 8061715
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
* @requires vm.gc=="G1" | vm.gc=="null"
* @library /testlibrary /../../test/lib
* @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData00
* @build com.oracle.java.testlibrary.* sun.hotspot.WhiteBox
* TestShrinkAuxiliaryData TestShrinkAuxiliaryData00
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run driver/timeout=720 TestShrinkAuxiliaryData00
*/
public class TestShrinkAuxiliaryData00 {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,12 +23,15 @@
/**
* @test TestShrinkAuxiliaryData05
* @bug 8038423
* @bug 8038423 8061715
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
* @requires vm.gc=="G1" | vm.gc=="null"
* @library /testlibrary /../../test/lib
* @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData05
* @build com.oracle.java.testlibrary.* sun.hotspot.WhiteBox
* TestShrinkAuxiliaryData TestShrinkAuxiliaryData05
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run driver/timeout=720 TestShrinkAuxiliaryData05
*/
public class TestShrinkAuxiliaryData05 {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,12 +23,15 @@
/**
* @test TestShrinkAuxiliaryData10
* @bug 8038423
* @bug 8038423 8061715
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
* @requires vm.gc=="G1" | vm.gc=="null"
* @library /testlibrary /../../test/lib
* @build com.oracle.java.testlibrary.* sun.hotspot.WhiteBox
* @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData10
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run driver/timeout=720 TestShrinkAuxiliaryData10
*/
public class TestShrinkAuxiliaryData10 {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,12 +23,15 @@
/**
* @test TestShrinkAuxiliaryData15
* @bug 8038423
* @bug 8038423 8061715
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
* @requires vm.gc=="G1" | vm.gc=="null"
* @library /testlibrary /../../test/lib
* @build com.oracle.java.testlibrary.* sun.hotspot.WhiteBox
* @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData15
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run driver/timeout=720 TestShrinkAuxiliaryData15
*/
public class TestShrinkAuxiliaryData15 {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,12 +23,15 @@
/**
* @test TestShrinkAuxiliaryData20
* @bug 8038423
* @bug 8038423 8061715
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
* @requires vm.gc=="G1" | vm.gc=="null"
* @library /testlibrary /../../test/lib
* @build com.oracle.java.testlibrary.* sun.hotspot.WhiteBox
* @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData20
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run driver/timeout=720 TestShrinkAuxiliaryData20
*/
public class TestShrinkAuxiliaryData20 {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,12 +23,15 @@
/**
* @test TestShrinkAuxiliaryData25
* @bug 8038423
* @bug 8038423 8061715
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
* @requires vm.gc=="G1" | vm.gc=="null"
* @library /testlibrary /../../test/lib
* @build com.oracle.java.testlibrary.* sun.hotspot.WhiteBox
* @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData25
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run driver/timeout=720 TestShrinkAuxiliaryData25
*/
public class TestShrinkAuxiliaryData25 {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,12 +23,15 @@
/**
* @test TestShrinkAuxiliaryData30
* @bug 8038423
* @bug 8038423 8061715
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
* @requires vm.gc=="G1" | vm.gc=="null"
* @library /testlibrary /../../test/lib
* @build com.oracle.java.testlibrary.* sun.hotspot.WhiteBox
* @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData30
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run driver/timeout=720 TestShrinkAuxiliaryData30
*/
public class TestShrinkAuxiliaryData30 {