8149127: Rename g1/concurrentMarkThread.* to g1/g1ConcurrentMarkThread.*

Reviewed-by: sjohanss, sangheki
Thomas Schatzl 2017-11-06 14:24:31 +01:00
parent c2935fa4a4
commit b59c920e12
11 changed files with 100 additions and 98 deletions

View File

@ -29,14 +29,14 @@
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/g1/bufferingOopClosure.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentG1RefineThread.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
@ -1554,7 +1554,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
_bot(NULL),
_hot_card_cache(NULL),
_g1_rem_set(NULL),
_cg1r(NULL),
_cr(NULL),
_g1mm(NULL),
_preserved_marks_set(true /* in_c_heap */),
_secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
@ -1633,7 +1633,7 @@ G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* des
jint G1CollectedHeap::initialize_concurrent_refinement() {
jint ecode = JNI_OK;
_cg1r = ConcurrentG1Refine::create(&ecode);
_cr = G1ConcurrentRefine::create(&ecode);
return ecode;
}
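
The hunk above keeps the existing factory idiom: initialize_concurrent_refinement() simply forwards the error code that G1ConcurrentRefine::create() reports through an out-parameter instead of throwing. A minimal standalone sketch of that idiom follows (hypothetical Refiner type and error codes, not JDK code):

    #include <new>   // std::nothrow

    enum { SKETCH_OK = 0, SKETCH_ENOMEM = -4 };  // stand-ins for JNI_OK / JNI_ENOMEM

    struct Refiner {
      // Factory returns NULL on failure and reports why through *ecode,
      // mirroring the G1ConcurrentRefine::create(jint* ecode) shape above.
      static Refiner* create(int* ecode) {
        Refiner* r = new (std::nothrow) Refiner();
        *ecode = (r == nullptr) ? SKETCH_ENOMEM : SKETCH_OK;
        return r;
      }
    };

    int initialize_refinement(Refiner** out) {
      int ecode = SKETCH_OK;
      *out = Refiner::create(&ecode);
      return ecode;  // caller aborts initialization if this is not SKETCH_OK
    }
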
@ -1791,8 +1791,8 @@ jint G1CollectedHeap::initialize() {
JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
DirtyCardQ_FL_lock,
(int)concurrent_g1_refine()->yellow_zone(),
(int)concurrent_g1_refine()->red_zone(),
(int)concurrent_refine()->yellow_zone(),
(int)concurrent_refine()->red_zone(),
Shared_DirtyCardQ_lock,
NULL, // fl_owner
true); // init_free_ids
@ -1836,7 +1836,7 @@ void G1CollectedHeap::stop() {
// Stop all concurrent threads. We do this to make sure these threads
// do not continue to execute and access resources (e.g. logging)
// that are destroyed during shutdown.
_cg1r->stop();
_cr->stop();
_cmThread->stop();
if (G1StringDedup::is_enabled()) {
G1StringDedup::stop();
@ -2436,7 +2436,7 @@ void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
_cmThread->print_on(st);
st->cr();
_cm->print_worker_threads_on(st);
_cg1r->print_worker_threads_on(st); // also prints the sample thread
_cr->print_worker_threads_on(st); // also prints the sample thread
if (G1StringDedup::is_enabled()) {
G1StringDedup::print_worker_threads_on(st);
}
@ -2446,7 +2446,7 @@ void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
workers()->threads_do(tc);
tc->do_thread(_cmThread);
_cm->threads_do(tc);
_cg1r->threads_do(tc); // also iterates over the sample thread
_cr->threads_do(tc); // also iterates over the sample thread
if (G1StringDedup::is_enabled()) {
G1StringDedup::threads_do(tc);
}

View File

@ -76,7 +76,7 @@ class G1RemSet;
class HeapRegionRemSetIterator;
class G1ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class G1ConcurrentRefine;
class GenerationCounters;
class STWGCTimer;
class G1NewTracer;
@ -806,7 +806,7 @@ protected:
ConcurrentMarkThread* _cmThread;
// The concurrent refiner.
ConcurrentG1Refine* _cg1r;
G1ConcurrentRefine* _cr;
// The parallel task queues
RefToScanQueueSet *_task_queues;
@ -1389,7 +1389,7 @@ public:
// Refinement
ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
G1ConcurrentRefine* concurrent_refine() const { return _cr; }
// Optimized nmethod scanning support routines

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentG1RefineThread.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
#include "logging/log.hpp"
#include "runtime/java.hpp"
@ -97,7 +97,7 @@ static Thresholds calc_thresholds(size_t green_zone,
size_t yellow_zone,
uint worker_i) {
double yellow_size = yellow_zone - green_zone;
double step = yellow_size / ConcurrentG1Refine::thread_num();
double step = yellow_size / G1ConcurrentRefine::thread_num();
if (worker_i == 0) {
// Potentially activate worker 0 more aggressively, to keep
// available buffers near green_zone value. When yellow_size is
@ -112,7 +112,7 @@ static Thresholds calc_thresholds(size_t green_zone,
green_zone + deactivate_offset);
}
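
To make the threshold math in calc_thresholds() above concrete, here is a small standalone sketch that spreads activation thresholds evenly across the yellow zone, one step per refinement worker. The zone values and worker count are made up, and the special-casing of worker 0 in the real code is omitted:

    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t green_zone  = 100;  // buffers left for the GC pause
      const size_t yellow_zone = 500;
      const unsigned workers   = 4;    // stand-in for G1ConcurrentRefine::thread_num()

      const double step = double(yellow_zone - green_zone) / workers;  // 100.0
      for (unsigned i = 0; i < workers; i++) {
        // Worker i wakes up once the completed-buffer count passes its threshold
        // and parks itself again once the count drops below the lower one.
        size_t activate   = green_zone + size_t(step * (i + 1));
        size_t deactivate = green_zone + size_t(step * i);
        printf("worker %u: activate at %zu, deactivate at %zu buffers\n",
               i, activate, deactivate);
      }
      return 0;
    }
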
ConcurrentG1Refine::ConcurrentG1Refine(size_t green_zone,
G1ConcurrentRefine::G1ConcurrentRefine(size_t green_zone,
size_t yellow_zone,
size_t red_zone,
size_t min_yellow_zone_size) :
@ -129,7 +129,7 @@ ConcurrentG1Refine::ConcurrentG1Refine(size_t green_zone,
static size_t calc_min_yellow_zone_size() {
size_t step = G1ConcRefinementThresholdStep;
uint n_workers = ConcurrentG1Refine::thread_num();
uint n_workers = G1ConcurrentRefine::thread_num();
if ((max_yellow_zone / step) < n_workers) {
return max_yellow_zone;
} else {
@ -169,7 +169,7 @@ static size_t calc_init_red_zone(size_t green, size_t yellow) {
return MIN2(yellow + size, max_red_zone);
}
ConcurrentG1Refine* ConcurrentG1Refine::create(jint* ecode) {
G1ConcurrentRefine* G1ConcurrentRefine::create(jint* ecode) {
size_t min_yellow_zone_size = calc_min_yellow_zone_size();
size_t green_zone = calc_init_green_zone();
size_t yellow_zone = calc_init_yellow_zone(green_zone, min_yellow_zone_size);
@ -182,31 +182,31 @@ ConcurrentG1Refine* ConcurrentG1Refine::create(jint* ecode) {
"min yellow size: " SIZE_FORMAT,
green_zone, yellow_zone, red_zone, min_yellow_zone_size);
ConcurrentG1Refine* cg1r = new ConcurrentG1Refine(green_zone,
yellow_zone,
red_zone,
min_yellow_zone_size);
G1ConcurrentRefine* cr = new G1ConcurrentRefine(green_zone,
yellow_zone,
red_zone,
min_yellow_zone_size);
if (cg1r == NULL) {
if (cr == NULL) {
*ecode = JNI_ENOMEM;
vm_shutdown_during_initialization("Could not create ConcurrentG1Refine");
vm_shutdown_during_initialization("Could not create G1ConcurrentRefine");
return NULL;
}
cg1r->_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(ConcurrentG1RefineThread*, cg1r->_n_worker_threads, mtGC);
if (cg1r->_threads == NULL) {
cr->_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(G1ConcurrentRefineThread*, cr->_n_worker_threads, mtGC);
if (cr->_threads == NULL) {
*ecode = JNI_ENOMEM;
vm_shutdown_during_initialization("Could not allocate an array for ConcurrentG1RefineThread");
vm_shutdown_during_initialization("Could not allocate an array for G1ConcurrentRefineThread");
return NULL;
}
uint worker_id_offset = DirtyCardQueueSet::num_par_ids();
ConcurrentG1RefineThread *next = NULL;
for (uint i = cg1r->_n_worker_threads - 1; i != UINT_MAX; i--) {
G1ConcurrentRefineThread *next = NULL;
for (uint i = cr->_n_worker_threads - 1; i != UINT_MAX; i--) {
Thresholds thresholds = calc_thresholds(green_zone, yellow_zone, i);
ConcurrentG1RefineThread* t =
new ConcurrentG1RefineThread(cg1r,
G1ConcurrentRefineThread* t =
new G1ConcurrentRefineThread(cr,
next,
worker_id_offset,
i,
@ -215,34 +215,34 @@ ConcurrentG1Refine* ConcurrentG1Refine::create(jint* ecode) {
assert(t != NULL, "Conc refine should have been created");
if (t->osthread() == NULL) {
*ecode = JNI_ENOMEM;
vm_shutdown_during_initialization("Could not create ConcurrentG1RefineThread");
vm_shutdown_during_initialization("Could not create G1ConcurrentRefineThread");
return NULL;
}
assert(t->cg1r() == cg1r, "Conc refine thread should refer to this");
cg1r->_threads[i] = t;
assert(t->cr() == cr, "Conc refine thread should refer to this");
cr->_threads[i] = t;
next = t;
}
cg1r->_sample_thread = new G1YoungRemSetSamplingThread();
if (cg1r->_sample_thread->osthread() == NULL) {
cr->_sample_thread = new G1YoungRemSetSamplingThread();
if (cr->_sample_thread->osthread() == NULL) {
*ecode = JNI_ENOMEM;
vm_shutdown_during_initialization("Could not create G1YoungRemSetSamplingThread");
return NULL;
}
*ecode = JNI_OK;
return cg1r;
return cr;
}
void ConcurrentG1Refine::stop() {
void G1ConcurrentRefine::stop() {
for (uint i = 0; i < _n_worker_threads; i++) {
_threads[i]->stop();
}
_sample_thread->stop();
}
void ConcurrentG1Refine::update_thread_thresholds() {
void G1ConcurrentRefine::update_thread_thresholds() {
for (uint i = 0; i < _n_worker_threads; i++) {
Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, i);
_threads[i]->update_thresholds(activation_level(thresholds),
@ -250,31 +250,31 @@ void ConcurrentG1Refine::update_thread_thresholds() {
}
}
ConcurrentG1Refine::~ConcurrentG1Refine() {
G1ConcurrentRefine::~G1ConcurrentRefine() {
for (uint i = 0; i < _n_worker_threads; i++) {
delete _threads[i];
}
FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
FREE_C_HEAP_ARRAY(G1ConcurrentRefineThread*, _threads);
delete _sample_thread;
}
void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
void G1ConcurrentRefine::threads_do(ThreadClosure *tc) {
worker_threads_do(tc);
tc->do_thread(_sample_thread);
}
void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) {
void G1ConcurrentRefine::worker_threads_do(ThreadClosure * tc) {
for (uint i = 0; i < _n_worker_threads; i++) {
tc->do_thread(_threads[i]);
}
}
uint ConcurrentG1Refine::thread_num() {
uint G1ConcurrentRefine::thread_num() {
return G1ConcRefinementThreads;
}
void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
void G1ConcurrentRefine::print_worker_threads_on(outputStream* st) const {
for (uint i = 0; i < _n_worker_threads; ++i) {
_threads[i]->print_on(st);
st->cr();
@ -312,7 +312,7 @@ static size_t calc_new_red_zone(size_t green, size_t yellow) {
return MIN2(yellow + (yellow - green), max_red_zone);
}
void ConcurrentG1Refine::update_zones(double update_rs_time,
void G1ConcurrentRefine::update_zones(double update_rs_time,
size_t update_rs_processed_buffers,
double goal_ms) {
log_trace( CTRL_TAGS )("Updating Refinement Zones: "
@ -338,7 +338,7 @@ void ConcurrentG1Refine::update_zones(double update_rs_time,
_green_zone, _yellow_zone, _red_zone);
}
void ConcurrentG1Refine::adjust(double update_rs_time,
void G1ConcurrentRefine::adjust(double update_rs_time,
size_t update_rs_processed_buffers,
double goal_ms) {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,23 +22,23 @@
*
*/
#ifndef SHARE_VM_GC_G1_CONCURRENTG1REFINE_HPP
#define SHARE_VM_GC_G1_CONCURRENTG1REFINE_HPP
#ifndef SHARE_VM_GC_G1_G1CONCURRENTREFINE_HPP
#define SHARE_VM_GC_G1_G1CONCURRENTREFINE_HPP
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
// Forward decl
class CardTableEntryClosure;
class ConcurrentG1RefineThread;
class G1ConcurrentRefineThread;
class G1YoungRemSetSamplingThread;
class outputStream;
class ThreadClosure;
class ConcurrentG1Refine: public CHeapObj<mtGC> {
class G1ConcurrentRefine : public CHeapObj<mtGC> {
G1YoungRemSetSamplingThread* _sample_thread;
ConcurrentG1RefineThread** _threads;
G1ConcurrentRefineThread** _threads;
uint _n_worker_threads;
/*
* The value of the update buffer queue length falls into one of 3 zones:
@ -62,7 +62,7 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
size_t _red_zone;
size_t _min_yellow_zone_size;
ConcurrentG1Refine(size_t green_zone,
G1ConcurrentRefine(size_t green_zone,
size_t yellow_zone,
size_t red_zone,
size_t min_yellow_zone_size);
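
The class comment above introduces the three refinement zones. A hedged, standalone sketch of how a completed-buffer count maps onto that model follows; the behaviour per zone is simplified and the authoritative logic is the refinement code itself:

    #include <cstddef>

    struct Decision {
      bool refine_concurrently;  // at least one refinement worker active
      bool all_workers_active;   // every refinement worker active
      bool mutators_help;        // mutator threads also process buffers
    };

    // Roughly: below the green zone buffers are left for the GC pause,
    // inside the yellow band workers are progressively activated, and
    // above the red zone mutator threads must help with refinement.
    Decision decide(size_t completed_buffers,
                    size_t green_zone, size_t yellow_zone, size_t red_zone) {
      Decision d;
      d.refine_concurrently = completed_buffers > green_zone;
      d.all_workers_active  = completed_buffers > yellow_zone;
      d.mutators_help       = completed_buffers > red_zone;
      return d;
    }
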
@ -76,11 +76,11 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
void update_thread_thresholds();
public:
~ConcurrentG1Refine();
~G1ConcurrentRefine();
// Returns ConcurrentG1Refine instance if succeeded to create/initialize ConcurrentG1Refine and ConcurrentG1RefineThread.
// Returns a G1ConcurrentRefine instance if succeeded to create/initialize G1ConcurrentRefine and G1ConcurrentRefineThreads.
// Otherwise, returns NULL with error code.
static ConcurrentG1Refine* create(jint* ecode);
static G1ConcurrentRefine* create(jint* ecode);
void stop();
@ -104,4 +104,4 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
size_t red_zone() const { return _red_zone; }
};
#endif // SHARE_VM_GC_G1_CONCURRENTG1REFINE_HPP
#endif // SHARE_VM_GC_G1_G1CONCURRENTREFINE_HPP

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentG1RefineThread.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
@ -33,17 +33,19 @@
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
ConcurrentG1RefineThread::
ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *next,
uint worker_id_offset, uint worker_id,
size_t activate, size_t deactivate) :
G1ConcurrentRefineThread::G1ConcurrentRefineThread(G1ConcurrentRefine* cr,
G1ConcurrentRefineThread *next,
uint worker_id_offset,
uint worker_id,
size_t activate,
size_t deactivate) :
ConcurrentGCThread(),
_worker_id_offset(worker_id_offset),
_worker_id(worker_id),
_active(false),
_next(next),
_monitor(NULL),
_cg1r(cg1r),
_cr(cr),
_vtime_accum(0.0),
_activation_threshold(activate),
_deactivation_threshold(deactivate)
@ -65,26 +67,26 @@ ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *nex
create_and_start();
}
void ConcurrentG1RefineThread::update_thresholds(size_t activate,
void G1ConcurrentRefineThread::update_thresholds(size_t activate,
size_t deactivate) {
assert(deactivate < activate, "precondition");
_activation_threshold = activate;
_deactivation_threshold = deactivate;
}
void ConcurrentG1RefineThread::wait_for_completed_buffers() {
void G1ConcurrentRefineThread::wait_for_completed_buffers() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
while (!should_terminate() && !is_active()) {
_monitor->wait(Mutex::_no_safepoint_check_flag);
}
}
bool ConcurrentG1RefineThread::is_active() {
bool G1ConcurrentRefineThread::is_active() {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
return is_primary() ? dcqs.process_completed_buffers() : _active;
}
void ConcurrentG1RefineThread::activate() {
void G1ConcurrentRefineThread::activate() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
if (!is_primary()) {
set_active(true);
@ -95,7 +97,7 @@ void ConcurrentG1RefineThread::activate() {
_monitor->notify();
}
void ConcurrentG1RefineThread::deactivate() {
void G1ConcurrentRefineThread::deactivate() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
if (!is_primary()) {
set_active(false);
@ -105,7 +107,7 @@ void ConcurrentG1RefineThread::deactivate() {
}
}
void ConcurrentG1RefineThread::run_service() {
void G1ConcurrentRefineThread::run_service() {
_vtime_start = os::elapsedVTime();
while (!should_terminate()) {
@ -132,7 +134,7 @@ void ConcurrentG1RefineThread::run_service() {
size_t curr_buffer_num = dcqs.completed_buffers_num();
// If the number of the buffers falls down into the yellow zone,
// that means that the transition period after the evacuation pause has ended.
if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cg1r()->yellow_zone()) {
if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cr()->yellow_zone()) {
dcqs.set_completed_queue_padding(0);
}
@ -168,7 +170,7 @@ void ConcurrentG1RefineThread::run_service() {
log_debug(gc, refine)("Stopping %d", _worker_id);
}
void ConcurrentG1RefineThread::stop_service() {
void G1ConcurrentRefineThread::stop_service() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
_monitor->notify();
}
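
The activate() / deactivate() / wait_for_completed_buffers() / stop_service() methods above form a classic monitor-gated worker. A minimal portable sketch of the same shape using standard C++ primitives instead of HotSpot's Monitor (illustrative only, not how HotSpot implements it):

    #include <condition_variable>
    #include <mutex>

    class RefineWorker {
      std::mutex              _mutex;
      std::condition_variable _cv;
      bool _active = false;
      bool _should_terminate = false;

    public:
      // Counterpart of wait_for_completed_buffers(): block until activated
      // or asked to terminate.
      void wait_for_work() {
        std::unique_lock<std::mutex> lock(_mutex);
        _cv.wait(lock, [this] { return _active || _should_terminate; });
      }

      // Counterpart of activate(): mark active and wake the worker.
      void activate() {
        { std::lock_guard<std::mutex> lock(_mutex); _active = true; }
        _cv.notify_one();
      }

      // Counterpart of deactivate(): the worker parks itself again.
      void deactivate() {
        std::lock_guard<std::mutex> lock(_mutex);
        _active = false;
      }

      // Counterpart of stop_service(): request termination and wake the worker.
      void stop() {
        { std::lock_guard<std::mutex> lock(_mutex); _should_terminate = true; }
        _cv.notify_one();
      }
    };
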

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,19 +22,19 @@
*
*/
#ifndef SHARE_VM_GC_G1_CONCURRENTG1REFINETHREAD_HPP
#define SHARE_VM_GC_G1_CONCURRENTG1REFINETHREAD_HPP
#ifndef SHARE_VM_GC_G1_G1CONCURRENTREFINETHREAD_HPP
#define SHARE_VM_GC_G1_G1CONCURRENTREFINETHREAD_HPP
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/shared/concurrentGCThread.hpp"
// Forward Decl.
class CardTableEntryClosure;
class ConcurrentG1Refine;
class G1ConcurrentRefine;
// One or more G1 Concurrent Refinement Threads may be active if concurrent
// refinement is in progress.
class ConcurrentG1RefineThread: public ConcurrentGCThread {
class G1ConcurrentRefineThread: public ConcurrentGCThread {
friend class VMStructs;
friend class G1CollectedHeap;
@ -47,9 +47,9 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
// when the number of the rset update buffer crosses a certain threshold. A successor
// would self-deactivate when the number of the buffers falls below the threshold.
bool _active;
ConcurrentG1RefineThread* _next;
G1ConcurrentRefineThread* _next;
Monitor* _monitor;
ConcurrentG1Refine* _cg1r;
G1ConcurrentRefine* _cr;
// This thread's activation/deactivation thresholds
size_t _activation_threshold;
@ -69,7 +69,7 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
public:
// Constructor
ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
G1ConcurrentRefineThread(G1ConcurrentRefine* cr, G1ConcurrentRefineThread* next,
uint worker_id_offset, uint worker_id,
size_t activate, size_t deactivate);
@ -79,7 +79,7 @@ public:
// Total virtual time so far.
double vtime_accum() { return _vtime_accum; }
ConcurrentG1Refine* cg1r() { return _cg1r; }
G1ConcurrentRefine* cr() { return _cr; }
};
#endif // SHARE_VM_GC_G1_CONCURRENTG1REFINETHREAD_HPP
#endif // SHARE_VM_GC_G1_G1CONCURRENTREFINETHREAD_HPP

View File

@ -23,12 +23,12 @@
*/
#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1DefaultPolicy.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1IHOPControl.hpp"
@ -745,7 +745,7 @@ void G1DefaultPolicy::record_collection_pause_end(double pause_time_ms, size_t c
} else {
update_rs_time_goal_ms -= scan_hcc_time_ms;
}
_g1->concurrent_g1_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
_g1->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
update_rs_time_goal_ms);
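
The call above feeds the measured Update RS time, the number of buffers processed, and the per-pause time goal back into the refiner, which resizes its zones from them. A generic, heavily simplified proportional-feedback sketch of that idea; the real policy lives in G1ConcurrentRefine::update_zones and differs in detail:

    #include <cstddef>
    #include <algorithm>

    // Hypothetical feedback step: if the pause-time Update RS work overran its
    // goal, shrink the green zone so more buffers get refined concurrently;
    // if it met the goal, grow it and leave more work for the pause.
    size_t adjust_green_zone(size_t green_zone, double update_rs_time_ms,
                             double goal_ms, size_t min_green, size_t max_green) {
      const double grow = 1.1, shrink = 0.9;  // illustrative factors only
      double scaled = (update_rs_time_ms > goal_ms) ? green_zone * shrink
                                                    : green_zone * grow;
      return std::min(max_green, std::max(min_green, static_cast<size_t>(scaled)));
    }
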

View File

@ -23,10 +23,10 @@
*/
#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1FromCardCache.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
@ -298,7 +298,7 @@ G1RemSet::~G1RemSet() {
}
uint G1RemSet::num_par_rem_sets() {
return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
return MAX2(DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::thread_num(), ParallelGCThreads);
}
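
To illustrate the MAX2 expression above with made-up numbers: with, say, 8 parallel ids reserved for mutator-side dirty card queues, 4 concurrent refinement threads, and ParallelGCThreads set to 10, num_par_rem_sets() returns MAX2(8 + 4, 10) = 12, so every thread that may touch a remembered set in parallel has its own slot.
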
void G1RemSet::initialize(size_t capacity, uint max_regions) {

View File

@ -23,9 +23,9 @@
*/
#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentG1RefineThread.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/g1RemSetSummary.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
@ -45,7 +45,7 @@ public:
}
virtual void do_thread(Thread* t) {
ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
G1ConcurrentRefineThread* crt = (G1ConcurrentRefineThread*) t;
_summary->set_rs_thread_vtime(_counter, crt->vtime_accum());
_counter++;
}
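
GetRSThreadVTimeClosure above is the usual HotSpot visitor shape: worker_threads_do() hands every refinement thread to a ThreadClosure. A standalone sketch of that pattern with plain C++ types (hypothetical names, not the HotSpot classes):

    #include <vector>

    struct Thread { double vtime; };

    // Visitor interface, in the spirit of HotSpot's ThreadClosure.
    struct ThreadVisitor {
      virtual void do_thread(Thread* t) = 0;
      virtual ~ThreadVisitor() {}
    };

    // Collects per-thread virtual times, like GetRSThreadVTimeClosure above.
    struct CollectVTimes : ThreadVisitor {
      std::vector<double> vtimes;
      void do_thread(Thread* t) override { vtimes.push_back(t->vtime); }
    };

    // Counterpart of worker_threads_do(): apply the visitor to each worker.
    void worker_threads_do(std::vector<Thread>& workers, ThreadVisitor* tv) {
      for (Thread& t : workers) tv->do_thread(&t);
    }
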
@ -59,12 +59,12 @@ void G1RemSetSummary::update() {
_num_coarsenings = HeapRegionRemSet::n_coarsenings();
ConcurrentG1Refine * cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
G1ConcurrentRefine * cr = G1CollectedHeap::heap()->concurrent_refine();
if (_rs_threads_vtimes != NULL) {
GetRSThreadVTimeClosure p(this);
cg1r->worker_threads_do(&p);
cr->worker_threads_do(&p);
}
set_sampling_thread_vtime(cg1r->sampling_thread()->vtime_accum());
set_sampling_thread_vtime(cr->sampling_thread()->vtime_accum());
}
void G1RemSetSummary::set_rs_thread_vtime(uint thread, double value) {
@ -85,7 +85,7 @@ G1RemSetSummary::G1RemSetSummary() :
_num_processed_buf_mutator(0),
_num_processed_buf_rs_threads(0),
_num_coarsenings(0),
_num_vtimes(ConcurrentG1Refine::thread_num()),
_num_vtimes(G1ConcurrentRefine::thread_num()),
_rs_threads_vtimes(NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC)),
_sampling_thread_vtime(0.0f) {
@ -98,7 +98,7 @@ G1RemSetSummary::G1RemSetSummary(G1RemSet* rem_set) :
_num_processed_buf_mutator(0),
_num_processed_buf_rs_threads(0),
_num_coarsenings(0),
_num_vtimes(ConcurrentG1Refine::thread_num()),
_num_vtimes(G1ConcurrentRefine::thread_num()),
_rs_threads_vtimes(NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC)),
_sampling_thread_vtime(0.0f) {
update();

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"

View File

@ -23,9 +23,9 @@
*/
#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1CardLiveData.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"