Merge
commit 000c4d9734

Changed paths: hotspot/make, hotspot/src/share/vm/{classfile,
gc_implementation/g1, gc_implementation/parallelScavenge,
gc_implementation/shared, memory, oops, prims, runtime}
hotspot/make/excludeSrc.make
@@ -86,7 +86,7 @@ ifeq ($(INCLUDE_ALL_GCS), false)
 	concurrentMark.cpp concurrentMarkThread.cpp dirtyCardQueue.cpp g1AllocRegion.cpp \
 	g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \
 	g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
-	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
+	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp g1OopClosures.cpp \
 	g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
 	g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
 	ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
hotspot/src/share/vm/classfile/classLoaderData.cpp
@@ -73,7 +73,11 @@ ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
 
 ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
   _class_loader(h_class_loader()),
-  _is_anonymous(is_anonymous), _keep_alive(is_anonymous), // initially
+  _is_anonymous(is_anonymous),
+  // An anonymous class loader data doesn't have anything to keep
+  // it from being unloaded during parsing of the anonymous class.
+  // The null-class-loader should always be kept alive.
+  _keep_alive(is_anonymous || h_class_loader.is_null()),
   _metaspace(NULL), _unloading(false), _klasses(NULL),
   _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL),
   _next(NULL), _dependencies(dependencies),
@@ -317,11 +321,15 @@ void ClassLoaderData::unload() {
   }
 }
 
+oop ClassLoaderData::keep_alive_object() const {
+  assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
+  return is_anonymous() ? _klasses->java_mirror() : class_loader();
+}
+
 bool ClassLoaderData::is_alive(BoolObjectClosure* is_alive_closure) const {
-  bool alive =
-    is_anonymous() ?
-      is_alive_closure->do_object_b(_klasses->java_mirror()) :
-      class_loader() == NULL || is_alive_closure->do_object_b(class_loader());
+  bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
+      || is_alive_closure->do_object_b(keep_alive_object());
 
   assert(!alive || claimed(), "must be claimed");
   return alive;
 }
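Note: the hunks above replace the two-branch liveness test with a single
keep_alive_object() accessor naming the one oop that decides a CLD's fate
(the first class's mirror for anonymous CLDs, the loader oop otherwise),
and fold the null-loader case into _keep_alive. A minimal standalone sketch
of that pattern — Oop and LoaderData are invented stand-ins, not HotSpot code:

    #include <cassert>

    struct Oop { bool marked; };                  // stand-in for a heap oop

    struct LoaderData {                           // stand-in for ClassLoaderData
      Oop* loader;                                // the loader oop, if any
      Oop* mirror;                                // first class's mirror (anonymous case)
      bool anonymous;
      bool pinned;                                // models _keep_alive

      // The single oop whose reachability decides this CLD's fate.
      Oop* keep_alive_object() const {
        assert(!pinned && "don't ask for CLDs that are artificially kept alive");
        return anonymous ? mirror : loader;
      }

      template <typename IsAlive>                 // IsAlive: bool(Oop*)
      bool is_alive(IsAlive is_alive_closure) const {
        // Pinned CLDs (boot loader, anonymous CLD still being parsed) are
        // unconditionally live; everyone else follows one keep-alive object.
        return pinned || is_alive_closure(keep_alive_object());
      }
    };

    int main() {
      Oop loader_oop{false};
      LoaderData cld{&loader_oop, nullptr, false, false};
      auto is_alive = [](Oop* o) { return o->marked; };
      assert(!cld.is_alive(is_alive));
      loader_oop.marked = true;
      assert(cld.is_alive(is_alive));
    }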
@@ -598,8 +606,6 @@ void ClassLoaderDataGraph::keep_alive_oops_do(OopClosure* f, KlassClosure* klass
 
 void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
   if (ClassUnloading) {
-    ClassLoaderData::the_null_class_loader_data()->oops_do(f, klass_closure, must_claim);
-    // keep any special CLDs alive.
     ClassLoaderDataGraph::keep_alive_oops_do(f, klass_closure, must_claim);
   } else {
     ClassLoaderDataGraph::oops_do(f, klass_closure, must_claim);
@@ -705,7 +711,7 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
   bool has_redefined_a_class = JvmtiExport::has_redefined_a_class();
   MetadataOnStackMark md_on_stack;
   while (data != NULL) {
-    if (data->keep_alive() || data->is_alive(is_alive_closure)) {
+    if (data->is_alive(is_alive_closure)) {
       if (has_redefined_a_class) {
         data->classes_do(InstanceKlass::purge_previous_versions);
       }
hotspot/src/share/vm/classfile/classLoaderData.hpp
@@ -139,7 +139,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
                            // classes in the class loader are allocated.
   Mutex* _metaspace_lock;  // Locks the metaspace for allocations and setup.
   bool _unloading;         // true if this class loader goes away
-  bool _keep_alive;        // if this CLD can be unloaded for anonymous loaders
+  bool _keep_alive;        // if this CLD is kept alive without a keep_alive_object().
   bool _is_anonymous;      // if this CLD is for an anonymous class
   volatile int _claimed;   // true if claimed, for example during GC traces.
                            // To avoid applying oop closure more than once.
@@ -230,13 +230,16 @@ class ClassLoaderData : public CHeapObj<mtClass> {
 
   oop class_loader() const { return _class_loader; }
 
+  // The object the GC is using to keep this ClassLoaderData alive.
+  oop keep_alive_object() const;
+
   // Returns true if this class loader data is for a loader going away.
   bool is_unloading() const {
     assert(!(is_the_null_class_loader_data() && _unloading), "The null class loader can never be unloaded");
     return _unloading;
   }
-  // Anonymous class loader data doesn't have anything to keep them from
-  // being unloaded during parsing the anonymous class.
 
+  // Used to make sure that this CLD is not unloaded.
   void set_keep_alive(bool value) { _keep_alive = value; }
 
   unsigned int identity_hash() {
hotspot/src/share/vm/classfile/javaClasses.cpp
@@ -461,12 +461,11 @@ bool java_lang_String::equals(oop str1, oop str2) {
   return true;
 }
 
-void java_lang_String::print(Handle java_string, outputStream* st) {
-  oop obj = java_string();
-  assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string");
-  typeArrayOop value  = java_lang_String::value(obj);
-  int          offset = java_lang_String::offset(obj);
-  int          length = java_lang_String::length(obj);
+void java_lang_String::print(oop java_string, outputStream* st) {
+  assert(java_string->klass() == SystemDictionary::String_klass(), "must be java_string");
+  typeArrayOop value  = java_lang_String::value(java_string);
+  int          offset = java_lang_String::offset(java_string);
+  int          length = java_lang_String::length(java_string);
 
   int end = MIN2(length, 100);
   if (value == NULL) {
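Note: print() now takes a raw oop rather than a Handle, presumably because a
Handle only buys safety across points where the collector can move objects,
and a leaf printing routine that performs no allocation has no such point. A
toy sketch of the two shapes — Obj and ObjHandle are invented stand-ins:

    #include <iostream>
    #include <string>

    // A "handle" is an extra indirection that stays valid across object
    // motion; a raw pointer is cheaper but must not live across a GC.
    struct Obj { std::string payload; };
    struct ObjHandle { Obj** slot; Obj* operator()() const { return *slot; } };

    // Old shape: took a handle and immediately unwrapped it.
    void print_via_handle(ObjHandle h) { std::cout << h()->payload << '\n'; }

    // New shape: a leaf routine that cannot trigger GC takes the raw pointer.
    void print_raw(const Obj* o) { std::cout << o->payload << '\n'; }

    int main() {
      Obj s{"hello"};
      Obj* root = &s;
      print_via_handle(ObjHandle{&root});
      print_raw(root);
    }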
hotspot/src/share/vm/classfile/javaClasses.hpp
@@ -198,7 +198,7 @@ class java_lang_String : AllStatic {
   }
 
   // Debugging
-  static void print(Handle java_string, outputStream* st);
+  static void print(oop java_string, outputStream* st);
   friend class JavaClasses;
 };
 
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -4534,7 +4534,7 @@ HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
   ParGCAllocBuffer(gclab_word_size), _retired(false) { }
 
-G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
+G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
   : _g1h(g1h),
     _refs(g1h->task_queue(queue_num)),
     _dcq(&g1h->dirty_card_queue_set()),
@@ -4544,7 +4544,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
     _term_attempts(0),
     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
-    _age_table(false),
+    _age_table(false), _scanner(g1h, this, rp),
     _strong_roots_time(0), _term_time(0),
     _alloc_buffer_waste(0), _undo_waste(0) {
   // we allocate G1YoungSurvRateNumRegions plus one entries, since
@@ -4653,14 +4653,10 @@ void G1ParScanThreadState::trim_queue() {
 
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
                                      G1ParScanThreadState* par_scan_state) :
-  _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
-  _par_scan_state(par_scan_state),
-  _worker_id(par_scan_state->queue_num()),
-  _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
-  _mark_in_progress(_g1->mark_in_progress()) { }
+  _g1(g1), _par_scan_state(par_scan_state),
+  _worker_id(par_scan_state->queue_num()) { }
 
-template <G1Barrier barrier, bool do_mark_object>
-void G1ParCopyClosure<barrier, do_mark_object>::mark_object(oop obj) {
+void G1ParCopyHelper::mark_object(oop obj) {
 #ifdef ASSERT
   HeapRegion* hr = _g1->heap_region_containing(obj);
   assert(hr != NULL, "sanity");
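Note: the per-thread scan closure now lives in G1ParScanThreadState (see the
new _scanner(g1h, this, rp) initializer above), so G1ParClosureSuper shrinks
to the heap pointer, the state pointer, and the worker id. A standalone
sketch of that ownership move, with invented Heap/Scanner/ThreadState types:

    #include <cstdio>

    struct Heap;  // forward declaration, stands in for G1CollectedHeap

    // The per-thread state owns the scan closure, so the copy/scan closures
    // no longer cache policy and marking fields themselves.
    struct Scanner {
      Heap* heap;
      struct ThreadState* state;
    };

    struct ThreadState {
      Heap* heap;
      unsigned queue_num;
      Scanner scanner;   // previously a member of each copy closure
      ThreadState(Heap* h, unsigned q) : heap(h), queue_num(q), scanner{h, this} {}
    };

    // Closures shrink to the fields they really need.
    struct ClosureSuper {
      Heap* heap; ThreadState* state; unsigned worker_id;
      ClosureSuper(Heap* h, ThreadState* s)
          : heap(h), state(s), worker_id(s->queue_num) {}
    };

    int main() {
      ThreadState ts(nullptr, 7);
      ClosureSuper c(nullptr, &ts);
      std::printf("worker %u\n", c.worker_id);
    }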
@@ -4671,9 +4667,7 @@ void G1ParCopyClosure<barrier, do_mark_object>::mark_object(oop obj) {
   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
 }
 
-template <G1Barrier barrier, bool do_mark_object>
-void G1ParCopyClosure<barrier, do_mark_object>
-  ::mark_forwarded_object(oop from_obj, oop to_obj) {
+void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
 #ifdef ASSERT
   assert(from_obj->is_forwarded(), "from obj should be forwarded");
   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
@@ -4695,27 +4689,25 @@ void G1ParCopyClosure<barrier, do_mark_object>
   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
 }
 
-template <G1Barrier barrier, bool do_mark_object>
-oop G1ParCopyClosure<barrier, do_mark_object>
-  ::copy_to_survivor_space(oop old) {
+oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
   size_t word_sz = old->size();
-  HeapRegion* from_region = _g1->heap_region_containing_raw(old);
+  HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
   // +1 to make the -1 indexes valid...
   int young_index = from_region->young_index_in_cset()+1;
   assert( (from_region->is_young() && young_index >  0) ||
          (!from_region->is_young() && young_index == 0), "invariant" );
-  G1CollectorPolicy* g1p = _g1->g1_policy();
+  G1CollectorPolicy* g1p = _g1h->g1_policy();
   markOop m = old->mark();
   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
                                            : m->age();
   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
                                                              word_sz);
-  HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
+  HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
 #ifndef PRODUCT
   // Should this evacuation fail?
-  if (_g1->evacuation_should_fail()) {
+  if (_g1h->evacuation_should_fail()) {
     if (obj_ptr != NULL) {
-      _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
+      undo_allocation(alloc_purpose, obj_ptr, word_sz);
       obj_ptr = NULL;
     }
   }
@@ -4724,7 +4716,7 @@ oop G1ParCopyClosure<barrier, do_mark_object>
   if (obj_ptr == NULL) {
     // This will either forward-to-self, or detect that someone else has
    // installed a forwarding pointer.
-    return _g1->handle_evacuation_failure_par(_par_scan_state, old);
+    return _g1h->handle_evacuation_failure_par(this, old);
   }
 
   oop obj = oop(obj_ptr);
@@ -4757,12 +4749,12 @@ oop G1ParCopyClosure<barrier, do_mark_object>
       m = m->incr_age();
       obj->set_mark(m);
     }
-    _par_scan_state->age_table()->add(obj, word_sz);
+    age_table()->add(obj, word_sz);
   } else {
     obj->set_mark(m);
   }
 
-  size_t* surv_young_words = _par_scan_state->surviving_young_words();
+  size_t* surv_young_words = surviving_young_words();
   surv_young_words[young_index] += word_sz;
 
   if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
@@ -4771,15 +4763,15 @@ oop G1ParCopyClosure<barrier, do_mark_object>
       // length field of the from-space object.
       arrayOop(obj)->set_length(0);
       oop* old_p = set_partial_array_mask(old);
-      _par_scan_state->push_on_queue(old_p);
+      push_on_queue(old_p);
     } else {
       // No point in using the slower heap_region_containing() method,
       // given that we know obj is in the heap.
-      _scanner.set_region(_g1->heap_region_containing_raw(obj));
+      _scanner.set_region(_g1h->heap_region_containing_raw(obj));
       obj->oop_iterate_backwards(&_scanner);
     }
   } else {
-    _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
+    undo_allocation(alloc_purpose, obj_ptr, word_sz);
     obj = forward_ptr;
   }
   return obj;
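Note: with copy_to_survivor_space() moved from the copy closure into
G1ParScanThreadState, every "_par_scan_state->x(...)" in its body collapses
to a direct member call "x(...)" and "_g1" becomes the state's own "_g1h".
Schematic only — ScanState and its members are invented, not the real API:

    #include <cstddef>
    #include <cstdlib>

    struct ScanState {
      void* allocate(std::size_t words) { return std::malloc(words * 8); }
      void undo_allocation(void* p, std::size_t) { std::free(p); }

      // After the move, helpers are reached without indirection.
      void* copy_to_survivor_space(std::size_t words, bool should_fail) {
        void* p = allocate(words);
        if (should_fail && p != nullptr) {   // models evacuation failure
          undo_allocation(p, words);
          p = nullptr;
        }
        return p;                            // caller handles nullptr
      }
    };

    int main() {
      ScanState s;
      void* p = s.copy_to_survivor_space(4, false);
      s.undo_allocation(p, 4);
    }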
@@ -4794,19 +4786,23 @@ void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
 
 template <G1Barrier barrier, bool do_mark_object>
 template <class T>
-void G1ParCopyClosure<barrier, do_mark_object>
-::do_oop_work(T* p) {
-  oop obj = oopDesc::load_decode_heap_oop(p);
+void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+
+  if (oopDesc::is_null(heap_oop)) {
+    return;
+  }
+
+  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
 
   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
 
-  // here the null check is implicit in the cset_fast_test() test
   if (_g1->in_cset_fast_test(obj)) {
     oop forwardee;
     if (obj->is_forwarded()) {
       forwardee = obj->forwardee();
     } else {
-      forwardee = copy_to_survivor_space(obj);
+      forwardee = _par_scan_state->copy_to_survivor_space(obj);
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
     oopDesc::encode_store_heap_oop(p, forwardee);
@@ -4823,12 +4819,12 @@ void G1ParCopyClosure<barrier, do_mark_object>
     // The object is not in collection set. If we're a root scanning
     // closure during an initial mark pause (i.e. do_mark_object will
     // be true) then attempt to mark the object.
-    if (do_mark_object && _g1->is_in_g1_reserved(obj)) {
+    if (do_mark_object) {
      mark_object(obj);
     }
   }
 
-  if (barrier == G1BarrierEvac && obj != NULL) {
+  if (barrier == G1BarrierEvac) {
     _par_scan_state->update_rs(_from, p, _worker_id);
   }
 }
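Note: the rewritten do_oop_work() loads the (possibly narrow) field once,
tests it for null, and only decodes when non-null — which is why the later
barrier and marking paths can drop their "obj != NULL" checks. A
self-contained sketch of that load/test/decode order, using an invented
32-bit narrow-oop encoding (value 0 is null, otherwise base + (v << shift)):

    #include <cassert>
    #include <cstdint>

    static std::uintptr_t heap_base = 0x100000;

    using NarrowOop = std::uint32_t;

    inline bool is_null(NarrowOop v) { return v == 0; }

    inline void* decode_not_null(NarrowOop v) {
      assert(!is_null(v));
      return reinterpret_cast<void*>(heap_base + (std::uintptr_t(v) << 3));
    }

    void* load_and_decode(const NarrowOop* p) {
      NarrowOop heap_oop = *p;          // single load of the raw field
      if (is_null(heap_oop)) {
        return nullptr;                 // early return, no decode on null path
      }
      return decode_not_null(heap_oop); // decode only when known non-null
    }

    int main() {
      NarrowOop a = 0, b = 42;
      assert(load_and_decode(&a) == nullptr);
      assert(load_and_decode(&b) != nullptr);
    }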
@@ -5025,7 +5021,7 @@ public:
 
     ReferenceProcessor*             rp = _g1h->ref_processor_stw();
 
-    G1ParScanThreadState            pss(_g1h, worker_id);
+    G1ParScanThreadState            pss(_g1h, worker_id, rp);
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
@@ -5456,7 +5452,7 @@ public:
 
     G1STWIsAliveClosure is_alive(_g1h);
 
-    G1ParScanThreadState pss(_g1h, worker_id);
+    G1ParScanThreadState pss(_g1h, worker_id, NULL);
 
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
@@ -5568,7 +5564,7 @@ public:
     ResourceMark rm;
     HandleMark   hm;
 
-    G1ParScanThreadState            pss(_g1h, worker_id);
+    G1ParScanThreadState            pss(_g1h, worker_id, NULL);
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
@@ -5694,7 +5690,7 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
   // JNI refs.
 
   // Use only a single queue for this PSS.
-  G1ParScanThreadState            pss(this, 0);
+  G1ParScanThreadState            pss(this, 0, NULL);
 
   // We do not embed a reference processor in the copying/scanning
   // closures while we're actually processing the discovered
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -606,6 +606,11 @@ protected:
   // may not be a humongous - it must fit into a single heap region.
   HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
 
+  HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
+                                    HeapRegion*    alloc_region,
+                                    bool           par,
+                                    size_t         word_size);
+
   // Ensure that no further allocations can happen in "r", bearing in mind
   // that parallel threads might be attempting allocations.
   void par_allocate_remaining_space(HeapRegion* r);
@@ -698,23 +703,20 @@ public:
   }
 
   // This is a fast test on whether a reference points into the
-  // collection set or not. It does not assume that the reference
-  // points into the heap; if it doesn't, it will return false.
+  // collection set or not. Assume that the reference
+  // points into the heap.
   bool in_cset_fast_test(oop obj) {
     assert(_in_cset_fast_test != NULL, "sanity");
-    if (_g1_committed.contains((HeapWord*) obj)) {
-      // no need to subtract the bottom of the heap from obj,
-      // _in_cset_fast_test is biased
-      uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
-      bool ret = _in_cset_fast_test[index];
-      // let's make sure the result is consistent with what the slower
-      // test returns
-      assert( ret || !obj_in_cs(obj), "sanity");
-      assert(!ret ||  obj_in_cs(obj), "sanity");
-      return ret;
-    } else {
-      return false;
-    }
+    assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
+    // no need to subtract the bottom of the heap from obj,
+    // _in_cset_fast_test is biased
+    uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
+    bool ret = _in_cset_fast_test[index];
+    // let's make sure the result is consistent with what the slower
+    // test returns
+    assert( ret || !obj_in_cs(obj), "sanity");
+    assert(!ret ||  obj_in_cs(obj), "sanity");
+    return ret;
   }
 
   void clear_cset_fast_test() {
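Note: in_cset_fast_test() now asserts, rather than tests, that the reference
is inside the committed heap, keeping only the region-indexed table lookup on
the fast path. A toy model of such a region-granularity membership table —
sizes and layout invented, and the bias is modeled by plain index arithmetic:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    constexpr unsigned kLogRegionBytes = 20;        // 1 MB regions (invented)

    struct FastCSetTest {
      std::uintptr_t heap_start, heap_end;
      std::vector<bool> in_cset;                    // one flag per region

      bool test(std::uintptr_t addr) const {
        // Precondition (as in the change above): addr is known to be in
        // the heap, so the range check becomes an assert.
        assert(addr >= heap_start && addr < heap_end);
        return in_cset[(addr - heap_start) >> kLogRegionBytes];
      }
    };

    int main() {
      FastCSetTest t{0x10000000, 0x10000000 + (8u << kLogRegionBytes),
                     std::vector<bool>(8, false)};
      t.in_cset[3] = true;
      assert(t.test(0x10000000 + (3u << kLogRegionBytes) + 123));
      assert(!t.test(0x10000000 + (5u << kLogRegionBytes)));
    }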
@@ -1774,95 +1776,6 @@ public:
     ParGCAllocBuffer::retire(end_of_gc, retain);
     _retired = true;
   }
-
-  bool is_retired() {
-    return _retired;
-  }
 };
-
-class G1ParGCAllocBufferContainer {
-protected:
-  static int const _priority_max = 2;
-  G1ParGCAllocBuffer* _priority_buffer[_priority_max];
-
-public:
-  G1ParGCAllocBufferContainer(size_t gclab_word_size) {
-    for (int pr = 0; pr < _priority_max; ++pr) {
-      _priority_buffer[pr] = new G1ParGCAllocBuffer(gclab_word_size);
-    }
-  }
-
-  ~G1ParGCAllocBufferContainer() {
-    for (int pr = 0; pr < _priority_max; ++pr) {
-      assert(_priority_buffer[pr]->is_retired(), "alloc buffers should all retire at this point.");
-      delete _priority_buffer[pr];
-    }
-  }
-
-  HeapWord* allocate(size_t word_sz) {
-    HeapWord* obj;
-    for (int pr = 0; pr < _priority_max; ++pr) {
-      obj = _priority_buffer[pr]->allocate(word_sz);
-      if (obj != NULL) return obj;
-    }
-    return obj;
-  }
-
-  bool contains(void* addr) {
-    for (int pr = 0; pr < _priority_max; ++pr) {
-      if (_priority_buffer[pr]->contains(addr)) return true;
-    }
-    return false;
-  }
-
-  void undo_allocation(HeapWord* obj, size_t word_sz) {
-    bool finish_undo;
-    for (int pr = 0; pr < _priority_max; ++pr) {
-      if (_priority_buffer[pr]->contains(obj)) {
-        _priority_buffer[pr]->undo_allocation(obj, word_sz);
-        finish_undo = true;
-      }
-    }
-    if (!finish_undo) ShouldNotReachHere();
-  }
-
-  size_t words_remaining() {
-    size_t result = 0;
-    for (int pr = 0; pr < _priority_max; ++pr) {
-      result += _priority_buffer[pr]->words_remaining();
-    }
-    return result;
-  }
-
-  size_t words_remaining_in_retired_buffer() {
-    G1ParGCAllocBuffer* retired = _priority_buffer[0];
-    return retired->words_remaining();
-  }
-
-  void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
-    for (int pr = 0; pr < _priority_max; ++pr) {
-      _priority_buffer[pr]->flush_stats_and_retire(stats, end_of_gc, retain);
-    }
-  }
-
-  void update(bool end_of_gc, bool retain, HeapWord* buf, size_t word_sz) {
-    G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
-    retired_and_set->retire(end_of_gc, retain);
-    retired_and_set->set_buf(buf);
-    retired_and_set->set_word_size(word_sz);
-    adjust_priority_order();
-  }
-
-private:
-  void adjust_priority_order() {
-    G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
-
-    int last = _priority_max - 1;
-    for (int pr = 0; pr < last; ++pr) {
-      _priority_buffer[pr] = _priority_buffer[pr + 1];
-    }
-    _priority_buffer[last] = retired_and_set;
-  }
-};
 
 class G1ParScanThreadState : public StackObj {
@@ -1873,11 +1786,13 @@ protected:
   G1SATBCardTableModRefBS* _ct_bs;
   G1RemSet* _g1_rem;
 
-  G1ParGCAllocBufferContainer  _surviving_alloc_buffer;
-  G1ParGCAllocBufferContainer  _tenured_alloc_buffer;
-  G1ParGCAllocBufferContainer* _alloc_buffers[GCAllocPurposeCount];
+  G1ParGCAllocBuffer  _surviving_alloc_buffer;
+  G1ParGCAllocBuffer  _tenured_alloc_buffer;
+  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
   ageTable            _age_table;
+
+  G1ParScanClosure    _scanner;
 
   size_t           _alloc_buffer_waste;
   size_t           _undo_waste;
 
@@ -1930,7 +1845,7 @@ protected:
   }
 
 public:
-  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
+  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
 
   ~G1ParScanThreadState() {
     FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
@@ -1939,7 +1854,7 @@ public:
   RefToScanQueue* refs()      { return _refs;       }
   ageTable*       age_table() { return &_age_table; }
 
-  G1ParGCAllocBufferContainer* alloc_buffer(GCAllocPurpose purpose) {
+  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
     return _alloc_buffers[purpose];
   }
 
@@ -1969,13 +1884,15 @@ public:
     HeapWord* obj = NULL;
     size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
     if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-      G1ParGCAllocBufferContainer* alloc_buf = alloc_buffer(purpose);
+      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
       add_to_alloc_buffer_waste(alloc_buf->words_remaining());
+      alloc_buf->retire(false /* end_of_gc */, false /* retain */);
 
       HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
       if (buf == NULL) return NULL; // Let caller handle allocation failure.
-
-      add_to_alloc_buffer_waste(alloc_buf->words_remaining_in_retired_buffer());
-      alloc_buf->update(false /* end_of_gc */, false /* retain */, buf, gclab_word_size);
+      // Otherwise.
+      alloc_buf->set_word_size(gclab_word_size);
+      alloc_buf->set_buf(buf);
 
       obj = alloc_buf->allocate(word_sz);
       assert(obj != NULL, "buffer was definitely big enough...");
@@ -2065,6 +1982,8 @@ public:
     }
   }
 
+  oop copy_to_survivor_space(oop const obj);
+
   template <class T> void deal_with_reference(T* ref_to_scan) {
     if (has_partial_array_mask(ref_to_scan)) {
       _partial_scan_cl->do_oop_nv(ref_to_scan);
@@ -2087,6 +2006,7 @@ public:
     }
   }
 
+public:
   void trim_queue();
 };
 
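Note: these hunks revert the two-level "priority buffer" container to a
single G1ParGCAllocBuffer per purpose. The restored slow path retires the
current buffer up front (counting its unused tail as waste), then installs a
freshly allocated buffer and satisfies the request from it. A compact
standalone model of that retire-and-refill flow — Plab here is an invented
miniature, not the HotSpot class:

    #include <cassert>
    #include <cstddef>
    #include <cstdlib>

    struct Plab {
      char* top = nullptr; char* end = nullptr;

      void* allocate(std::size_t bytes) {
        if (top + bytes > end) return nullptr;
        void* r = top; top += bytes; return r;
      }
      std::size_t bytes_remaining() const { return std::size_t(end - top); }
      void set_buf(char* buf, std::size_t sz) { top = buf; end = buf + sz; }
    };

    void* allocate_slow(Plab& plab, std::size_t bytes, std::size_t plab_sz,
                        std::size_t& waste) {
      waste += plab.bytes_remaining();        // retire: unused tail is wasted
      char* fresh = static_cast<char*>(std::malloc(plab_sz));  // new buffer
      if (fresh == nullptr) return nullptr;   // caller handles failure
      plab.set_buf(fresh, plab_sz);           // buffer leaks in this sketch
      void* obj = plab.allocate(bytes);
      assert(obj != nullptr && "buffer was definitely big enough");
      return obj;
    }

    int main() {
      Plab p; std::size_t waste = 0;
      void* o = allocate_slow(p, 64, 4096, waste);
      assert(o != nullptr && waste == 0);
    }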
hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.cpp (new file, 31 lines)
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1OopClosures.inline.hpp"
+
+G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
+  G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),
+  _cm(_g1->concurrent_mark()) {}
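Note: this new 31-line file appears to exist mostly so the G1ParCopyHelper
constructor can live out of line: its initializer needs the full
G1CollectedHeap/ConcurrentMark definitions, which the header can then avoid
including. The general technique, schematically — Widget and Engine are
invented stand-ins, shown as one translation unit for brevity:

    // widget.hpp -- header stays light: a forward declaration suffices
    struct Engine;                       // fwd-decl is enough for a pointer
    struct Widget {
      Engine* _engine;
      explicit Widget(Engine* e);        // body deliberately not in the header
    };

    // widget.cpp -- the only file that must see the heavy definition
    struct Engine { int parts[1024]; };  // imagine an expensive include here
    Widget::Widget(Engine* e) : _engine(e) {}

    int main() {
      Engine eng{};
      Widget w(&eng);
      return w._engine == &eng ? 0 : 1;
    }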
hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp
@@ -48,12 +48,8 @@ public:
 class G1ParClosureSuper : public OopsInHeapRegionClosure {
 protected:
   G1CollectedHeap* _g1;
-  G1RemSet* _g1_rem;
-  ConcurrentMark* _cm;
   G1ParScanThreadState* _par_scan_state;
   uint _worker_id;
-  bool _during_initial_mark;
-  bool _mark_in_progress;
 public:
   G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
   bool apply_to_weak_ref_discovered_field() { return true; }
@@ -133,23 +129,10 @@ public:
 
-// Add back base class for metadata
 class G1ParCopyHelper : public G1ParClosureSuper {
+protected:
   Klass* _scanned_klass;
+  ConcurrentMark* _cm;
 
- public:
-  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-      _scanned_klass(NULL),
-      G1ParClosureSuper(g1, par_scan_state) {}
-
-  void set_scanned_klass(Klass* k) { _scanned_klass = k; }
-  template <class T> void do_klass_barrier(T* p, oop new_obj);
-};
-
-template <G1Barrier barrier, bool do_mark_object>
-class G1ParCopyClosure : public G1ParCopyHelper {
-  G1ParScanClosure _scanner;
-  template <class T> void do_oop_work(T* p);
-
-protected:
   // Mark the object if it's not already marked. This is used to mark
   // objects pointed to by roots that are guaranteed not to move
   // during the GC (i.e., non-CSet objects). It is MT-safe.
@@ -159,22 +142,26 @@ protected:
   // objects pointed to by roots that have been forwarded during a
   // GC. It is MT-safe.
   void mark_forwarded_object(oop from_obj, oop to_obj);
 public:
+  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
 
-  oop copy_to_survivor_space(oop obj);
   void set_scanned_klass(Klass* k) { _scanned_klass = k; }
   template <class T> void do_klass_barrier(T* p, oop new_obj);
 };
 
 template <G1Barrier barrier, bool do_mark_object>
 class G1ParCopyClosure : public G1ParCopyHelper {
+private:
   template <class T> void do_oop_work(T* p);
 
 public:
   G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
                    ReferenceProcessor* rp) :
-      _scanner(g1, par_scan_state, rp),
       G1ParCopyHelper(g1, par_scan_state) {
     assert(_ref_processor == NULL, "sanity");
   }
 
-  G1ParScanClosure* scanner() { return &_scanner; }
-
-  template <class T> void do_oop_nv(T* p) {
-    do_oop_work(p);
-  }
+  template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
   virtual void do_oop(oop* p)       { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp
@@ -83,7 +83,7 @@ inline void G1ParScanClosure::do_oop_nv(T* p) {
 
       _par_scan_state->push_on_queue(p);
     } else {
-      _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
+      _par_scan_state->update_rs(_from, p, _worker_id);
     }
   }
 }
hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
@@ -65,7 +65,7 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
     case threads:
     {
       ResourceMark rm;
-      CLDToOopClosure* cld_closure = NULL; // Not needed. All CLDs are already visited.
+      CLDClosure* cld_closure = NULL; // Not needed. All CLDs are already visited.
       Threads::oops_do(&roots_closure, cld_closure, NULL);
     }
     break;
@@ -122,7 +122,7 @@ void ThreadRootsTask::do_it(GCTaskManager* manager, uint which) {
 
   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   PSScavengeRootsClosure roots_closure(pm);
-  CLDToOopClosure* roots_from_clds = NULL;  // Not needed. All CLDs are already visited.
+  CLDClosure* roots_from_clds = NULL;  // Not needed. All CLDs are already visited.
   CodeBlobToOopClosure roots_in_blobs(&roots_closure, /*do_marking=*/ true);
 
   if (_java_thread != NULL)
hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp
@@ -158,7 +158,7 @@ public:
   // Fills in the unallocated portion of the buffer with a garbage object.
   // If "end_of_gc" is TRUE, is after the last use in the GC. IF "retain"
   // is true, attempt to re-use the unused portion in the next GC.
-  virtual void retire(bool end_of_gc, bool retain);
+  void retire(bool end_of_gc, bool retain);
 
   void print() PRODUCT_RETURN;
 };
hotspot/src/share/vm/memory/iterator.hpp
@@ -128,6 +128,11 @@ class KlassClosure : public Closure {
   virtual void do_klass(Klass* k) = 0;
 };
 
+class CLDClosure : public Closure {
+ public:
+  virtual void do_cld(ClassLoaderData* cld) = 0;
+};
+
 class KlassToOopClosure : public KlassClosure {
   OopClosure* _oop_closure;
  public:
@@ -135,7 +140,7 @@ class KlassToOopClosure : public KlassClosure {
   virtual void do_klass(Klass* k);
 };
 
-class CLDToOopClosure {
+class CLDToOopClosure : public CLDClosure {
   OopClosure*       _oop_closure;
   KlassToOopClosure _klass_closure;
   bool              _must_claim_cld;
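Note: iterator.hpp grows an abstract CLDClosure, and CLDToOopClosure becomes
just one implementation of it; the root-walking entry points below (frames,
Threads, VMThread) widen their parameters accordingly. A standalone sketch of
the shape — the X-suffixed names are invented stand-ins:

    #include <cstdio>

    struct ClassLoaderDataX { const char* name; };

    struct CLDClosureX {                           // mirrors CLDClosure
      virtual void do_cld(ClassLoaderDataX* cld) = 0;
      virtual ~CLDClosureX() = default;
    };

    struct PrintingCLDClosure : CLDClosureX {      // any visitor now fits
      void do_cld(ClassLoaderDataX* cld) override { std::puts(cld->name); }
    };

    // Callers take the base type, so they no longer force every caller to
    // construct a CLDToOopClosure in particular.
    void visit_all(CLDClosureX* c) {
      ClassLoaderDataX boot{"<boot>"};
      if (c != nullptr) c->do_cld(&boot);          // NULL still means "skip"
    }

    int main() {
      PrintingCLDClosure p;
      visit_all(&p);
      visit_all(nullptr);
    }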
hotspot/src/share/vm/oops/instanceKlass.cpp
@@ -2994,8 +2994,7 @@ void InstanceKlass::oop_print_on(oop obj, outputStream* st) {
         offset          <= (juint) value->length() &&
         offset + length <= (juint) value->length()) {
       st->print(BULLET"string: ");
-      Handle h_obj(obj);
-      java_lang_String::print(h_obj, st);
+      java_lang_String::print(obj, st);
       st->cr();
       if (!WizardMode)  return;  // that is enough
     }
hotspot/src/share/vm/oops/klass.cpp
@@ -692,3 +692,21 @@ bool Klass::verify_itable_index(int i) {
 }
 
 #endif
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TestKlass {
+ public:
+  static void test_oop_is_instanceClassLoader() {
+    assert(SystemDictionary::ClassLoader_klass()->oop_is_instanceClassLoader(), "assert");
+    assert(!SystemDictionary::String_klass()->oop_is_instanceClassLoader(), "assert");
+  }
+};
+
+void TestKlass_test() {
+  TestKlass::test_oop_is_instanceClassLoader();
+}
+
+#endif
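Note: the TestKlass addition follows HotSpot's internal-VM-test convention —
a small test class wrapping the asserts, a plain exported TestXxx_test()
function, and registration in execute_internal_vm_tests() (see the jni.cpp
hunks below). The same shape in miniature; Subject and the macro are invented:

    #include <cassert>
    #include <cstdio>

    struct Subject {
      bool is_class_loader;
      bool oop_is_instanceClassLoader() const { return is_class_loader; }
    };

    class TestSubject {
     public:
      static void test_oop_is_instanceClassLoader() {
        assert(Subject{true}.oop_is_instanceClassLoader());
        assert(!Subject{false}.oop_is_instanceClassLoader());
      }
    };

    void TestSubject_test() { TestSubject::test_oop_is_instanceClassLoader(); }

    // A tiny stand-in for the run_unit_test macro used by the test runner.
    #define run_unit_test(f) do { f; std::puts("ok: " #f); } while (0)

    int main() {
      run_unit_test(TestSubject_test());
    }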
hotspot/src/share/vm/oops/klass.hpp
@@ -498,6 +498,7 @@ class Klass : public Metadata {
   virtual bool oop_is_objArray_slow()  const { return false; }
   virtual bool oop_is_typeArray_slow() const { return false; }
  public:
+  virtual bool oop_is_instanceClassLoader() const { return false; }
   virtual bool oop_is_instanceMirror()      const { return false; }
   virtual bool oop_is_instanceRef()         const { return false; }
 
hotspot/src/share/vm/oops/oop.hpp
@@ -109,12 +109,13 @@ class oopDesc {
   int size_given_klass(Klass* klass);
 
   // type test operations (inlined in oop.inline.h)
-  bool is_instance()            const;
-  bool is_instanceMirror()      const;
-  bool is_instanceRef()         const;
-  bool is_array()               const;
-  bool is_objArray()            const;
-  bool is_typeArray()           const;
+  bool is_instance()            const;
+  bool is_instanceMirror()      const;
+  bool is_instanceClassLoader() const;
+  bool is_instanceRef()         const;
+  bool is_array()               const;
+  bool is_objArray()            const;
+  bool is_typeArray()           const;
 
  private:
   // field addresses in oop
hotspot/src/share/vm/oops/oop.inline.hpp
@@ -147,12 +147,13 @@ inline void oopDesc::init_mark() { set_mark(markOopDesc::proto
 
 inline bool oopDesc::is_a(Klass* k) const { return klass()->is_subtype_of(k); }
 
-inline bool oopDesc::is_instance()            const { return klass()->oop_is_instance(); }
-inline bool oopDesc::is_instanceMirror()      const { return klass()->oop_is_instanceMirror(); }
-inline bool oopDesc::is_instanceRef()         const { return klass()->oop_is_instanceRef(); }
-inline bool oopDesc::is_array()               const { return klass()->oop_is_array(); }
-inline bool oopDesc::is_objArray()            const { return klass()->oop_is_objArray(); }
-inline bool oopDesc::is_typeArray()           const { return klass()->oop_is_typeArray(); }
+inline bool oopDesc::is_instance()            const { return klass()->oop_is_instance(); }
+inline bool oopDesc::is_instanceClassLoader() const { return klass()->oop_is_instanceClassLoader(); }
+inline bool oopDesc::is_instanceMirror()      const { return klass()->oop_is_instanceMirror(); }
+inline bool oopDesc::is_instanceRef()         const { return klass()->oop_is_instanceRef(); }
+inline bool oopDesc::is_array()               const { return klass()->oop_is_array(); }
+inline bool oopDesc::is_objArray()            const { return klass()->oop_is_objArray(); }
+inline bool oopDesc::is_typeArray()           const { return klass()->oop_is_typeArray(); }
 
 inline void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }
 
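Note: the new is_instanceClassLoader() follows the established two-step
dispatch pattern here — oopDesc exposes a non-virtual inline query that
delegates to one virtual predicate on Klass, overridden only by the matching
Klass subclass. In miniature, with X-suffixed invented stand-ins:

    #include <cassert>

    struct KlassX {
      virtual bool oop_is_instanceClassLoader() const { return false; }
      virtual ~KlassX() = default;
    };

    struct ClassLoaderKlassX : KlassX {
      bool oop_is_instanceClassLoader() const override { return true; }
    };

    struct OopDescX {
      const KlassX* k;
      const KlassX* klass() const { return k; }
      // Inline forwarder, as in oop.inline.hpp.
      bool is_instanceClassLoader() const {
        return klass()->oop_is_instanceClassLoader();
      }
    };

    int main() {
      KlassX plain; ClassLoaderKlassX loader;
      assert(!OopDescX{&plain}.is_instanceClassLoader());
      assert(OopDescX{&loader}.is_instanceClassLoader());
    }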
hotspot/src/share/vm/prims/jni.cpp
@@ -3876,9 +3876,10 @@ void TestVirtualSpace_test();
 void TestMetaspaceAux_test();
 void TestMetachunk_test();
 void TestVirtualSpaceNode_test();
+void TestOldFreeSpaceCalculation_test();
 void TestNewSize_test();
+void TestKlass_test();
 #if INCLUDE_ALL_GCS
-void TestOldFreeSpaceCalculation_test();
 void TestG1BiasedArray_test();
 void TestBufferingOopClosure_test();
 #endif
@@ -3899,12 +3900,13 @@ void execute_internal_vm_tests() {
     run_unit_test(QuickSort::test_quick_sort());
     run_unit_test(AltHashing::test_alt_hash());
     run_unit_test(test_loggc_filename());
+    run_unit_test(TestOldFreeSpaceCalculation_test());
     run_unit_test(TestNewSize_test());
+    run_unit_test(TestKlass_test());
 #if INCLUDE_VM_STRUCTS
     run_unit_test(VMStructs::test());
 #endif
 #if INCLUDE_ALL_GCS
-    run_unit_test(TestOldFreeSpaceCalculation_test());
     run_unit_test(TestG1BiasedArray_test());
     run_unit_test(HeapRegionRemSet::test_prt());
     run_unit_test(TestBufferingOopClosure_test());
hotspot/src/share/vm/runtime/frame.cpp
@@ -895,7 +895,7 @@ oop* frame::interpreter_callee_receiver_addr(Symbol* signature) {
 }
 
 
-void frame::oops_interpreted_do(OopClosure* f, CLDToOopClosure* cld_f,
+void frame::oops_interpreted_do(OopClosure* f, CLDClosure* cld_f,
                                 const RegisterMap* map, bool query_oop_map_cache) {
   assert(is_interpreted_frame(), "Not an interpreted frame");
   assert(map != NULL, "map must be set");
@@ -1146,7 +1146,7 @@ void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) {
 }
 
 
-void frame::oops_do_internal(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
+void frame::oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
 #ifndef PRODUCT
   // simulate GC crash here to dump java thread in error report
   if (CrashGCForDumpingJavaThread) {
hotspot/src/share/vm/runtime/frame.hpp
@@ -419,19 +419,19 @@ class frame VALUE_OBJ_CLASS_SPEC {
 
   // Oops-do's
   void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f);
-  void oops_interpreted_do(OopClosure* f, CLDToOopClosure* cld_f, const RegisterMap* map, bool query_oop_map_cache = true);
+  void oops_interpreted_do(OopClosure* f, CLDClosure* cld_f, const RegisterMap* map, bool query_oop_map_cache = true);
 
  private:
   void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f);
 
   // Iteration of oops
-  void oops_do_internal(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
+  void oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
   void oops_entry_do(OopClosure* f, const RegisterMap* map);
   void oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map);
   int adjust_offset(Method* method, int index); // helper for above fn
  public:
   // Memory management
-  void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cld_f, cf, map, true); }
+  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cld_f, cf, map, true); }
   void nmethods_do(CodeBlobClosure* cf);
 
   // RedefineClasses support for finding live interpreted methods on the stack
hotspot/src/share/vm/runtime/thread.cpp
@@ -804,7 +804,7 @@ bool Thread::claim_oops_do_par_case(int strong_roots_parity) {
   return false;
 }
 
-void Thread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void Thread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   active_handles()->oops_do(f);
   // Do oop for ThreadShadow
   f->do_oop((oop*)&_pending_exception);
@@ -2700,7 +2700,7 @@ public:
   }
 };
 
-void JavaThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void JavaThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   // Verify that the deferred card marks have been flushed.
   assert(deferred_card_mark().is_empty(), "Should be empty during GC");
 
@@ -3223,7 +3223,7 @@ CompilerThread::CompilerThread(CompileQueue* queue, CompilerCounters* counters)
 #endif
 }
 
-void CompilerThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void CompilerThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   JavaThread::oops_do(f, cld_f, cf);
   if (_scanned_nmethod != NULL && cf != NULL) {
     // Safepoints can occur when the sweeper is scanning an nmethod so
@@ -4111,14 +4111,14 @@ bool Threads::includes(JavaThread* p) {
 // uses the Threads_lock to guarantee this property. It also makes sure that
 // all threads gets blocked when exiting or starting).
 
-void Threads::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void Threads::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   ALL_JAVA_THREADS(p) {
     p->oops_do(f, cld_f, cf);
   }
   VMThread::vm_thread()->oops_do(f, cld_f, cf);
 }
 
-void Threads::possibly_parallel_oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void Threads::possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   // Introduce a mechanism allowing parallel threads to claim threads as
   // root groups. Overhead should be small enough to use all the time,
   // even in sequential code.
hotspot/src/share/vm/runtime/thread.hpp
@@ -487,7 +487,7 @@ class Thread: public ThreadShadow {
   // Apply "cld_f->do_cld" to CLDs that are otherwise not kept alive.
   //   Used by JavaThread::oops_do.
   // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
-  virtual void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  virtual void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
   // Handles the parallel case for the method below.
  private:
@@ -1448,7 +1448,7 @@ class JavaThread: public Thread {
   void frames_do(void f(frame*, const RegisterMap*));
 
   // Memory operations
-  void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
   // Sweeper operations
   void nmethods_do(CodeBlobClosure* cf);
@@ -1879,7 +1879,7 @@ class CompilerThread : public JavaThread {
   // GC support
   // Apply "f->do_oop" to all root oops in "this".
   // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
-  void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
 #ifndef PRODUCT
  private:
@@ -1948,9 +1948,9 @@ class Threads: AllStatic {
 
   // Apply "f->do_oop" to all root oops in all threads.
   // This version may only be called by sequential code.
-  static void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  static void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
   // This version may be called by sequential or parallel code.
-  static void possibly_parallel_oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  static void possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
   // This creates a list of GCTasks, one per thread.
   static void create_thread_roots_tasks(GCTaskQueue* q);
   // This creates a list of GCTasks, one per thread, for marking objects.
hotspot/src/share/vm/runtime/vmThread.cpp
@@ -656,7 +656,7 @@ void VMThread::execute(VM_Operation* op) {
 }
 
 
-void VMThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void VMThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   Thread::oops_do(f, cld_f, cf);
   _vm_queue->oops_do(f);
 }
hotspot/src/share/vm/runtime/vmThread.hpp
@@ -126,7 +126,7 @@ class VMThread: public NamedThread {
   static VMThread* vm_thread()                    { return _vm_thread; }
 
   // GC support
-  void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
   // Debugging
   void print_on(outputStream* st) const;