8146395: Add inline qualifier in oop.hpp and fix inlining in gc files

Fix remaining issues after 8146401. Also fix a Windows VS2010 linkage problem (g1OopClosures.hpp).

Reviewed-by: stefank, mgerdin
Goetz Lindenmaier 2016-01-18 10:25:41 +01:00
parent f089e2ceeb
commit 6397e809aa
50 changed files with 515 additions and 289 deletions
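The change follows one pattern throughout: the .hpp keeps only an inline-qualified declaration, and the body moves into a matching .inline.hpp that is included only by the translation units that need it. A minimal, self-contained sketch of that split, using a hypothetical Counter class rather than a HotSpot type:

// counter.hpp (hypothetical)
#ifndef COUNTER_HPP
#define COUNTER_HPP
class Counter {
  int _value;
 public:
  Counter() : _value(0) {}
  inline void add(int n);        // declaration only; body lives in counter.inline.hpp
  int value() const { return _value; }
};
#endif

// counter.inline.hpp (hypothetical)
#ifndef COUNTER_INLINE_HPP
#define COUNTER_INLINE_HPP
#include "counter.hpp"
inline void Counter::add(int n) { _value += n; }
#endif

// .cpp files that call add() include "counter.inline.hpp"; everything else keeps
// including only "counter.hpp", which is what the include changes below arrange.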

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,6 +35,7 @@
#include "gc/shared/space.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,15 +40,9 @@ class MarkFromRootsClosure;
class ParMarkFromRootsClosure;
// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_DEFN \
void do_oop(oop obj); \
template <class T> inline void do_oop_work(T* p) { \
T heap_oop = oopDesc::load_heap_oop(p); \
if (!oopDesc::is_null(heap_oop)) { \
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
do_oop(obj); \
} \
}
#define DO_OOP_WORK_DEFN \
void do_oop(oop obj); \
template <class T> inline void do_oop_work(T* p);
// TODO: This duplication of the MetadataAwareOopClosure class is only needed
// because some CMS OopClosures derive from OopsInGenClosure. It would be
@ -131,8 +125,8 @@ class PushAndMarkClosure: public MetadataAwareOopClosure {
bool concurrent_precleaning);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};
// In the parallel case, the bit map and the
@ -157,8 +151,8 @@ class ParPushAndMarkClosure: public MetadataAwareOopClosure {
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { ParPushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};
// The non-parallel version (the parallel version appears further below).
@ -186,8 +180,8 @@ class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
bool concurrent_precleaning);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
void set_freelistLock(Mutex* m) {
_freelistLock = m;
@ -220,8 +214,8 @@ class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
void trim_queue(uint size);
};
@ -249,8 +243,8 @@ class PushOrMarkClosure: public MetadataAwareOopClosure {
MarkFromRootsClosure* parent);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
@ -287,8 +281,8 @@ class ParPushOrMarkClosure: public MetadataAwareOopClosure {
ParMarkFromRootsClosure* parent);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { ParPushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
@ -318,8 +312,8 @@ class CMSKeepAliveClosure: public MetadataAwareOopClosure {
bool concurrent_precleaning() const { return _concurrent_precleaning; }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};
class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
@ -336,8 +330,8 @@ class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};
// A parallel (MT) version of the above, used when

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,21 +30,6 @@
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/oop.inline.hpp"
// Trim our work_queue so its length is below max at return
inline void ParMarkRefsIntoAndScanClosure::trim_queue(uint max) {
while (_work_queue->size() > max) {
oop newOop;
if (_work_queue->pop_local(newOop)) {
assert(newOop->is_oop(), "Expected an oop");
assert(_bit_map->isMarked((HeapWord*)newOop),
"only grey objects on this stack");
// iterate over the oops in this oop, marking and pushing
// the ones in CMS heap (i.e. in _span).
newOop->oop_iterate(&_parPushAndMarkClosure);
}
}
}
// MetadataAwareOopClosure and MetadataAwareOopsInGenClosure are duplicated,
// until we get rid of OopsInGenClosure.
@ -61,4 +46,48 @@ inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) {
cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim);
}
// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_IMPL(cls) \
template <class T> void cls::do_oop_work(T* p) { \
T heap_oop = oopDesc::load_heap_oop(p); \
if (!oopDesc::is_null(heap_oop)) { \
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
do_oop(obj); \
} \
}
#define DO_OOP_WORK_NV_IMPL(cls) \
DO_OOP_WORK_IMPL(cls) \
void cls::do_oop_nv(oop* p) { cls::do_oop_work(p); } \
void cls::do_oop_nv(narrowOop* p) { cls::do_oop_work(p); }
DO_OOP_WORK_IMPL(MarkRefsIntoClosure)
DO_OOP_WORK_IMPL(ParMarkRefsIntoClosure)
DO_OOP_WORK_IMPL(MarkRefsIntoVerifyClosure)
DO_OOP_WORK_NV_IMPL(PushAndMarkClosure)
DO_OOP_WORK_NV_IMPL(ParPushAndMarkClosure)
DO_OOP_WORK_NV_IMPL(MarkRefsIntoAndScanClosure)
DO_OOP_WORK_NV_IMPL(ParMarkRefsIntoAndScanClosure)
// Trim our work_queue so its length is below max at return
inline void ParMarkRefsIntoAndScanClosure::trim_queue(uint max) {
while (_work_queue->size() > max) {
oop newOop;
if (_work_queue->pop_local(newOop)) {
assert(newOop->is_oop(), "Expected an oop");
assert(_bit_map->isMarked((HeapWord*)newOop),
"only grey objects on this stack");
// iterate over the oops in this oop, marking and pushing
// the ones in CMS heap (i.e. in _span).
newOop->oop_iterate(&_parPushAndMarkClosure);
}
}
}
DO_OOP_WORK_NV_IMPL(PushOrMarkClosure)
DO_OOP_WORK_NV_IMPL(ParPushOrMarkClosure)
DO_OOP_WORK_NV_IMPL(CMSKeepAliveClosure)
DO_OOP_WORK_NV_IMPL(CMSInnerParMarkAndPushClosure)
DO_OOP_WORK_IMPL(CMSParKeepAliveClosure)
#endif // SHARE_VM_GC_CMS_CMSOOPCLOSURES_INLINE_HPP
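For reference, DO_OOP_WORK_NV_IMPL applied to a single closure such as PushAndMarkClosure expands to roughly the out-of-line definitions below. This is a hand expansion of the macros shown above, not additional source in the changeset:

template <class T> void PushAndMarkClosure::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    do_oop(obj);
  }
}
void PushAndMarkClosure::do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
void PushAndMarkClosure::do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }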

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -219,6 +219,10 @@ void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
}
}
size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
return adjustObjectSize(oop(addr)->size());
}
void CompactibleFreeListSpace::resetIndexedFreeListArray() {
for (size_t i = 1; i < IndexSetSize; i++) {
assert(_indexedFreeList[i].size() == (size_t) i,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -313,9 +313,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
return adjustObjectSize(size);
}
inline size_t obj_size(const HeapWord* addr) const {
return adjustObjectSize(oop(addr)->size());
}
inline size_t obj_size(const HeapWord* addr) const;
protected:
// Reset the indexed free list to its initial empty condition.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3324,6 +3324,8 @@ class ParConcMarkingClosure: public MetadataAwareOopClosure {
}
};
DO_OOP_WORK_IMPL(ParConcMarkingClosure)
// Grey object scanning during work stealing phase --
// the salient assumption here is that any references
// that are in these stolen objects being scanned must

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@
#include "gc/cms/parOopClosures.inline.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,6 +34,31 @@
/////////////////////////////////////////////////////////////////////////
PromotedObject* PromotedObject::next() const {
assert(!((FreeChunk*)this)->is_free(), "Error");
PromotedObject* res;
if (UseCompressedOops) {
// The next pointer is a compressed oop stored in the top 32 bits
res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
} else {
res = (PromotedObject*)(_next & next_mask);
}
assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res)));
return res;
}
inline void PromotedObject::setNext(PromotedObject* x) {
assert(((intptr_t)x & ~next_mask) == 0, "Conflict in bit usage, "
"or insufficient alignment of objects");
if (UseCompressedOops) {
assert(_data._narrow_next == 0, "Overwrite?");
_data._narrow_next = oopDesc::encode_heap_oop(oop(x));
} else {
_next |= (intptr_t)x;
}
assert(!((FreeChunk*)this)->is_free(), "Error");
}
//////////////////////////////////////////////////////////////////////////////
// We go over the list of promoted objects, removing each from the list,
// and applying the closure (this may, in turn, add more elements to
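next() and setNext() above keep status flags in the low bits of the link word and the next pointer (or a compressed oop) in the remaining bits, so setNext() must leave the flag bits intact. A minimal, self-contained model of that tagging scheme, with hypothetical names and covering only the uncompressed case:

#include <cassert>
#include <cstdint>

static const intptr_t flag_mask = 0x7;           // hypothetical low tag bits
static const intptr_t next_mask = ~flag_mask;

struct Link {
  intptr_t _next;                                // flags | pointer to next object
  void set_next(void* p) {
    // Object alignment guarantees the low bits of p are zero, so OR-ing in the
    // pointer preserves whatever flag bits are already set.
    assert(((intptr_t)p & ~next_mask) == 0);
    _next |= (intptr_t)p;
  }
  void* next() const { return (void*)(_next & next_mask); }
};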

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,29 +64,8 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
Data _data;
};
public:
inline PromotedObject* next() const {
assert(!((FreeChunk*)this)->is_free(), "Error");
PromotedObject* res;
if (UseCompressedOops) {
// The next pointer is a compressed oop stored in the top 32 bits
res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
} else {
res = (PromotedObject*)(_next & next_mask);
}
assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res)));
return res;
}
inline void setNext(PromotedObject* x) {
assert(((intptr_t)x & ~next_mask) == 0, "Conflict in bit usage, "
"or insufficient alignment of objects");
if (UseCompressedOops) {
assert(_data._narrow_next == 0, "Overwrite?");
_data._narrow_next = oopDesc::encode_heap_oop(oop(x));
} else {
_next |= (intptr_t)x;
}
assert(!((FreeChunk*)this)->is_free(), "Error");
}
PromotedObject* next() const;
void setNext(PromotedObject* x);
inline void setPromotedMark() {
_next |= promoted_mask;
assert(!((FreeChunk*)this)->is_free(), "Error");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -96,12 +96,7 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
}
// The argument addr should be the start address of a valid object
HeapWord* nextObject(HeapWord* addr) {
oop obj = (oop) addr;
HeapWord* res = addr + obj->size();
assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity");
return res;
}
inline HeapWord* nextObject(HeapWord* addr);
void print_on_error(outputStream* st, const char* prefix) const;
@ -627,14 +622,7 @@ public:
// If marking is not in progress, it's a no-op.
void verify_no_cset_oops() PRODUCT_RETURN;
bool isPrevMarked(oop p) const {
assert(p != NULL && p->is_oop(), "expected an oop");
HeapWord* addr = (HeapWord*)p;
assert(addr >= _prevMarkBitMap->startWord() ||
addr < _prevMarkBitMap->endWord(), "in a region");
return _prevMarkBitMap->isMarked(addr);
}
inline bool isPrevMarked(oop p) const;
inline bool do_yield_check(uint worker_i = 0);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -185,6 +185,14 @@ inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
return true;
}
// The argument addr should be the start address of a valid object
HeapWord* CMBitMapRO::nextObject(HeapWord* addr) {
oop obj = (oop) addr;
HeapWord* res = addr + obj->size();
assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity");
return res;
}
#define check_mark(addr) \
assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize), \
"outside underlying space?"); \
@ -353,6 +361,15 @@ inline void ConcurrentMark::markPrev(oop p) {
((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
}
bool ConcurrentMark::isPrevMarked(oop p) const {
assert(p != NULL && p->is_oop(), "expected an oop");
HeapWord* addr = (HeapWord*)p;
assert(addr >= _prevMarkBitMap->startWord() ||
addr < _prevMarkBitMap->endWord(), "in a region");
return _prevMarkBitMap->isMarked(addr);
}
inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
uint worker_id, HeapRegion* hr) {
assert(obj != NULL, "pre-condition");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,7 +61,7 @@
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -206,9 +206,9 @@ class G1Mux2Closure : public OopClosure {
OopClosure* _c2;
public:
G1Mux2Closure(OopClosure *c1, OopClosure *c2);
template <class T> void do_oop_work(T* p);
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
template <class T> inline void do_oop_work(T* p);
virtual inline void do_oop(oop* p);
virtual inline void do_oop(narrowOop* p);
};
// A closure that returns true if it is actually applied
@ -219,9 +219,9 @@ class G1TriggerClosure : public OopClosure {
public:
G1TriggerClosure();
bool triggered() const { return _triggered; }
template <class T> void do_oop_work(T* p);
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
template <class T> inline void do_oop_work(T* p);
virtual inline void do_oop(oop* p);
virtual inline void do_oop(narrowOop* p);
};
// A closure which uses a triggering closure to determine
@ -232,9 +232,9 @@ class G1InvokeIfNotTriggeredClosure: public OopClosure {
OopClosure* _oop_cl;
public:
G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t, OopClosure* oc);
template <class T> void do_oop_work(T* p);
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
template <class T> inline void do_oop_work(T* p);
virtual inline void do_oop(oop* p);
virtual inline void do_oop(narrowOop* p);
};
class G1UpdateRSOrPushRefOopClosure: public OopClosure {
@ -263,9 +263,9 @@ public:
return result;
}
template <class T> void do_oop_work(T* p);
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop(oop* p) { do_oop_work(p); }
template <class T> inline void do_oop_work(T* p);
virtual inline void do_oop(narrowOop* p);
virtual inline void do_oop(oop* p);
};
#endif // SHARE_VM_GC_G1_G1OOPCLOSURES_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -141,12 +141,16 @@ inline void G1Mux2Closure::do_oop_work(T* p) {
_c1->do_oop(p);
_c2->do_oop(p);
}
void G1Mux2Closure::do_oop(oop* p) { do_oop_work(p); }
void G1Mux2Closure::do_oop(narrowOop* p) { do_oop_work(p); }
template <class T>
inline void G1TriggerClosure::do_oop_work(T* p) {
// Record that this closure was actually applied (triggered).
_triggered = true;
}
void G1TriggerClosure::do_oop(oop* p) { do_oop_work(p); }
void G1TriggerClosure::do_oop(narrowOop* p) { do_oop_work(p); }
template <class T>
inline void G1InvokeIfNotTriggeredClosure::do_oop_work(T* p) {
@ -154,6 +158,8 @@ inline void G1InvokeIfNotTriggeredClosure::do_oop_work(T* p) {
_oop_cl->do_oop(p);
}
}
void G1InvokeIfNotTriggeredClosure::do_oop(oop* p) { do_oop_work(p); }
void G1InvokeIfNotTriggeredClosure::do_oop(narrowOop* p) { do_oop_work(p); }
template <class T>
inline void G1UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
@ -224,6 +230,8 @@ inline void G1UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
to->rem_set()->add_reference(p, _worker_i);
}
}
void G1UpdateRSOrPushRefOopClosure::do_oop(oop* p) { do_oop_work(p); }
void G1UpdateRSOrPushRefOopClosure::do_oop(narrowOop* p) { do_oop_work(p); }
template <class T>
void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {

View File

@ -34,6 +34,7 @@
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/satbMarkQueue.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,20 +58,11 @@ public:
// We export this to make it available in cases where the static
// type of the barrier set is known. Note that it is non-virtual.
template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
T heap_oop = oopDesc::load_heap_oop(field);
if (!oopDesc::is_null(heap_oop)) {
enqueue(oopDesc::decode_heap_oop(heap_oop));
}
}
template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal);
// These are the more general virtual versions.
virtual void write_ref_field_pre_work(oop* field, oop new_val) {
inline_write_ref_field_pre(field, new_val);
}
virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {
inline_write_ref_field_pre(field, new_val);
}
inline virtual void write_ref_field_pre_work(oop* field, oop new_val);
inline virtual void write_ref_field_pre_work(narrowOop* field, oop new_val);
virtual void write_ref_field_pre_work(void* field, oop new_val) {
guarantee(false, "Not needed");
}
@ -98,15 +89,7 @@ public:
return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
}
void set_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
if (val == clean_card_val()) {
val = (jbyte)claimed_card_val();
} else {
val |= (jbyte)claimed_card_val();
}
_byte_map[card_index] = val;
}
inline void set_card_claimed(size_t card_index);
void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
void g1_mark_as_young(const MemRegion& mr);

View File

@ -0,0 +1,58 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
#define SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "oops/oop.inline.hpp"
// We export this to make it available in cases where the static
// type of the barrier set is known. Note that it is non-virtual.
template <class T> void G1SATBCardTableModRefBS::inline_write_ref_field_pre(T* field, oop newVal) {
T heap_oop = oopDesc::load_heap_oop(field);
if (!oopDesc::is_null(heap_oop)) {
enqueue(oopDesc::decode_heap_oop(heap_oop));
}
}
// These are the more general virtual versions.
void G1SATBCardTableModRefBS::write_ref_field_pre_work(oop* field, oop new_val) {
inline_write_ref_field_pre(field, new_val);
}
void G1SATBCardTableModRefBS::write_ref_field_pre_work(narrowOop* field, oop new_val) {
inline_write_ref_field_pre(field, new_val);
}
void G1SATBCardTableModRefBS::set_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
if (val == clean_card_val()) {
val = (jbyte)claimed_card_val();
} else {
val |= (jbyte)claimed_card_val();
}
_byte_map[card_index] = val;
}
#endif // SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
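inline_write_ref_field_pre(), now defined in this new .inline.hpp, has the usual SATB pre-barrier shape: load the field's old value and, if it is non-null, enqueue it before the field is overwritten. A self-contained model of that control flow, with plain pointers and a std::vector standing in for the SATB queue (names are illustrative, not G1's):

#include <vector>

struct Object;                                   // opaque heap object

struct PreBarrierModel {
  std::vector<Object*> _queue;                   // stand-in for the SATB queue

  void enqueue(Object* old_val) { _queue.push_back(old_val); }

  // Called before *field is overwritten: remember the previous referent.
  void write_ref_field_pre(Object** field) {
    Object* old_val = *field;                    // load_heap_oop
    if (old_val != nullptr) {                    // !is_null
      enqueue(old_val);                          // record the pre-image
    }
  }
};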

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
#include "gc/parallel/asPSYoungGen.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/gcUtil.hpp"
#include "gc/shared/spaceDecorator.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
@ -123,7 +123,6 @@ void ObjectStartArray::reset() {
memset(_blocks_region.start(), clean_block, _blocks_region.byte_size());
}
bool ObjectStartArray::object_starts_in_range(HeapWord* start_addr,
HeapWord* end_addr) const {
assert(start_addr <= end_addr,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -139,23 +139,7 @@ class ObjectStartArray : public CHeapObj<mtGC> {
// a given block. The blocks contain the offset of the last
// object in that block. Scroll backwards by one, and the first
// object hit should be at the beginning of the block
HeapWord* object_start(HeapWord* addr) const {
assert_covered_region_contains(addr);
jbyte* block = block_for_addr(addr);
HeapWord* scroll_forward = offset_addr_for_block(block--);
while (scroll_forward > addr) {
scroll_forward = offset_addr_for_block(block--);
}
HeapWord* next = scroll_forward;
while (next <= addr) {
scroll_forward = next;
next += oop(next)->size();
}
assert(scroll_forward <= addr, "wrong order for current and arg");
assert(addr <= next, "wrong order for arg and next");
return scroll_forward;
}
inline HeapWord* object_start(HeapWord* addr) const;
bool is_block_allocated(HeapWord* addr) {
assert_covered_region_contains(addr);
@ -165,7 +149,6 @@ class ObjectStartArray : public CHeapObj<mtGC> {
return true;
}
#undef assert_covered_region_contains
// Return true if an object starts in the range of heap addresses.
// If an object starts at an address corresponding to

View File

@ -0,0 +1,53 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_PARALLEL_OBJECTSTARTARRAY_INLINE_HPP
#define SHARE_VM_GC_PARALLEL_OBJECTSTARTARRAY_INLINE_HPP
#include "gc/parallel/objectStartArray.hpp"
// Optimized for finding the first object that crosses into
// a given block. The blocks contain the offset of the last
// object in that block. Scroll backwards by one, and the first
// object hit should be at the beginning of the block
HeapWord* ObjectStartArray::object_start(HeapWord* addr) const {
assert_covered_region_contains(addr);
jbyte* block = block_for_addr(addr);
HeapWord* scroll_forward = offset_addr_for_block(block--);
while (scroll_forward > addr) {
scroll_forward = offset_addr_for_block(block--);
}
HeapWord* next = scroll_forward;
while (next <= addr) {
scroll_forward = next;
next += oop(next)->size();
}
assert(scroll_forward <= addr, "wrong order for current and arg");
assert(addr <= next, "wrong order for arg and next");
return scroll_forward;
}
#endif // SHARE_VM_GC_PARALLEL_OBJECTSTARTARRAY_INLINE_HPP
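object_start() above works in two phases: seed the search from the block table, scrolling back a block while the recorded start lies beyond addr, then scan forward one object at a time until the scan passes addr. A self-contained model of the same two phases over flat arrays (the block_start/obj_size tables here are hypothetical, not the HotSpot data structures):

#include <cassert>
#include <cstddef>

// block_start[b]  : word index of an object start at or before block b
// obj_size[start] : size in words of the object starting at word index 'start'
size_t object_start_model(const size_t* block_start, size_t words_per_block,
                          const size_t* obj_size, size_t addr) {
  size_t block = addr / words_per_block;
  size_t cur = block_start[block];
  while (cur > addr) {                           // phase 1: scroll backwards by blocks
    cur = block_start[--block];
  }
  size_t next = cur;
  while (next <= addr) {                         // phase 2: scan forward by objects
    cur = next;
    next += obj_size[cur];
  }
  assert(cur <= addr && addr < next);            // cur starts the object covering addr
  return cur;
}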

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "gc/parallel/parMarkBitMap.hpp"
#include "gc/parallel/psParallelCompact.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,10 +28,11 @@
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psParallelCompact.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/vmPSOperations.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3169,3 +3169,14 @@ UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
do_addr(addr);
return ParMarkBitMap::incomplete;
}
ParMarkBitMapClosure::IterationStatus
FillClosure::do_addr(HeapWord* addr, size_t size) {
CollectedHeap::fill_with_objects(addr, size);
HeapWord* const end = addr + size;
do {
_start_array->allocate_block(addr);
addr += oop(addr)->size();
} while (addr < end);
return ParMarkBitMap::incomplete;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1242,16 +1242,6 @@ class PSParallelCompact : AllStatic {
#endif // #ifdef ASSERT
};
inline bool PSParallelCompact::mark_obj(oop obj) {
const int obj_size = obj->size();
if (mark_bitmap()->mark_obj(obj, obj_size)) {
_summary_data.add_obj(obj, obj_size);
return true;
} else {
return false;
}
}
inline bool PSParallelCompact::is_marked(oop obj) {
return mark_bitmap()->is_marked(obj);
}
@ -1386,9 +1376,8 @@ class UpdateOnlyClosure: public ParMarkBitMapClosure {
inline void do_addr(HeapWord* addr);
};
class FillClosure: public ParMarkBitMapClosure
{
public:
class FillClosure: public ParMarkBitMapClosure {
public:
FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
_start_array(PSParallelCompact::start_array(space_id))
@ -1397,17 +1386,9 @@ public:
"cannot use FillClosure in the young gen");
}
virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
CollectedHeap::fill_with_objects(addr, size);
HeapWord* const end = addr + size;
do {
_start_array->allocate_block(addr);
addr += oop(addr)->size();
} while (addr < end);
return ParMarkBitMap::incomplete;
}
virtual IterationStatus do_addr(HeapWord* addr, size_t size);
private:
private:
ObjectStartArray* const _start_array;
};

View File

@ -31,6 +31,16 @@
#include "oops/klass.hpp"
#include "oops/oop.inline.hpp"
inline bool PSParallelCompact::mark_obj(oop obj) {
const int obj_size = obj->size();
if (mark_bitmap()->mark_obj(obj, obj_size)) {
_summary_data.add_obj(obj, obj_size);
return true;
} else {
return false;
}
}
template <class T>
inline void PSParallelCompact::adjust_pointer(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psParallelCompact.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psTasks.hpp"
#include "gc/shared/collectorPolicy.hpp"
@ -763,6 +763,15 @@ GCTaskManager* const PSScavenge::gc_task_manager() {
return ParallelScavengeHeap::gc_task_manager();
}
// Adaptive size policy support. When the young generation/old generation
// boundary moves, _young_generation_boundary must be reset
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
_young_generation_boundary = v;
if (UseCompressedOops) {
_young_generation_boundary_compressed = (uintptr_t)oopDesc::encode_heap_oop((oop)v);
}
}
void PSScavenge::initialize() {
// Arguments must have been parsed

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -117,12 +117,7 @@ class PSScavenge: AllStatic {
}
// Adaptive size policy support. When the young generation/old generation
// boundary moves, _young_generation_boundary must be reset
static void set_young_generation_boundary(HeapWord* v) {
_young_generation_boundary = v;
if (UseCompressedOops) {
_young_generation_boundary_compressed = (uintptr_t)oopDesc::encode_heap_oop((oop)v);
}
}
static void set_young_generation_boundary(HeapWord* v);
// Called by parallelScavengeHeap to init the tenuring threshold
static void initialize();

View File

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcHeapSummary.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,15 +23,16 @@
*/
#include "precompiled.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "memory/resourceArea.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/copy.hpp"
/* Copyright (c) 1992, 2015, Oracle and/or its affiliates, and Stanford University.
/* Copyright (c) 1992, 2016, Oracle and/or its affiliates, and Stanford University.
See the LICENSE file for license information. */
AgeTable::AgeTable(bool global) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,7 @@
class GCPolicyCounters;
/* Copyright (c) 1992, 2015, Oracle and/or its affiliates, and Stanford University.
/* Copyright (c) 1992, 2016, Oracle and/or its affiliates, and Stanford University.
See the LICENSE file for license information. */
// Age table for adaptive feedback-mediated tenuring (scavenging)
@ -56,9 +56,7 @@ class AgeTable VALUE_OBJ_CLASS_SPEC {
void clear();
// add entry
void add(oop p, size_t oop_size) {
add(p->age(), oop_size);
}
inline void add(oop p, size_t oop_size);
void add(uint age, size_t oop_size) {
assert(age > 0 && age < table_size, "invalid age of object");

View File

@ -0,0 +1,36 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_SHARED_AGETABLE_INLINE_HPP
#define SHARE_VM_GC_SHARED_AGETABLE_INLINE_HPP
#include "gc/shared/ageTable.hpp"
#include "oops/oop.inline.hpp"
// add entry
void AgeTable::add(oop p, size_t oop_size) {
add(p->age(), oop_size);
}
#endif // SHARE_VM_GC_SHARED_AGETABLE_INLINE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,6 +30,7 @@
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -146,23 +146,15 @@ class FilteringClosure: public ExtendedOopClosure {
HeapWord* _boundary;
ExtendedOopClosure* _cl;
protected:
template <class T> inline void do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
_cl->do_oop(p);
}
}
}
template <class T> inline void do_oop_work(T* p);
public:
FilteringClosure(HeapWord* boundary, ExtendedOopClosure* cl) :
ExtendedOopClosure(cl->ref_processor()), _boundary(boundary),
_cl(cl) {}
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { FilteringClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { FilteringClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
virtual bool do_metadata() { return do_metadata_nv(); }
inline bool do_metadata_nv() { assert(!_cl->do_metadata(), "assumption broken, must change to 'return _cl->do_metadata()'"); return false; }
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -124,6 +124,19 @@ template <class T> inline void FastScanClosure::do_oop_work(T* p) {
inline void FastScanClosure::do_oop_nv(oop* p) { FastScanClosure::do_oop_work(p); }
inline void FastScanClosure::do_oop_nv(narrowOop* p) { FastScanClosure::do_oop_work(p); }
template <class T> void FilteringClosure::do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
_cl->do_oop(p);
}
}
}
void FilteringClosure::do_oop_nv(oop* p) { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop_nv(narrowOop* p) { FilteringClosure::do_oop_work(p); }
// Note similarity to ScanClosure; the difference is that
// the barrier set is taken care of outside this closure.
template <class T> inline void ScanWeakRefClosure::do_oop_work(T* p) {

View File

@ -30,7 +30,7 @@
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.inline.hpp"

View File

@ -58,23 +58,13 @@ class AbstractRefProcTaskExecutor;
class DiscoveredList {
public:
DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
oop head() const {
return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
_oop_head;
}
inline oop head() const;
HeapWord* adr_head() {
return UseCompressedOops ? (HeapWord*)&_compressed_head :
(HeapWord*)&_oop_head;
}
void set_head(oop o) {
if (UseCompressedOops) {
// Must compress the head ptr.
_compressed_head = oopDesc::encode_heap_oop(o);
} else {
_oop_head = o;
}
}
bool is_empty() const { return head() == NULL; }
inline void set_head(oop o);
inline bool is_empty() const;
size_t length() { return _len; }
void set_length(size_t len) { _len = len; }
void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
@ -113,22 +103,7 @@ private:
public:
inline DiscoveredListIterator(DiscoveredList& refs_list,
OopClosure* keep_alive,
BoolObjectClosure* is_alive):
_refs_list(refs_list),
_prev_next(refs_list.adr_head()),
_prev(NULL),
_ref(refs_list.head()),
#ifdef ASSERT
_first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
_processed(0),
_removed(0),
#endif
_next(NULL),
_keep_alive(keep_alive),
_is_alive(is_alive)
{ }
BoolObjectClosure* is_alive);
// End Of List.
inline bool has_next() const { return _ref != NULL; }

View File

@ -0,0 +1,68 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_INLINE_HPP
#define SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_INLINE_HPP
#include "gc/shared/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
oop DiscoveredList::head() const {
return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
_oop_head;
}
void DiscoveredList::set_head(oop o) {
if (UseCompressedOops) {
// Must compress the head ptr.
_compressed_head = oopDesc::encode_heap_oop(o);
} else {
_oop_head = o;
}
}
bool DiscoveredList::is_empty() const {
return head() == NULL;
}
DiscoveredListIterator::DiscoveredListIterator(DiscoveredList& refs_list,
OopClosure* keep_alive,
BoolObjectClosure* is_alive):
_refs_list(refs_list),
_prev_next(refs_list.adr_head()),
_prev(NULL),
_ref(refs_list.head()),
#ifdef ASSERT
_first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
_processed(0),
_removed(0),
#endif
_next(NULL),
_keep_alive(keep_alive),
_is_alive(is_alive) {
}
#endif // SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_INLINE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -360,9 +360,7 @@ private:
return size;
}
inline size_t obj_size(const HeapWord* addr) const {
return oop(addr)->size();
}
inline size_t obj_size(const HeapWord* addr) const;
public:
CompactibleSpace() :
@ -508,9 +506,7 @@ class ContiguousSpace: public CompactibleSpace {
return true; // Always true, since scan_limit is top
}
inline size_t scanned_block_size(const HeapWord* addr) const {
return oop(addr)->size();
}
inline size_t scanned_block_size(const HeapWord* addr) const;
protected:
HeapWord* _top;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,6 +72,10 @@ OffsetTableContigSpace::block_start_const(const void* p) const {
return _offsets.block_start(p);
}
size_t CompactibleSpace::obj_size(const HeapWord* addr) const {
return oop(addr)->size();
}
template <class SpaceType>
inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
// Compute the new addresses for the live objects and store it in the mark
@ -331,4 +335,9 @@ inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
if (ZapUnusedHeapArea) space->mangle_unused_area();
}
}
size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
return oop(addr)->size();
}
#endif // SHARE_VM_GC_SHARED_SPACE_INLINE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,6 +40,14 @@
// HeapInspection
int KlassSizeStats::count(oop x) {
return (HeapWordSize * (((x) != NULL) ? (x)->size() : 0));
}
int KlassSizeStats::count_array(objArrayOop x) {
return (HeapWordSize * (((x) != NULL) ? (x)->size() : 0));
}
inline KlassInfoEntry::~KlassInfoEntry() {
if (_subclasses != NULL) {
delete _subclasses;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -154,13 +154,9 @@ public:
HEAP_INSPECTION_COLUMNS_DO(DECLARE_KLASS_SIZE_STATS_FIELD)
static int count(oop x) {
return (HeapWordSize * (((x) != NULL) ? (x)->size() : 0));
}
static int count(oop x);
static int count_array(objArrayOop x) {
return (HeapWordSize * (((x) != NULL) ? (x)->size() : 0));
}
static int count_array(objArrayOop x);
template <class T> static int count(T* x) {
return (HeapWordSize * ((x) ? (x)->size() : 0));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "oops/klass.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
// Should this header be preserved during GC (when biased locking is enabled)?

View File

@ -81,7 +81,7 @@ private:
// Accessing
oop obj_at(int index) const;
void /*inline*/ obj_at_put(int index, oop value);
void inline obj_at_put(int index, oop value);
oop atomic_compare_exchange_oop(int index, oop exchange_value, oop compare_value);

View File

@ -39,7 +39,7 @@ inline oop objArrayOopDesc::obj_at(int index) const {
}
}
inline void objArrayOopDesc::obj_at_put(int index, oop value) {
void objArrayOopDesc::obj_at_put(int index, oop value) {
if (UseCompressedOops) {
oop_store(obj_at_addr<narrowOop>(index), value);
} else {

View File

@ -82,16 +82,16 @@ class oopDesc {
// objects during a GC) -- requires a valid klass pointer
inline void init_mark();
/*inline*/ Klass* klass() const;
inline Klass* klass() const;
inline Klass* klass_or_null() const volatile;
inline Klass** klass_addr();
inline narrowKlass* compressed_klass_addr();
/*inline*/ void set_klass(Klass* k);
inline void set_klass(Klass* k);
// For klass field compression
inline int klass_gap() const;
/*inline*/ void set_klass_gap(int z);
inline void set_klass_gap(int z);
// For when the klass pointer is being used as a linked list "next" field.
inline void set_klass_to_list_ptr(oop k);
inline oop list_ptr_from_klass();
@ -103,7 +103,7 @@ class oopDesc {
inline bool is_a(Klass* k) const;
// Returns the actual oop size of the object
/*inline*/ int size();
inline int size();
// Sometimes (for complicated concurrency-related reasons), it is useful
// to be able to figure out the size of an object knowing its klass.
@ -111,7 +111,7 @@ class oopDesc {
// type test operations (inlined in oop.inline.hpp)
inline bool is_instance() const;
/*inline*/ bool is_array() const;
inline bool is_array() const;
inline bool is_objArray() const;
inline bool is_typeArray() const;
@ -149,15 +149,15 @@ class oopDesc {
// These are overloaded for oop and narrowOop as are the other functions
// below so that they can be called in template functions.
static inline oop decode_heap_oop_not_null(oop v) { return v; }
static /*inline*/ oop decode_heap_oop_not_null(narrowOop v);
static inline oop decode_heap_oop_not_null(narrowOop v);
static inline oop decode_heap_oop(oop v) { return v; }
static /*inline*/ oop decode_heap_oop(narrowOop v);
static inline oop decode_heap_oop(narrowOop v);
// Encode an oop pointer to a narrow oop. The or_null versions accept
// null oop pointer, others do not in order to eliminate the
// null checking branches.
static inline narrowOop encode_heap_oop_not_null(oop v);
static /*inline*/ narrowOop encode_heap_oop(oop v);
static inline narrowOop encode_heap_oop(oop v);
// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
@ -284,8 +284,8 @@ class oopDesc {
inline bool has_bias_pattern() const;
// asserts
/*inline*/ bool is_oop(bool ignore_mark_word = false) const;
/*inline*/ bool is_oop_or_null(bool ignore_mark_word = false) const;
inline bool is_oop(bool ignore_mark_word = false) const;
inline bool is_oop_or_null(bool ignore_mark_word = false) const;
#ifndef PRODUCT
inline bool is_unlocked_oop() const;
#endif
@ -312,7 +312,7 @@ class oopDesc {
inline oop forwardee() const;
// Age of object during scavenge
/*inline*/ uint age() const;
inline uint age() const;
inline void incr_age();
// mark-sweep support

View File

@ -100,7 +100,7 @@ void oopDesc::init_mark() {
set_mark(markOopDesc::prototype_for_object(this));
}
inline Klass* oopDesc::klass() const {
Klass* oopDesc::klass() const {
if (UseCompressedClassPointers) {
return Klass::decode_klass_not_null(_metadata._compressed_klass);
} else {
@ -129,7 +129,7 @@ narrowKlass* oopDesc::compressed_klass_addr() {
return &_metadata._compressed_klass;
}
inline void oopDesc::set_klass(Klass* k) {
void oopDesc::set_klass(Klass* k) {
// since klasses are promoted no store check is needed
assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
@ -144,7 +144,7 @@ int oopDesc::klass_gap() const {
return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}
inline void oopDesc::set_klass_gap(int v) {
void oopDesc::set_klass_gap(int v) {
if (UseCompressedClassPointers) {
*(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
}
@ -174,7 +174,7 @@ bool oopDesc::is_a(Klass* k) const {
return klass()->is_subtype_of(k);
}
inline int oopDesc::size() {
int oopDesc::size() {
return size_given_klass(klass());
}
@ -264,7 +264,7 @@ int oopDesc::size_given_klass(Klass* klass) {
}
bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
inline bool oopDesc::is_array() const { return klass()->is_array_klass(); }
bool oopDesc::is_array() const { return klass()->is_array_klass(); }
bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }
@ -298,7 +298,7 @@ inline bool check_obj_alignment(oop obj) {
return cast_from_oop<intptr_t>(obj) % MinObjAlignmentInBytes == 0;
}
inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
assert(!is_null(v), "narrow oop value can never be zero");
address base = Universe::narrow_oop_base();
int shift = Universe::narrow_oop_shift();
@ -307,7 +307,7 @@ inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
return result;
}
inline oop oopDesc::decode_heap_oop(narrowOop v) {
oop oopDesc::decode_heap_oop(narrowOop v) {
return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}
@ -325,7 +325,7 @@ narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
return (narrowOop)result;
}
inline narrowOop oopDesc::encode_heap_oop(oop v) {
narrowOop oopDesc::encode_heap_oop(oop v) {
return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}
@ -516,7 +516,7 @@ bool oopDesc::has_bias_pattern() const {
}
// used only for asserts
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
bool oopDesc::is_oop(bool ignore_mark_word) const {
oop obj = (oop) this;
if (!check_obj_alignment(obj)) return false;
if (!Universe::heap()->is_in_reserved(obj)) return false;
@ -538,7 +538,7 @@ inline bool oopDesc::is_oop(bool ignore_mark_word) const {
// used only for asserts
inline bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
return this == NULL ? true : is_oop(ignore_mark_word);
}
@ -620,7 +620,7 @@ oop oopDesc::forwardee() const {
}
// The following method needs to be MT safe.
inline uint oopDesc::age() const {
uint oopDesc::age() const {
assert(!is_forwarded(), "Attempt to read age from forwarded mark");
if (has_displaced_mark()) {
return displaced_mark()->age();
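decode_heap_oop_not_null() above reconstructs the full pointer as heap base + (narrow value << shift), and encode_heap_oop_not_null() performs the inverse. A self-contained model of that arithmetic with made-up base and shift values (not the real Universe::narrow_oop_base()/narrow_oop_shift() accessors):

#include <cassert>
#include <cstdint>

static char heap[1 << 20];                       // stand-in for the Java heap
static char* const narrow_base  = heap;          // hypothetical narrow-oop base
static const int   narrow_shift = 3;             // hypothetical shift (8-byte alignment)

uint32_t encode_model(char* p) {                 // models encode_heap_oop_not_null
  assert(p != nullptr);
  return (uint32_t)((uintptr_t)(p - narrow_base) >> narrow_shift);
}

char* decode_model(uint32_t v) {                 // models decode_heap_oop_not_null
  assert(v != 0);
  return narrow_base + ((uintptr_t)v << narrow_shift);
}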