Merge
commit 8fa30e8884
@@ -28,7 +28,7 @@
#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/freeList.hpp"
#include "memory/space.hpp"

@@ -65,6 +65,7 @@
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"
#include "utilities/stack.inline.hpp"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
@@ -38,8 +38,8 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/stack.hpp"
#include "utilities/taskqueue.hpp"
#include "utilities/yieldingWorkgroup.hpp"

@@ -27,7 +27,7 @@

#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/shared/concurrentGCThread.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/thread.hpp"

class ConcurrentMarkSweepGeneration;
class CMSCollector;
@@ -34,6 +34,7 @@
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"

@@ -29,6 +29,9 @@
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_interface/collectedHeap.hpp"

class EvacuationInfo;

// Base class for G1 allocators.
class G1Allocator : public CHeapObj<mtGC> {

@@ -70,6 +70,7 @@
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
@@ -29,7 +29,7 @@
#include "gc_implementation/g1/g1CardCounts.hpp"
#include "memory/allocation.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalDefinitions.hpp"

class DirtyCardQueue;

@@ -123,7 +123,7 @@ class G1HotCardCache: public CHeapObj<mtGC> {
  // Resets the hot card cache and discards the entries.
  void reset_hot_cache() {
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
    assert(Thread::current()->is_VM_thread(), "Current thread should be the VMthread");
    assert(Thread::current_noinline()->is_VM_thread(), "Current thread should be the VMthread");
    if (default_use_cache()) {
      reset_hot_cache_internal();
    }
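How reset_hot_cache() reads once this hunk is applied (reconstructed from the diff above; the assert on Thread::current_noinline() is the surviving line, since current_noinline() is introduced by this same commit, and the method's closing brace is added here for completeness):

  // Resets the hot card cache and discards the entries.
  void reset_hot_cache() {
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
    assert(Thread::current_noinline()->is_VM_thread(), "Current thread should be the VMthread");
    if (default_use_cache()) {
      reset_hot_cache_internal();
    }
  }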
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1MARKSWEEP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1MARKSWEEP_HPP

#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/generation.hpp"

@@ -28,6 +28,7 @@
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1ParScanThreadState.hpp"
#include "memory/iterator.inline.hpp"
#include "utilities/stack.inline.hpp"

G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
  G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),

@@ -26,8 +26,10 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/stack.inline.hpp"

G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
  : _g1h(g1h),

@@ -38,6 +38,7 @@
#include "oops/oop.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/intHisto.hpp"
#include "utilities/stack.inline.hpp"

#define CARD_REPEAT_HISTO 0
@@ -27,6 +27,7 @@
#include "classfile/javaClasses.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/g1StringDedupTable.hpp"
#include "memory/gcLocker.hpp"
#include "memory/padded.inline.hpp"

@@ -29,6 +29,7 @@
#include "runtime/mutexLocker.hpp"

class G1StringDedupEntryCache;
class G1StringDedupUnlinkOrOopsDoClosure;

//
// Table entry in the deduplication hashtable. Points weakly to the

@@ -31,7 +31,6 @@
#include "gc_implementation/g1/survRateGroup.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/space.inline.hpp"
#include "memory/watermark.hpp"
#include "utilities/macros.hpp"

@@ -26,8 +26,8 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP

#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"

#define VM_STRUCTS_G1(nonstatic_field, static_field) \
                                                     \
@@ -53,6 +53,7 @@
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/workgroup.hpp"

#ifdef _MSC_VER

@@ -170,27 +170,11 @@ size_t ParallelScavengeHeap::max_capacity() const {
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  return false;
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

bool ParallelScavengeHeap::is_scavengable(const void* addr) {
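Reconstructed from the hunk above, the two containment predicates collapse from early-return chains to short-circuit ORs:

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}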
@@ -32,7 +32,7 @@
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/strongRootsScope.hpp"
#include "utilities/ostream.hpp"

@@ -26,7 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.hpp"

//

@@ -239,22 +239,11 @@ class CollectedHeap : public CHeapObj<mtInternal> {
  }

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // Since this method can be expensive in general, we restrict its
  // use to assertion checking only.
  // This method can be expensive so avoid using it in performance critical
  // code.
  virtual bool is_in(const void* p) const = 0;

  bool is_in_or_null(const void* p) const {
    return p == NULL || is_in(p);
  }

  bool is_in_place(Metadata** p) {
    return !Universe::heap()->is_in(p);
  }
  bool is_in_place(oop* p) { return Universe::heap()->is_in(p); }
  bool is_in_place(narrowOop* p) {
    oop o = oopDesc::load_decode_heap_oop_not_null(p);
    return Universe::heap()->is_in((const void*)o);
  }
  DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == NULL || is_in(p); })

  // Let's define some terms: a "closed" subset of a heap is one that
  //
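A minimal standalone sketch (not HotSpot code) of the DEBUG_ONLY idiom the new is_in_or_null() relies on: the wrapped member only exists when ASSERT is defined, so an expensive containment check stays out of product builds. ExampleHeap and the local DEBUG_ONLY definition below are illustrative assumptions, not part of the patch:

#include <cassert>
#include <cstddef>

// Stand-in for HotSpot's macro: emit the code only in debug builds
// (ASSERT defined), otherwise compile it away.
#ifdef ASSERT
#define DEBUG_ONLY(code) code
#else
#define DEBUG_ONLY(code)
#endif

struct ExampleHeap {
  const char* _start;
  std::size_t _size;

  // Potentially expensive in a real heap; intended for assertion checking.
  bool is_in(const void* p) const {
    const char* cp = static_cast<const char*>(p);
    return cp >= _start && cp < _start + _size;
  }

  // Debug-only convenience wrapper, mirroring the CollectedHeap change above.
  DEBUG_ONLY(bool is_in_or_null(const void* p) const {
    return p == NULL || is_in(p);
  })
};

int main() {
  char backing[64];
  ExampleHeap heap = { backing, sizeof(backing) };
  DEBUG_ONLY(assert(heap.is_in_or_null(NULL));)
  DEBUG_ONLY(assert(heap.is_in_or_null(backing + 8));)
  return 0;
}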
@@ -50,9 +50,10 @@
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#include "utilities/workgroup.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"

@@ -905,17 +906,6 @@ bool GenCollectedHeap::is_in_young(oop p) {

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
#ifndef ASSERT
  guarantee(VerifyBeforeGC ||
            VerifyDuringGC ||
            VerifyBeforeExit ||
            VerifyDuringStartup ||
            PrintAssembly ||
            tty->count() != 0 || // already printing
            VerifyAfterGC ||
            VMError::fatal_error_in_progress(), "too expensive");

#endif
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}
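Reconstructed from the hunk above: with the product-build guarantee dropped, GenCollectedHeap::is_in() reduces to two generation range checks:

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}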
@@ -94,7 +94,6 @@
# include "gc_implementation/shared/spaceCounters.hpp"
# include "gc_implementation/shared/spaceDecorator.hpp"
# include "gc_interface/collectedHeap.hpp"
# include "gc_interface/collectedHeap.inline.hpp"
# include "gc_interface/gcCause.hpp"
# include "interpreter/abstractInterpreter.hpp"
# include "interpreter/bytecode.hpp"

@@ -114,9 +113,7 @@
# include "memory/allocation.hpp"
# include "memory/allocation.inline.hpp"
# include "memory/barrierSet.hpp"
# include "memory/barrierSet.inline.hpp"
# include "memory/blockOffsetTable.hpp"
# include "memory/blockOffsetTable.inline.hpp"
# include "memory/cardTableModRefBS.hpp"
# include "memory/collectorPolicy.hpp"
# include "memory/defNewGeneration.hpp"

@@ -134,7 +131,6 @@
# include "memory/resourceArea.hpp"
# include "memory/space.hpp"
# include "memory/threadLocalAllocBuffer.hpp"
# include "memory/threadLocalAllocBuffer.inline.hpp"
# include "memory/universe.hpp"
# include "memory/universe.inline.hpp"
# include "memory/watermark.hpp"

@@ -302,7 +298,6 @@
# include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
# include "gc_implementation/g1/dirtyCardQueue.hpp"
# include "gc_implementation/g1/g1BlockOffsetTable.hpp"
# include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
# include "gc_implementation/g1/g1OopClosures.hpp"
# include "gc_implementation/g1/g1_globals.hpp"
# include "gc_implementation/g1/ptrQueue.hpp"
@@ -272,6 +272,11 @@ Thread::Thread() {
#endif // ASSERT
}

// Non-inlined version to be used where thread.inline.hpp shouldn't be included.
Thread* Thread::current_noinline() {
  return Thread::current();
}

void Thread::initialize_thread_local_storage() {
  // Note: Make sure this method only calls
  // non-blocking operations. Otherwise, it might not work

@@ -324,6 +324,8 @@ class Thread: public ThreadShadow {

  // Returns the current thread
  static inline Thread* current();
  // ... without having to include thread.inline.hpp.
  static Thread* current_noinline();

  // Common thread operations
  static void set_priority(Thread* thread, ThreadPriority priority);
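The two thread hunks above are what makes the G1HotCardCache change possible: a header can assert on the current thread through the declaration in thread.hpp alone, without pulling in thread.inline.hpp. A self-contained sketch of the pattern, with simplified names and a made-up reset_cache_checked() helper (not HotSpot code):

#include <cassert>
#include <cstddef>

class Thread {
 public:
  // Fast accessor; in HotSpot the inlined body lives in thread.inline.hpp.
  static Thread* current() { return _current; }

  // Out-of-line twin; callers only need the declaration from the ".hpp".
  static Thread* current_noinline();

  bool is_VM_thread() const { return _is_vm_thread; }

  explicit Thread(bool is_vm) : _is_vm_thread(is_vm) { _current = this; }

 private:
  static Thread* _current;
  bool _is_vm_thread;
};

Thread* Thread::_current = NULL;

// In HotSpot this definition sits out-of-line in thread.cpp, as in the hunk above.
Thread* Thread::current_noinline() {
  return Thread::current();
}

// A header-only method can now check the current thread without the inline
// header, mirroring G1HotCardCache::reset_hot_cache().
void reset_cache_checked() {
  assert(Thread::current_noinline()->is_VM_thread() && "should be the VM thread");
}

int main() {
  Thread vm_thread(true);   // pretend the VM thread is running
  reset_cache_checked();
  return 0;
}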
@@ -96,11 +96,16 @@ class Stack: public StackBase<F>
public:
  friend class StackIterator<E, F>;

  // Number of elements that fit in 4K bytes minus the size of two pointers
  // (link field and malloc header).
  static const size_t _default_segment_size = (4096 - 2 * sizeof(E*)) / sizeof(E);
  static size_t default_segment_size() { return _default_segment_size; }

  // segment_size: number of items per segment
  // max_cache_size: maxmium number of *segments* to cache
  // max_size: maximum number of items allowed, rounded to a multiple of
  // the segment size (0 == unlimited)
  inline Stack(size_t segment_size = default_segment_size(),
  inline Stack(size_t segment_size = _default_segment_size,
               size_t max_cache_size = 4, size_t max_size = 0);
  inline ~Stack() { clear(true); }
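A quick standalone check of the new segment-size constant (not part of the patch; assumes a 64-bit target where sizeof(E*) == 8, and the helper below merely mirrors the expression from stack.hpp):

#include <cstddef>
#include <cstdint>

// Mirrors (4096 - 2 * sizeof(E*)) / sizeof(E) from Stack::_default_segment_size.
template <class E>
constexpr std::size_t default_segment_size() {
  return (4096 - 2 * sizeof(E*)) / sizeof(E);
}

// With 8-byte pointers: (4096 - 16) / 8 = 510 elements per segment for
// 8-byte elements, and (4096 - 16) / 4 = 1020 for 4-byte elements.
static_assert(default_segment_size<std::uint64_t>() == 510, "8-byte elements");
static_assert(default_segment_size<std::uint32_t>() == 1020, "4-byte elements");

int main() { return 0; }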
@@ -122,8 +127,6 @@ public:
  // clear_cache is true, also release any cached segments.
  void clear(bool clear_cache = false);

  static inline size_t default_segment_size();

protected:
  // Each segment includes space for _seg_size elements followed by a link
  // (pointer) to the previous segment; the space is allocated as a single block

@@ -85,14 +85,6 @@ void Stack<E, F>::clear(bool clear_cache)
  reset(clear_cache);
}

template <class E, MEMFLAGS F>
size_t Stack<E, F>::default_segment_size()
{
  // Number of elements that fit in 4K bytes minus the size of two pointers
  // (link field and malloc header).
  return (4096 - 2 * sizeof(E*)) / sizeof(E);
}

template <class E, MEMFLAGS F>
size_t Stack<E, F>::adjust_segment_size(size_t seg_size)
{