commit b22466d59d
Merge

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,8 @@ import sun.jvm.hotspot.utilities.*;

public class GenCollectedHeap extends SharedHeap {
  private static CIntegerField nGensField;
  private static long gensOffset;
  private static AddressField youngGenField;
  private static AddressField oldGenField;
  private static AddressField genSpecsField;

  private static GenerationFactory genFactory;
@@ -52,7 +53,8 @@ public class GenCollectedHeap extends SharedHeap {
    Type type = db.lookupType("GenCollectedHeap");

    nGensField = type.getCIntegerField("_n_gens");
    gensOffset = type.getField("_gens").getOffset();
    youngGenField = type.getAddressField("_young_gen");
    oldGenField = type.getAddressField("_old_gen");
    genSpecsField = type.getAddressField("_gen_specs");

    genFactory = new GenerationFactory();
@@ -68,18 +70,19 @@ public class GenCollectedHeap extends SharedHeap {

  public Generation getGen(int i) {
    if (Assert.ASSERTS_ENABLED) {
      Assert.that((i >= 0) && (i < nGens()), "Index " + i +
                  " out of range (should be between 0 and " + nGens() + ")");
      Assert.that((i == 0) || (i == 1), "Index " + i +
                  " out of range (should be 0 or 1)");
    }

    if ((i < 0) || (i >= nGens())) {
    switch (i) {
    case 0:
      return genFactory.newObject(youngGenField.getValue(addr));
    case 1:
      return genFactory.newObject(oldGenField.getValue(addr));
    default:
      // no generation for i, and assertions disabled.
      return null;
    }

    Address genAddr = addr.getAddressAt(gensOffset +
                                        (i * VM.getVM().getAddressSize()));
    return genFactory.newObject(addr.getAddressAt(gensOffset +
                                (i * VM.getVM().getAddressSize())));
  }

  public boolean isIn(Address a) {

@@ -26,8 +26,8 @@
#include "runtime/os.hpp"
#include "vm_version_sparc.hpp"

static bool detect_niagara() {
  char cpu[128];
static bool cpuinfo_field_contains(const char* field, const char* value) {
  char line[1024];
  bool rv = false;

  FILE* fp = fopen("/proc/cpuinfo", "r");
@@ -35,9 +35,10 @@ static bool detect_niagara() {
    return rv;
  }

  while (!feof(fp)) {
    if (fscanf(fp, "cpu\t\t: %100[^\n]", cpu) == 1) {
      if (strstr(cpu, "Niagara") != NULL) {
  while (fgets(line, sizeof(line), fp) != NULL) {
    assert(strlen(line) < sizeof(line) - 1, "buffer line[1024] is too small.");
    if (strncmp(line, field, strlen(field)) == 0) {
      if (strstr(line, value) != NULL) {
        rv = true;
      }
      break;
@@ -45,10 +46,17 @@
  }

  fclose(fp);

  return rv;
}

static bool detect_niagara() {
  return cpuinfo_field_contains("cpu", "Niagara");
}

static bool detect_blkinit() {
  return cpuinfo_field_contains("cpucaps", "blkinit");
}

int VM_Version::platform_features(int features) {
  // Default to generic v9
  features = generic_v9_m;
@@ -58,5 +66,9 @@ int VM_Version::platform_features(int features) {
    features = niagara1_m | T_family_m;
  }

  if (detect_blkinit()) {
    features |= blk_init_instructions_m;
  }

  return features;
}
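
[Editorial reading aid, not part of the patch.] The refactor above replaces the one-off fscanf() loop with a generic helper that scans /proc/cpuinfo for a line starting with a given field name and checks whether that line contains a given value. A minimal self-contained sketch of the same technique, using plain C I/O and a printf() driver in place of HotSpot's assert and feature-flag plumbing:

    #include <stdio.h>
    #include <string.h>

    static bool cpuinfo_field_contains(const char* field, const char* value) {
      char line[1024];
      bool rv = false;
      FILE* fp = fopen("/proc/cpuinfo", "r");
      if (fp == NULL) {
        return rv;                 // no procfs: report "not found"
      }
      while (fgets(line, sizeof(line), fp) != NULL) {
        // Key on the field prefix, e.g. "cpu\t\t: ..." or "cpucaps\t: ...".
        if (strncmp(line, field, strlen(field)) == 0) {
          if (strstr(line, value) != NULL) {
            rv = true;
          }
          break;                   // first matching field line decides
        }
      }
      fclose(fp);
      return rv;
    }

    int main() {
      // Equivalents of the patch's detect_niagara() and detect_blkinit() wrappers.
      printf("Niagara: %d\nblkinit: %d\n",
             cpuinfo_field_contains("cpu", "Niagara"),
             cpuinfo_field_contains("cpucaps", "blkinit"));
      return 0;
    }

One caveat of prefix matching: a query for "cpu" would also match a "cpucaps" line if it appeared first; the kernel's field ordering makes this benign in practice.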

@@ -26,6 +26,7 @@
#define SHARE_VM_CI_CIKLASS_HPP

#include "ci/ciType.hpp"
#include "oops/klass.hpp"

// ciKlass
//

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "classfile/classFileParser.hpp"
#include "classfile/stackMapTable.hpp"
#include "classfile/verifier.hpp"
#include "memory/resourceArea.hpp"

// Keep these in a separate file to prevent inlining

@@ -38,6 +38,7 @@
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "memory/referenceType.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/constantPool.hpp"
#include "oops/fieldStreams.hpp"
@@ -59,6 +60,7 @@
#include "services/threadService.hpp"
#include "utilities/array.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
#include "utilities/resourceHash.hpp"
@@ -313,6 +315,19 @@ inline Symbol* check_symbol_at(constantPoolHandle cp, int index) {
  return NULL;
}

PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void ClassFileParser::report_assert_property_failure(const char* msg, TRAPS) {
  ResourceMark rm(THREAD);
  fatal(err_msg(msg, _class_name->as_C_string()));
}

void ClassFileParser::report_assert_property_failure(const char* msg, int index, TRAPS) {
  ResourceMark rm(THREAD);
  fatal(err_msg(msg, index, _class_name->as_C_string()));
}
PRAGMA_DIAG_POP

constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
  ClassFileStream* cfs = stream();
  constantPoolHandle nullHandle;

@@ -26,12 +26,15 @@
#define SHARE_VM_CLASSFILE_CLASSFILEPARSER_HPP

#include "classfile/classFileStream.hpp"
#include "memory/resourceArea.hpp"
#include "classfile/symbolTable.hpp"
#include "oops/annotations.hpp"
#include "oops/constantPool.hpp"
#include "oops/typeArrayOop.hpp"
#include "utilities/accessFlags.hpp"
#include "classfile/symbolTable.hpp"

class CompressedLineNumberWriteStream;
class FieldAllocationCount;
class FieldInfo;
class FieldLayoutInfo;


@@ -315,13 +318,13 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
    if (!b) { classfile_parse_error(msg, CHECK); }
  }

PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
  inline void assert_property(bool b, const char* msg, TRAPS) {
  void report_assert_property_failure(const char* msg, TRAPS);
  void report_assert_property_failure(const char* msg, int index, TRAPS);

  inline void assert_property(bool b, const char* msg, TRAPS) {
#ifdef ASSERT
    if (!b) {
      ResourceMark rm(THREAD);
      fatal(err_msg(msg, _class_name->as_C_string()));
      report_assert_property_failure(msg, THREAD);
    }
#endif
  }
@@ -329,12 +332,10 @@ inline void assert_property(bool b, const char* msg, TRAPS) {
  inline void assert_property(bool b, const char* msg, int index, TRAPS) {
#ifdef ASSERT
    if (!b) {
      ResourceMark rm(THREAD);
      fatal(err_msg(msg, index, _class_name->as_C_string()));
      report_assert_property_failure(msg, index, THREAD);
    }
#endif
  }
PRAGMA_DIAG_POP

  inline void check_property(bool property, const char* msg, int index, TRAPS) {
    if (_need_verify) {

@@ -26,6 +26,8 @@
#define SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP

#include "classfile/classLoader.hpp"
#include "oops/instanceKlass.hpp"
#include "runtime/handles.hpp"

class ClassLoaderExt: public ClassLoader { // AllStatic
public:

@@ -31,7 +31,6 @@
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
#include "classfile/systemDictionary.hpp"
#include "code/compressedStream.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/hashtable.hpp"


@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -869,7 +869,7 @@ void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
    if (prev_level >= 0) {
      size_t prev_size = 0;
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      Generation* prev_gen = gch->_gens[prev_level];
      Generation* prev_gen = gch->get_gen(prev_level);
      prev_size = prev_gen->capacity();
      gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
                             prev_size/1000);

@@ -37,7 +37,7 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
    _use_cache = true;

    _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
    _hot_cache = _hot_cache_memory.allocate(_hot_cache_size);

    reset_hot_cache_internal();

@@ -52,7 +52,8 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
G1HotCardCache::~G1HotCardCache() {
  if (default_use_cache()) {
    assert(_hot_cache != NULL, "Logic");
    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache);
    _hot_cache_memory.free();
    _hot_cache = NULL;
  }
}


@@ -61,6 +61,8 @@ class G1HotCardCache: public CHeapObj<mtGC> {

  G1CardCounts _card_counts;

  ArrayAllocator<jbyte*, mtGC> _hot_cache_memory;

  // The card cache table
  jbyte** _hot_cache;


@@ -26,6 +26,8 @@
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "oops/oop.inline.hpp"

@@ -66,6 +66,15 @@ void PSPromotionManager::initialize() {
  // for work stealing.
}

// Helper functions to get around the circular dependency between
// psScavenge.inline.hpp and psPromotionManager.inline.hpp.
bool PSPromotionManager::should_scavenge(oop* p, bool check_to_space) {
  return PSScavenge::should_scavenge(p, check_to_space);
}
bool PSPromotionManager::should_scavenge(narrowOop* p, bool check_to_space) {
  return PSScavenge::should_scavenge(p, check_to_space);
}

PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");

@@ -203,6 +203,12 @@ class PSPromotionManager VALUE_OBJ_CLASS_SPEC {

  inline void process_popped_location_depth(StarTask p);

  static bool should_scavenge(oop* p, bool check_to_space = false);
  static bool should_scavenge(narrowOop* p, bool check_to_space = false);

  template <class T, bool promote_immediately>
  void copy_and_push_safe_barrier(T* p);

  template <class T> inline void claim_or_forward_depth(T* p);

  TASKQUEUE_STATS_ONLY(inline void record_steal(StarTask& p);)

@@ -56,7 +56,7 @@ inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {

template <class T>
inline void PSPromotionManager::claim_or_forward_depth(T* p) {
  assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
  assert(should_scavenge(p, true), "revisiting object?");
  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
         "Sanity");
  assert(Universe::heap()->is_in(p), "pointer outside heap");
@@ -98,7 +98,7 @@ inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
//
template<bool promote_immediately>
oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(PSScavenge::should_scavenge(&o), "Sanity");
  assert(should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

@@ -257,7 +257,7 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
    // information.
    if (TraceScavenge) {
      gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                             PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
                             should_scavenge(&new_obj) ? "copying" : "tenuring",
                             new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
    }
#endif
@@ -265,6 +265,40 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
  return new_obj;
}

// Attempt to "claim" oop at p via CAS, push the new obj if successful
// This version tests the oop* to make sure it is within the heap before
// attempting marking.
template <class T, bool promote_immediately>
inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
  assert(should_scavenge(p, true), "revisiting object?");

  oop o = oopDesc::load_decode_heap_oop_not_null(p);
  oop new_obj = o->is_forwarded()
      ? o->forwardee()
      : copy_to_survivor_space<promote_immediately>(o);

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge && o->is_forwarded()) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                           "forwarding",
                           new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
  }
#endif

  oopDesc::encode_store_heap_oop_not_null(p, new_obj);

  // We cannot mark without test, as some code passes us pointers
  // that are outside the heap. These pointers are either from roots
  // or from metadata.
  if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
      Universe::heap()->is_in_reserved(p)) {
    if (PSScavenge::is_obj_in_young(new_obj)) {
      PSScavenge::card_table()->inline_write_ref_field_gc(p, new_obj);
    }
  }
}
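
[Editorial reading aid, not part of the patch.] The "cannot mark without test" comment above boils down to one predicate: dirty a card only for slots that are heap locations outside the young generation and that now hold a young reference, i.e. exactly the old-to-young pointers the remembered set must track. A tiny self-contained sketch, with hypothetical address ranges standing in for the real heap layout:

    #include <stdint.h>

    // Hypothetical generation bounds; the real values come from the heap layout.
    static const uintptr_t young_lo = 0x1000, young_hi = 0x5000;
    static const uintptr_t heap_lo  = 0x1000, heap_hi  = 0x9000;

    static bool in_young(const void* a) {
      uintptr_t v = (uintptr_t)a;
      return v >= young_lo && v < young_hi;
    }

    static bool in_heap(const void* a) {
      uintptr_t v = (uintptr_t)a;
      return v >= heap_lo && v < heap_hi;
    }

    // Root and metadata slots fail in_heap() and are skipped, which is why
    // the barrier above must test before marking.
    static bool needs_card_mark(const void* slot, const void* new_obj) {
      return in_heap(slot) && !in_young(slot) && in_young(new_obj);
    }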

inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
  if (is_oop_masked(p)) {
@@ -274,9 +308,9 @@ inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
  } else {
    if (p.is_narrow()) {
      assert(UseCompressedOops, "Error");
      PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
      copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(p);
    } else {
      PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
      copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(p);
    }
  }
}

@@ -105,7 +105,7 @@ public:

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(_promotion_manager, p);
      _promotion_manager->copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(p);
    }
  }
  virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); }

@@ -144,9 +144,6 @@ class PSScavenge: AllStatic {
  template <class T> static inline bool should_scavenge(T* p, MutableSpace* to_space);
  template <class T> static inline bool should_scavenge(T* p, bool check_to_space);

  template <class T, bool promote_immediately>
  inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);

  static void copy_and_push_safe_barrier_from_klass(PSPromotionManager* pm, oop* p);

  // Is an object in the young generation

@@ -27,7 +27,6 @@

#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "memory/iterator.hpp"
@@ -63,42 +62,6 @@ inline bool PSScavenge::should_scavenge(T* p, bool check_to_space) {
  return should_scavenge(p);
}

// Attempt to "claim" oop at p via CAS, push the new obj if successful
// This version tests the oop* to make sure it is within the heap before
// attempting marking.
template <class T, bool promote_immediately>
inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
                                                   T* p) {
  assert(should_scavenge(p, true), "revisiting object?");

  oop o = oopDesc::load_decode_heap_oop_not_null(p);
  oop new_obj = o->is_forwarded()
      ? o->forwardee()
      : pm->copy_to_survivor_space<promote_immediately>(o);

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge && o->is_forwarded()) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                           "forwarding",
                           new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
  }
#endif

  oopDesc::encode_store_heap_oop_not_null(p, new_obj);

  // We cannot mark without test, as some code passes us pointers
  // that are outside the heap. These pointers are either from roots
  // or from metadata.
  if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
      Universe::heap()->is_in_reserved(p)) {
    if (PSScavenge::is_obj_in_young(new_obj)) {
      card_table()->inline_write_ref_field_gc(p, new_obj);
    }
  }
}

template<bool promote_immediately>
class PSRootsClosure: public OopClosure {
 private:
@@ -108,7 +71,7 @@ class PSRootsClosure: public OopClosure {
  template <class T> void do_oop_work(T *p) {
    if (PSScavenge::should_scavenge(p)) {
      // We never card mark roots, maybe call a func without test?
      PSScavenge::copy_and_push_safe_barrier<T, promote_immediately>(_promotion_manager, p);
      _promotion_manager->copy_and_push_safe_barrier<T, promote_immediately>(p);
    }
  }
 public:

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "code/stubs.hpp"
#include "interpreter/cppInterpreter.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "memory/resourceArea.hpp"
#ifdef ZERO
#ifdef TARGET_ARCH_zero
# include "entry_zero.hpp"

@@ -965,32 +965,38 @@ public:
  }

  static void test_old_size() {
    size_t flag_value;
    size_t flag_value;
    size_t heap_alignment = CollectorPolicy::compute_heap_alignment();

    save_flags();
    save_flags();

    // If OldSize is set on the command line, it should be used
    // for both min and initial old size if less than min heap.
    flag_value = 20 * M;
    set_basic_flag_values();
    FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
    verify_old_min(flag_value);
    // If OldSize is set on the command line, it should be used
    // for both min and initial old size if less than min heap.
    flag_value = 20 * M;
    set_basic_flag_values();
    FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
    verify_old_min(flag_value);

    set_basic_flag_values();
    FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
    verify_old_initial(flag_value);

    // If MaxNewSize is large, the maximum OldSize will be less than
    // what's requested on the command line and it should be reset
    // ergonomically.
    flag_value = 30 * M;
    set_basic_flag_values();
    FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
    FLAG_SET_CMDLINE(uintx, MaxNewSize, 170*M);
    // Calculate what we expect the flag to be.
    flag_value = MaxHeapSize - MaxNewSize;
    verify_old_initial(flag_value);
    set_basic_flag_values();
    FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
    // Calculate what we expect the flag to be.
    size_t expected_old_initial = align_size_up(InitialHeapSize, heap_alignment) - MaxNewSize;
    verify_old_initial(expected_old_initial);

    // If MaxNewSize is large, the maximum OldSize will be less than
    // what's requested on the command line and it should be reset
    // ergonomically.
    // We intentionally set MaxNewSize + OldSize > MaxHeapSize (see over_size).
    flag_value = 30 * M;
    set_basic_flag_values();
    FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
    size_t over_size = 20*M;
    size_t new_size_value = align_size_up(MaxHeapSize, heap_alignment) - flag_value + over_size;
    FLAG_SET_CMDLINE(uintx, MaxNewSize, new_size_value);
    // Calculate what we expect the flag to be.
    expected_old_initial = align_size_up(MaxHeapSize, heap_alignment) - MaxNewSize;
    verify_old_initial(expected_old_initial);
    restore_flags();
  }
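
[Editorial reading aid, not part of the patch.] The arithmetic in the rewritten ergonomics check works out as follows: write A for align_size_up(MaxHeapSize, heap_alignment). The test sets OldSize = 30M and MaxNewSize = A - 30M + 20M = A - 10M, so the requested OldSize + MaxNewSize = A + 10M overshoots the aligned heap by exactly over_size. The ergonomically reset value is then expected_old_initial = A - MaxNewSize = A - (A - 10M) = 10M: whatever the aligned heap size, OldSize must shrink from the requested 30M to 10M.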

  static void verify_young_min(size_t expected) {
@@ -1011,6 +1017,12 @@ public:
    MarkSweepPolicy msp;
    msp.initialize_all();

    if (InitialHeapSize > initial_heap_size) {
      // InitialHeapSize was adapted by msp.initialize_all, e.g. due to alignment
      // caused by 64K page size.
      initial_heap_size = InitialHeapSize;
    }

    size_t expected = msp.scale_by_NewRatio_aligned(initial_heap_size);
    assert(msp.initial_young_size() == expected, err_msg("%zu != %zu", msp.initial_young_size(), expected));
    assert(FLAG_IS_ERGO(NewSize) && NewSize == expected,

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -383,7 +383,7 @@ void DefNewGeneration::compute_new_size() {
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  Generation* next_gen = gch->get_gen(next_level);
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();

@@ -86,6 +86,7 @@ jint GenCollectedHeap::initialize() {

  int i;
  _n_gens = gen_policy()->number_of_generations();
  assert(_n_gens == 2, "There is no support for more than two generations");

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
@@ -126,11 +127,12 @@ jint GenCollectedHeap::initialize() {

  _gch = this;

  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  ReservedSpace young_rs = heap_rs.first_part(_gen_specs[0]->max_size(), false, false);
  _young_gen = _gen_specs[0]->init(young_rs, 0, rem_set());
  heap_rs = heap_rs.last_part(_gen_specs[0]->max_size());

  ReservedSpace old_rs = heap_rs.first_part(_gen_specs[1]->max_size(), false, false);
  _old_gen = _gen_specs[1]->init(old_rs, 1, rem_set());
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
@@ -145,7 +147,6 @@ jint GenCollectedHeap::initialize() {
  return JNI_OK;
}


char* GenCollectedHeap::allocate(size_t alignment,
                                 ReservedSpace* heap_rs){
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
@@ -172,7 +173,6 @@ char* GenCollectedHeap::allocate(size_t alignment,
  return heap_rs->base();
}


void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
@@ -195,41 +195,30 @@ void GenCollectedHeap::post_initialize() {

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
  return _young_gen->used() + _old_gen->used();
}

// Save the "used_region" for generations level and lower.
void GenCollectedHeap::save_used_regions(int level) {
  assert(level >= 0, "Illegal level parameter");
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  if (level == 1) {
    _old_gen->save_used_region();
  }
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Update the _full_collections_completed counter
@@ -293,16 +282,20 @@ void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
  HeapWord* res = NULL;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != NULL || first_only) {
      return res;
    }
  }
  // Otherwise...
  return NULL;

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
@@ -322,12 +315,107 @@ bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}

void GenCollectedHeap::do_collection(bool full,
void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs,
                                          bool restore_marks_for_biased_locking) {
  // Timer for individual generations. Last argument is false: no CR
  // FIXME: We should try to start the timing earlier to cover more of the GC pause
  // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
  // so we can assume here that the next GC id is what we want.
  GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->kind(),gc_cause());

  size_t prev_used = gen->used();
  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  if (PrintGC && Verbose) {
    gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                        gen->level(),
                        gen->stat_record()->invocations,
                        size * HeapWordSize);
  }

  if (run_verification && VerifyBeforeGC) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  if (restore_marks_for_biased_locking) {
    // We perform this mark word preservation work lazily
    // because it's only at this point that we know whether we
    // absolutely have to do it; we want to avoid doing it for
    // scavenge-only collections where it's unnecessary
    BiasedLocking::preserve_marks();
  }

  // Do collection work
  {
    // Note on ref discovery: For what appear to be historical reasons,
    // GCH enables and disabled (by enqueing) refs discovery.
    // In the future this should be moved into the generation's
    // collect method so that ref discovery and enqueueing concerns
    // are local to a generation. The collect method could return
    // an appropriate indication in the case that notification on
    // the ref lock was needed. This will make the treatment of
    // weak refs more uniform (and indeed remove such concerns
    // from GCH). XXX

    HandleMark hm;  // Discard invalid handles created during gc
    save_marks();   // save marks for all gens
    // We want to discover references, but not process them yet.
    // This mode is disabled in process_discovered_references if the
    // generation does some collection work, or in
    // enqueue_discovered_references if the generation returns
    // without doing any work.
    ReferenceProcessor* rp = gen->ref_processor();
    // If the discovery of ("weak") refs in this generation is
    // atomic wrt other collectors in this configuration, we
    // are guaranteed to have empty discovered ref lists.
    if (rp->discovery_is_atomic()) {
      rp->enable_discovery();
      rp->setup_policy(clear_soft_refs);
    } else {
      // collect() below will enable discovery as appropriate
    }
    gen->collect(full, clear_soft_refs, size, is_tlab);
    if (!rp->enqueuing_is_done()) {
      rp->enqueue_discovered_references();
    } else {
      rp->set_enqueuing_is_done(false);
    }
    rp->verify_no_references_recorded();
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen->level(), full);

  if (run_verification && VerifyAfterGC) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  if (PrintGCDetails) {
    gclog_or_tty->print(":");
    gen->print_heap_change(prev_used);
  }
}

void GenCollectedHeap::do_collection(bool full,
                                     bool clear_all_soft_refs,
                                     size_t size,
                                     bool is_tlab,
                                     int max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

@@ -367,141 +455,62 @@ void GenCollectedHeap::do_collection(bool full,
  increment_total_collections(complete);

  size_t gch_prev_used = used();
  bool run_verification = total_collections() >= VerifyGCStartAt;

  int starting_level = 0;
  if (full) {
    // Search for the oldest generation which will collect all younger
    // generations, and start collection loop there.
    for (int i = max_level; i >= 0; i--) {
      if (_gens[i]->full_collects_younger_generations()) {
        starting_level = i;
        break;
      }
  bool prepared_for_verification = false;
  int max_level_collected = 0;
  bool old_collects_young = (max_level == 1) &&
                            full &&
                            _old_gen->full_collects_younger_generations();
  if (!old_collects_young &&
      _young_gen->should_collect(full, size, is_tlab)) {
    if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
      prepare_for_verify();
      prepared_for_verification = true;
    }

    assert(!_young_gen->performs_in_place_marking(), "No young generation do in place marking");
    collect_generation(_young_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 0,
                       do_clear_all_soft_refs,
                       false);

    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
      // Allocation request was met by young GC.
      size = 0;
    }
  }

  bool must_restore_marks_for_biased_locking = false;

  int max_level_collected = starting_level;
  for (int i = starting_level; i <= max_level; i++) {
    if (_gens[i]->should_collect(full, size, is_tlab)) {
      if (i == n_gens() - 1) {  // a major collection is to happen
        if (!complete) {
          // The full_collections increment was missed above.
          increment_total_full_collections();
        }
        pre_full_gc_dump(NULL); // do any pre full gc dumps
      }
      // Timer for individual generations. Last argument is false: no CR
      // FIXME: We should try to start the timing earlier to cover more of the GC pause
      // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
      // so we can assume here that the next GC id is what we want.
      GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek());
      TraceCollectorStats tcs(_gens[i]->counters());
      TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());

      size_t prev_used = _gens[i]->used();
      _gens[i]->stat_record()->invocations++;
      _gens[i]->stat_record()->accumulated_time.start();

      // Must be done anew before each collection because
      // a previous collection will do mangling and will
      // change top of some spaces.
      record_gen_tops_before_GC();

      if (PrintGC && Verbose) {
        gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                            i,
                            _gens[i]->stat_record()->invocations,
                            size*HeapWordSize);
      }

      if (VerifyBeforeGC && i >= VerifyGCLevel &&
          total_collections() >= VerifyGCStartAt) {
        HandleMark hm;  // Discard invalid handles created during verification
        if (!prepared_for_verification) {
          prepare_for_verify();
          prepared_for_verification = true;
        }
        Universe::verify(" VerifyBeforeGC:");
      }
      COMPILER2_PRESENT(DerivedPointerTable::clear());

      if (!must_restore_marks_for_biased_locking &&
          _gens[i]->performs_in_place_marking()) {
        // We perform this mark word preservation work lazily
        // because it's only at this point that we know whether we
        // absolutely have to do it; we want to avoid doing it for
        // scavenge-only collections where it's unnecessary
        must_restore_marks_for_biased_locking = true;
        BiasedLocking::preserve_marks();
      }

      // Do collection work
      {
        // Note on ref discovery: For what appear to be historical reasons,
        // GCH enables and disabled (by enqueing) refs discovery.
        // In the future this should be moved into the generation's
        // collect method so that ref discovery and enqueueing concerns
        // are local to a generation. The collect method could return
        // an appropriate indication in the case that notification on
        // the ref lock was needed. This will make the treatment of
        // weak refs more uniform (and indeed remove such concerns
        // from GCH). XXX

        HandleMark hm;  // Discard invalid handles created during gc
        save_marks();   // save marks for all gens
        // We want to discover references, but not process them yet.
        // This mode is disabled in process_discovered_references if the
        // generation does some collection work, or in
        // enqueue_discovered_references if the generation returns
        // without doing any work.
        ReferenceProcessor* rp = _gens[i]->ref_processor();
        // If the discovery of ("weak") refs in this generation is
        // atomic wrt other collectors in this configuration, we
        // are guaranteed to have empty discovered ref lists.
        if (rp->discovery_is_atomic()) {
          rp->enable_discovery();
          rp->setup_policy(do_clear_all_soft_refs);
        } else {
          // collect() below will enable discovery as appropriate
        }
        _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
        if (!rp->enqueuing_is_done()) {
          rp->enqueue_discovered_references();
        } else {
          rp->set_enqueuing_is_done(false);
        }
        rp->verify_no_references_recorded();
      }
      max_level_collected = i;

      // Determine if allocation request was met.
      if (size > 0) {
        if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
          if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
            size = 0;
          }
        }
      }

      COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

      _gens[i]->stat_record()->accumulated_time.stop();

      update_gc_stats(i, full);

      if (VerifyAfterGC && i >= VerifyGCLevel &&
          total_collections() >= VerifyGCStartAt) {
        HandleMark hm;  // Discard invalid handles created during verification
        Universe::verify(" VerifyAfterGC:");
      }

      if (PrintGCDetails) {
        gclog_or_tty->print(":");
        _gens[i]->print_heap_change(prev_used);
      }
  if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
    if (!complete) {
      // The full_collections increment was missed above.
      increment_total_full_collections();
    }

    pre_full_gc_dump(NULL);    // do any pre full gc dumps

    if (!prepared_for_verification && run_verification &&
        VerifyGCLevel <= 1 && VerifyBeforeGC) {
      prepare_for_verify();
    }

    assert(_old_gen->performs_in_place_marking(), "All old generations do in place marking");
    collect_generation(_old_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 1,
                       do_clear_all_soft_refs,
                       true);

    must_restore_marks_for_biased_locking = true;
    max_level_collected = 1;
  }

  // Update "complete" boolean wrt what actually transpired --
@@ -523,10 +532,11 @@ void GenCollectedHeap::do_collection(bool full,
    }
  }

  for (int j = max_level_collected; j >= 0; j -= 1) {
    // Adjust generation sizes.
    _gens[j]->compute_new_size();
  // Adjust generation sizes.
  if (max_level_collected == 1) {
    _old_gen->compute_new_size();
  }
  _young_gen->compute_new_size();

  if (complete) {
    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
@@ -583,18 +593,18 @@ gen_process_roots(int level,

  if (younger_gens_as_roots) {
    if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      if (level == 1) {
        not_older_gens->set_generation(_young_gen);
        _young_gen->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
  if (level == 0) {
    older_gens->set_generation(_old_gen);
    rem_set()->younger_refs_iterate(_old_gen, older_gens);
    older_gens->reset_generation();
  }

@@ -635,9 +645,8 @@ gen_process_roots(int level,
void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  SharedHeap::process_weak_roots(root_closure);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
  _young_gen->ref_processor()->weak_oops_do(root_closure);
  _old_gen->ref_processor()->weak_oops_do(root_closure);
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
@@ -645,9 +654,11 @@ void GenCollectedHeap:: \
oop_since_save_marks_iterate(int level, \
                             OopClosureType* cur, \
                             OopClosureType* older) { \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur); \
  for (int i = level+1; i < n_gens(); i++) { \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older); \
  if (level == 0) { \
    _young_gen->oop_since_save_marks_iterate##nv_suffix(cur); \
    _old_gen->oop_since_save_marks_iterate##nv_suffix(older); \
  } else { \
    _old_gen->oop_since_save_marks_iterate##nv_suffix(cur); \
  } \
}

@@ -656,22 +667,22 @@ ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  if (level == 0 && !_young_gen->no_allocs_since_save_marks()) {
    return false;
  }
  return true;
  return _old_gen->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
  return _young_gen->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
  return _young_gen->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
  return _young_gen->end_addr();
}

// public collection interfaces
@@ -734,12 +745,12 @@ void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
#if INCLUDE_ALL_GCS
bool GenCollectedHeap::create_cms_collector() {

  assert(_gens[1]->kind() == Generation::ConcurrentMarkSweep,
  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    (ConcurrentMarkSweepGeneration*)_old_gen,
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

@@ -806,8 +817,8 @@ void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
}

bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
  assert(result == _gens[0]->is_in_reserved(p),
  bool result = ((HeapWord*)p) < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
  return result;
}
@@ -825,13 +836,7 @@ bool GenCollectedHeap::is_in(const void* p) const {
            VMError::fatal_error_in_progress(), "too expensive");

#endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  // Otherwise...
  return false;
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
@@ -840,114 +845,97 @@ bool GenCollectedHeap::is_in(const void* p) const {
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
  return p < _young_gen->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
  _young_gen->oop_iterate(cl);
  _old_gen->oop_iterate(cl);
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
  _young_gen->safe_object_iterate(cl);
  _old_gen->safe_object_iterate(cl);
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  Space* res = _young_gen->space_containing(addr);
  if (res != NULL) {
    return res;
  }
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
  res = _old_gen->space_containing(addr);
  assert(res != NULL, "Could not find containing space");
  return res;
}


HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }
  assert(false, "Some generation should contain the address");
  return NULL;

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_size(addr);
  }
  assert(false, "Some generation should contain the address");
  return 0;

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_size(addr);
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->block_is_obj(addr);
  }
  assert(false, "Some generation should contain the address");
  return false;

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  return _old_gen->block_is_obj(addr);
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  return _young_gen->supports_tlab_allocation();
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->tlab_capacity();
  }
  return result;
  return 0;
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_used();
    }
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->tlab_used();
  }
  return result;
  return 0;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->unsafe_max_tlab_alloc();
  }
  return result;
  return 0;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
@ -996,17 +984,15 @@ static void sort_scratch_list(ScratchBlock*& list) {
|
||||
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
|
||||
size_t max_alloc_words) {
|
||||
ScratchBlock* res = NULL;
|
||||
for (int i = 0; i < _n_gens; i++) {
|
||||
_gens[i]->contribute_scratch(res, requestor, max_alloc_words);
|
||||
}
|
||||
_young_gen->contribute_scratch(res, requestor, max_alloc_words);
|
||||
_old_gen->contribute_scratch(res, requestor, max_alloc_words);
|
||||
sort_scratch_list(res);
|
||||
return res;
|
||||
}
|
||||
|
||||
void GenCollectedHeap::release_scratch() {
|
||||
for (int i = 0; i < _n_gens; i++) {
|
||||
_gens[i]->reset_scratch();
|
||||
}
|
||||
_young_gen->reset_scratch();
|
||||
_old_gen->reset_scratch();
|
||||
}
|
||||
|
||||
class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
|
||||
@ -1021,39 +1007,29 @@ void GenCollectedHeap::prepare_for_verify() {
|
||||
generation_iterate(&blk, false);
|
||||
}
|
||||
|
||||
|
||||
void GenCollectedHeap::generation_iterate(GenClosure* cl,
|
||||
bool old_to_young) {
|
||||
if (old_to_young) {
|
||||
for (int i = _n_gens-1; i >= 0; i--) {
|
||||
cl->do_generation(_gens[i]);
|
||||
}
|
||||
cl->do_generation(_old_gen);
|
||||
cl->do_generation(_young_gen);
|
||||
} else {
|
||||
for (int i = 0; i < _n_gens; i++) {
|
||||
cl->do_generation(_gens[i]);
|
||||
}
|
||||
cl->do_generation(_young_gen);
|
||||
cl->do_generation(_old_gen);
|
||||
}
|
||||
}
|
||||
|
||||
void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
|
||||
for (int i = 0; i < _n_gens; i++) {
|
||||
_gens[i]->space_iterate(cl, true);
|
||||
}
|
||||
_young_gen->space_iterate(cl, true);
|
||||
_old_gen->space_iterate(cl, true);
|
||||
}
|
||||
|
||||
bool GenCollectedHeap::is_maximal_no_gc() const {
|
||||
for (int i = 0; i < _n_gens; i++) {
|
||||
if (!_gens[i]->is_maximal_no_gc()) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
|
||||
}
|
||||
|
||||
void GenCollectedHeap::save_marks() {
|
||||
for (int i = 0; i < _n_gens; i++) {
|
||||
_gens[i]->save_marks();
|
||||
}
|
||||
_young_gen->save_marks();
|
||||
_old_gen->save_marks();
|
||||
}
|
||||
|
||||
GenCollectedHeap* GenCollectedHeap::heap() {
|
||||
@ -1065,27 +1041,33 @@ GenCollectedHeap* GenCollectedHeap::heap() {
|
||||
|
||||
void GenCollectedHeap::prepare_for_compaction() {
|
||||
guarantee(_n_gens = 2, "Wrong number of generations");
|
||||
Generation* old_gen = _gens[1];
|
||||
// Start by compacting into same gen.
|
||||
CompactPoint cp(old_gen);
|
||||
old_gen->prepare_for_compaction(&cp);
|
||||
Generation* young_gen = _gens[0];
|
||||
young_gen->prepare_for_compaction(&cp);
|
||||
CompactPoint cp(_old_gen);
|
||||
_old_gen->prepare_for_compaction(&cp);
|
||||
_young_gen->prepare_for_compaction(&cp);
|
||||
}
|
||||
|
||||
GCStats* GenCollectedHeap::gc_stats(int level) const {
|
||||
return _gens[level]->gc_stats();
|
||||
if (level == 0) {
|
||||
return _young_gen->gc_stats();
|
||||
} else {
|
||||
return _old_gen->gc_stats();
|
||||
}
|
||||
}

void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print("%s", g->name());
      gclog_or_tty->print(" ");
    }
    g->verify();
  if (!silent) {
    gclog_or_tty->print("%s", _old_gen->name());
    gclog_or_tty->print(" ");
  }
  _old_gen->verify();

  if (!silent) {
    gclog_or_tty->print("%s", _young_gen->name());
    gclog_or_tty->print(" ");
  }
  _young_gen->verify();

  if (!silent) {
    gclog_or_tty->print("remset ");
  }
@ -1093,9 +1075,8 @@ void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
}
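
The removed loop ran from the highest level down to 0, so the explicit rewrite keeps the same verification order: old generation first, then young. The repeated print-then-verify step could be factored as below (verify_gen is hypothetical, not part of this change):

    // Hypothetical helper: print the generation's name unless silent, then verify.
    static void verify_gen(Generation* gen, bool silent) {
      if (!silent) {
        gclog_or_tty->print("%s ", gen->name());
      }
      gen->verify();
    }
    // verify() would then reduce to:
    //   verify_gen(_old_gen, silent);
    //   verify_gen(_young_gen, silent);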

void GenCollectedHeap::print_on(outputStream* st) const {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->print_on(st);
  }
  _young_gen->print_on(st);
  _old_gen->print_on(st);
  MetaspaceAux::print_on(st);
}

@ -33,7 +33,7 @@

class SubTasksDone;

// A "GenCollectedHeap" is a SharedHeap that uses generational
// collection. It is represented with a sequence of Generation's.
// collection. It has two generations, young and old.
class GenCollectedHeap : public SharedHeap {
  friend class GenCollectorPolicy;
  friend class Generation;
@ -63,7 +63,10 @@ public:

private:
  int _n_gens;
  Generation* _gens[max_gens];

  Generation* _young_gen;
  Generation* _old_gen;

  GenerationSpec** _gen_specs;

  // The singleton Gen Remembered Set.
@ -85,6 +88,11 @@ public:
  SubTasksDone* _gen_process_roots_tasks;
  SubTasksDone* gen_process_roots_tasks() { return _gen_process_roots_tasks; }

  // Collects the given generation.
  void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
                          bool run_verification, bool clear_soft_refs,
                          bool restore_marks_for_biased_locking);

  // In block contents verification, the number of header words to skip
  NOT_PRODUCT(static size_t _skip_header_HeapWords;)

@ -138,8 +146,12 @@ public:
    return CollectedHeap::GenCollectedHeap;
  }

  Generation* young_gen() { return _young_gen; }
  Generation* old_gen() { return _old_gen; }

  // The generational collector policy.
  GenCollectorPolicy* gen_policy() const { return _gen_policy; }

  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); }

  // Adaptive size policy
@ -309,20 +321,17 @@ public:
  // Update above counter, as appropriate, at the end of a concurrent GC cycle
  unsigned int update_full_collections_completed(unsigned int count);

  // Update "time of last gc" for all constituent generations
  // to "now".
  // Update "time of last gc" for all generations to "now".
  void update_time_of_last_gc(jlong now) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_time_of_last_gc(now);
    }
    _young_gen->update_time_of_last_gc(now);
    _old_gen->update_time_of_last_gc(now);
  }

  // Update the gc statistics for each generation.
  // "level" is the level of the latest collection.
  void update_gc_stats(int current_level, bool full) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_gc_stats(current_level, full);
    }
    _young_gen->update_gc_stats(current_level, full);
    _old_gen->update_gc_stats(current_level, full);
  }

  // Override.
@ -366,21 +375,23 @@ public:

  // Return the generation before "gen".
  Generation* prev_gen(Generation* gen) const {
    int l = gen->level();
    guarantee(l > 0, "Out of bounds");
    return _gens[l-1];
    guarantee(gen->level() == 1, "Out of bounds");
    return _young_gen;
  }

  // Return the generation after "gen".
  Generation* next_gen(Generation* gen) const {
    int l = gen->level() + 1;
    guarantee(l < _n_gens, "Out of bounds");
    return _gens[l];
    guarantee(gen->level() == 0, "Out of bounds");
    return _old_gen;
  }

  Generation* get_gen(int i) const {
    guarantee(i >= 0 && i < _n_gens, "Out of bounds");
    return _gens[i];
    guarantee(i == 0 || i == 1, "Out of bounds");
    if (i == 0) {
      return _young_gen;
    } else {
      return _old_gen;
    }
  }
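
With the levels pinned to 0 (young) and 1 (old), the neighbor accessors collapse into guarded constants. A short usage sketch of the accessors from this hunk (gch is assumed to be the heap singleton):

    GenCollectedHeap* gch = GenCollectedHeap::heap();
    Generation* young = gch->get_gen(0);        // same object as gch->young_gen()
    Generation* old   = gch->next_gen(young);   // guarantees young->level() == 0
    // and symmetrically: gch->prev_gen(old) guarantees old->level() == 1
    assert(gch->prev_gen(old) == young, "two-generation heap");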

  int n_gens() const {

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -159,7 +159,7 @@ void GenMarkSweep::allocate_stacks() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Scratch request on behalf of oldest generation; will do no
  // allocation.
  ScratchBlock* scratch = gch->gather_scratch(gch->_gens[gch->_n_gens-1], 0);
  ScratchBlock* scratch = gch->gather_scratch(gch->get_gen(gch->_n_gens-1), 0);

  // $$$ To cut a corner, we'll only use the first scratch block, and then
  // revert to malloc.
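
Since _n_gens is fixed at two, get_gen(gch->_n_gens-1) always resolves to the old generation; an equivalent call via the accessor added in genCollectedHeap.hpp would be (sketch only):

    // Equivalent, given exactly two generations (levels 0 and 1):
    ScratchBlock* scratch = gch->gather_scratch(gch->old_gen(), 0);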

@ -26,6 +26,7 @@
#include "classfile/classLoaderData.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/genRemSet.hpp"
#include "oops/klass.hpp"

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -155,7 +155,7 @@ Generation* Generation::next_gen() const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  int next = level() + 1;
  if (next < gch->_n_gens) {
    return gch->_gens[next];
    return gch->get_gen(next);
  } else {
    return NULL;
  }

@ -23,7 +23,6 @@
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/rewriter.hpp"
#include "memory/universe.inline.hpp"

@ -41,7 +41,6 @@
#if INCLUDE_ALL_GCS
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#endif // INCLUDE_ALL_GCS

@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"

@ -33,7 +33,6 @@
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#endif // INCLUDE_ALL_GCS

inline void oopDesc::update_contents(ParCompactionManager* cm) {

@ -29,7 +29,6 @@
#if INCLUDE_ALL_GCS
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#endif // INCLUDE_ALL_GCS

// ParallelScavengeHeap methods

@ -90,8 +90,6 @@
# include "gc_implementation/shared/gcUtil.hpp"
# include "gc_implementation/shared/generationCounters.hpp"
# include "gc_implementation/shared/immutableSpace.hpp"
# include "gc_implementation/shared/markSweep.hpp"
# include "gc_implementation/shared/markSweep.inline.hpp"
# include "gc_implementation/shared/mutableSpace.hpp"
# include "gc_implementation/shared/spaceCounters.hpp"
# include "gc_implementation/shared/spaceDecorator.hpp"
@ -214,4 +214,8 @@ ResetNoHandleMark::~ResetNoHandleMark() {
  area->_no_handle_mark_nesting = _no_handle_mark_nesting;
}

bool instanceKlassHandle::is_instanceKlass(const Klass* k) {
  return k->oop_is_instance();
}

#endif

@ -25,7 +25,11 @@
#ifndef SHARE_VM_RUNTIME_HANDLES_HPP
#define SHARE_VM_RUNTIME_HANDLES_HPP

#include "oops/klass.hpp"
#include "oops/oop.hpp"
#include "oops/oopsHierarchy.hpp"

class InstanceKlass;
class Klass;

//------------------------------------------------------------------------------------------------------------------------
// In order to preserve oops during garbage collection, they should be
@ -201,16 +205,16 @@ class instanceKlassHandle : public KlassHandle {
  /* Constructors */
  instanceKlassHandle () : KlassHandle() {}
  instanceKlassHandle (const Klass* k) : KlassHandle(k) {
    assert(k == NULL || k->oop_is_instance(),
           "illegal type");
    assert(k == NULL || is_instanceKlass(k), "illegal type");
  }
  instanceKlassHandle (Thread* thread, const Klass* k) : KlassHandle(thread, k) {
    assert(k == NULL || k->oop_is_instance(),
           "illegal type");
    assert(k == NULL || is_instanceKlass(k), "illegal type");
  }
  /* Access to klass part */
  InstanceKlass* operator () () const { return (InstanceKlass*)obj(); }
  InstanceKlass* operator -> () const { return (InstanceKlass*)obj(); }

  debug_only(bool is_instanceKlass(const Klass* k));
};
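
The constructor assert now calls an out-of-line helper: is_instanceKlass() is declared under debug_only(...) and defined in handles.cpp, which presumably lets handles.hpp forward-declare InstanceKlass instead of including its full definition. The effect per build type, roughly:

    #ifdef ASSERT
      // debug build: the helper exists and the constructor checks the type
      assert(k == NULL || is_instanceKlass(k), "illegal type");
    #else
      // product build: debug_only(...) expands to nothing, so neither the
      // declaration nor the check is emitted
    #endif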

@ -23,7 +23,6 @@
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/genCollectedHeap.hpp"

@ -67,7 +67,6 @@
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"

@ -46,7 +46,6 @@
#include "compiler/oopMap.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/immutableSpace.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
@ -552,8 +551,9 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
  nonstatic_field(GenerationSpec, _max_size, size_t) \
  \
  static_field(GenCollectedHeap, _gch, GenCollectedHeap*) \
  nonstatic_field(GenCollectedHeap, _young_gen, Generation*) \
  nonstatic_field(GenCollectedHeap, _old_gen, Generation*) \
  nonstatic_field(GenCollectedHeap, _n_gens, int) \
  unchecked_nonstatic_field(GenCollectedHeap, _gens, sizeof(GenCollectedHeap::_gens)) /* NOTE: no type */ \
  nonstatic_field(GenCollectedHeap, _gen_specs, GenerationSpec**) \
  \
  nonstatic_field(HeapWord, i, char*) \
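
For context: vmStructs rows are how out-of-process tools (such as the Serviceability Agent) locate HotSpot fields by name; the two typed rows added here replace the untyped unchecked_nonstatic_field row for the removed _gens array. What a row boils down to, illustratively (field names from this hunk, exact table layout assumed):

    // Sketch of the kind of entry the macros generate:
    struct VMStructEntrySketch {
      const char* typeName;    // e.g. "GenCollectedHeap"
      const char* fieldName;   // e.g. "_young_gen" or "_old_gen"
      uint64_t    offset;      // byte offset of the field within the type
    };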

@ -22,7 +22,7 @@
 *
 */
#include "precompiled.hpp"

#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/nmtDCmd.hpp"
#include "services/memReporter.hpp"

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/management.hpp"
#include "services/runtimeService.hpp"

@ -30,7 +30,6 @@
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -36,6 +36,7 @@ import java.nio.charset.Charset;
import java.util.List;

public class CompressedClassSpaceSizeInJmapHeap {
    // Note that on some platforms it may require root privileges to run this test.
    public static void main(String[] args) throws Exception {
        if (!Platform.is64bit()) {
            // Compressed Class Space is only available on 64-bit JVMs