Merge

commit 638bcd60aa

Changed files (under hotspot/src/share/vm unless noted):
  classfile/
  gc/cms/
  gc/g1/: concurrentG1Refine.cpp, concurrentG1Refine.hpp, concurrentG1RefineThread.cpp,
          concurrentG1RefineThread.hpp, g1CollectedHeap.cpp, g1YoungGenSizer.cpp, g1_globals.hpp
  gc/shared/
  logging/
  memory/
  prims/
  runtime/: arguments.cpp, arguments.hpp, commandLineFlagConstraintList.cpp,
            commandLineFlagConstraintList.hpp, commandLineFlagRangeList.cpp,
            commandLineFlagRangeList.hpp, globals.cpp
  services/
  utilities/
  test/
classfile/moduleEntry.cpp

@@ -36,6 +36,7 @@
 #include "utilities/events.hpp"
 #include "utilities/growableArray.hpp"
 #include "utilities/hashtable.inline.hpp"
+#include "utilities/ostream.hpp"

 ModuleEntry* ModuleEntryTable::_javabase_module = NULL;

@@ -359,31 +360,29 @@ void ModuleEntryTable::patch_javabase_entries(Handle module_handle) {
   java_lang_Class::set_fixup_module_field_list(NULL);
 }

 #ifndef PRODUCT
-void ModuleEntryTable::print() {
-  tty->print_cr("Module Entry Table (table_size=%d, entries=%d)",
-                table_size(), number_of_entries());
+void ModuleEntryTable::print(outputStream* st) {
+  st->print_cr("Module Entry Table (table_size=%d, entries=%d)",
+               table_size(), number_of_entries());
   for (int i = 0; i < table_size(); i++) {
     for (ModuleEntry* probe = bucket(i);
                       probe != NULL;
                       probe = probe->next()) {
-      probe->print();
+      probe->print(st);
     }
   }
 }

-void ModuleEntry::print() {
+void ModuleEntry::print(outputStream* st) {
   ResourceMark rm;
-  tty->print_cr("entry "PTR_FORMAT" name %s module "PTR_FORMAT" loader %s version %s location %s strict %s next "PTR_FORMAT,
-                p2i(this),
-                name() == NULL ? UNNAMED_MODULE : name()->as_C_string(),
-                p2i(module()),
-                loader()->loader_name(),
-                version() != NULL ? version()->as_C_string() : "NULL",
-                location() != NULL ? location()->as_C_string() : "NULL",
-                BOOL_TO_STR(!can_read_all_unnamed()), p2i(next()));
+  st->print_cr("entry "PTR_FORMAT" name %s module "PTR_FORMAT" loader %s version %s location %s strict %s next "PTR_FORMAT,
+               p2i(this),
+               name() == NULL ? UNNAMED_MODULE : name()->as_C_string(),
+               p2i(module()),
+               loader()->loader_name(),
+               version() != NULL ? version()->as_C_string() : "NULL",
+               location() != NULL ? location()->as_C_string() : "NULL",
+               BOOL_TO_STR(!can_read_all_unnamed()), p2i(next()));
 }
 #endif

 void ModuleEntryTable::verify() {
   int element_count = 0;
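A note on the pattern in this hunk: giving print() an outputStream* parameter that defaults to tty keeps every existing no-argument call site compiling while letting callers redirect the same text to any other sink (for example a buffer used when dumping VM state). A minimal standalone sketch of the idiom in plain C++; OutStream, ConsoleStream and Table are invented stand-ins, not HotSpot types:

#include <cstdio>

// Stand-in for HotSpot's outputStream hierarchy.
struct OutStream {
  virtual ~OutStream() {}
  virtual void print_cr(const char* fmt, int a, int b) = 0;
};

// Plays the role of tty: a concrete stream writing to stdout.
struct ConsoleStream : OutStream {
  void print_cr(const char* fmt, int a, int b) override {
    std::printf(fmt, a, b);
    std::printf("\n");
  }
};

ConsoleStream console;  // global default sink, like tty

struct Table {
  int _size = 4;
  int _entries = 7;
  // The default argument preserves old print() call sites unchanged.
  void print(OutStream* st = &console) {
    st->print_cr("Table (table_size=%d, entries=%d)", _size, _entries);
  }
};

int main() {
  Table t;
  t.print();            // old style: goes to the console
  ConsoleStream other;  // any other OutStream could be substituted
  t.print(&other);
}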
classfile/moduleEntry.hpp

@@ -33,6 +33,7 @@
 #include "trace/traceMacros.hpp"
 #include "utilities/growableArray.hpp"
 #include "utilities/hashtable.hpp"
+#include "utilities/ostream.hpp"

 #define UNNAMED_MODULE "Unnamed Module"

@@ -141,7 +142,7 @@ public:
   void purge_reads();
   void delete_reads();

-  void print() PRODUCT_RETURN;
+  void print(outputStream* st = tty);
   void verify();
 };

@@ -223,7 +224,7 @@ public:
   static void finalize_javabase(Handle module_handle, Symbol* version, Symbol* location);
   static void patch_javabase_entries(Handle module_handle);

-  void print() PRODUCT_RETURN;
+  void print(outputStream* st = tty);
   void verify();
 };
classfile/packageEntry.cpp

@@ -32,6 +32,7 @@
 #include "utilities/events.hpp"
 #include "utilities/growableArray.hpp"
 #include "utilities/hashtable.inline.hpp"
+#include "utilities/ostream.hpp"

 // Return true if this package is exported to m.
 bool PackageEntry::is_qexported_to(ModuleEntry* m) const {

@@ -265,28 +266,26 @@ void PackageEntryTable::purge_all_package_exports() {
   }
 }

 #ifndef PRODUCT
-void PackageEntryTable::print() {
-  tty->print_cr("Package Entry Table (table_size=%d, entries=%d)",
-                table_size(), number_of_entries());
+void PackageEntryTable::print(outputStream* st) {
+  st->print_cr("Package Entry Table (table_size=%d, entries=%d)",
+               table_size(), number_of_entries());
   for (int i = 0; i < table_size(); i++) {
     for (PackageEntry* probe = bucket(i);
                        probe != NULL;
                        probe = probe->next()) {
-      probe->print();
+      probe->print(st);
     }
   }
 }

-void PackageEntry::print() {
+void PackageEntry::print(outputStream* st) {
   ResourceMark rm;
-  tty->print_cr("package entry "PTR_FORMAT" name %s module %s classpath_index "
-                INT32_FORMAT " is_exported %d is_exported_allUnnamed %d " "next "PTR_FORMAT,
-                p2i(this), name()->as_C_string(),
-                (module()->is_named() ? module()->name()->as_C_string() : UNNAMED_MODULE),
-                _classpath_index, _is_exported, _is_exported_allUnnamed, p2i(next()));
+  st->print_cr("package entry "PTR_FORMAT" name %s module %s classpath_index "
+               INT32_FORMAT " is_exported %d is_exported_allUnnamed %d " "next "PTR_FORMAT,
+               p2i(this), name()->as_C_string(),
+               (module()->is_named() ? module()->name()->as_C_string() : UNNAMED_MODULE),
+               _classpath_index, _is_exported, _is_exported_allUnnamed, p2i(next()));
 }
 #endif

 void PackageEntryTable::verify() {
   int element_count = 0;
classfile/packageEntry.hpp

@@ -29,6 +29,7 @@
 #include "oops/symbol.hpp"
 #include "utilities/growableArray.hpp"
 #include "utilities/hashtable.hpp"
+#include "utilities/ostream.hpp"

 // A PackageEntry basically represents a Java package. It contains:
 //   - Symbol* containing the package's name.

@@ -144,7 +145,7 @@ public:
   void purge_qualified_exports();
   void delete_qualified_exports();

-  void print() PRODUCT_RETURN;
+  void print(outputStream* st = tty);
   void verify();
 };

@@ -195,7 +196,7 @@ public:
   // purge dead weak references out of exported list
   void purge_all_package_exports();

-  void print() PRODUCT_RETURN;
+  void print(outputStream* st = tty);
   void verify();
 };
gc/cms/compactibleFreeListSpace.hpp

@@ -82,6 +82,8 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   template <typename SpaceType>
   friend void CompactibleSpace::scan_and_compact(SpaceType* space);
+  template <typename SpaceType>
+  friend void CompactibleSpace::verify_up_to_first_dead(SpaceType* space);
   template <typename SpaceType>
   friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);

   // "Size" of chunks of work (executed during parallel remark phases
gc/g1/concurrentG1Refine.cpp

@@ -29,42 +29,174 @@
 #include "gc/g1/g1HotCardCache.hpp"
 #include "gc/g1/g1Predictions.hpp"
 #include "runtime/java.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/pair.hpp"
+#include <math.h>

-ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h, const G1Predictions* predictor) :
-  _threads(NULL),
-  _sample_thread(NULL),
-  _predictor_sigma(predictor->sigma()),
-  _hot_card_cache(g1h)
-{
-  // Ergonomically select initial concurrent refinement parameters
-  if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, ParallelGCThreads);
-  }
-  set_green_zone(G1ConcRefinementGreenZone);
-
-  if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
-  }
-  set_yellow_zone(MAX2(G1ConcRefinementYellowZone, green_zone()));
-
-  if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
-  }
-  set_red_zone(MAX2(G1ConcRefinementRedZone, yellow_zone()));
-}
+// Arbitrary but large limits, to simplify some of the zone calculations.
+// The general idea is to allow expressions like
+//   MIN2(x OP y, max_XXX_zone)
+// without needing to check for overflow in "x OP y", because the
+// ranges for x and y have been restricted.
+STATIC_ASSERT(sizeof(LP64_ONLY(jint) NOT_LP64(jshort)) <= (sizeof(size_t)/2));
+const size_t max_yellow_zone = LP64_ONLY(max_jint) NOT_LP64(max_jshort);
+const size_t max_green_zone = max_yellow_zone / 2;
+const size_t max_red_zone = INT_MAX; // For dcqs.set_max_completed_queue.
+STATIC_ASSERT(max_yellow_zone <= max_red_zone);
+
+// Range check assertions for green zone values.
+#define assert_zone_constraints_g(green)                        \
+  do {                                                          \
+    size_t azc_g_green = (green);                               \
+    assert(azc_g_green <= max_green_zone,                       \
+           "green exceeds max: " SIZE_FORMAT, azc_g_green);     \
+  } while (0)
+
+// Range check assertions for green and yellow zone values.
+#define assert_zone_constraints_gy(green, yellow)                       \
+  do {                                                                  \
+    size_t azc_gy_green = (green);                                      \
+    size_t azc_gy_yellow = (yellow);                                    \
+    assert_zone_constraints_g(azc_gy_green);                            \
+    assert(azc_gy_yellow <= max_yellow_zone,                            \
+           "yellow exceeds max: " SIZE_FORMAT, azc_gy_yellow);          \
+    assert(azc_gy_green <= azc_gy_yellow,                               \
+           "green (" SIZE_FORMAT ") exceeds yellow (" SIZE_FORMAT ")",  \
+           azc_gy_green, azc_gy_yellow);                                \
+  } while (0)
+
+// Range check assertions for green, yellow, and red zone values.
+#define assert_zone_constraints_gyr(green, yellow, red)                 \
+  do {                                                                  \
+    size_t azc_gyr_green = (green);                                     \
+    size_t azc_gyr_yellow = (yellow);                                   \
+    size_t azc_gyr_red = (red);                                         \
+    assert_zone_constraints_gy(azc_gyr_green, azc_gyr_yellow);          \
+    assert(azc_gyr_red <= max_red_zone,                                 \
+           "red exceeds max: " SIZE_FORMAT, azc_gyr_red);               \
+    assert(azc_gyr_yellow <= azc_gyr_red,                               \
+           "yellow (" SIZE_FORMAT ") exceeds red (" SIZE_FORMAT ")",    \
+           azc_gyr_yellow, azc_gyr_red);                                \
+  } while (0)
+
+// Logging tag sequence for refinement control updates.
+#define CTRL_TAGS gc, ergo, refine
+
+// For logging zone values, ensuring consistency of level and tags.
+#define LOG_ZONES(...) log_debug( CTRL_TAGS )(__VA_ARGS__)
+
+// Package for pair of refinement thread activation and deactivation
+// thresholds. The activation and deactivation levels are resp. the first
+// and second values of the pair.
+typedef Pair<size_t, size_t> Thresholds;
+inline size_t activation_level(const Thresholds& t) { return t.first; }
+inline size_t deactivation_level(const Thresholds& t) { return t.second; }
+
+static Thresholds calc_thresholds(size_t green_zone,
+                                  size_t yellow_zone,
+                                  uint worker_i) {
+  double yellow_size = yellow_zone - green_zone;
+  double step = yellow_size / ConcurrentG1Refine::thread_num();
+  if (worker_i == 0) {
+    // Potentially activate worker 0 more aggressively, to keep
+    // available buffers near green_zone value. When yellow_size is
+    // large we don't want to allow a full step to accumulate before
+    // doing any processing, as that might lead to significantly more
+    // than green_zone buffers to be processed by update_rs.
+    step = MIN2(step, ParallelGCThreads / 2.0);
+  }
+  size_t activate_offset = static_cast<size_t>(ceil(step * (worker_i + 1)));
+  size_t deactivate_offset = static_cast<size_t>(floor(step * worker_i));
+  return Thresholds(green_zone + activate_offset,
+                    green_zone + deactivate_offset);
+}
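The threshold arithmetic above spreads worker activation levels evenly across the yellow zone, with worker 0 capped so that it wakes early. A standalone model of calc_thresholds() with sample numbers (thread_count and parallel_gc_threads stand in for ConcurrentG1Refine::thread_num() and ParallelGCThreads; the zone values are invented):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <utility>

// (activate, deactivate) buffer-count levels for one worker.
using Thresholds = std::pair<std::size_t, std::size_t>;

Thresholds calc_thresholds(std::size_t green_zone, std::size_t yellow_zone,
                           unsigned worker_i, unsigned thread_count,
                           unsigned parallel_gc_threads) {
  double yellow_size = static_cast<double>(yellow_zone - green_zone);
  double step = yellow_size / thread_count;
  if (worker_i == 0) {
    // Activate worker 0 more aggressively, keeping available buffers
    // near the green zone value.
    step = std::min(step, parallel_gc_threads / 2.0);
  }
  std::size_t activate_offset   = static_cast<std::size_t>(std::ceil(step * (worker_i + 1)));
  std::size_t deactivate_offset = static_cast<std::size_t>(std::floor(step * worker_i));
  return { green_zone + activate_offset, green_zone + deactivate_offset };
}

int main() {
  // e.g. green=4, yellow=16, 4 refinement threads, 4 parallel GC threads:
  for (unsigned i = 0; i < 4; i++) {
    Thresholds t = calc_thresholds(4, 16, i, 4, 4);
    std::printf("worker %u: activate at %zu, deactivate at %zu\n", i, t.first, t.second);
  }
  // worker 0: activate at 6, deactivate at 4  (step capped at 2.0)
  // worker 1: activate at 10, deactivate at 7 (full step of 3.0)
}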
-ConcurrentG1Refine* ConcurrentG1Refine::create(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure, jint* ecode) {
-  G1CollectorPolicy* policy = g1h->g1_policy();
-  ConcurrentG1Refine* cg1r = new ConcurrentG1Refine(g1h, &policy->predictor());
+ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h,
+                                       size_t green_zone,
+                                       size_t yellow_zone,
+                                       size_t red_zone,
+                                       size_t min_yellow_zone_size) :
+  _threads(NULL),
+  _sample_thread(NULL),
+  _n_worker_threads(thread_num()),
+  _green_zone(green_zone),
+  _yellow_zone(yellow_zone),
+  _red_zone(red_zone),
+  _min_yellow_zone_size(min_yellow_zone_size),
+  _hot_card_cache(g1h)
+{
+  assert_zone_constraints_gyr(green_zone, yellow_zone, red_zone);
+}
+
+static size_t calc_min_yellow_zone_size() {
+  size_t step = G1ConcRefinementThresholdStep;
+  uint n_workers = ConcurrentG1Refine::thread_num();
+  if ((max_yellow_zone / step) < n_workers) {
+    return max_yellow_zone;
+  } else {
+    return step * n_workers;
+  }
+}
+
+static size_t calc_init_green_zone() {
+  size_t green = G1ConcRefinementGreenZone;
+  if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
+    green = ParallelGCThreads;
+  }
+  return MIN2(green, max_green_zone);
+}
+
+static size_t calc_init_yellow_zone(size_t green, size_t min_size) {
+  size_t config = G1ConcRefinementYellowZone;
+  size_t size = 0;
+  if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
+    size = green * 2;
+  } else if (green < config) {
+    size = config - green;
+  }
+  size = MAX2(size, min_size);
+  size = MIN2(size, max_yellow_zone);
+  return MIN2(green + size, max_yellow_zone);
+}
+
+static size_t calc_init_red_zone(size_t green, size_t yellow) {
+  size_t size = yellow - green;
+  if (!FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
+    size_t config = G1ConcRefinementRedZone;
+    if (yellow < config) {
+      size = MAX2(size, config - yellow);
+    }
+  }
+  return MIN2(yellow + size, max_red_zone);
+}
+
+ConcurrentG1Refine* ConcurrentG1Refine::create(G1CollectedHeap* g1h,
+                                               CardTableEntryClosure* refine_closure,
+                                               jint* ecode) {
+  size_t min_yellow_zone_size = calc_min_yellow_zone_size();
+  size_t green_zone = calc_init_green_zone();
+  size_t yellow_zone = calc_init_yellow_zone(green_zone, min_yellow_zone_size);
+  size_t red_zone = calc_init_red_zone(green_zone, yellow_zone);
+
+  LOG_ZONES("Initial Refinement Zones: "
+            "green: " SIZE_FORMAT ", "
+            "yellow: " SIZE_FORMAT ", "
+            "red: " SIZE_FORMAT ", "
+            "min yellow size: " SIZE_FORMAT,
+            green_zone, yellow_zone, red_zone, min_yellow_zone_size);
+
+  ConcurrentG1Refine* cg1r = new ConcurrentG1Refine(g1h,
+                                                    green_zone,
+                                                    yellow_zone,
+                                                    red_zone,
+                                                    min_yellow_zone_size);
+
   if (cg1r == NULL) {
     *ecode = JNI_ENOMEM;
     vm_shutdown_during_initialization("Could not create ConcurrentG1Refine");
     return NULL;
   }
-  cg1r->_n_worker_threads = thread_num();
-
-  cg1r->reset_threshold_step();

   cg1r->_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(ConcurrentG1RefineThread*, cg1r->_n_worker_threads, mtGC);
   if (cg1r->_threads == NULL) {
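Tracing the initialization chain above with concrete (invented) numbers makes the defaults easier to check: with 8 parallel GC threads, 8 refinement threads and the default threshold step of 2, the zones come out as green 8, yellow 24, red 40. A standalone sketch of that arithmetic, covering the all-defaults case only:

#include <algorithm>
#include <cstdio>
using std::size_t;

// Mirrors the limits defined earlier in the file (LP64 values).
const size_t max_yellow_zone = 0x7fffffff;        // max_jint
const size_t max_green_zone  = max_yellow_zone / 2;
const size_t max_red_zone    = 0x7fffffff;        // INT_MAX

size_t calc_min_yellow_zone_size(size_t threshold_step, unsigned n_workers) {
  return (max_yellow_zone / threshold_step < n_workers) ? max_yellow_zone
                                                        : threshold_step * n_workers;
}

int main() {
  unsigned parallel_gc_threads = 8, refinement_threads = 8;
  size_t min_yellow = calc_min_yellow_zone_size(2, refinement_threads);      // step default 2 -> 16
  size_t green  = std::min<size_t>(parallel_gc_threads, max_green_zone);     // 8
  size_t yellow = std::min(green + std::max(green * 2, min_yellow),
                           max_yellow_zone);                                 // 8 + 16 = 24
  size_t red    = std::min(yellow + (yellow - green), max_red_zone);         // 24 + 16 = 40
  std::printf("green=%zu yellow=%zu red=%zu (min yellow size=%zu)\n",
              green, yellow, red, min_yellow);
}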
@@ -77,7 +209,15 @@ ConcurrentG1Refine* ConcurrentG1Refine::create(G1CollectedHeap* g1h, CardTableEn
   ConcurrentG1RefineThread *next = NULL;
   for (uint i = cg1r->_n_worker_threads - 1; i != UINT_MAX; i--) {
-    ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(cg1r, next, refine_closure, worker_id_offset, i);
+    Thresholds thresholds = calc_thresholds(green_zone, yellow_zone, i);
+    ConcurrentG1RefineThread* t =
+      new ConcurrentG1RefineThread(cg1r,
+                                   next,
+                                   refine_closure,
+                                   worker_id_offset,
+                                   i,
+                                   activation_level(thresholds),
+                                   deactivation_level(thresholds));
     assert(t != NULL, "Conc refine should have been created");
     if (t->osthread() == NULL) {
       *ecode = JNI_ENOMEM;

@@ -101,14 +241,6 @@ ConcurrentG1Refine* ConcurrentG1Refine::create(G1CollectedHeap* g1h, CardTableEn
   return cg1r;
 }

-void ConcurrentG1Refine::reset_threshold_step() {
-  if (FLAG_IS_DEFAULT(G1ConcRefinementThresholdStep)) {
-    _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
-  } else {
-    _thread_threshold_step = G1ConcRefinementThresholdStep;
-  }
-}
-
 void ConcurrentG1Refine::init(G1RegionToSpaceMapper* card_counts_storage) {
   _hot_card_cache.initialize(card_counts_storage);
 }

@@ -120,10 +252,11 @@ void ConcurrentG1Refine::stop() {
   _sample_thread->stop();
 }

-void ConcurrentG1Refine::reinitialize_threads() {
-  reset_threshold_step();
+void ConcurrentG1Refine::update_thread_thresholds() {
   for (uint i = 0; i < _n_worker_threads; i++) {
-    _threads[i]->initialize();
+    Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, i);
+    _threads[i]->update_thresholds(activation_level(thresholds),
+                                   deactivation_level(thresholds));
   }
 }

@@ -142,7 +275,7 @@ void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
 }

 void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) {
-  for (uint i = 0; i < worker_thread_num(); i++) {
+  for (uint i = 0; i < _n_worker_threads; i++) {
     tc->do_thread(_threads[i]);
   }
 }

@@ -160,34 +293,80 @@ void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
   st->cr();
 }

+static size_t calc_new_green_zone(size_t green,
+                                  double update_rs_time,
+                                  size_t update_rs_processed_buffers,
+                                  double goal_ms) {
+  // Adjust green zone based on whether we're meeting the time goal.
+  // Limit to max_green_zone.
+  const double inc_k = 1.1, dec_k = 0.9;
+  if (update_rs_time > goal_ms) {
+    if (green > 0) {
+      green = static_cast<size_t>(green * dec_k);
+    }
+  } else if (update_rs_time < goal_ms &&
+             update_rs_processed_buffers > green) {
+    green = static_cast<size_t>(MAX2(green * inc_k, green + 1.0));
+    green = MIN2(green, max_green_zone);
+  }
+  return green;
+}
+
+static size_t calc_new_yellow_zone(size_t green, size_t min_yellow_size) {
+  size_t size = green * 2;
+  size = MAX2(size, min_yellow_size);
+  return MIN2(green + size, max_yellow_zone);
+}
+
+static size_t calc_new_red_zone(size_t green, size_t yellow) {
+  return MIN2(yellow + (yellow - green), max_red_zone);
+}
+
+void ConcurrentG1Refine::update_zones(double update_rs_time,
+                                      size_t update_rs_processed_buffers,
+                                      double goal_ms) {
+  log_trace( CTRL_TAGS )("Updating Refinement Zones: "
+                         "update_rs time: %.3fms, "
+                         "update_rs buffers: " SIZE_FORMAT ", "
+                         "update_rs goal time: %.3fms",
+                         update_rs_time,
+                         update_rs_processed_buffers,
+                         goal_ms);
+
+  _green_zone = calc_new_green_zone(_green_zone,
+                                    update_rs_time,
+                                    update_rs_processed_buffers,
+                                    goal_ms);
+  _yellow_zone = calc_new_yellow_zone(_green_zone, _min_yellow_zone_size);
+  _red_zone = calc_new_red_zone(_green_zone, _yellow_zone);
+
+  assert_zone_constraints_gyr(_green_zone, _yellow_zone, _red_zone);
+  LOG_ZONES("Updated Refinement Zones: "
+            "green: " SIZE_FORMAT ", "
+            "yellow: " SIZE_FORMAT ", "
+            "red: " SIZE_FORMAT,
+            _green_zone, _yellow_zone, _red_zone);
+}
+
 void ConcurrentG1Refine::adjust(double update_rs_time,
-                                double update_rs_processed_buffers,
+                                size_t update_rs_processed_buffers,
                                 double goal_ms) {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();

   if (G1UseAdaptiveConcRefinement) {
-    const int k_gy = 3, k_gr = 6;
-    const double inc_k = 1.1, dec_k = 0.9;
-
-    size_t g = green_zone();
-    if (update_rs_time > goal_ms) {
-      g = (size_t)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
-    } else {
-      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
-        g = (size_t)MAX2(g * inc_k, g + 1.0);
-      }
-    }
-    // Change the refinement threads params
-    set_green_zone(g);
-    set_yellow_zone(g * k_gy);
-    set_red_zone(g * k_gr);
-    reinitialize_threads();
+    update_zones(update_rs_time, update_rs_processed_buffers, goal_ms);
+    update_thread_thresholds();

-    size_t processing_threshold_delta = MAX2<size_t>(green_zone() * _predictor_sigma, 1);
-    size_t processing_threshold = MIN2(green_zone() + processing_threshold_delta,
-                                       yellow_zone());
-    // Change the barrier params
-    dcqs.set_process_completed_threshold((int)processing_threshold);
+    if (_n_worker_threads == 0) {
+      // Disable dcqs notification when there are no threads to notify.
+      dcqs.set_process_completed_threshold(INT_MAX);
+    } else {
+      // Worker 0 is the primary; wakeup is via dcqs notification.
+      STATIC_ASSERT(max_yellow_zone <= INT_MAX);
+      size_t activate = _threads[0]->activation_threshold();
+      dcqs.set_process_completed_threshold((int)activate);
+    }
     dcqs.set_max_completed_queue((int)red_zone());
   }
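The update rules above are simple multiplicative feedback: shrink green by 10% when update_rs overruns its goal, grow it by 10% (but by at least one buffer) when it underruns with buffers to spare, then rederive yellow and red from green. A standalone model of calc_new_green_zone() with sample timings (all numbers invented):

#include <algorithm>
#include <cstdio>
using std::size_t;

const size_t max_green_zone = 0x3fffffff;

size_t calc_new_green_zone(size_t green, double update_rs_time,
                           size_t update_rs_processed_buffers, double goal_ms) {
  const double inc_k = 1.1, dec_k = 0.9;
  if (update_rs_time > goal_ms) {
    if (green > 0) {
      green = static_cast<size_t>(green * dec_k);   // over goal: shrink 10%
    }
  } else if (update_rs_time < goal_ms && update_rs_processed_buffers > green) {
    // under goal with plenty of buffers: grow 10%, but at least +1
    green = static_cast<size_t>(std::max(green * inc_k, green + 1.0));
    green = std::min(green, max_green_zone);
  }
  return green;
}

int main() {
  std::printf("%zu\n", calc_new_green_zone(8, 12.0, 20, 10.0));   // 7: over a 10 ms goal
  std::printf("%zu\n", calc_new_green_zone(8, 8.0, 20, 10.0));    // 9: the +1 dominates for small zones
  std::printf("%zu\n", calc_new_green_zone(100, 8.0, 200, 10.0)); // 110: 10% growth
}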
gc/g1/concurrentG1Refine.hpp

@@ -65,18 +65,24 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
   size_t _green_zone;
   size_t _yellow_zone;
   size_t _red_zone;
-
-  size_t _thread_threshold_step;
-
-  double _predictor_sigma;
+  size_t _min_yellow_zone_size;

   // We delay the refinement of 'hot' cards using the hot card cache.
   G1HotCardCache _hot_card_cache;

-  // Reset the threshold step value based of the current zone boundaries.
-  void reset_threshold_step();
-
-  ConcurrentG1Refine(G1CollectedHeap* g1h, const G1Predictions* predictions);
+  ConcurrentG1Refine(G1CollectedHeap* g1h,
+                     size_t green_zone,
+                     size_t yellow_zone,
+                     size_t red_zone,
+                     size_t min_yellow_zone_size);
+
+  // Update green/yellow/red zone values based on how well goals are being met.
+  void update_zones(double update_rs_time,
+                    size_t update_rs_processed_buffers,
+                    double goal_ms);
+
+  // Update thread thresholds to account for updated zone values.
+  void update_thread_thresholds();

 public:
   ~ConcurrentG1Refine();

@@ -88,9 +94,7 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
   void init(G1RegionToSpaceMapper* card_counts_storage);
   void stop();

-  void adjust(double update_rs_time, double update_rs_processed_buffers, double goal_ms);
-
-  void reinitialize_threads();
+  void adjust(double update_rs_time, size_t update_rs_processed_buffers, double goal_ms);

   // Iterate over all concurrent refinement threads
   void threads_do(ThreadClosure *tc);

@@ -105,18 +109,10 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
   void print_worker_threads_on(outputStream* st) const;

-  void set_green_zone(size_t x)  { _green_zone = x;  }
-  void set_yellow_zone(size_t x) { _yellow_zone = x; }
-  void set_red_zone(size_t x)    { _red_zone = x;    }
-
   size_t green_zone() const      { return _green_zone;  }
   size_t yellow_zone() const     { return _yellow_zone; }
   size_t red_zone() const        { return _red_zone;    }

   uint worker_thread_num() const { return _n_worker_threads; }

-  size_t thread_threshold_step() const { return _thread_threshold_step; }
-
   G1HotCardCache* hot_card_cache() { return &_hot_card_cache; }

   static bool hot_card_cache_enabled() { return G1HotCardCache::default_use_cache(); }
gc/g1/concurrentG1RefineThread.cpp

@@ -36,7 +36,8 @@
 ConcurrentG1RefineThread::
 ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *next,
                          CardTableEntryClosure* refine_closure,
-                         uint worker_id_offset, uint worker_id) :
+                         uint worker_id_offset, uint worker_id,
+                         size_t activate, size_t deactivate) :
   ConcurrentGCThread(),
   _refine_closure(refine_closure),
   _worker_id_offset(worker_id_offset),

@@ -45,7 +46,9 @@ ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *nex
   _next(next),
   _monitor(NULL),
   _cg1r(cg1r),
-  _vtime_accum(0.0)
+  _vtime_accum(0.0),
+  _activation_threshold(activate),
+  _deactivation_threshold(deactivate)
 {
   // Each thread has its own monitor. The i-th thread is responsible for signaling

@@ -58,21 +61,17 @@ ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *nex
   } else {
     _monitor = DirtyCardQ_CBL_mon;
   }
-  initialize();

   // set name
   set_name("G1 Refine#%d", worker_id);
   create_and_start();
 }

-void ConcurrentG1RefineThread::initialize() {
-  // Current thread activation threshold
-  _threshold = MIN2(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
-                    cg1r()->yellow_zone());
-  // A thread deactivates once the number of buffer reached a deactivation threshold
-  _deactivation_threshold =
-    MAX2(_threshold - MIN2(_threshold, cg1r()->thread_threshold_step()),
-         cg1r()->green_zone());
+void ConcurrentG1RefineThread::update_thresholds(size_t activate,
+                                                 size_t deactivate) {
+  assert(deactivate < activate, "precondition");
+  _activation_threshold = activate;
+  _deactivation_threshold = deactivate;
 }

 void ConcurrentG1RefineThread::wait_for_completed_buffers() {

@@ -118,9 +117,10 @@ void ConcurrentG1RefineThread::run_service() {
       break;
     }

+    size_t buffers_processed = 0;
     DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
     log_debug(gc, refine)("Activated %d, on threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
-                          _worker_id, _threshold, dcqs.completed_buffers_num());
+                          _worker_id, _activation_threshold, dcqs.completed_buffers_num());

     {
       SuspendibleThreadSetJoiner sts_join;

@@ -139,7 +139,9 @@ void ConcurrentG1RefineThread::run_service() {
       }

       // Check if we need to activate the next thread.
-      if (_next != NULL && !_next->is_active() && curr_buffer_num > _next->_threshold) {
+      if ((_next != NULL) &&
+          !_next->is_active() &&
+          (curr_buffer_num > _next->_activation_threshold)) {
         _next->activate();
       }

@@ -150,14 +152,16 @@ void ConcurrentG1RefineThread::run_service() {
                                               false /* during_pause */)) {
           break; // Deactivate, number of buffers fell below threshold.
         }
+        ++buffers_processed;
       }
     }

     deactivate();
     log_debug(gc, refine)("Deactivated %d, off threshold: " SIZE_FORMAT
-                          ", current: " SIZE_FORMAT,
+                          ", current: " SIZE_FORMAT ", processed: " SIZE_FORMAT,
                           _worker_id, _deactivation_threshold,
-                          dcqs.completed_buffers_num());
+                          dcqs.completed_buffers_num(),
+                          buffers_processed);

     if (os::supports_vtime()) {
       _vtime_accum = (os::elapsedVTime() - _vtime_start);
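The run_service() logic above forms an activation chain: worker i wakes worker i+1 once the completed-buffer count passes that worker's activation threshold, and each worker parks again when the count drops below its own deactivation threshold. A toy simulation of the threshold behavior, using the sample thresholds from the earlier sketch (the real chain only wakes a worker from its predecessor or the queue set, which this simplifies):

#include <cstdio>
#include <vector>
using std::size_t;

struct Worker {
  size_t activation_threshold;
  size_t deactivation_threshold;
  bool active;
};

int main() {
  std::vector<Worker> workers = { {6, 4, false}, {10, 7, false}, {13, 10, false} };
  for (size_t buffers : {5, 7, 11, 14, 9, 3}) {   // invented buffer counts over time
    for (size_t i = 0; i < workers.size(); i++) {
      if (!workers[i].active && buffers > workers[i].activation_threshold) {
        workers[i].active = true;    // predecessor (or dcqs) would wake us here
      } else if (workers[i].active && buffers < workers[i].deactivation_threshold) {
        workers[i].active = false;   // fell below our own threshold: park
      }
    }
    std::printf("buffers=%2zu active: ", buffers);
    for (const Worker& w : workers) std::printf("%d", w.active ? 1 : 0);
    std::printf("\n");
  }
}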
gc/g1/concurrentG1RefineThread.hpp

@@ -53,10 +53,8 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
   // The closure applied to completed log buffers.
   CardTableEntryClosure* _refine_closure;

-  size_t _thread_threshold_step;
-  // This thread activation threshold
-  size_t _threshold;
-  // This thread deactivation threshold
+  // This thread's activation/deactivation thresholds
+  size_t _activation_threshold;
   size_t _deactivation_threshold;

   void wait_for_completed_buffers();

@@ -75,9 +73,11 @@ public:
   // Constructor
   ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
                            CardTableEntryClosure* refine_closure,
-                           uint worker_id_offset, uint worker_id);
+                           uint worker_id_offset, uint worker_id,
+                           size_t activate, size_t deactivate);

-  void initialize();
+  void update_thresholds(size_t activate, size_t deactivate);
+  size_t activation_threshold() const { return _activation_threshold; }

   // Total virtual time so far.
   double vtime_accum() { return _vtime_accum; }
gc/g1/g1CollectedHeap.cpp

@@ -4452,7 +4452,6 @@ void G1CollectedHeap::process_weak_jni_handles() {
 }

 void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
-  double preserve_cm_referents_start = os::elapsedTime();
   // Any reference objects, in the collection set, that were 'discovered'
   // by the CM ref processor should have already been copied (either by
   // applying the external root copy closure to the discovered lists, or

@@ -4473,16 +4472,24 @@ void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_
   // objects discovered by the STW ref processor in case one of these
   // referents points to another object which is also referenced by an
   // object discovered by the STW ref processor.
+  double preserve_cm_referents_time = 0.0;

-  uint no_of_gc_workers = workers()->active_workers();
-
-  G1ParPreserveCMReferentsTask keep_cm_referents(this,
-                                                 per_thread_states,
-                                                 no_of_gc_workers,
-                                                 _task_queues);
-  workers()->run_task(&keep_cm_referents);
-
-  g1_policy()->phase_times()->record_preserve_cm_referents_time_ms((os::elapsedTime() - preserve_cm_referents_start) * 1000.0);
+  // To avoid spawning task when there is no work to do, check that
+  // a concurrent cycle is active and that some references have been
+  // discovered.
+  if (concurrent_mark()->cmThread()->during_cycle() &&
+      ref_processor_cm()->has_discovered_references()) {
+    double preserve_cm_referents_start = os::elapsedTime();
+    uint no_of_gc_workers = workers()->active_workers();
+    G1ParPreserveCMReferentsTask keep_cm_referents(this,
+                                                   per_thread_states,
+                                                   no_of_gc_workers,
+                                                   _task_queues);
+    workers()->run_task(&keep_cm_referents);
+    preserve_cm_referents_time = os::elapsedTime() - preserve_cm_referents_start;
+  }
+
+  g1_policy()->phase_times()->record_preserve_cm_referents_time_ms(preserve_cm_referents_time * 1000.0);
 }

 // Weak Reference processing during an evacuation pause (part 1).
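The reworked preserve_cm_referents() shows a general pattern: guard an expensive parallel task behind a cheap emptiness check and record a zero duration when the task is skipped, so the recorded phase time stays meaningful either way. A standalone sketch of that shape (has_work() and run_parallel_task() are placeholders, not HotSpot calls):

#include <chrono>
#include <cstdio>

static bool has_work() { return false; }     // e.g. concurrent cycle active and refs discovered
static void run_parallel_task() { /* spawn workers, process the lists */ }

int main() {
  double task_time = 0.0;                    // phase time defaults to zero when skipped
  if (has_work()) {
    auto start = std::chrono::steady_clock::now();
    run_parallel_task();
    std::chrono::duration<double> d = std::chrono::steady_clock::now() - start;
    task_time = d.count();
  }
  std::printf("guarded phase took %.3f ms\n", task_time * 1000.0);
}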
gc/g1/g1YoungGenSizer.cpp

@@ -25,12 +25,13 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1YoungGenSizer.hpp"
 #include "gc/g1/heapRegion.hpp"
+#include "logging/log.hpp"

 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
         _min_desired_young_length(0), _max_desired_young_length(0) {
   if (FLAG_IS_CMDLINE(NewRatio)) {
     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
-      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
+      log_warning(gc, ergo)("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
     } else {
       _sizer_kind = SizerNewRatio;
       _adaptive_size = false;

@@ -40,9 +41,9 @@ G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(
   if (NewSize > MaxNewSize) {
     if (FLAG_IS_CMDLINE(MaxNewSize)) {
-      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
-              "A new max generation size of " SIZE_FORMAT "k will be used.",
-              NewSize/K, MaxNewSize/K, NewSize/K);
+      log_warning(gc, ergo)("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
+                            "A new max generation size of " SIZE_FORMAT "k will be used.",
+                            NewSize/K, MaxNewSize/K, NewSize/K);
     }
     MaxNewSize = NewSize;
   }
gc/g1/g1_globals.hpp

@@ -112,8 +112,7 @@
   product(size_t, G1ConcRefinementRedZone, 0,                              \
           "Maximum number of enqueued update buffers before mutator "      \
           "threads start processing new ones instead of enqueueing them. " \
-          "Will be selected ergonomically by default. Zero will disable "  \
-          "concurrent processing.")                                        \
+          "Will be selected ergonomically by default.")                    \
           range(0, max_intx)                                               \
                                                                            \
   product(size_t, G1ConcRefinementGreenZone, 0,                            \

@@ -127,11 +126,12 @@
           "specified number of milliseconds to do miscellaneous work.")    \
           range(0, max_jint)                                               \
                                                                            \
-  product(size_t, G1ConcRefinementThresholdStep, 0,                        \
+  product(size_t, G1ConcRefinementThresholdStep, 2,                        \
           "Each time the rset update queue increases by this amount "      \
           "activate the next refinement thread if available. "             \
-          "Will be selected ergonomically by default.")                    \
-          range(0, SIZE_MAX)                                               \
+          "The actual step size will be selected ergonomically by "        \
+          "default, with this value used to determine a lower bound.")     \
+          range(1, SIZE_MAX)                                               \
                                                                            \
   product(intx, G1RSetUpdatingPauseTimePercent, 10,                        \
           "A target percentage of time that is allowed to be spend on "    \

@@ -201,9 +201,9 @@
           range(0, 32*M)                                                   \
           constraint(G1HeapRegionSizeConstraintFunc,AfterMemoryInit)       \
                                                                            \
-  product(uintx, G1ConcRefinementThreads, 0,                               \
-          "If non-0 is the number of parallel rem set update threads, "    \
-          "otherwise the value is determined ergonomically.")              \
+  product(uint, G1ConcRefinementThreads, 0,                                \
+          "The number of parallel rem set update threads. "                \
+          "Will be set ergonomically by default.")                         \
           range(0, (max_jint-1)/wordSize)                                  \
                                                                            \
   develop(bool, G1VerifyCTCleanup, false,                                  \
gc/shared/referenceProcessor.cpp

@@ -1090,6 +1090,15 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
   return true;
 }

+bool ReferenceProcessor::has_discovered_references() {
+  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+    if (!_discovered_refs[i].is_empty()) {
+      return true;
+    }
+  }
+  return false;
+}
+
 // Preclean the discovered references by removing those
 // whose referents are alive, and by marking from those that
 // are not active. These lists can be handled here
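has_discovered_references() is a flat early-exit scan over every discovered list (one list per worker queue per Reference subclass). The same shape in a standalone sketch with invented sample data:

#include <cstdio>
#include <vector>

// Early-exit emptiness scan over a flat array of lists.
bool has_discovered(const std::vector<std::vector<int>>& lists) {
  for (const auto& list : lists) {
    if (!list.empty()) {
      return true;   // first non-empty list decides; no need to count
    }
  }
  return false;
}

int main() {
  std::vector<std::vector<int>> lists(4 * 4);  // e.g. 4 queues x 4 Reference subclasses
  std::printf("%d\n", has_discovered(lists));  // 0
  lists[5].push_back(42);                      // one discovered reference anywhere
  std::printf("%d\n", has_discovered(lists));  // 1
}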
gc/shared/referenceProcessor.hpp

@@ -412,6 +412,9 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
   // Discover a Reference object, using appropriate discovery criteria
   bool discover_reference(oop obj, ReferenceType rt);

+  // Has discovered references that need handling
+  bool has_discovered_references();
+
   // Process references found during GC (called by the garbage collector)
   ReferenceProcessorStats
   process_discovered_references(BoolObjectClosure* is_alive,
gc/shared/space.cpp

@@ -411,22 +411,6 @@ HeapWord* CompactibleSpace::forward(oop q, size_t size,
   return compact_top;
 }

-
-bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
-                                        HeapWord* q, size_t deadlength) {
-  if (allowed_deadspace_words >= deadlength) {
-    allowed_deadspace_words -= deadlength;
-    CollectedHeap::fill_with_object(q, deadlength);
-    oop(q)->set_mark(oop(q)->mark()->set_marked());
-    assert((int) deadlength == oop(q)->size(), "bad filler object size");
-    // Recall that we required "q == compaction_top".
-    return true;
-  } else {
-    allowed_deadspace_words = 0;
-    return false;
-  }
-}
-
 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
   scan_and_forward(this, cp);
 }
gc/shared/space.hpp

@@ -362,6 +362,12 @@ private:
   inline size_t obj_size(const HeapWord* addr) const;

+  template <class SpaceType>
+  static inline void verify_up_to_first_dead(SpaceType* space) NOT_DEBUG_RETURN;
+
+  template <class SpaceType>
+  static inline void clear_empty_region(SpaceType* space);
+
 public:
   CompactibleSpace() :
    _compaction_top(NULL), _next_compaction_space(NULL) {}

@@ -455,16 +461,6 @@ protected:
     return end();
   }

-  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
-  // free block of the given "word_len", and that "q", were it an object,
-  // would not move if forwarded. If the size allows, fill the free
-  // block with an object, to prevent excessive compaction. Returns "true"
-  // iff the free region was made deadspace, and modifies
-  // "allowed_deadspace_words" to reflect the number of available deadspace
-  // words remaining after this operation.
-  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
-                        size_t word_len);
-
   // Below are template functions for scan_and_* algorithms (avoiding virtual calls).
   // The space argument should be a subclass of CompactibleSpace, implementing
   // scan_limit(), scanned_block_is_obj(), and scanned_block_size(),
gc/shared/space.inline.hpp

@@ -31,6 +31,7 @@
 #include "gc/shared/space.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "memory/universe.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "runtime/prefetch.inline.hpp"
 #include "runtime/safepoint.hpp"

@@ -75,11 +76,61 @@ size_t CompactibleSpace::obj_size(const HeapWord* addr) const {
   return oop(addr)->size();
 }

+class DeadSpacer : StackObj {
+  size_t _allowed_deadspace_words;
+  bool _active;
+  CompactibleSpace* _space;
+
+public:
+  DeadSpacer(CompactibleSpace* space) : _space(space), _allowed_deadspace_words(0) {
+    size_t ratio = _space->allowed_dead_ratio();
+    _active = ratio > 0;
+
+    if (_active) {
+      assert(!UseG1GC, "G1 should not be using dead space");
+
+      // We allow some amount of garbage towards the bottom of the space, so
+      // we don't start compacting before there is a significant gain to be made.
+      // Occasionally, we want to ensure a full compaction, which is determined
+      // by the MarkSweepAlwaysCompactCount parameter.
+      if ((MarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0) {
+        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
+      } else {
+        _active = false;
+      }
+    }
+  }
+
+  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
+    if (!_active) {
+      return false;
+    }
+
+    size_t dead_length = pointer_delta(dead_end, dead_start);
+    if (_allowed_deadspace_words >= dead_length) {
+      _allowed_deadspace_words -= dead_length;
+      CollectedHeap::fill_with_object(dead_start, dead_length);
+      oop obj = oop(dead_start);
+      obj->set_mark(obj->mark()->set_marked());
+
+      assert(dead_length == (size_t)obj->size(), "bad filler object size");
+      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
+                                        p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);
+
+      return true;
+    } else {
+      _active = false;
+      return false;
+    }
+  }
+};
+
 template <class SpaceType>
 inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
   // Compute the new addresses for the live objects and store it in the mark
   // Used by universe::mark_sweep_phase2()
-  HeapWord* compact_top; // This is where we are currently compacting to.

   // We're sure to be here before any objects are compacted into this
   // space, so this is a good time to initialize this:
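The DeadSpacer above packages what used to be loose "allowed_deadspace" bookkeeping into one object: a word budget computed once per collection (zeroed on every MarkSweepAlwaysCompactCount-th invocation), consumed by dead runs left in place, and permanently deactivated by the first run that exceeds it. A standalone model of that budget logic (capacity and ratio values invented):

#include <cstdio>
using std::size_t;

class DeadSpaceBudget {
  size_t _allowed_words;
  bool _active;
public:
  DeadSpaceBudget(size_t capacity_words, size_t dead_ratio_percent, bool full_compaction)
    : _allowed_words(0), _active(dead_ratio_percent > 0 && !full_compaction) {
    if (_active) {
      _allowed_words = capacity_words * dead_ratio_percent / 100;
    }
  }
  // Returns true if a dead run of dead_words may be kept in place
  // (the caller would fill it with a dummy object).
  bool insert_deadspace(size_t dead_words) {
    if (_active && _allowed_words >= dead_words) {
      _allowed_words -= dead_words;
      return true;
    }
    _active = false;   // once exceeded, never keep dead space again
    return false;
  }
};

int main() {
  DeadSpaceBudget budget(1000, 5, false);            // 5% of 1000 words = 50-word budget
  std::printf("%d\n", budget.insert_deadspace(30));  // 1: budget 50 -> 20
  std::printf("%d\n", budget.insert_deadspace(30));  // 0: exceeds remaining 20, deactivates
  std::printf("%d\n", budget.insert_deadspace(5));   // 0: stays deactivated
}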
@@ -90,89 +141,73 @@ inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* c
     assert(cp->threshold == NULL, "just checking");
     assert(cp->gen->first_compaction_space() == space, "just checking");
     cp->space = cp->gen->first_compaction_space();
-    compact_top = cp->space->bottom();
-    cp->space->set_compaction_top(compact_top);
     cp->threshold = cp->space->initialize_threshold();
-  } else {
-    compact_top = cp->space->compaction_top();
+    cp->space->set_compaction_top(cp->space->bottom());
   }

-  // We allow some amount of garbage towards the bottom of the space, so
-  // we don't start compacting before there is a significant gain to be made.
-  // Occasionally, we want to ensure a full compaction, which is determined
-  // by the MarkSweepAlwaysCompactCount parameter.
-  uint invocations = MarkSweep::total_invocations();
-  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);
-
-  size_t allowed_deadspace = 0;
-  if (skip_dead) {
-    const size_t ratio = space->allowed_dead_ratio();
-    allowed_deadspace = (space->capacity() * ratio / 100) / HeapWordSize;
-  }
+  HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.

-  HeapWord* q = space->bottom();
-  HeapWord* t = space->scan_limit();
-
-  HeapWord* end_of_live= q;            // One byte beyond the last byte of the last
-                                       // live object.
-  HeapWord* first_dead = space->end(); // The first dead object.
+  DeadSpacer dead_spacer(space);
+
+  HeapWord* end_of_live = space->bottom();  // One byte beyond the last byte of the last live object.
+  HeapWord* first_dead = NULL; // The first dead object.

   const intx interval = PrefetchScanIntervalInBytes;

-  while (q < t) {
-    assert(!space->scanned_block_is_obj(q) ||
-           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
-           oop(q)->mark()->has_bias_pattern(),
+  HeapWord* cur_obj = space->bottom();
+  HeapWord* scan_limit = space->scan_limit();
+
+  while (cur_obj < scan_limit) {
+    assert(!space->scanned_block_is_obj(cur_obj) ||
+           oop(cur_obj)->mark()->is_marked() || oop(cur_obj)->mark()->is_unlocked() ||
+           oop(cur_obj)->mark()->has_bias_pattern(),
            "these are the only valid states during a mark sweep");
-    if (space->scanned_block_is_obj(q) && oop(q)->is_gc_marked()) {
-      // prefetch beyond q
-      Prefetch::write(q, interval);
-      size_t size = space->scanned_block_size(q);
-      compact_top = cp->space->forward(oop(q), size, cp, compact_top);
-      q += size;
-      end_of_live = q;
+    if (space->scanned_block_is_obj(cur_obj) && oop(cur_obj)->is_gc_marked()) {
+      // prefetch beyond cur_obj
+      Prefetch::write(cur_obj, interval);
+      size_t size = space->scanned_block_size(cur_obj);
+      compact_top = cp->space->forward(oop(cur_obj), size, cp, compact_top);
+      cur_obj += size;
+      end_of_live = cur_obj;
     } else {
       // run over all the contiguous dead objects
-      HeapWord* end = q;
+      HeapWord* end = cur_obj;
       do {
         // prefetch beyond end
         Prefetch::write(end, interval);
         end += space->scanned_block_size(end);
-      } while (end < t && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));
+      } while (end < scan_limit && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));

       // see if we might want to pretend this object is alive so that
       // we don't have to compact quite as often.
-      if (allowed_deadspace > 0 && q == compact_top) {
-        size_t sz = pointer_delta(end, q);
-        if (space->insert_deadspace(allowed_deadspace, q, sz)) {
-          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);
-          q = end;
-          end_of_live = end;
-          continue;
-        }
-      }
-
-      // otherwise, it really is a free region.
-
-      // q is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
-      (*(HeapWord**)q) = end;
+      if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
+        oop obj = oop(cur_obj);
+        compact_top = cp->space->forward(obj, obj->size(), cp, compact_top);
+        end_of_live = end;
+      } else {
+        // otherwise, it really is a free region.

-      // see if this is the first dead region.
-      if (q < first_dead) {
-        first_dead = q;
-      }
+        // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
+        *(HeapWord**)cur_obj = end;
+
+        // see if this is the first dead region.
+        if (first_dead == NULL) {
+          first_dead = cur_obj;
+        }
+      }

       // move on to the next object
-      q = end;
+      cur_obj = end;
     }
   }

-  assert(q == t, "just checking");
+  assert(cur_obj == scan_limit, "just checking");
   space->_end_of_live = end_of_live;
-  if (end_of_live < first_dead) {
-    first_dead = end_of_live;
+  if (first_dead != NULL) {
+    space->_first_dead = first_dead;
+  } else {
+    space->_first_dead = end_of_live;
   }
-  space->_first_dead = first_dead;

   // save the compaction_top of the compaction space.
   cp->space->set_compaction_top(compact_top);
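The key trick in scan_and_forward() is that a dead run's first word is overwritten with the address of the next live object, so the later adjust and compact passes can hop over garbage in a single step instead of re-walking it. A standalone model using array indices in place of HeapWord* pointers (the block layout is invented):

#include <cstdio>
#include <vector>

int main() {
  struct Block { int len; bool live; };
  // A toy space: live and dead blocks in address order.
  std::vector<Block> heap = { {3,true}, {2,false}, {1,false}, {4,true}, {2,false}, {5,true} };
  // skip_to[i] plays the role of the pointer stored in a dead run's first word.
  std::vector<int> skip_to(heap.size(), -1);

  // Forward pass: record, at the head of each dead run, where the next
  // live block begins.
  for (size_t i = 0; i < heap.size(); ) {
    if (heap[i].live) { i++; continue; }
    size_t end = i;
    while (end < heap.size() && !heap[end].live) end++;  // run over contiguous dead blocks
    skip_to[i] = (int)end;                               // chain: dead-run head -> next live block
    i = end;
  }

  // A later pass walks only live blocks, hopping each dead run in O(1):
  for (size_t i = 0; i < heap.size(); ) {
    if (heap[i].live) { std::printf("live block %zu (len %d)\n", i, heap[i].len); i++; }
    else              { i = (size_t)skip_to[i]; }
  }
}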
@@ -183,127 +218,58 @@ inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
   // adjust all the interior pointers to point at the new locations of objects
   // Used by MarkSweep::mark_sweep_phase3()

-  HeapWord* q = space->bottom();
-  HeapWord* t = space->_end_of_live;  // Established by "prepare_for_compaction".
-
-  assert(space->_first_dead <= space->_end_of_live, "Stands to reason, no?");
-
-  if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
-    // we have a chunk of the space which hasn't moved and we've
-    // reinitialized the mark word during the previous pass, so we can't
-    // use is_gc_marked for the traversal.
-    HeapWord* end = space->_first_dead;
-
-    while (q < end) {
-      // I originally tried to conjoin "block_start(q) == q" to the
-      // assertion below, but that doesn't work, because you can't
-      // accurately traverse previous objects to get to the current one
-      // after their pointers have been
-      // updated, until the actual compaction is done.  dld, 4/00
-      assert(space->block_is_obj(q), "should be at block boundaries, and should be looking at objs");
-
-      // point all the oops to the new location
-      size_t size = MarkSweep::adjust_pointers(oop(q));
-      size = space->adjust_obj_size(size);
-
-      q += size;
-    }
-
-    if (space->_first_dead == t) {
-      q = t;
-    } else {
-      // The first dead object is no longer an object. At that memory address,
-      // there is a pointer to the first live object that the previous phase found.
-      q = *((HeapWord**)(space->_first_dead));
-    }
-  }
+  HeapWord* cur_obj = space->bottom();
+  HeapWord* const end_of_live = space->_end_of_live;  // Established by "scan_and_forward".
+  HeapWord* const first_dead = space->_first_dead;    // Established by "scan_and_forward".
+
+  assert(first_dead <= end_of_live, "Stands to reason, no?");

   const intx interval = PrefetchScanIntervalInBytes;

-  debug_only(HeapWord* prev_q = NULL);
-  while (q < t) {
-    // prefetch beyond q
-    Prefetch::write(q, interval);
-    if (oop(q)->is_gc_marked()) {
-      // q is alive
+  debug_only(HeapWord* prev_obj = NULL);
+  while (cur_obj < end_of_live) {
+    Prefetch::write(cur_obj, interval);
+    if (cur_obj < first_dead || oop(cur_obj)->is_gc_marked()) {
+      // cur_obj is alive
       // point all the oops to the new location
-      size_t size = MarkSweep::adjust_pointers(oop(q));
+      size_t size = MarkSweep::adjust_pointers(oop(cur_obj));
       size = space->adjust_obj_size(size);
-      debug_only(prev_q = q);
-      q += size;
+      debug_only(prev_obj = cur_obj);
+      cur_obj += size;
     } else {
-      debug_only(prev_q = q);
-      // q is not a live object, instead it points at the next live object
-      q = *(HeapWord**)q;
-      assert(q > prev_q, "we should be moving forward through memory, q: " PTR_FORMAT ", prev_q: " PTR_FORMAT, p2i(q), p2i(prev_q));
+      debug_only(prev_obj = cur_obj);
+      // cur_obj is not a live object, instead it points at the next live object
+      cur_obj = *(HeapWord**)cur_obj;
+      assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
     }
   }

-  assert(q == t, "just checking");
+  assert(cur_obj == end_of_live, "just checking");
 }

+#ifdef ASSERT
 template <class SpaceType>
-inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
-  // Copy all live objects to their new location
-  // Used by MarkSweep::mark_sweep_phase4()
-
-  HeapWord*       q = space->bottom();
-  HeapWord* const t = space->_end_of_live;
-  debug_only(HeapWord* prev_q = NULL);
-
-  if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
-    #ifdef ASSERT // Debug only
-      // we have a chunk of the space which hasn't moved and we've reinitialized
-      // the mark word during the previous pass, so we can't use is_gc_marked for
-      // the traversal.
-      HeapWord* const end = space->_first_dead;
-
-      while (q < end) {
-        size_t size = space->obj_size(q);
-        assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
-        prev_q = q;
-        q += size;
-      }
-    #endif
-
-    if (space->_first_dead == t) {
-      q = t;
-    } else {
-      // $$$ Funky
-      q = (HeapWord*) oop(space->_first_dead)->mark()->decode_pointer();
-    }
-  }
-
-  const intx scan_interval = PrefetchScanIntervalInBytes;
-  const intx copy_interval = PrefetchCopyIntervalInBytes;
-  while (q < t) {
-    if (!oop(q)->is_gc_marked()) {
-      // mark is pointer to next marked oop
-      debug_only(prev_q = q);
-      q = (HeapWord*) oop(q)->mark()->decode_pointer();
-      assert(q > prev_q, "we should be moving forward through memory");
-    } else {
-      // prefetch beyond q
-      Prefetch::read(q, scan_interval);
-
-      // size and destination
-      size_t size = space->obj_size(q);
-      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();
-
-      // prefetch beyond compaction_top
-      Prefetch::write(compaction_top, copy_interval);
-
-      // copy object and reinit its mark
-      assert(q != compaction_top, "everything in this pass should be moving");
-      Copy::aligned_conjoint_words(q, compaction_top, size);
-      oop(compaction_top)->init_mark();
-      assert(oop(compaction_top)->klass() != NULL, "should have a class");
-
-      debug_only(prev_q = q);
-      q += size;
-    }
-  }
+inline void CompactibleSpace::verify_up_to_first_dead(SpaceType* space) {
+  HeapWord* cur_obj = space->bottom();
+
+  if (cur_obj < space->_end_of_live && space->_first_dead > cur_obj && !oop(cur_obj)->is_gc_marked()) {
+    // we have a chunk of the space which hasn't moved and we've reinitialized
+    // the mark word during the previous pass, so we can't use is_gc_marked for
+    // the traversal.
+    HeapWord* prev_obj = NULL;
+
+    while (cur_obj < space->_first_dead) {
+      size_t size = space->obj_size(cur_obj);
+      assert(!oop(cur_obj)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
+      prev_obj = cur_obj;
+      cur_obj += size;
+    }
+  }
+}
+#endif

 template <class SpaceType>
 inline void CompactibleSpace::clear_empty_region(SpaceType* space) {
   // Let's remember if we were empty before we did the compaction.
   bool was_empty = space->used_region().is_empty();
   // Reset space after compaction is complete
@@ -320,6 +286,65 @@ inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
   }
 }

+template <class SpaceType>
+inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
+  // Copy all live objects to their new location
+  // Used by MarkSweep::mark_sweep_phase4()
+
+  verify_up_to_first_dead(space);
+
+  HeapWord* const end_of_live = space->_end_of_live;
+
+  assert(space->_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(space->_first_dead), p2i(end_of_live));
+  if (space->_first_dead == end_of_live && !oop(space->bottom())->is_gc_marked()) {
+    // Nothing to compact. The space is either empty or all live object should be left in place.
+    clear_empty_region(space);
+    return;
+  }
+
+  const intx scan_interval = PrefetchScanIntervalInBytes;
+  const intx copy_interval = PrefetchCopyIntervalInBytes;
+
+  assert(space->bottom() < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(space->bottom()), p2i(end_of_live));
+  HeapWord* cur_obj = space->bottom();
+  if (space->_first_dead > cur_obj && !oop(cur_obj)->is_gc_marked()) {
+    // All object before _first_dead can be skipped. They should not be moved.
+    // A pointer to the first live object is stored at the memory location for _first_dead.
+    cur_obj = *(HeapWord**)(space->_first_dead);
+  }
+
+  debug_only(HeapWord* prev_obj = NULL);
+  while (cur_obj < end_of_live) {
+    if (!oop(cur_obj)->is_gc_marked()) {
+      debug_only(prev_obj = cur_obj);
+      // The first word of the dead object contains a pointer to the next live object or end of space.
+      cur_obj = *(HeapWord**)cur_obj;
+      assert(cur_obj > prev_obj, "we should be moving forward through memory");
+    } else {
+      // prefetch beyond q
+      Prefetch::read(cur_obj, scan_interval);
+
+      // size and destination
+      size_t size = space->obj_size(cur_obj);
+      HeapWord* compaction_top = (HeapWord*)oop(cur_obj)->forwardee();
+
+      // prefetch beyond compaction_top
+      Prefetch::write(compaction_top, copy_interval);
+
+      // copy object and reinit its mark
+      assert(cur_obj != compaction_top, "everything in this pass should be moving");
+      Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
+      oop(compaction_top)->init_mark();
+      assert(oop(compaction_top)->klass() != NULL, "should have a class");
+
+      debug_only(prev_obj = cur_obj);
+      cur_obj += size;
+    }
+  }
+
+  clear_empty_region(space);
+}
+
 size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
   return oop(addr)->size();
 }
logging/logPrefix.hpp

@@ -55,6 +55,7 @@ DEBUG_ONLY(size_t Test_log_prefix_prefixer(char* buf, size_t len);)
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, cset)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, heap)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, ihop)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, refine)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, heap)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, heap, region)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, freelist)) \
memory/allocation.hpp

@@ -143,8 +143,9 @@ enum MemoryType {
   mtTest               = 0x0D,  // Test type for verifying NMT
   mtTracing            = 0x0E,  // memory used for Tracing
   mtLogging            = 0x0F,  // memory for logging
-  mtNone               = 0x10,  // undefined
-  mt_number_of_types   = 0x11   // number of memory types (mtDontTrack
+  mtArguments          = 0x10,  // memory for argument processing
+  mtNone               = 0x11,  // undefined
+  mt_number_of_types   = 0x12   // number of memory types (mtDontTrack
                                 // is not included as validate type)
 };
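The new mtArguments constant exists for Native Memory Tracking: every C-heap allocation carries a category, so argument-processing memory that was previously lumped under mtInternal becomes separately attributable, which is what the arguments.cpp changes below switch over. A toy tracker with the same shape; the categories, sizes and output format here are invented:

#include <cstdio>
#include <cstdlib>

enum MemoryType { mtInternal, mtLogging, mtArguments, mt_number_of_types };
static const char* names[mt_number_of_types] = { "Internal", "Logging", "Arguments" };
static size_t totals[mt_number_of_types] = {0};

// Records per-category attribution, like NMT's malloc bookkeeping.
void* tracked_malloc(size_t size, MemoryType type) {
  totals[type] += size;
  return std::malloc(size);
}

int main() {
  void* p = tracked_malloc(256, mtArguments);  // e.g. a copied boot class path string
  void* q = tracked_malloc(64, mtLogging);
  for (int t = 0; t < mt_number_of_types; t++) {
    std::printf("%-10s %zu bytes\n", names[t], totals[t]);
  }
  std::free(p);
  std::free(q);
}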
prims/jvmtiExport.cpp

@@ -2260,7 +2260,7 @@ void JvmtiExport::post_vm_object_alloc(JavaThread *thread, oop object) {
     if (env->is_enabled(JVMTI_EVENT_VM_OBJECT_ALLOC)) {
       EVT_TRACE(JVMTI_EVENT_VM_OBJECT_ALLOC, ("JVMTI [%s] Evt vmobject alloc sent %s",
                                               JvmtiTrace::safe_get_thread_name(thread),
-                                              object==NULL? "NULL" : java_lang_Class::as_Klass(object)->external_name()));
+                                              object==NULL? "NULL" : object->klass()->external_name()));

       JvmtiVMObjectAllocEventMark jem(thread, h());
       JvmtiJavaThreadEventTransition jet(thread);
@ -698,7 +698,7 @@ char* ArgumentBootClassPath::combined_path() {
assert(total_len > 0, "empty sysclasspath not allowed");

// Copy the _items to a single string.
char* cp = NEW_C_HEAP_ARRAY(char, total_len, mtInternal);
char* cp = NEW_C_HEAP_ARRAY(char, total_len, mtArguments);
char* cp_tmp = cp;
for (i = 0; i < _bcp_nitems; ++i) {
if (_items[i] != NULL) {
@ -719,7 +719,7 @@ ArgumentBootClassPath::add_to_path(const char* path, const char* str, bool prepe
assert(str != NULL, "just checking");
if (path == NULL) {
size_t len = strlen(str) + 1;
cp = NEW_C_HEAP_ARRAY(char, len, mtInternal);
cp = NEW_C_HEAP_ARRAY(char, len, mtArguments);
memcpy(cp, str, len); // copy the trailing null
} else {
const char separator = *os::path_separator();
@ -728,7 +728,7 @@ ArgumentBootClassPath::add_to_path(const char* path, const char* str, bool prepe
size_t len = old_len + str_len + 2;

if (prepend) {
cp = NEW_C_HEAP_ARRAY(char, len, mtInternal);
cp = NEW_C_HEAP_ARRAY(char, len, mtArguments);
char* cp_tmp = cp;
memcpy(cp_tmp, str, str_len);
cp_tmp += str_len;
@ -736,7 +736,7 @@ ArgumentBootClassPath::add_to_path(const char* path, const char* str, bool prepe
memcpy(++cp_tmp, path, old_len + 1); // copy the trailing null
FREE_C_HEAP_ARRAY(char, path);
} else {
cp = REALLOC_C_HEAP_ARRAY(char, path, len, mtInternal);
cp = REALLOC_C_HEAP_ARRAY(char, path, len, mtArguments);
char* cp_tmp = cp + old_len;
*cp_tmp = separator;
memcpy(++cp_tmp, str, str_len + 1); // copy the trailing null
@ -758,7 +758,7 @@ char* ArgumentBootClassPath::add_jars_to_path(char* path, const char* directory)

/* Scan the directory for jars/zips, appending them to path. */
struct dirent *entry;
char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory), mtInternal);
char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory), mtArguments);
while ((entry = os::readdir(dir, (dirent *) dbuf)) != NULL) {
const char* name = entry->d_name;
const char* ext = name + strlen(name) - 4;
@ -766,7 +766,7 @@ char* ArgumentBootClassPath::add_jars_to_path(char* path, const char* directory)
(os::file_name_strcmp(ext, ".jar") == 0 ||
os::file_name_strcmp(ext, ".zip") == 0);
if (isJarOrZip) {
char* jarpath = NEW_C_HEAP_ARRAY(char, directory_len + 2 + strlen(name), mtInternal);
char* jarpath = NEW_C_HEAP_ARRAY(char, directory_len + 2 + strlen(name), mtArguments);
sprintf(jarpath, "%s%s%s", directory, dir_sep, name);
path = add_to_path(path, jarpath, false);
FREE_C_HEAP_ARRAY(char, jarpath);
@ -943,7 +943,7 @@ static bool append_to_string_flag(const char* name, const char* new_value, Flag:
} else if (new_len == 0) {
value = old_value;
} else {
char* buf = NEW_C_HEAP_ARRAY(char, old_len + 1 + new_len + 1, mtInternal);
char* buf = NEW_C_HEAP_ARRAY(char, old_len + 1 + new_len + 1, mtArguments);
// each new setting adds another LINE to the switch:
sprintf(buf, "%s\n%s", old_value, new_value);
value = buf;
@ -1134,9 +1134,9 @@ void Arguments::add_string(char*** bldarray, int* count, const char* arg) {

// expand the array and add arg to the last element
if (*bldarray == NULL) {
*bldarray = NEW_C_HEAP_ARRAY(char*, new_count, mtInternal);
*bldarray = NEW_C_HEAP_ARRAY(char*, new_count, mtArguments);
} else {
*bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, new_count, mtInternal);
*bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, new_count, mtArguments);
}
(*bldarray)[*count] = os::strdup_check_oom(arg);
*count = new_count;
@ -1400,7 +1400,7 @@ bool Arguments::add_property(const char* prop) {
// property has a value, thus extract it and save to the
// allocated string
size_t key_len = eq - prop;
char* tmp_key = AllocateHeap(key_len + 1, mtInternal);
char* tmp_key = AllocateHeap(key_len + 1, mtArguments);

strncpy(tmp_key, prop, key_len);
tmp_key[key_len] = '\0';
@ -1422,7 +1422,7 @@ bool Arguments::add_property(const char* prop) {
} else {
if (strcmp(key, "sun.java.command") == 0) {
char *old_java_command = _java_command;
_java_command = os::strdup_check_oom(value, mtInternal);
_java_command = os::strdup_check_oom(value, mtArguments);
if (old_java_command != NULL) {
os::free(old_java_command);
}
@ -1430,7 +1430,7 @@ bool Arguments::add_property(const char* prop) {
const char* old_java_vendor_url_bug = _java_vendor_url_bug;
// save it in _java_vendor_url_bug, so JVM fatal error handler can access
// its value without going through the property list or making a Java call.
_java_vendor_url_bug = os::strdup_check_oom(value, mtInternal);
_java_vendor_url_bug = os::strdup_check_oom(value, mtArguments);
if (old_java_vendor_url_bug != DEFAULT_VENDOR_URL_BUG) {
assert(old_java_vendor_url_bug != NULL, "_java_vendor_url_bug is NULL");
os::free((void *)old_java_vendor_url_bug);
@ -1458,7 +1458,7 @@ bool Arguments::append_to_addmods_property(const char* module_name) {
if (old_value != NULL) {
buf_len += strlen(old_value) + 1;
}
char* new_value = AllocateHeap(buf_len, mtInternal);
char* new_value = AllocateHeap(buf_len, mtArguments);
if (new_value == NULL) {
return false;
}
@ -2095,8 +2095,8 @@ void Arguments::set_g1_gc_flags() {
}

#if INCLUDE_ALL_GCS
if (G1ConcRefinementThreads == 0) {
FLAG_SET_DEFAULT(G1ConcRefinementThreads, ParallelGCThreads);
if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
FLAG_SET_ERGO(uint, G1ConcRefinementThreads, ParallelGCThreads);
}
#endif
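The refinement-thread hunk above changes the test from "value is 0" to "flag was never set": FLAG_IS_DEFAULT asks about the flag's origin rather than its value, and FLAG_SET_ERGO records the derived value as an ergonomic decision, so an explicit -XX:G1ConcRefinementThreads=0 is now honored (the matching test change appears near the end of this commit). A rough model of origin tracking, simplified from the real Flag struct, which records more states:

    #include <cstdio>

    enum Origin { DEFAULT, COMMAND_LINE, ERGONOMIC };

    // "Was it set on the command line?" is a property of the flag, not of its
    // value, so 0 can be a legal user choice.
    struct UintFlag {
      unsigned value;
      Origin origin;
      bool is_default() const { return origin == DEFAULT; }
      void set_ergo(unsigned v) { value = v; origin = ERGONOMIC; }
    };

    int main() {
      UintFlag parallel_gc_threads = {4, ERGONOMIC};
      UintFlag refinement_threads  = {0, COMMAND_LINE};  // user explicitly asked for 0

      // New logic: only derive the value when the user did not set it.
      if (refinement_threads.is_default()) {
        refinement_threads.set_ergo(parallel_gc_threads.value);
      }
      printf("G1ConcRefinementThreads=%u (origin=%d)\n",
             refinement_threads.value, (int)refinement_threads.origin);
      return 0;
    }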
@ -2852,13 +2852,13 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
if (tail != NULL) {
const char* pos = strchr(tail, ':');
size_t len = (pos == NULL) ? strlen(tail) : pos - tail;
char* name = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len + 1, mtInternal), tail, len);
char* name = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len + 1, mtArguments), tail, len);
name[len] = '\0';

char *options = NULL;
if(pos != NULL) {
size_t len2 = strlen(pos+1) + 1; // options start after ':'. Final zero must be copied.
options = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len2, mtInternal), pos+1, len2);
options = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len2, mtArguments), pos+1, len2);
}
#if !INCLUDE_JVMTI
if (strcmp(name, "jdwp") == 0) {
@ -2875,12 +2875,12 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
if(tail != NULL) {
const char* pos = strchr(tail, '=');
size_t len = (pos == NULL) ? strlen(tail) : pos - tail;
char* name = strncpy(NEW_C_HEAP_ARRAY(char, len + 1, mtInternal), tail, len);
char* name = strncpy(NEW_C_HEAP_ARRAY(char, len + 1, mtArguments), tail, len);
name[len] = '\0';

char *options = NULL;
if(pos != NULL) {
options = os::strdup_check_oom(pos + 1, mtInternal);
options = os::strdup_check_oom(pos + 1, mtArguments);
}
#if !INCLUDE_JVMTI
if (valid_jdwp_agent(name, is_absolute_path)) {
@ -2899,7 +2899,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
return JNI_ERR;
#else
if (tail != NULL) {
char *options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(tail) + 1, mtInternal), tail);
char *options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(tail) + 1, mtArguments), tail);
add_init_agent("instrument", options, false);
// java agents need module java.instrument. Also -addmods ALL-SYSTEM because
// the java agent is in the unnamed module of the application class loader
@ -3201,7 +3201,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
size_t len = strlen(patch_dirs[x]);
if (len != 0) { // Ignore empty strings.
len += 11; // file_sep + "java.base" + null terminator.
char* dir = NEW_C_HEAP_ARRAY(char, len, mtInternal);
char* dir = NEW_C_HEAP_ARRAY(char, len, mtArguments);
jio_snprintf(dir, len, "%s%cjava.base", patch_dirs[x], file_sep);

// See if Xpatch module path exists.
@ -3507,7 +3507,7 @@ void Arguments::fix_appclasspath() {
src ++;
}

char* copy = os::strdup_check_oom(src, mtInternal);
char* copy = os::strdup_check_oom(src, mtArguments);

// trim all trailing empty paths
for (char* tail = copy + strlen(copy) - 1; tail >= copy && *tail == separator; tail--) {
@ -3531,7 +3531,7 @@ static bool has_jar_files(const char* directory) {
if (dir == NULL) return false;

struct dirent *entry;
char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory), mtInternal);
char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory), mtArguments);
bool hasJarFile = false;
while (!hasJarFile && (entry = os::readdir(dir, (dirent *) dbuf)) != NULL) {
const char* name = entry->d_name;
@ -3557,7 +3557,7 @@ static int check_non_empty_dirs(const char* path) {
}
path = end;
} else {
char* dirpath = NEW_C_HEAP_ARRAY(char, tmp_end - path + 1, mtInternal);
char* dirpath = NEW_C_HEAP_ARRAY(char, tmp_end - path + 1, mtArguments);
memcpy(dirpath, path, tmp_end - path);
dirpath[tmp_end - path] = '\0';
if (has_jar_files(dirpath)) {
@ -3729,7 +3729,7 @@ class ScopedVMInitArgs : public StackObj {
jint set_args(GrowableArray<JavaVMOption>* options) {
_is_set = true;
JavaVMOption* options_arr = NEW_C_HEAP_ARRAY_RETURN_NULL(
JavaVMOption, options->length(), mtInternal);
JavaVMOption, options->length(), mtArguments);
if (options_arr == NULL) {
return JNI_ENOMEM;
}
@ -3784,7 +3784,7 @@ class ScopedVMInitArgs : public StackObj {
assert(vm_options_file_pos != -1, "vm_options_file_pos should be set");

int length = args->nOptions + args_to_insert->nOptions - 1;
GrowableArray<JavaVMOption> *options = new (ResourceObj::C_HEAP, mtInternal)
GrowableArray<JavaVMOption> *options = new (ResourceObj::C_HEAP, mtArguments)
GrowableArray<JavaVMOption>(length, true); // Construct new option array
for (int i = 0; i < args->nOptions; i++) {
if (i == vm_options_file_pos) {
@ -3861,7 +3861,7 @@ jint Arguments::parse_vm_options_file(const char* file_name, ScopedVMInitArgs* v
// '+ 1' for NULL termination even with max bytes
size_t bytes_alloc = stbuf.st_size + 1;

char *buf = NEW_C_HEAP_ARRAY_RETURN_NULL(char, bytes_alloc, mtInternal);
char *buf = NEW_C_HEAP_ARRAY_RETURN_NULL(char, bytes_alloc, mtArguments);
if (NULL == buf) {
jio_fprintf(defaultStream::error_stream(),
"Could not allocate read buffer for options file parse\n");
@ -3898,7 +3898,7 @@ jint Arguments::parse_vm_options_file(const char* file_name, ScopedVMInitArgs* v
}

jint Arguments::parse_options_buffer(const char* name, char* buffer, const size_t buf_len, ScopedVMInitArgs* vm_args) {
GrowableArray<JavaVMOption> *options = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JavaVMOption>(2, true); // Construct option array
GrowableArray<JavaVMOption> *options = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<JavaVMOption>(2, true); // Construct option array

// some pointers to help with parsing
char *buffer_end = buffer + buf_len;
@ -4002,13 +4002,13 @@ static char* get_shared_archive_path() {
size_t jvm_path_len = strlen(jvm_path);
size_t file_sep_len = strlen(os::file_separator());
const size_t len = jvm_path_len + file_sep_len + 20;
shared_archive_path = NEW_C_HEAP_ARRAY(char, len, mtInternal);
shared_archive_path = NEW_C_HEAP_ARRAY(char, len, mtArguments);
if (shared_archive_path != NULL) {
jio_snprintf(shared_archive_path, len, "%s%sclasses.jsa",
jvm_path, os::file_separator());
}
} else {
shared_archive_path = os::strdup_check_oom(SharedArchiveFile, mtInternal);
shared_archive_path = os::strdup_check_oom(SharedArchiveFile, mtArguments);
}
return shared_archive_path;
}
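Note the two allocation flavors in the hunks above: plain NEW_C_HEAP_ARRAY aborts the VM on exhaustion, while NEW_C_HEAP_ARRAY_RETURN_NULL lets the caller back out gracefully (here with JNI_ENOMEM). A hedged sketch of the same pattern in plain C++, with hypothetical helper names:

    #include <cstdlib>

    // Simplified: HotSpot's macros additionally record NMT tags and call sites.
    template <typename T>
    T* new_array_or_die(size_t n) {
      T* p = static_cast<T*>(malloc(n * sizeof(T)));
      if (p == nullptr) abort();     // vm_exit_out_of_memory in HotSpot
      return p;
    }

    template <typename T>
    T* new_array_return_null(size_t n) {
      return static_cast<T*>(malloc(n * sizeof(T)));  // caller must check
    }

    int parse_options(size_t n) {
      int* opts = new_array_return_null<int>(n);
      if (opts == nullptr) {
        return -1;                   // analogous to returning JNI_ENOMEM
      }
      free(opts);
      return 0;
    }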
@ -47,7 +47,7 @@ class ArgumentBootClassPath;
// PathString is used as the underlying value container for a
// SystemProperty and for the string that represents the system
// boot class path, Arguments::_system_boot_class_path.
class PathString : public CHeapObj<mtInternal> {
class PathString : public CHeapObj<mtArguments> {
protected:
char* _value;
public:
@ -57,7 +57,7 @@ class PathString : public CHeapObj<mtInternal> {
if (_value != NULL) {
FreeHeap(_value);
}
_value = AllocateHeap(strlen(value)+1, mtInternal);
_value = AllocateHeap(strlen(value)+1, mtArguments);
assert(_value != NULL, "Unable to allocate space for new path value");
if (_value != NULL) {
strcpy(_value, value);
@ -76,7 +76,7 @@ class PathString : public CHeapObj<mtInternal> {
if (_value != NULL) {
len += strlen(_value);
}
sp = AllocateHeap(len+2, mtInternal);
sp = AllocateHeap(len+2, mtArguments);
assert(sp != NULL, "Unable to allocate space for new append path value");
if (sp != NULL) {
if (_value != NULL) {
@ -97,7 +97,7 @@ class PathString : public CHeapObj<mtInternal> {
if (value == NULL) {
_value = NULL;
} else {
_value = AllocateHeap(strlen(value)+1, mtInternal);
_value = AllocateHeap(strlen(value)+1, mtArguments);
strcpy(_value, value);
}
}
@ -143,7 +143,7 @@ class SystemProperty : public PathString {
if (key == NULL) {
_key = NULL;
} else {
_key = AllocateHeap(strlen(key)+1, mtInternal);
_key = AllocateHeap(strlen(key)+1, mtArguments);
strcpy(_key, key);
}
_next = NULL;
@ -154,7 +154,7 @@ class SystemProperty : public PathString {


// For use by -agentlib, -agentpath and -Xrun
class AgentLibrary : public CHeapObj<mtInternal> {
class AgentLibrary : public CHeapObj<mtArguments> {
friend class AgentLibraryList;
public:
// Is this library valid or not. Don't rely on os_lib == NULL as statically
@ -189,12 +189,12 @@ public:

// Constructor
AgentLibrary(const char* name, const char* options, bool is_absolute_path, void* os_lib) {
_name = AllocateHeap(strlen(name)+1, mtInternal);
_name = AllocateHeap(strlen(name)+1, mtArguments);
strcpy(_name, name);
if (options == NULL) {
_options = NULL;
} else {
_options = AllocateHeap(strlen(options)+1, mtInternal);
_options = AllocateHeap(strlen(options)+1, mtArguments);
strcpy(_options, options);
}
_is_absolute_path = is_absolute_path;
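CHeapObj<type> gives a class an operator new that charges every instance to the given NMT category, so retagging PathString and AgentLibrary above moves whole object graphs into the Arguments bucket at once. A trimmed-down model of the mechanism (assumption: the real template also supports arrays, placement variants, and records call sites):

    #include <cstdlib>

    enum MemoryType { mtInternal, mtArguments };

    // Every instance of a CHeapObj<T> subclass is tagged at allocation time.
    template <MemoryType T>
    class CHeapObj {
     public:
      void* operator new(size_t size) {
        // A real implementation would record (size, T) with the tracker here.
        void* p = malloc(size);
        if (p == nullptr) abort();
        return p;
      }
      void operator delete(void* p) { free(p); }
    };

    class PathString : public CHeapObj<mtArguments> {  // now tagged as Arguments
      char* _value = nullptr;
    };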
@ -226,7 +226,7 @@ CommandLineFlagConstraint::ConstraintType CommandLineFlagConstraintList::_valida

// Check the ranges of all flags that have them or print them out and exit if requested
void CommandLineFlagConstraintList::init(void) {
_constraints = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<CommandLineFlagConstraint*>(INITIAL_CONSTRAINTS_SIZE, true);
_constraints = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<CommandLineFlagConstraint*>(INITIAL_CONSTRAINTS_SIZE, true);

emit_constraint_no(NULL RUNTIME_FLAGS(EMIT_CONSTRAINT_DEVELOPER_FLAG,
EMIT_CONSTRAINT_PD_DEVELOPER_FLAG,
@ -48,7 +48,7 @@ typedef Flag::Error (*CommandLineFlagConstraintFunc_uint64_t)(uint64_t value, bo
typedef Flag::Error (*CommandLineFlagConstraintFunc_size_t)(size_t value, bool verbose);
typedef Flag::Error (*CommandLineFlagConstraintFunc_double)(double value, bool verbose);

class CommandLineFlagConstraint : public CHeapObj<mtInternal> {
class CommandLineFlagConstraint : public CHeapObj<mtArguments> {
public:
// During VM initialization, constraint validation will be done in order of ConstraintType.
enum ConstraintType {
@ -292,7 +292,7 @@ GrowableArray<CommandLineFlagRange*>* CommandLineFlagRangeList::_ranges = NULL;
// Check the ranges of all flags that have them
void CommandLineFlagRangeList::init(void) {

_ranges = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<CommandLineFlagRange*>(INITIAL_RANGES_SIZE, true);
_ranges = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<CommandLineFlagRange*>(INITIAL_RANGES_SIZE, true);

emit_range_no(NULL RUNTIME_FLAGS(EMIT_RANGE_DEVELOPER_FLAG,
EMIT_RANGE_PD_DEVELOPER_FLAG,
@ -44,7 +44,7 @@ public:
static void print(bool verbose, const char* msg, ...);
};

class CommandLineFlagRange : public CHeapObj<mtInternal> {
class CommandLineFlagRange : public CHeapObj<mtArguments> {
private:
const char* _name;
public:
@ -1292,7 +1292,7 @@ void CommandLineFlags::printSetFlags(outputStream* out) {
const size_t length = Flag::numFlags - 1;

// Sort
Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtInternal);
Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtArguments);
for (size_t i = 0; i < length; i++) {
array[i] = &flagTable[i];
}
@ -1326,7 +1326,7 @@ void CommandLineFlags::printFlags(outputStream* out, bool withComments, bool pri
const size_t length = Flag::numFlags - 1;

// Sort
Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtInternal);
Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtArguments);
for (size_t i = 0; i < length; i++) {
array[i] = &flagTable[i];
}
@ -41,6 +41,7 @@ const char* NMTUtil::_memory_type_names[] = {
"Test",
"Tracing",
"Logging",
"Arguments",
"Unknown"
};
@ -199,9 +199,6 @@ const size_t M = K*K;
const size_t G = M*K;
const size_t HWperKB = K / sizeof(HeapWord);

const jint min_jint = (jint)1 << (sizeof(jint)*BitsPerByte-1); // 0x80000000 == smallest jint
const jint max_jint = (juint)min_jint - 1; // 0x7FFFFFFF == largest jint

// Constants for converting from a base unit to milli-base units. For
// example from seconds to milliseconds and microseconds

@ -381,6 +378,14 @@ typedef jshort s2;
typedef jint s4;
typedef jlong s8;

const jbyte min_jbyte = -(1 << 7); // smallest jbyte
const jbyte max_jbyte = (1 << 7) - 1; // largest jbyte
const jshort min_jshort = -(1 << 15); // smallest jshort
const jshort max_jshort = (1 << 15) - 1; // largest jshort

const jint min_jint = (jint)1 << (sizeof(jint)*BitsPerByte-1); // 0x80000000 == smallest jint
const jint max_jint = (juint)min_jint - 1; // 0x7FFFFFFF == largest jint

//----------------------------------------------------------------------------------------------------
// JVM spec restrictions
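The @381 hunk introduces byte and short bounds next to the relocated int bounds; all the pairs follow the usual two's-complement pattern, min = -2^(n-1) and max = 2^(n-1) - 1, which is easy to sanity-check at compile time. A standalone check (assumption: jbyte/jshort/jint mirror JNI's fixed-width typedefs; the shift is done in unsigned to avoid signed-overflow pitfalls):

    #include <cstdint>

    typedef int8_t   jbyte;
    typedef int16_t  jshort;
    typedef int32_t  jint;
    typedef uint32_t juint;

    const jbyte  min_jbyte  = -(1 << 7);
    const jbyte  max_jbyte  = (1 << 7) - 1;
    const jshort min_jshort = -(1 << 15);
    const jshort max_jshort = (1 << 15) - 1;
    const jint   min_jint   = (jint)((juint)1 << 31);       // 0x80000000
    const jint   max_jint   = (jint)((juint)min_jint - 1);  // 0x7FFFFFFF

    static_assert(min_jbyte  == INT8_MIN  && max_jbyte  == INT8_MAX,  "jbyte bounds");
    static_assert(min_jshort == INT16_MIN && max_jshort == INT16_MAX, "jshort bounds");
    static_assert(min_jint   == INT32_MIN && max_jint   == INT32_MAX, "jint bounds");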
@ -338,6 +338,7 @@ hotspot_fast_gc_2 = \
sanity/ExecuteInternalVMTests.java \
gc/ \
-gc/g1/ \
-gc/stress \
-gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java \
-gc/cms/TestMBeanCMS.java \
-gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java
@ -346,7 +347,7 @@ hotspot_fast_gc_closed = \
sanity/ExecuteInternalVMTests.java

hotspot_fast_gc_gcold = \
stress/gc/TestGCOld.java
gc/stress/TestGCOld.java

hotspot_fast_runtime = \
runtime/ \
@ -38,7 +38,7 @@ import java.util.regex.*;

public class TestG1ConcRefinementThreads {

static final int AUTO_SELECT_THREADS_COUNT = 0;
static final int AUTO_SELECT_THREADS_COUNT = -1;
static final int PASSED_THREADS_COUNT = 11;

public static void main(String args[]) throws Exception {
@ -49,8 +49,8 @@ public class TestG1ConcRefinementThreads {

// zero setting case
runG1ConcRefinementThreadsTest(
new String[]{"-XX:G1ConcRefinementThreads=0"}, // automatically selected
AUTO_SELECT_THREADS_COUNT /* set to zero */);
new String[]{"-XX:G1ConcRefinementThreads=0"},
0);

// non-zero setting case
runG1ConcRefinementThreadsTest(
@ -77,7 +77,7 @@ public class TestG1ConcRefinementThreads {
private static void checkG1ConcRefinementThreadsConsistency(String output, int expectedValue) {
int actualValue = getIntValue("G1ConcRefinementThreads", output);

if (expectedValue == 0) {
if (expectedValue == AUTO_SELECT_THREADS_COUNT) {
// If expectedValue is automatically selected, set it to the same value as ParallelGCThreads.
expectedValue = getIntValue("ParallelGCThreads", output);
}
@ -38,29 +38,29 @@ import sun.hotspot.WhiteBox;
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm/timeout=300
* -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseG1GC
* -XX:+IgnoreUnrecognizedVMOptions -XX:+PrintGC -XX:+PrintGCTimeStamps -Xlog:gc
* -Xmx500m -XX:G1HeapRegionSize=1m -XX:MaxGCPauseMillis=1000 TestStressRSetCoarsening 1 0 300
* -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+UseG1GC -Xlog:gc* -XX:MaxGCPauseMillis=1000
* -Xmx500m -XX:G1HeapRegionSize=1m TestStressRSetCoarsening 1 0 300
* @run main/othervm/timeout=300
* -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseG1GC
* -XX:+IgnoreUnrecognizedVMOptions -XX:+PrintGC -XX:+PrintGCTimeStamps -Xlog:gc
* -Xmx500m -XX:G1HeapRegionSize=8m -XX:MaxGCPauseMillis=1000 TestStressRSetCoarsening 1 10 300
* -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+UseG1GC -Xlog:gc* -XX:MaxGCPauseMillis=1000
* -Xmx500m -XX:G1HeapRegionSize=8m TestStressRSetCoarsening 1 10 300
* @run main/othervm/timeout=300
* -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseG1GC
* -XX:+IgnoreUnrecognizedVMOptions -XX:+PrintGC -XX:+PrintGCTimeStamps -Xlog:gc
* -Xmx500m -XX:G1HeapRegionSize=32m -XX:MaxGCPauseMillis=1000 TestStressRSetCoarsening 42 10 300
* -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+UseG1GC -Xlog:gc* -XX:MaxGCPauseMillis=1000
* -Xmx500m -XX:G1HeapRegionSize=32m TestStressRSetCoarsening 42 10 300
* @run main/othervm/timeout=300
* -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseG1GC
* -XX:+IgnoreUnrecognizedVMOptions -XX:+PrintGC -XX:+PrintGCTimeStamps -Xlog:gc
* -Xmx500m -XX:G1HeapRegionSize=1m -XX:MaxGCPauseMillis=1000 TestStressRSetCoarsening 2 0 300
* -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+UseG1GC -Xlog:gc* -XX:MaxGCPauseMillis=1000
* -Xmx500m -XX:G1HeapRegionSize=1m TestStressRSetCoarsening 2 0 300
* @run main/othervm/timeout=1800
* -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseG1GC
* -XX:+IgnoreUnrecognizedVMOptions -XX:+PrintGC -XX:+PrintGCTimeStamps -Xlog:gc
* -Xmx1G -XX:G1HeapRegionSize=1m -XX:MaxGCPauseMillis=1000 TestStressRSetCoarsening 500 0 1800
* -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+UseG1GC -Xlog:gc* -XX:MaxGCPauseMillis=1000
* -Xmx1G -XX:G1HeapRegionSize=1m TestStressRSetCoarsening 500 0 1800
* @run main/othervm/timeout=1800
* -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseG1GC
* -XX:+IgnoreUnrecognizedVMOptions -XX:+PrintGC -XX:+PrintGCTimeStamps -Xlog:gc
* -Xmx1G -XX:G1HeapRegionSize=1m -XX:MaxGCPauseMillis=1000 TestStressRSetCoarsening 10 10 1800
* -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+UseG1GC -Xlog:gc* -XX:MaxGCPauseMillis=1000
* -Xmx1G -XX:G1HeapRegionSize=1m TestStressRSetCoarsening 10 10 1800
*/

/**
@ -179,7 +179,13 @@ public class TestStressRSetCoarsening {
// sizeOf(Object[N]) ~= (N+4)*refSize
// ==>
// N = regionSize / K / refSize - 4;
N = (int) ((regionSize / K) / refSize) - 5;
int n = (int) ((regionSize / K) / refSize) - 5; // best guess
long objSize = WB.getObjectSize(new Object[n]);
while (K*objSize > regionSize) { // adjust to avoid OOME
n = n - 1;
objSize = WB.getObjectSize(new Object[n]);
}
N = n;

/*
* --------------
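The new code above stops trusting the back-of-the-envelope size formula and instead asks the VM (via WhiteBox.getObjectSize) for the real array size, shrinking n until K objects fit in one region. The same fixed-point idea in standalone form, with a hypothetical object_size model standing in for WhiteBox:

    #include <cstdio>

    // Hypothetical size model standing in for WhiteBox.getObjectSize():
    // two header words plus a length word plus n reference slots.
    size_t object_size(size_t n, size_t ref_size) {
      return (n + 3) * ref_size;
    }

    // Shrink n until k such arrays fit into one region, mirroring the test's loop.
    size_t fit_arrays(size_t region_size, size_t k, size_t ref_size) {
      size_t n = region_size / k / ref_size - 5;            // best guess, as in the test
      while (k * object_size(n, ref_size) > region_size) {
        n--;                                                // adjust to avoid OOME
      }
      return n;
    }

    int main() {
      // 1 MB region, 4 arrays per region, 8-byte references.
      printf("n = %zu\n", fit_arrays(1024 * 1024, 4, 8));
      return 0;
    }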
@ -202,8 +208,9 @@ public class TestStressRSetCoarsening {
System.out.println("%% Objects");
System.out.println("%% N (array length) : " + N);
System.out.println("%% K (objects in regions): " + K);
System.out.println("%% Object size : " + objSize +
" (sizeOf(new Object[" + N + "])");
System.out.println("%% Reference size : " + refSize);
System.out.println("%% Approximate obj size : " + (N + 2) * refSize / KB + "K)");

storage = new Object[regionCount * K][];
for (int i = 0; i < storage.length; i++) {
@ -28,11 +28,12 @@
* @summary Test if package p2 in module m2 is exported to all unnamed,
* then class p1.c1 in an unnamed module can read p2.c2 in module m2.
* @library /testlibrary /test/lib
* @modules java.base/jdk.internal.module
* @compile myloaders/MySameClassLoader.java
* @compile p2/c2.java
* @compile p1/c1.java
* @compile -XaddExports:java.base/jdk.internal.module=ALL-UNNAMED ExportAllUnnamed.java
* @run main/othervm -XaddExports:java.base/jdk.internal.module=ALL-UNNAMED -Xbootclasspath/a:. ExportAllUnnamed
* @build ExportAllUnnamed
* @run main/othervm -Xbootclasspath/a:. ExportAllUnnamed
*/

import static jdk.test.lib.Asserts.*;