Merge
commit b4be5da833
@@ -700,7 +700,7 @@ void CMSAdaptiveSizePolicy::ms_collection_end(GCCause::Cause gc_cause) {
       double latest_cms_sum_concurrent_phases_time_secs =
         concurrent_collection_time();
       if (PrintAdaptiveSizePolicy && Verbose) {
-        gclog_or_tty->print_cr("\nCMSAdaptiveSizePolicy::ms_collecton_end "
+        gclog_or_tty->print_cr("\nCMSAdaptiveSizePolicy::ms_collection_end "
          "STW_in_foreground_in_seconds %f "
          "_latest_cms_initial_mark_start_to_end_time_secs %f "
          "_latest_cms_remark_start_to_end_time_secs %f "
@@ -958,7 +958,7 @@ void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
                            desired_free_percentage);
     gclog_or_tty->print_cr(" Maximum free fraction %f",
                            maximum_free_percentage);
-    gclog_or_tty->print_cr(" Capactiy "SIZE_FORMAT, capacity()/1000);
+    gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000);
     gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
                            desired_capacity/1000);
     int prev_level = level() - 1;
@@ -3313,7 +3313,7 @@ void CMSCollector::setup_cms_unloading_and_verification_state() {
   }

   // Not unloading classes this cycle
-  assert(!should_unload_classes(), "Inconsitency!");
+  assert(!should_unload_classes(), "Inconsistency!");
   remove_root_scanning_option(SharedHeap::SO_SystemClasses);
   add_root_scanning_option(SharedHeap::SO_AllClasses);

@@ -7243,7 +7243,7 @@ size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
   HeapWord* addr = (HeapWord*)p;
   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
   assert(!_span.contains(addr), "we are scanning the survivor spaces");
-  assert(p->klass_or_null() != NULL, "object should be initializd");
+  assert(p->klass_or_null() != NULL, "object should be initialized");
   // an initialized object; ignore mark word in verification below
   // since we are running concurrent with mutators
   assert(p->is_oop(true), "should be an oop");
@@ -254,7 +254,7 @@ bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
     // No need to do a young gc, we'll just nudge the CMS thread
     // in the doit() method above, to be executed soon.
     assert(_gc_count_before < gch->total_collections(),
-           "total_collections() should be monotnically increasing");
+           "total_collections() should be monotonically increasing");
     return false; // no need for foreground young gc
   }
 }
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/bufferingOopClosure.hpp"
+#include "memory/iterator.hpp"
+#include "utilities/debug.hpp"
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TestBufferingOopClosure {
+
+  // Helper class to fake a set of oop*s and narrowOop*s.
+  class FakeRoots {
+   public:
+    // Used for sanity checking of the values passed to the do_oops functions in the test.
+    static const uintptr_t NarrowOopMarker = uintptr_t(1) << (BitsPerWord -1);
+
+    int    _num_narrow;
+    int    _num_full;
+    void** _narrow;
+    void** _full;
+
+    FakeRoots(int num_narrow, int num_full) :
+        _num_narrow(num_narrow),
+        _num_full(num_full),
+        _narrow((void**)::malloc(sizeof(void*) * num_narrow)),
+        _full((void**)::malloc(sizeof(void*) * num_full)) {
+
+      for (int i = 0; i < num_narrow; i++) {
+        _narrow[i] = (void*)(NarrowOopMarker + (uintptr_t)i);
+      }
+      for (int i = 0; i < num_full; i++) {
+        _full[i] = (void*)(uintptr_t)i;
+      }
+    }
+
+    ~FakeRoots() {
+      ::free(_narrow);
+      ::free(_full);
+    }
+
+    void oops_do_narrow_then_full(OopClosure* cl) {
+      for (int i = 0; i < _num_narrow; i++) {
+        cl->do_oop((narrowOop*)_narrow[i]);
+      }
+      for (int i = 0; i < _num_full; i++) {
+        cl->do_oop((oop*)_full[i]);
+      }
+    }
+
+    void oops_do_full_then_narrow(OopClosure* cl) {
+      for (int i = 0; i < _num_full; i++) {
+        cl->do_oop((oop*)_full[i]);
+      }
+      for (int i = 0; i < _num_narrow; i++) {
+        cl->do_oop((narrowOop*)_narrow[i]);
+      }
+    }
+
+    void oops_do_mixed(OopClosure* cl) {
+      int i;
+      for (i = 0; i < _num_full && i < _num_narrow; i++) {
+        cl->do_oop((oop*)_full[i]);
+        cl->do_oop((narrowOop*)_narrow[i]);
+      }
+      for (int j = i; j < _num_full; j++) {
+        cl->do_oop((oop*)_full[i]);
+      }
+      for (int j = i; j < _num_narrow; j++) {
+        cl->do_oop((narrowOop*)_narrow[i]);
+      }
+    }
+
+    static const int MaxOrder = 2;
+
+    void oops_do(OopClosure* cl, int do_oop_order) {
+      switch(do_oop_order) {
+        case 0:
+          oops_do_narrow_then_full(cl);
+          break;
+        case 1:
+          oops_do_full_then_narrow(cl);
+          break;
+        case 2:
+          oops_do_mixed(cl);
+          break;
+        default:
+          oops_do_narrow_then_full(cl);
+          break;
+      }
+    }
+  };
+
+  class CountOopClosure : public OopClosure {
+    int _narrow_oop_count;
+    int _full_oop_count;
+   public:
+    CountOopClosure() : _narrow_oop_count(0), _full_oop_count(0) {}
+    void do_oop(narrowOop* p) {
+      assert((uintptr_t(p) & FakeRoots::NarrowOopMarker) != 0,
+          "The narrowOop was unexpectedly not marked with the NarrowOopMarker");
+      _narrow_oop_count++;
+    }
+
+    void do_oop(oop* p){
+      assert((uintptr_t(p) & FakeRoots::NarrowOopMarker) == 0,
+          "The oop was unexpectedly marked with the NarrowOopMarker");
+      _full_oop_count++;
+    }
+
+    int narrow_oop_count() { return _narrow_oop_count; }
+    int full_oop_count()   { return _full_oop_count; }
+    int all_oop_count()    { return _narrow_oop_count + _full_oop_count; }
+  };
+
+  class DoNothingOopClosure : public OopClosure {
+   public:
+    void do_oop(narrowOop* p) {}
+    void do_oop(oop* p)       {}
+  };
+
+  static void testCount(int num_narrow, int num_full, int do_oop_order) {
+    FakeRoots fr(num_narrow, num_full);
+
+    CountOopClosure coc;
+    BufferingOopClosure boc(&coc);
+
+    fr.oops_do(&boc, do_oop_order);
+
+    boc.done();
+
+#define assert_testCount(got, expected)                                      \
+    assert((got) == (expected),                                              \
+        err_msg("Expected: %d, got: %d, when running testCount(%d, %d, %d)", \
+            (got), (expected), num_narrow, num_full, do_oop_order))
+
+    assert_testCount(num_narrow, coc.narrow_oop_count());
+    assert_testCount(num_full, coc.full_oop_count());
+    assert_testCount(num_narrow + num_full, coc.all_oop_count());
+  }
+
+  static void testCount() {
+    int buffer_length = BufferingOopClosure::BufferLength;
+
+    for (int order = 0; order < FakeRoots::MaxOrder; order++) {
+      testCount(0, 0, order);
+      testCount(10, 0, order);
+      testCount(0, 10, order);
+      testCount(10, 10, order);
+      testCount(buffer_length, 10, order);
+      testCount(10, buffer_length, order);
+      testCount(buffer_length, buffer_length, order);
+      testCount(buffer_length + 1, 10, order);
+      testCount(10, buffer_length + 1, order);
+      testCount(buffer_length + 1, buffer_length, order);
+      testCount(buffer_length, buffer_length + 1, order);
+      testCount(buffer_length + 1, buffer_length + 1, order);
+    }
+  }
+
+  static void testIsBufferEmptyOrFull(int num_narrow, int num_full, bool expect_empty, bool expect_full) {
+    FakeRoots fr(num_narrow, num_full);
+
+    DoNothingOopClosure cl;
+    BufferingOopClosure boc(&cl);
+
+    fr.oops_do(&boc, 0);
+
+#define assert_testIsBufferEmptyOrFull(got, expected)                             \
+    assert((got) == (expected),                                                   \
+        err_msg("Expected: %d, got: %d. testIsBufferEmptyOrFull(%d, %d, %s, %s)", \
+            (got), (expected), num_narrow, num_full,                              \
+            BOOL_TO_STR(expect_empty), BOOL_TO_STR(expect_full)))
+
+    assert_testIsBufferEmptyOrFull(expect_empty, boc.is_buffer_empty());
+    assert_testIsBufferEmptyOrFull(expect_full, boc.is_buffer_full());
+  }
+
+  static void testIsBufferEmptyOrFull() {
+    int bl = BufferingOopClosure::BufferLength;
+
+    testIsBufferEmptyOrFull(0, 0, true, false);
+    testIsBufferEmptyOrFull(1, 0, false, false);
+    testIsBufferEmptyOrFull(0, 1, false, false);
+    testIsBufferEmptyOrFull(1, 1, false, false);
+    testIsBufferEmptyOrFull(10, 0, false, false);
+    testIsBufferEmptyOrFull(0, 10, false, false);
+    testIsBufferEmptyOrFull(10, 10, false, false);
+    testIsBufferEmptyOrFull(0, bl, false, true);
+    testIsBufferEmptyOrFull(bl, 0, false, true);
+    testIsBufferEmptyOrFull(bl/2, bl/2, false, true);
+    testIsBufferEmptyOrFull(bl-1, 1, false, true);
+    testIsBufferEmptyOrFull(1, bl-1, false, true);
+    // Processed
+    testIsBufferEmptyOrFull(bl+1, 0, false, false);
+    testIsBufferEmptyOrFull(bl*2, 0, false, true);
+  }
+
+  static void testEmptyAfterDone(int num_narrow, int num_full) {
+    FakeRoots fr(num_narrow, num_full);
+
+    DoNothingOopClosure cl;
+    BufferingOopClosure boc(&cl);
+
+    fr.oops_do(&boc, 0);
+
+    // Make sure all get processed.
+    boc.done();
+
+    assert(boc.is_buffer_empty(),
+        err_msg("Should be empty after call to done(). testEmptyAfterDone(%d, %d)",
+            num_narrow, num_full));
+  }
+
+  static void testEmptyAfterDone() {
+    int bl = BufferingOopClosure::BufferLength;
+
+    testEmptyAfterDone(0, 0);
+    testEmptyAfterDone(1, 0);
+    testEmptyAfterDone(0, 1);
+    testEmptyAfterDone(1, 1);
+    testEmptyAfterDone(10, 0);
+    testEmptyAfterDone(0, 10);
+    testEmptyAfterDone(10, 10);
+    testEmptyAfterDone(0, bl);
+    testEmptyAfterDone(bl, 0);
+    testEmptyAfterDone(bl/2, bl/2);
+    testEmptyAfterDone(bl-1, 1);
+    testEmptyAfterDone(1, bl-1);
+    // Processed
+    testEmptyAfterDone(bl+1, 0);
+    testEmptyAfterDone(bl*2, 0);
+  }
+
+ public:
+  static void test() {
+    testCount();
+    testIsBufferEmptyOrFull();
+    testEmptyAfterDone();
+  }
+};
+
+void TestBufferingOopClosure_test() {
+  TestBufferingOopClosure::test();
+}
+
+#endif
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,10 +25,10 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_BUFFERINGOOPCLOSURE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_BUFFERINGOOPCLOSURE_HPP

-#include "memory/genOopClosures.hpp"
-#include "memory/generation.hpp"
+#include "memory/iterator.hpp"
+#include "oops/oopsHierarchy.hpp"
 #include "runtime/os.hpp"
-#include "utilities/taskqueue.hpp"
+#include "utilities/debug.hpp"

 // A BufferingOops closure tries to separate out the cost of finding roots
 // from the cost of applying closures to them. It maintains an array of
@@ -41,60 +41,103 @@
 // The caller must be sure to call "done" to process any unprocessed
 // buffered entries.

-class Generation;
-class HeapRegion;
-
 class BufferingOopClosure: public OopClosure {
+  friend class TestBufferingOopClosure;
 protected:
-  enum PrivateConstants {
-    BufferLength = 1024
-  };
+  static const size_t BufferLength = 1024;

-  StarTask  _buffer[BufferLength];
-  StarTask* _buffer_top;
-  StarTask* _buffer_curr;
+  // We need to know if the buffered addresses contain oops or narrowOops.
+  // We can't tag the addresses the way StarTask does, because we need to
+  // be able to handle unaligned addresses coming from oops embedded in code.
+  //
+  // The addresses for the full-sized oops are filled in from the bottom,
+  // while the addresses for the narrowOops are filled in from the top.
+  OopOrNarrowOopStar  _buffer[BufferLength];
+  OopOrNarrowOopStar* _oop_top;
+  OopOrNarrowOopStar* _narrowOop_bottom;

   OopClosure* _oc;
   double      _closure_app_seconds;

-  void process_buffer () {
-    double start = os::elapsedTime();
-    for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) {
-      if (curr->is_narrow()) {
-        assert(UseCompressedOops, "Error");
-        _oc->do_oop((narrowOop*)(*curr));
-      } else {
-        _oc->do_oop((oop*)(*curr));
-      }
+  bool is_buffer_empty() {
+    return _oop_top == _buffer && _narrowOop_bottom == (_buffer + BufferLength - 1);
+  }
+
+  bool is_buffer_full() {
+    return _narrowOop_bottom < _oop_top;
+  }
+
+  // Process addresses containing full-sized oops.
+  void process_oops() {
+    for (OopOrNarrowOopStar* curr = _buffer; curr < _oop_top; ++curr) {
+      _oc->do_oop((oop*)(*curr));
     }
-    _buffer_curr = _buffer;
+    _oop_top = _buffer;
+  }
+
+  // Process addresses containing narrow oops.
+  void process_narrowOops() {
+    for (OopOrNarrowOopStar* curr = _buffer + BufferLength - 1; curr > _narrowOop_bottom; --curr) {
+      _oc->do_oop((narrowOop*)(*curr));
+    }
+    _narrowOop_bottom = _buffer + BufferLength - 1;
+  }
+
+  // Apply the closure to all oops and clear the buffer.
+  // Accumulate the time it took.
+  void process_buffer() {
+    double start = os::elapsedTime();
+
+    process_oops();
+    process_narrowOops();
+
     _closure_app_seconds += (os::elapsedTime() - start);
   }

-  template <class T> inline void do_oop_work(T* p) {
-    if (_buffer_curr == _buffer_top) {
+  void process_buffer_if_full() {
+    if (is_buffer_full()) {
       process_buffer();
     }
-    StarTask new_ref(p);
-    *_buffer_curr = new_ref;
-    ++_buffer_curr;
+  }
+
+  void add_narrowOop(narrowOop* p) {
+    assert(!is_buffer_full(), "Buffer should not be full");
+    *_narrowOop_bottom = (OopOrNarrowOopStar)p;
+    _narrowOop_bottom--;
+  }
+
+  void add_oop(oop* p) {
+    assert(!is_buffer_full(), "Buffer should not be full");
+    *_oop_top = (OopOrNarrowOopStar)p;
+    _oop_top++;
   }

 public:
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) {
+    process_buffer_if_full();
+    add_narrowOop(p);
+  }

-  void done () {
-    if (_buffer_curr > _buffer) {
+  virtual void do_oop(oop* p) {
+    process_buffer_if_full();
+    add_oop(p);
+  }
+
+  void done() {
+    if (!is_buffer_empty()) {
      process_buffer();
    }
  }
-  double closure_app_seconds () {
+
+  double closure_app_seconds() {
     return _closure_app_seconds;
   }
-  BufferingOopClosure (OopClosure *oc) :
+
+  BufferingOopClosure(OopClosure *oc) :
     _oc(oc),
-    _buffer_curr(_buffer), _buffer_top(_buffer + BufferLength),
+    _oop_top(_buffer),
+    _narrowOop_bottom(_buffer + BufferLength - 1),
     _closure_app_seconds(0.0) { }
 };
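The comment block in the rewritten header is the heart of this change: instead of tagging every buffered entry the way StarTask does, the buffer is filled from both ends, full-width pointers from the bottom and narrow ones from the top, so unaligned addresses from code-embedded oops are safe. The following standalone C++ sketch is illustrative only, not part of this commit; the TwoEndedBuffer name and its interface are invented, but it exercises the same two-ended layout and the empty/full tests it enables.

// Minimal standalone sketch (hypothetical, not HotSpot code) of the
// two-ended buffer idea: one pointer kind grows up, the other grows down,
// so no per-entry tag bit is needed.
#include <cassert>
#include <cstdio>

template <typename Full, typename Narrow, int N>
class TwoEndedBuffer {
  void*  _buf[N];
  void** _full_top;      // next free slot for Full*, grows upward
  void** _narrow_bottom; // next free slot for Narrow*, grows downward

 public:
  TwoEndedBuffer() : _full_top(_buf), _narrow_bottom(_buf + N - 1) {}

  bool is_empty() const { return _full_top == _buf && _narrow_bottom == _buf + N - 1; }
  bool is_full()  const { return _narrow_bottom < _full_top; }

  void add_full(Full* p)     { assert(!is_full()); *_full_top++ = p; }
  void add_narrow(Narrow* p) { assert(!is_full()); *_narrow_bottom-- = p; }

  // Drain both ends, then reset the buffer to empty.
  template <typename FullFn, typename NarrowFn>
  void drain(FullFn ff, NarrowFn nf) {
    for (void** c = _buf; c < _full_top; ++c)              ff((Full*)*c);
    for (void** c = _buf + N - 1; c > _narrow_bottom; --c) nf((Narrow*)*c);
    _full_top = _buf;
    _narrow_bottom = _buf + N - 1;
  }
};

int main() {
  TwoEndedBuffer<long, int, 8> buf;
  long a = 1; int b = 2;
  buf.add_full(&a);
  buf.add_narrow(&b);
  buf.drain([](long* p) { std::printf("full:   %ld\n", *p); },
            [](int* p)  { std::printf("narrow: %d\n", *p); });
  assert(buf.is_empty());
  return 0;
}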
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -2266,7 +2266,7 @@ void G1CollectedHeap::ref_processing_init() {
                            // (for efficiency/performance)
                            false);
                            // Setting next fields of discovered
-                           // lists requires a barrier.
+                           // lists does not require a barrier.
 }

 size_t G1CollectedHeap::capacity() const {
@@ -5115,15 +5115,12 @@ g1_process_strong_roots(bool is_scavenging,

   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);

-  assert(so & SO_AllCodeCache || scan_rs != NULL, "must scan code roots somehow");
-  // Walk the code cache/strong code roots w/o buffering, because StarTask
-  // cannot handle unaligned oop locations.
-  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
+  CodeBlobToOopClosure scan_code_roots(&buf_scan_non_heap_roots, true /* do_marking */);

   process_strong_roots(false, // no scoping; this is parallel code
                        so,
                        &buf_scan_non_heap_roots,
-                       &eager_scan_code_roots,
+                       &scan_code_roots,
                        scan_klasses
                        );

@@ -5177,9 +5174,9 @@ g1_process_strong_roots(bool is_scavenging,
   g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);

   // Now scan the complement of the collection set.
-  if (scan_rs != NULL) {
-    g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
-  }
+  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
+
+  g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);

   _process_strong_tasks->all_tasks_completed();
 }

@@ -5202,9 +5199,12 @@ private:
   bool _process_symbols;
   int _symbols_processed;
   int _symbols_removed;
+
+  bool _do_in_parallel;
 public:
   G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
     AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
+    _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
     _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {

@@ -5219,16 +5219,16 @@ public:
   }

   ~G1StringSymbolTableUnlinkTask() {
-    guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
+    guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
               err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
                       StringTable::parallel_claimed_index(), _initial_string_table_size));
-    guarantee(!_process_strings || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
+    guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
              err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
                      SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
   }

   void work(uint worker_id) {
-    if (G1CollectedHeap::use_parallel_gc_threads()) {
+    if (_do_in_parallel) {
       int strings_processed = 0;
       int strings_removed = 0;
       int symbols_processed = 0;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -86,13 +86,26 @@ public:

 #define G1_PARTIAL_ARRAY_MASK 0x2

-template <class T> inline bool has_partial_array_mask(T* ref) {
+inline bool has_partial_array_mask(oop* ref) {
   return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
 }

-template <class T> inline T* set_partial_array_mask(T obj) {
+// We never encode partial array oops as narrowOop*, so return false immediately.
+// This allows the compiler to create optimized code when popping references from
+// the work queue.
+inline bool has_partial_array_mask(narrowOop* ref) {
+  assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
+  return false;
+}
+
+// Only implement set_partial_array_mask() for regular oops, not for narrowOops.
+// We always encode partial arrays as regular oop, to allow the
+// specialization for has_partial_array_mask() for narrowOops above.
+// This means that unintentional use of this method with narrowOops are caught
+// by the compiler.
+inline oop* set_partial_array_mask(oop obj) {
   assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
-  return (T*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
+  return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
 }

 template <class T> inline oop clear_partial_array_mask(T* ref) {
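The partial-array mask above is a low-bit pointer-tagging trick: oop locations are at least 4-byte aligned, so bit 0x2 of a real reference is always zero and can encode "this work-queue entry is a partial array chunk". A minimal standalone sketch of the idea follows; it is illustrative only, with Obj and the helper names invented rather than taken from HotSpot.

// Hypothetical sketch of low-bit pointer tagging (not HotSpot code).
#include <cassert>
#include <cstdint>
#include <cstdio>

static const uintptr_t PARTIAL_MASK = 0x2;

struct Obj { int len; };

inline Obj* set_partial_mask(Obj* obj) {
  // Aligned pointers never have this bit set, so tagging loses nothing.
  assert(((uintptr_t)obj & PARTIAL_MASK) == 0 && "would lose information");
  return (Obj*)((uintptr_t)obj | PARTIAL_MASK);
}

inline bool has_partial_mask(Obj* ref) {
  return ((uintptr_t)ref & PARTIAL_MASK) == PARTIAL_MASK;
}

inline Obj* clear_partial_mask(Obj* ref) {
  return (Obj*)((uintptr_t)ref & ~PARTIAL_MASK);
}

int main() {
  Obj o = { 42 };
  Obj* tagged = set_partial_mask(&o);
  assert(has_partial_mask(tagged));
  std::printf("len = %d\n", clear_partial_mask(tagged)->len);
  return 0;
}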
@@ -23,7 +23,6 @@
  */

 #include "precompiled.hpp"
-#include "gc_implementation/g1/bufferingOopClosure.hpp"
 #include "gc_implementation/g1/concurrentG1Refine.hpp"
 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -183,7 +183,7 @@
          "When true, record recent calls to rem set operations.")           \
                                                                             \
  develop(intx, G1MaxVerifyFailures, -1,                                     \
-         "The maximum number of verification failrues to print. "           \
+         "The maximum number of verification failures to print. "           \
          "-1 means print all.")                                              \
                                                                             \
  develop(bool, G1ScrubRemSets, true,                                        \
@@ -91,7 +91,7 @@ void ObjPtrQueue::filter() {
   assert(new_index > 0, "we should not have already filled up the buffer");
   new_index -= oopSize;
   assert(new_index >= i,
-         "new_index should never be below i, as we alwaysr compact 'up'");
+         "new_index should never be below i, as we always compact 'up'");
   oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
   assert(new_p >= p, "the destination location should never be below "
          "the source as we always compact 'up'");
@@ -250,7 +250,7 @@ process_chunk_boundaries(Space* sp,
       // right neighbor (up to the end of the first object).
       if (last_card_of_cur_chunk < last_card_of_first_obj) {
         tty->print_cr(" LNC: BEWARE!!! first obj straddles past right end of chunk:\n"
-                      "   might be efficient to get value from right neighbour?");
+                      "   might be efficient to get value from right neighbor?");
       }
     })
   } else {
@@ -168,7 +168,7 @@ int AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,

   if (TraceDynamicGCThreads) {
     gclog_or_tty->print_cr("GCTaskManager::calc_default_active_workers() : "
-      "active_workers(): %d  new_acitve_workers: %d  "
+      "active_workers(): %d  new_active_workers: %d  "
      "prev_active_workers: %d\n"
      " active_workers_by_JT: %d  active_workers_by_heap_size: %d",
      active_workers, new_active_workers, prev_active_workers,
@@ -559,7 +559,7 @@ void MutableNUMASpace::initialize(MemRegion mr,
                                   bool clear_space,
                                   bool mangle_space,
                                   bool setup_pages) {
-  assert(clear_space, "Reallocation will destory data!");
+  assert(clear_space, "Reallocation will destroy data!");
   assert(lgrp_spaces()->length() > 0, "There should be at least one space");

   MemRegion old_region = region(), new_region;
@@ -1352,7 +1352,7 @@ void BinaryTreeDictionary<Chunk_t, FreeList_t>::print_free_lists(outputStream* s
 template <class Chunk_t, template <class> class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_tree() const {
   guarantee(root() == NULL || total_free_blocks() == 0 ||
-    total_size() != 0, "_total_size should't be 0?");
+    total_size() != 0, "_total_size shouldn't be 0?");
   guarantee(root() == NULL || root()->parent() == NULL, "_root shouldn't have parent");
   verify_tree_helper(root());
 }
@@ -54,8 +54,8 @@ size_t CardTableModRefBS::cards_required(size_t covered_words)
 size_t CardTableModRefBS::compute_byte_map_size()
 {
   assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
-         "unitialized, check declaration order");
-  assert(_page_size != 0, "unitialized, check declaration order");
+         "uninitialized, check declaration order");
+  assert(_page_size != 0, "uninitialized, check declaration order");
   const size_t granularity = os::vm_allocation_granularity();
   return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
 }
@@ -446,18 +446,20 @@ void GenCollectorPolicy::initialize_size_info() {
     _max_gen0_size = max_new_size;
   } else {
     size_t desired_new_size = 0;
-    if (!FLAG_IS_DEFAULT(NewSize)) {
-      // If NewSize is set ergonomically (for example by cms), it
-      // would make sense to use it. If it is used, also use it
-      // to set the initial size. Although there is no reason
-      // the minimum size and the initial size have to be the same,
-      // the current implementation gets into trouble during the calculation
-      // of the tenured generation sizes if they are different.
-      // Note that this makes the initial size and the minimum size
-      // generally small compared to the NewRatio calculation.
+    if (FLAG_IS_CMDLINE(NewSize)) {
+      // If NewSize is set on the command line, we must use it as
+      // the initial size and it also makes sense to use it as the
+      // lower limit.
       _min_gen0_size = NewSize;
       desired_new_size = NewSize;
       max_new_size = MAX2(max_new_size, NewSize);
+    } else if (FLAG_IS_ERGO(NewSize)) {
+      // If NewSize is set ergonomically, we should use it as a lower
+      // limit, but use NewRatio to calculate the initial size.
+      _min_gen0_size = NewSize;
+      desired_new_size =
+        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
+      max_new_size = MAX2(max_new_size, NewSize);
     } else {
       // For the case where NewSize is the default, use NewRatio
       // to size the minimum and initial generation sizes.
@@ -980,3 +982,110 @@ void MarkSweepPolicy::initialize_gc_policy_counters() {
     _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
   }
 }
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+// Testing that the NewSize flag is handled correct is hard because it
+// depends on so many other configurable variables. This test only tries to
+// verify that there are some basic rules for NewSize honored by the policies.
+class TestGenCollectorPolicy {
+public:
+  static void test() {
+    size_t flag_value;
+
+    save_flags();
+
+    // Set some limits that makes the math simple.
+    FLAG_SET_ERGO(uintx, MaxHeapSize, 180 * M);
+    FLAG_SET_ERGO(uintx, InitialHeapSize, 120 * M);
+    Arguments::set_min_heap_size(40 * M);
+
+    // If NewSize is set on the command line, it should be used
+    // for both min and initial young size if less than min heap.
+    flag_value = 20 * M;
+    FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
+    verify_min(flag_value);
+    verify_initial(flag_value);
+
+    // If NewSize is set on command line, but is larger than the min
+    // heap size, it should only be used for initial young size.
+    flag_value = 80 * M;
+    FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
+    verify_initial(flag_value);
+
+    // If NewSize has been ergonomically set, the collector policy
+    // should use it for min but calculate the initial young size
+    // using NewRatio.
+    flag_value = 20 * M;
+    FLAG_SET_ERGO(uintx, NewSize, flag_value);
+    verify_min(flag_value);
+    verify_scaled_initial(InitialHeapSize);
+
+    restore_flags();
+
+  }
+
+  static void verify_min(size_t expected) {
+    MarkSweepPolicy msp;
+    msp.initialize_all();
+
+    assert(msp.min_gen0_size() <= expected, err_msg("%zu > %zu", msp.min_gen0_size(), expected));
+  }
+
+  static void verify_initial(size_t expected) {
+    MarkSweepPolicy msp;
+    msp.initialize_all();
+
+    assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected));
+  }
+
+  static void verify_scaled_initial(size_t initial_heap_size) {
+    MarkSweepPolicy msp;
+    msp.initialize_all();
+
+    size_t expected = msp.scale_by_NewRatio_aligned(initial_heap_size);
+    assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected));
+    assert(FLAG_IS_ERGO(NewSize) && NewSize == expected,
+        err_msg("NewSize should have been set ergonomically to %zu, but was %zu", expected, NewSize));
+  }
+
+private:
+  static size_t original_InitialHeapSize;
+  static size_t original_MaxHeapSize;
+  static size_t original_MaxNewSize;
+  static size_t original_MinHeapDeltaBytes;
+  static size_t original_NewSize;
+  static size_t original_OldSize;
+
+  static void save_flags() {
+    original_InitialHeapSize   = InitialHeapSize;
+    original_MaxHeapSize       = MaxHeapSize;
+    original_MaxNewSize        = MaxNewSize;
+    original_MinHeapDeltaBytes = MinHeapDeltaBytes;
+    original_NewSize           = NewSize;
+    original_OldSize           = OldSize;
+  }
+
+  static void restore_flags() {
+    InitialHeapSize   = original_InitialHeapSize;
+    MaxHeapSize       = original_MaxHeapSize;
+    MaxNewSize        = original_MaxNewSize;
+    MinHeapDeltaBytes = original_MinHeapDeltaBytes;
+    NewSize           = original_NewSize;
+    OldSize           = original_OldSize;
+  }
+};
+
+size_t TestGenCollectorPolicy::original_InitialHeapSize   = 0;
+size_t TestGenCollectorPolicy::original_MaxHeapSize       = 0;
+size_t TestGenCollectorPolicy::original_MaxNewSize        = 0;
+size_t TestGenCollectorPolicy::original_MinHeapDeltaBytes = 0;
+size_t TestGenCollectorPolicy::original_NewSize           = 0;
+size_t TestGenCollectorPolicy::original_OldSize           = 0;
+
+void TestNewSize_test() {
+  TestGenCollectorPolicy::test();
+}
+
+#endif
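The rewritten branch distinguishes FLAG_IS_CMDLINE from FLAG_IS_ERGO: a command-line NewSize pins both the minimum and the initial young size, while an ergonomic NewSize only sets the minimum and leaves the initial size to the NewRatio scaling, which is what the new TestGenCollectorPolicy asserts. A simplified standalone sketch of that decision follows; it is illustrative only, the names are invented, and the real scale_by_NewRatio_aligned also aligns its result.

// Hypothetical sketch of the NewSize sizing rules (not HotSpot code).
#include <algorithm>
#include <cstddef>
#include <cstdio>

enum FlagOrigin { ORIGIN_DEFAULT, ORIGIN_CMDLINE, ORIGIN_ERGO };

struct Sizes { size_t min_gen0; size_t initial_gen0; };

Sizes size_gen0(size_t initial_heap, size_t new_ratio,
                size_t new_size, FlagOrigin origin) {
  // scale_by_NewRatio, simplified: young = heap / (NewRatio + 1).
  size_t by_ratio = initial_heap / (new_ratio + 1);
  if (origin == ORIGIN_CMDLINE) return { new_size, new_size };
  if (origin == ORIGIN_ERGO)    return { new_size, std::max(by_ratio, new_size) };
  return { by_ratio, by_ratio };
}

int main() {
  // 120 MB heap, NewRatio = 2, ergonomic NewSize of 20 MB:
  // the minimum is pinned, the initial size comes from the ratio (40 MB).
  Sizes s = size_gen0(size_t(120) << 20, 2, size_t(20) << 20, ORIGIN_ERGO);
  std::printf("min %zu MB, initial %zu MB\n", s.min_gen0 >> 20, s.initial_gen0 >> 20);
  return 0;
}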
@@ -220,6 +220,7 @@ class ClearedAllSoftRefs : public StackObj {
 };

 class GenCollectorPolicy : public CollectorPolicy {
+  friend class TestGenCollectorPolicy;
 protected:
   size_t _min_gen0_size;
   size_t _initial_gen0_size;
@@ -746,7 +746,7 @@ void VirtualSpaceNode::inc_container_count() {
   assert_lock_strong(SpaceManager::expand_lock());
   _container_count++;
   assert(_container_count == container_count_slow(),
-         err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
+         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                  " container_count_slow() " SIZE_FORMAT,
                  _container_count, container_count_slow()));
 }
@@ -759,7 +759,7 @@ void VirtualSpaceNode::dec_container_count() {
 #ifdef ASSERT
 void VirtualSpaceNode::verify_container_count() {
   assert(_container_count == container_count_slow(),
-         err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
+         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                  " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
 }
 #endif
@@ -62,7 +62,7 @@ void ReferenceProcessor::init_statics() {
   }
   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
             RefDiscoveryPolicy == ReferentBasedDiscovery,
-            "Unrecongnized RefDiscoveryPolicy");
+            "Unrecognized RefDiscoveryPolicy");
   _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
 }

@@ -95,11 +95,11 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                        uint mt_discovery_degree,
                                        bool atomic_discovery,
                                        BoolObjectClosure* is_alive_non_header,
-                                       bool discovered_list_needs_barrier) :
+                                       bool discovered_list_needs_post_barrier) :
   _discovering_refs(false),
   _enqueuing_is_done(false),
   _is_alive_non_header(is_alive_non_header),
-  _discovered_list_needs_barrier(discovered_list_needs_barrier),
+  _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier),
   _processing_is_mt(mt_processing),
   _next_id(0)
 {
@@ -490,13 +490,13 @@ void DiscoveredListIterator::remove() {
   } else {
     new_next = _next;
   }
-
-  if (UseCompressedOops) {
-    // Remove Reference object from list.
-    oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next);
-  } else {
-    // Remove Reference object from list.
-    oopDesc::store_heap_oop((oop*)_prev_next, new_next);
+  // Remove Reference object from discovered list. Note that G1 does not need a
+  // pre-barrier here because we know the Reference has already been found/marked,
+  // that's how it ended up in the discovered list in the first place.
+  oop_store_raw(_prev_next, new_next);
+  if (_discovered_list_needs_post_barrier && _prev_next != _refs_list.adr_head()) {
+    // Needs post-barrier and this is not the list head (which is not on the heap)
+    oopDesc::bs()->write_ref_field(_prev_next, new_next);
   }
   NOT_PRODUCT(_removed++);
   _refs_list.dec_length(1);
@@ -544,7 +544,7 @@ ReferenceProcessor::process_phase1(DiscoveredList& refs_list,
                                    OopClosure* keep_alive,
                                    VoidClosure* complete_gc) {
   assert(policy != NULL, "Must have a non-NULL policy");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   // Decide which softly reachable refs should be kept alive.
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
@@ -584,7 +584,7 @@ ReferenceProcessor::pp2_work(DiscoveredList& refs_list,
                              BoolObjectClosure* is_alive,
                              OopClosure* keep_alive) {
   assert(discovery_is_atomic(), "Error");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
     DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
@@ -621,7 +621,7 @@ ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list,
                                                   OopClosure* keep_alive,
                                                   VoidClosure* complete_gc) {
   assert(!discovery_is_atomic(), "Error");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
@@ -664,7 +664,7 @@ ReferenceProcessor::process_phase3(DiscoveredList& refs_list,
                                    OopClosure* keep_alive,
                                    VoidClosure* complete_gc) {
   ResourceMark rm;
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.update_discovered();
     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
@@ -782,8 +782,8 @@ private:

 void ReferenceProcessor::set_discovered(oop ref, oop value) {
   java_lang_ref_Reference::set_discovered_raw(ref, value);
-  if (_discovered_list_needs_barrier) {
-    oopDesc::bs()->write_ref_field(ref, value);
+  if (_discovered_list_needs_post_barrier) {
+    oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(ref), value);
   }
 }

@@ -980,7 +980,7 @@ void ReferenceProcessor::clean_up_discovered_references() {

 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
   assert(!discovery_is_atomic(), "Else why call this method?");
-  DiscoveredListIterator iter(refs_list, NULL, NULL);
+  DiscoveredListIterator iter(refs_list, NULL, NULL, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop next = java_lang_ref_Reference::next(iter.obj());
@@ -1076,7 +1076,7 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
   // elided this out for G1, but left in the test for some future
   // collector that might have need for a pre-barrier here, e.g.:-
   // oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
-  assert(!_discovered_list_needs_barrier || UseG1GC,
+  assert(!_discovered_list_needs_post_barrier || UseG1GC,
          "Need to check non-G1 collector: "
          "may need a pre-write-barrier for CAS from NULL below");
   oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
@@ -1087,7 +1087,7 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
     // is necessary.
     refs_list.set_head(obj);
     refs_list.inc_length(1);
-    if (_discovered_list_needs_barrier) {
+    if (_discovered_list_needs_post_barrier) {
       oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
     }

@@ -1240,7 +1240,7 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
   if (_discovery_is_mt) {
     add_to_discovered_list_mt(*list, obj, discovered_addr);
   } else {
-    // If "_discovered_list_needs_barrier", we do write barriers when
+    // If "_discovered_list_needs_post_barrier", we do write barriers when
    // updating the discovered reference list. Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
@@ -1252,10 +1252,10 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
     // pre-value, we can safely elide the pre-barrier here for the case of G1.
     // e.g.:- oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
     assert(discovered == NULL, "control point invariant");
-    assert(!_discovered_list_needs_barrier || UseG1GC,
+    assert(!_discovered_list_needs_post_barrier || UseG1GC,
            "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
     oop_store_raw(discovered_addr, next_discovered);
-    if (_discovered_list_needs_barrier) {
+    if (_discovered_list_needs_post_barrier) {
       oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
     }
     list->set_head(obj);
@@ -1351,7 +1351,7 @@ ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
                                                 OopClosure* keep_alive,
                                                 VoidClosure* complete_gc,
                                                 YieldClosure* yield) {
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop obj = iter.obj();

@@ -99,6 +99,7 @@ private:
   oop                _referent;
   OopClosure*        _keep_alive;
   BoolObjectClosure* _is_alive;
+  bool               _discovered_list_needs_post_barrier;

   DEBUG_ONLY(
   oop _first_seen; // cyclic linked list check
@@ -112,7 +113,8 @@ private:
 public:
   inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                 OopClosure*        keep_alive,
-                                BoolObjectClosure* is_alive):
+                                BoolObjectClosure* is_alive,
+                                bool               discovered_list_needs_post_barrier = false):
     _refs_list(refs_list),
     _prev_next(refs_list.adr_head()),
     _prev(NULL),
@@ -126,7 +128,8 @@ public:
 #endif
     _next(NULL),
     _keep_alive(keep_alive),
-    _is_alive(is_alive)
+    _is_alive(is_alive),
+    _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier)
   { }

   // End Of List.
@@ -228,12 +231,12 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
   bool        _discovery_is_mt;         // true if reference discovery is MT.

   // If true, setting "next" field of a discovered refs list requires
-  // write barrier(s).  (Must be true if used in a collector in which
+  // write post barrier.  (Must be true if used in a collector in which
   // elements of a discovered list may be moved during discovery: for
   // example, a collector like Garbage-First that moves objects during a
   // long-term concurrent marking phase that does weak reference
   // discovery.)
-  bool        _discovered_list_needs_barrier;
+  bool        _discovered_list_needs_post_barrier;

   bool        _enqueuing_is_done;       // true if all weak references enqueued
   bool        _processing_is_mt;        // true during phases when
@@ -380,8 +383,8 @@ class ReferenceProcessor : public CHeapObj<mtGC> {

 protected:
   // Set the 'discovered' field of the given reference to
-  // the given value - emitting barriers depending upon
-  // the value of _discovered_list_needs_barrier.
+  // the given value - emitting post barriers depending upon
+  // the value of _discovered_list_needs_post_barrier.
   void set_discovered(oop ref, oop value);

   // "Preclean" the given discovered reference list
@@ -425,7 +428,7 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
                      bool mt_discovery = false, uint mt_discovery_degree = 1,
                      bool atomic_discovery = true,
                      BoolObjectClosure* is_alive_non_header = NULL,
-                     bool discovered_list_needs_barrier = false);
+                     bool discovered_list_needs_post_barrier = false);

   // RefDiscoveryPolicy values
   enum DiscoveryPolicy {
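The rename from _discovered_list_needs_barrier to _discovered_list_needs_post_barrier stresses that only a post write barrier is required here: the barrier runs after the store of the discovered-list link so a moving collector such as G1 can find the updated field later. A generic card-marking sketch of a post barrier follows; it is illustrative only and does not model the HotSpot barrier-set API.

// Hypothetical card-marking post barrier (not HotSpot code).
#include <cstdint>
#include <cstring>
#include <cstdio>

static const size_t CARD_SHIFT = 9; // 512-byte cards, as in HotSpot
static uint8_t  card_table[1 << 16];
static uintptr_t heap_base;

// Post barrier: runs after the store, hence "post". Marks the card
// covering the written field so the collector rescans it.
inline void write_ref_field_post(void* field) {
  card_table[((uintptr_t)field - heap_base) >> CARD_SHIFT] = 0; // 0 = dirty
}

struct Node { Node* next; };

int main() {
  static Node heap[1024];
  heap_base = (uintptr_t)heap;
  std::memset(card_table, 0xff, sizeof(card_table)); // 0xff = clean

  heap[0].next = &heap[512];           // the reference store
  write_ref_field_post(&heap[0].next); // the post barrier

  std::printf("card 0 dirty: %d\n", card_table[0] == 0);
  return 0;
}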
@@ -4969,8 +4969,10 @@ void TestMetaspaceAux_test();
 void TestMetachunk_test();
 void TestVirtualSpaceNode_test();
 void TestOldFreeSpaceCalculation_test();
+void TestNewSize_test();
 #if INCLUDE_ALL_GCS
 void TestG1BiasedArray_test();
+void TestBufferingOopClosure_test();
 #endif

 void execute_internal_vm_tests() {
@@ -4990,12 +4992,14 @@ void execute_internal_vm_tests() {
   run_unit_test(AltHashing::test_alt_hash());
   run_unit_test(test_loggc_filename());
   run_unit_test(TestOldFreeSpaceCalculation_test());
+  run_unit_test(TestNewSize_test());
 #if INCLUDE_VM_STRUCTS
   run_unit_test(VMStructs::test());
 #endif
 #if INCLUDE_ALL_GCS
   run_unit_test(TestG1BiasedArray_test());
   run_unit_test(HeapRegionRemSet::test_prt());
+  run_unit_test(TestBufferingOopClosure_test());
 #endif
   tty->print_cr("All internal VM tests passed");
 }
@@ -105,7 +105,7 @@ WB_END
 WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
   CollectorPolicy * p = Universe::heap()->collector_policy();
   gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap "
-    SIZE_FORMAT" Maximum heap "SIZE_FORMAT" Min alignment "SIZE_FORMAT" Max alignment "SIZE_FORMAT,
+    SIZE_FORMAT" Maximum heap "SIZE_FORMAT" Space alignment "SIZE_FORMAT" Heap alignment "SIZE_FORMAT,
     p->min_heap_byte_size(), p->initial_heap_byte_size(), p->max_heap_byte_size(),
     p->space_alignment(), p->heap_alignment());
 }
@@ -3818,18 +3818,24 @@ jint Arguments::apply_ergo() {
 }

 jint Arguments::adjust_after_os() {
 #if INCLUDE_ALL_GCS
-  if (UseParallelGC || UseParallelOldGC) {
-    if (UseNUMA) {
+  if (UseNUMA) {
+    if (UseParallelGC || UseParallelOldGC) {
       if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
-        FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
+         FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
       }
-      // For those collectors or operating systems (eg, Windows) that do
-      // not support full UseNUMA, we will map to UseNUMAInterleaving for now
-      UseNUMAInterleaving = true;
     }
+    // UseNUMAInterleaving is set to ON for all collectors and
+    // platforms when UseNUMA is set to ON. NUMA-aware collectors
+    // such as the parallel collector for Linux and Solaris will
+    // interleave old gen and survivor spaces on top of NUMA
+    // allocation policy for the eden space.
+    // Non NUMA-aware collectors such as CMS, G1 and Serial-GC on
+    // all platforms and ParallelGC on Windows will interleave all
+    // of the heap spaces across NUMA nodes.
+    if (FLAG_IS_DEFAULT(UseNUMAInterleaving)) {
+      FLAG_SET_ERGO(bool, UseNUMAInterleaving, true);
+    }
   }
 #endif // INCLUDE_ALL_GCS
   return JNI_OK;
 }

@@ -565,7 +565,7 @@ class CommandLineFlags {
          "Force NUMA optimizations on single-node/UMA systems")             \
                                                                             \
  product(uintx, NUMAChunkResizeWeight, 20,                                  \
-         "Percentage (0-100) used to weigh the current sample when "        \
+         "Percentage (0-100) used to weight the current sample when "       \
          "computing exponentially decaying average for "                    \
          "AdaptiveNUMAChunkSizing")                                         \
                                                                             \
@@ -1505,7 +1505,7 @@ class CommandLineFlags {
          "allocation")                                                      \
                                                                             \
  product(uintx, PLABWeight, 75,                                             \
-         "Percentage (0-100) used to weigh the current sample when "        \
+         "Percentage (0-100) used to weight the current sample when "       \
          "computing exponentially decaying average for ResizePLAB")         \
                                                                             \
  product(bool, ResizePLAB, true,                                            \
@@ -1614,11 +1614,11 @@ class CommandLineFlags {
          "is shifted to the right within the period between young GCs")     \
                                                                             \
  product(uintx, CMSExpAvgFactor, 50,                                        \
-         "Percentage (0-100) used to weigh the current sample when "        \
+         "Percentage (0-100) used to weight the current sample when "       \
          "computing exponential averages for CMS statistics")               \
                                                                             \
  product(uintx, CMS_FLSWeight, 75,                                          \
-         "Percentage (0-100) used to weigh the current sample when "        \
+         "Percentage (0-100) used to weight the current sample when "       \
          "computing exponentially decaying averages for CMS FLS "           \
          "statistics")                                                      \
                                                                             \
@@ -1730,19 +1730,15 @@ class CommandLineFlags {
          "to simulate overflow; a smaller number increases frequency")      \
                                                                             \
  product(uintx, CMSMaxAbortablePrecleanLoops, 0,                            \
-         "(Temporary, subject to experimentation) "                         \
          "Maximum number of abortable preclean iterations, if > 0")         \
                                                                             \
  product(intx, CMSMaxAbortablePrecleanTime, 5000,                           \
-         "(Temporary, subject to experimentation) "                         \
          "Maximum time in abortable preclean (in milliseconds)")            \
                                                                             \
  product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100,               \
-         "(Temporary, subject to experimentation) "                         \
          "Nominal minimum work per abortable preclean iteration")           \
                                                                             \
  manageable(intx, CMSAbortablePrecleanWaitMillis, 100,                      \
-         "(Temporary, subject to experimentation) "                         \
          "Time that we sleep between iterations when not given "            \
          "enough work per iteration")                                       \
                                                                             \
@@ -1958,13 +1954,13 @@ class CommandLineFlags {
          "(other young collectors)")                                        \
                                                                             \
  develop(uintx, PromotionFailureALotInterval, 5,                            \
-         "Total collections between promotion failures alot")               \
+         "Total collections between promotion failures a lot")              \
                                                                             \
  experimental(uintx, WorkStealingSleepMillis, 1,                            \
          "Sleep time when sleep is used for yields")                        \
                                                                             \
  experimental(uintx, WorkStealingYieldsBeforeSleep, 5000,                   \
-         "Number of yields before a sleep is done during workstealing")     \
+         "Number of yields before a sleep is done during work stealing")    \
                                                                             \
  experimental(uintx, WorkStealingHardSpins, 4096,                           \
          "Number of iterations in a spin loop between checks on "           \
@@ -2042,7 +2038,7 @@ class CommandLineFlags {
          "size; deprecated: to be renamed to MaxRAMFraction")               \
                                                                             \
  product(uintx, MinRAMFraction, 2,                                          \
-         "Minimum fraction (1/n) of real memory used for maxmimum heap "    \
+         "Minimum fraction (1/n) of real memory used for maximum heap "     \
          "size on systems with small physical memory size")                 \
                                                                             \
  product(uintx, InitialRAMFraction, 64,                                     \
@@ -337,7 +337,7 @@ void JavaCalls::call_helper(JavaValue* result, methodHandle* m, JavaCallArgument
     // A klass might not be initialized since JavaCall's might be used during the executing of
     // the <clinit>. For example, a Thread.start might start executing on an object that is
     // not fully initialized! (bad Java programming style)
-    assert(holder->is_linked(), "rewritting must have taken place");
+    assert(holder->is_linked(), "rewriting must have taken place");
   }
 #endif

@@ -1197,7 +1197,7 @@ char* os::format_boot_path(const char* format_string,
                            char fileSep,
                            char pathSep) {
    assert((fileSep == '/' && pathSep == ':') ||
-          (fileSep == '\\' && pathSep == ';'), "unexpected seperator chars");
+          (fileSep == '\\' && pathSep == ';'), "unexpected separator chars");

    // Scan the format string to determine the length of the actual
    // boot classpath, and handle platform dependencies as well.
@@ -215,9 +215,9 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
            noaccess_prefix == _alignment, "noaccess prefix wrong");

   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
-         "area must be distinguisable from marks for mark-sweep");
+         "area must be distinguishable from marks for mark-sweep");
   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
-         "area must be distinguisable from marks for mark-sweep");
+         "area must be distinguishable from marks for mark-sweep");
 }

@@ -107,7 +107,7 @@ void BitMap::par_put_range_within_word(idx_t beg, idx_t end, bool value) {
     while (true) {
       intptr_t res = Atomic::cmpxchg_ptr(nw, pw, w);
       if (res == w) break;
-      w  = *pw;
+      w  = res;
       nw = value ? (w | ~mr) : (w & mr);
     }
   }
hotspot/test/gc/TestVerifySilently.java (new file, 84 lines)
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test TestVerifySilently.java
+ * @key gc
+ * @bug 8032771
+ * @summary Test silent verification.
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+import java.util.ArrayList;
+import java.util.Collections;
+
+class RunSystemGC {
+  public static void main(String args[]) throws Exception {
+    System.gc();
+  }
+}
+
+
+public class TestVerifySilently {
+  private static String[] getTestJavaOpts() {
+    String testVmOptsStr = System.getProperty("test.java.opts");
+    if (!testVmOptsStr.isEmpty()) {
+      return testVmOptsStr.split(" ");
+    } else {
+      return new String[] {};
+    }
+  }
+
+  private static OutputAnalyzer runTest(boolean verifySilently) throws Exception {
+    ArrayList<String> vmOpts = new ArrayList();
+
+    Collections.addAll(vmOpts, getTestJavaOpts());
+    Collections.addAll(vmOpts, new String[] {"-XX:+UnlockDiagnosticVMOptions",
+                                             "-XX:+VerifyDuringStartup",
+                                             "-XX:+VerifyBeforeGC",
+                                             "-XX:+VerifyAfterGC",
+                                             "-XX:" + (verifySilently ? "+":"-") + "VerifySilently",
+                                             RunSystemGC.class.getName()});
+    ProcessBuilder pb =
+      ProcessTools.createJavaProcessBuilder(vmOpts.toArray(new String[vmOpts.size()]));
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    System.out.println("Output:\n" + output.getOutput());
+    return output;
+  }
+
+
+  public static void main(String args[]) throws Exception {
+
+    OutputAnalyzer output;
+
+    output = runTest(false);
+    output.shouldContain("[Verifying");
+    output.shouldHaveExitValue(0);
+
+    output = runTest(true);
+    output.shouldNotContain("[Verifying");
+    output.shouldHaveExitValue(0);
+  }
+}
@@ -41,8 +41,8 @@ final class MinInitialMaxValues {
   public long initialHeapSize;
   public long maxHeapSize;

-  public long minAlignment;
-  public long maxAlignment;
+  public long spaceAlignment;
+  public long heapAlignment;
 }

 class TestMaxHeapSizeTools {
@@ -192,7 +192,7 @@ class TestMaxHeapSizeTools {
     // Unfortunately there is no other way to retrieve the minimum heap size and
     // the alignments.

-    Matcher m = Pattern.compile("Minimum heap \\d+ Initial heap \\d+ Maximum heap \\d+ Min alignment \\d+ Max alignment \\d+").
+    Matcher m = Pattern.compile("Minimum heap \\d+ Initial heap \\d+ Maximum heap \\d+ Space alignment \\d+ Heap alignment \\d+").
       matcher(output.getStdout());
     if (!m.find()) {
       throw new RuntimeException("Could not find heap size string.");
@@ -204,8 +204,8 @@ class TestMaxHeapSizeTools {
     val.minHeapSize = valueAfter(match, "Minimum heap ");
     val.initialHeapSize = valueAfter(match, "Initial heap ");
     val.maxHeapSize = valueAfter(match, "Maximum heap ");
-    val.minAlignment = valueAfter(match, "Min alignment ");
-    val.maxAlignment = valueAfter(match, "Max alignment ");
+    val.spaceAlignment = valueAfter(match, "Space alignment ");
+    val.heapAlignment = valueAfter(match, "Heap alignment ");
   }

   /**
@@ -218,12 +218,12 @@ class TestMaxHeapSizeTools {
     MinInitialMaxValues v = new MinInitialMaxValues();
     getMinInitialMaxHeap(args, v);

-    if ((expectedMin != -1) && (align_up(expectedMin, v.minAlignment) != v.minHeapSize)) {
+    if ((expectedMin != -1) && (align_up(expectedMin, v.heapAlignment) != v.minHeapSize)) {
       throw new RuntimeException("Actual minimum heap size of " + v.minHeapSize +
         " differs from expected minimum heap size of " + expectedMin);
     }

-    if ((expectedInitial != -1) && (align_up(expectedInitial, v.minAlignment) != v.initialHeapSize)) {
+    if ((expectedInitial != -1) && (align_up(expectedInitial, v.heapAlignment) != v.initialHeapSize)) {
       throw new RuntimeException("Actual initial heap size of " + v.initialHeapSize +
         " differs from expected initial heap size of " + expectedInitial);
     }
@@ -247,7 +247,7 @@ class TestMaxHeapSizeTools {
     MinInitialMaxValues v = new MinInitialMaxValues();
     getMinInitialMaxHeap(new String[] { gcflag, "-XX:MaxHeapSize=" + maxHeapsize + "M" }, v);

-    long expectedHeapSize = align_up(maxHeapsize * K * K, v.maxAlignment);
+    long expectedHeapSize = align_up(maxHeapsize * K * K, v.heapAlignment);
     long actualHeapSize = v.maxHeapSize;

     if (actualHeapSize > expectedHeapSize) {