Max Ockner 2016-03-14 20:20:18 +01:00
commit 32165d2889
37 changed files with 281 additions and 365 deletions

View File

@ -30,6 +30,7 @@ import java.util.Arrays;
import sun.jvm.hotspot.tools.JStack;
import sun.jvm.hotspot.tools.JMap;
import sun.jvm.hotspot.tools.JInfo;
import sun.jvm.hotspot.tools.JSnap;
public class SALauncher {
@ -39,6 +40,7 @@ public class SALauncher {
System.out.println(" jstack --help\tto get more information");
System.out.println(" jmap --help\tto get more information");
System.out.println(" jinfo --help\tto get more information");
System.out.println(" jsnap --help\tto get more information");
return false;
}
@ -85,6 +87,11 @@ public class SALauncher {
return commonHelp();
}
private static boolean jsnapHelp() {
System.out.println(" <no option>\tdump performance counters");
return commonHelp();
}
private static boolean toolHelp(String toolName) {
if (toolName.equals("jstack")) {
return jstackHelp();
@ -95,6 +102,9 @@ public class SALauncher {
if (toolName.equals("jmap")) {
return jmapHelp();
}
if (toolName.equals("jsnap")) {
return jsnapHelp();
}
if (toolName.equals("hsdb") || toolName.equals("clhsdb")) {
return commonHelp();
}
@ -308,6 +318,40 @@ public class SALauncher {
JInfo.main(newArgs.toArray(new String[newArgs.size()]));
}
private static void runJSNAP(String[] oldArgs) {
SAGetopt sg = new SAGetopt(oldArgs);
String[] longOpts = {"exe=", "core=", "pid="};
ArrayList<String> newArgs = new ArrayList<String>();
String exeORpid = null;
String core = null;
String s = null;
while((s = sg.next(null, longOpts)) != null) {
if (s.equals("exe")) {
exeORpid = sg.getOptarg();
continue;
}
if (s.equals("core")) {
core = sg.getOptarg();
continue;
}
if (s.equals("pid")) {
exeORpid = sg.getOptarg();
continue;
}
}
if (exeORpid != null) {
newArgs.add(exeORpid);
if (core != null) {
newArgs.add(core);
}
}
JSnap.main(newArgs.toArray(new String[newArgs.size()]));
}
public static void main(String[] args) {
// Provide help
if (args.length == 0) {
@ -355,5 +399,10 @@ public class SALauncher {
runJINFO(oldArgs);
return;
}
if (args[0].equals("jsnap")) {
runJSNAP(oldArgs);
return;
}
}
}

View File

@ -81,6 +81,12 @@ public class CompactHashTable extends VMObject {
}
public Symbol probe(byte[] name, long hash) {
if (bucketCount() == 0) {
// The table is invalid, so don't attempt the lookup
return null;
}
long symOffset;
Symbol sym;
Address baseAddress = baseAddressField.getValue(addr);

View File

@ -593,15 +593,7 @@ void os::Linux::libpthread_init() {
// _expand_stack_to() assumes its frame size is less than page size, which
// should always be true if the function is not inlined.
#if __GNUC__ < 3 // gcc 2.x does not support noinline attribute
#define NOINLINE
#else
#define NOINLINE __attribute__ ((noinline))
#endif
static void _expand_stack_to(address bottom) NOINLINE;
static void _expand_stack_to(address bottom) {
static void NOINLINE _expand_stack_to(address bottom) {
address sp;
size_t size;
volatile char *p;

View File

@ -1687,7 +1687,7 @@ class BacktraceIterator : public StackObj {
public:
BacktraceIterator(objArrayHandle result, Thread* thread) {
init(result, thread);
assert(_methods->length() == java_lang_Throwable::trace_chunk_size, "lengths don't match");
assert(_methods.is_null() || _methods->length() == java_lang_Throwable::trace_chunk_size, "lengths don't match");
}
BacktraceElement next(Thread* thread) {

View File

@ -1412,7 +1412,7 @@ void CMSCollector::acquire_control_and_collect(bool full,
if (_foregroundGCShouldWait) {
// We are going to be waiting for action for the CMS thread;
// it had better not be gone (for instance at shutdown)!
assert(ConcurrentMarkSweepThread::cmst() != NULL,
assert(ConcurrentMarkSweepThread::cmst() != NULL && !ConcurrentMarkSweepThread::cmst()->has_terminated(),
"CMS thread must be running");
// Wait here until the background collector gives us the go-ahead
ConcurrentMarkSweepThread::clear_CMS_flag(
@ -2286,17 +2286,16 @@ bool CMSCollector::verify_after_remark() {
// all marking, then check if the new marks-vector is
// a subset of the CMS marks-vector.
verify_after_remark_work_1();
} else if (CMSRemarkVerifyVariant == 2) {
} else {
guarantee(CMSRemarkVerifyVariant == 2, "Range checking for CMSRemarkVerifyVariant should guarantee 1 or 2");
// In this second variant of verification, we flag an error
// (i.e. an object reachable in the new marks-vector not reachable
// in the CMS marks-vector) immediately, also indicating the
// identity of an object (A) that references the unmarked object (B) --
// presumably, a mutation to A failed to be picked up by preclean/remark?
verify_after_remark_work_2();
} else {
warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
CMSRemarkVerifyVariant);
}
return true;
}
@ -3649,7 +3648,7 @@ void CMSCollector::abortable_preclean() {
// XXX FIX ME!!! YSR
size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
while (!(should_abort_preclean() ||
ConcurrentMarkSweepThread::should_terminate())) {
ConcurrentMarkSweepThread::cmst()->should_terminate())) {
workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
cumworkdone += workdone;
loops++;

View File

@ -42,7 +42,6 @@
ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::_cmst = NULL;
CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
bool ConcurrentMarkSweepThread::_should_terminate = false;
int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
@ -62,88 +61,60 @@ ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
set_name("CMS Main Thread");
if (os::create_thread(this, os::cgc_thread)) {
// An old comment here said: "Priority should be just less
// than that of VMThread". Since the VMThread runs at
// NearMaxPriority, the old comment was inaccurate, but
// changing the default priority to NearMaxPriority-1
// could change current behavior, so the default of
// NearMaxPriority stays in place.
//
// Note that there's a possibility of the VMThread
// starving if UseCriticalCMSThreadPriority is on.
// That won't happen on Solaris for various reasons,
// but may well happen on non-Solaris platforms.
int native_prio;
if (UseCriticalCMSThreadPriority) {
native_prio = os::java_to_os_priority[CriticalPriority];
} else {
native_prio = os::java_to_os_priority[NearMaxPriority];
}
os::set_native_priority(this, native_prio);
// An old comment here said: "Priority should be just less
// than that of VMThread". Since the VMThread runs at
// NearMaxPriority, the old comment was inaccurate, but
// changing the default priority to NearMaxPriority-1
// could change current behavior, so the default of
// NearMaxPriority stays in place.
//
// Note that there's a possibility of the VMThread
// starving if UseCriticalCMSThreadPriority is on.
// That won't happen on Solaris for various reasons,
// but may well happen on non-Solaris platforms.
create_and_start(UseCriticalCMSThreadPriority ? CriticalPriority : NearMaxPriority);
if (!DisableStartThread) {
os::start_thread(this);
}
}
_sltMonitor = SLT_lock;
}
void ConcurrentMarkSweepThread::run() {
void ConcurrentMarkSweepThread::run_service() {
assert(this == cmst(), "just checking");
initialize_in_thread();
// From this time Thread::current() should be working.
assert(this == Thread::current(), "just checking");
if (BindCMSThreadToCPU && !os::bind_to_processor(CPUForCMSThread)) {
log_warning(gc)("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread);
}
// Wait until Universe::is_fully_initialized()
{
CMSLoopCountWarn loopX("CMS::run", "waiting for "
"Universe::is_fully_initialized()", 2);
MutexLockerEx x(CGC_lock, true);
set_CMS_flag(CMS_cms_wants_token);
// Wait until Universe is initialized and all initialization is completed.
while (!is_init_completed() && !Universe::is_fully_initialized() &&
!_should_terminate) {
CGC_lock->wait(true, 200);
loopX.tick();
}
assert(is_init_completed() && Universe::is_fully_initialized(), "ConcurrentGCThread::run() should have waited for this.");
// Wait until the surrogate locker thread that will do
// pending list locking on our behalf has been created.
// We cannot start the SLT thread ourselves since we need
// to be a JavaThread to do so.
CMSLoopCountWarn loopY("CMS::run", "waiting for SLT installation", 2);
while (_slt == NULL && !_should_terminate) {
while (_slt == NULL && !should_terminate()) {
CGC_lock->wait(true, 200);
loopY.tick();
}
clear_CMS_flag(CMS_cms_wants_token);
}
while (!_should_terminate) {
while (!should_terminate()) {
sleepBeforeNextCycle();
if (_should_terminate) break;
if (should_terminate()) break;
GCIdMark gc_id_mark;
GCCause::Cause cause = _collector->_full_gc_requested ?
_collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
_collector->collect_in_background(cause);
}
assert(_should_terminate, "just checking");
// Check that the state of any protocol for synchronization
// between background (CMS) and foreground collector is "clean"
// (i.e. will not potentially block the foreground collector,
// requiring action by us).
verify_ok_to_terminate();
// Signal that it is terminated
{
MutexLockerEx mu(Terminator_lock,
Mutex::_no_safepoint_check_flag);
assert(_cmst == this, "Weird!");
_cmst = NULL;
Terminator_lock->notify();
}
}
#ifndef PRODUCT
@ -157,39 +128,24 @@ void ConcurrentMarkSweepThread::verify_ok_to_terminate() const {
// create and start a new ConcurrentMarkSweep Thread for given CMS generation
ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::start(CMSCollector* collector) {
if (!_should_terminate) {
assert(cmst() == NULL, "start() called twice?");
ConcurrentMarkSweepThread* th = new ConcurrentMarkSweepThread(collector);
assert(cmst() == th, "Where did the just-created CMS thread go?");
return th;
}
return NULL;
guarantee(_cmst == NULL, "start() called twice!");
ConcurrentMarkSweepThread* th = new ConcurrentMarkSweepThread(collector);
assert(_cmst == th, "Where did the just-created CMS thread go?");
return th;
}
void ConcurrentMarkSweepThread::stop() {
// it is ok to take late safepoints here, if needed
{
MutexLockerEx x(Terminator_lock);
_should_terminate = true;
}
{ // Now post a notify on CGC_lock so as to nudge
// CMS thread(s) that might be slumbering in
// sleepBeforeNextCycle.
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
CGC_lock->notify_all();
}
{ // Now wait until (all) CMS thread(s) have exited
MutexLockerEx x(Terminator_lock);
while(cmst() != NULL) {
Terminator_lock->wait();
}
}
void ConcurrentMarkSweepThread::stop_service() {
// Now post a notify on CGC_lock so as to nudge
// CMS thread(s) that might be slumbering in
// sleepBeforeNextCycle.
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
CGC_lock->notify_all();
}
void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) {
assert(tc != NULL, "Null ThreadClosure");
if (_cmst != NULL) {
tc->do_thread(_cmst);
if (cmst() != NULL && !cmst()->has_terminated()) {
tc->do_thread(cmst());
}
assert(Universe::is_fully_initialized(),
"Called too early, make sure heap is fully initialized");
@ -202,8 +158,8 @@ void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) {
}
void ConcurrentMarkSweepThread::print_all_on(outputStream* st) {
if (_cmst != NULL) {
_cmst->print_on(st);
if (cmst() != NULL && !cmst()->has_terminated()) {
cmst()->print_on(st);
st->cr();
}
if (_collector != NULL) {
@ -278,7 +234,7 @@ void ConcurrentMarkSweepThread::desynchronize(bool is_cms_thread) {
void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
MutexLockerEx x(CGC_lock,
Mutex::_no_safepoint_check_flag);
if (_should_terminate || _collector->_full_gc_requested) {
if (should_terminate() || _collector->_full_gc_requested) {
return;
}
set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
@ -307,7 +263,7 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
unsigned int loop_count = 0;
while(!_should_terminate) {
while(!should_terminate()) {
double now_time = os::elapsedTime();
long wait_time_millis;
@ -327,7 +283,7 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
{
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
if (_should_terminate || _collector->_full_gc_requested) {
if (should_terminate() || _collector->_full_gc_requested) {
return;
}
set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
@ -364,7 +320,7 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
}
void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
while (!_should_terminate) {
while (!should_terminate()) {
if(CMSWaitDuration >= 0) {
// Wait until the next synchronous GC, a concurrent full gc
// request or a timeout, whichever is earlier.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,8 +37,6 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
friend class VMStructs;
friend class ConcurrentMarkSweepGeneration; // XXX should remove friendship
friend class CMSCollector;
public:
virtual void run();
private:
static ConcurrentMarkSweepThread* _cmst;
@ -47,8 +45,6 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
static SurrogateLockerThread::SLT_msg_type _sltBuffer;
static Monitor* _sltMonitor;
static bool _should_terminate;
enum CMS_flag_type {
CMS_nil = NoBits,
CMS_cms_wants_token = nth_bit(0),
@ -72,6 +68,9 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
// debugging
void verify_ok_to_terminate() const PRODUCT_RETURN;
void run_service();
void stop_service();
public:
// Constructor
ConcurrentMarkSweepThread(CMSCollector* collector);
@ -91,8 +90,6 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
// Create and start the CMS Thread, or stop it on shutdown
static ConcurrentMarkSweepThread* start(CMSCollector* collector);
static void stop();
static bool should_terminate() { return _should_terminate; }
// Synchronization using CMS token
static void synchronize(bool is_cms_thread);

View File

@ -78,7 +78,7 @@ void ConcurrentG1RefineThread::initialize() {
void ConcurrentG1RefineThread::wait_for_completed_buffers() {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
while (!_should_terminate && !is_active()) {
while (!should_terminate() && !is_active()) {
_monitor->wait(Mutex::_no_safepoint_check_flag);
}
}
@ -109,22 +109,13 @@ void ConcurrentG1RefineThread::deactivate() {
}
}
void ConcurrentG1RefineThread::run() {
initialize_in_thread();
wait_for_universe_init();
run_service();
terminate();
}
void ConcurrentG1RefineThread::run_service() {
_vtime_start = os::elapsedVTime();
while (!_should_terminate) {
while (!should_terminate()) {
// Wait for work
wait_for_completed_buffers();
if (_should_terminate) {
if (should_terminate()) {
break;
}
@ -168,23 +159,6 @@ void ConcurrentG1RefineThread::run_service() {
log_debug(gc, refine)("Stopping %d", _worker_id);
}
void ConcurrentG1RefineThread::stop() {
// it is ok to take late safepoints here, if needed
{
MutexLockerEx mu(Terminator_lock);
_should_terminate = true;
}
stop_service();
{
MutexLockerEx mu(Terminator_lock);
while (!_has_terminated) {
Terminator_lock->wait();
}
}
}
void ConcurrentG1RefineThread::stop_service() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
_monitor->notify();

View File

@ -72,7 +72,6 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
void stop_service();
public:
virtual void run();
// Constructor
ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
CardTableEntryClosure* refine_closure,
@ -84,9 +83,6 @@ public:
double vtime_accum() { return _vtime_accum; }
ConcurrentG1Refine* cg1r() { return _cg1r; }
// shutdown
void stop();
};
#endif // SHARE_VM_GC_G1_CONCURRENTG1REFINETHREAD_HPP

View File

@ -105,25 +105,16 @@ class GCConcPhaseTimer : StackObj {
}
};
void ConcurrentMarkThread::run() {
initialize_in_thread();
wait_for_universe_init();
run_service();
terminate();
}
void ConcurrentMarkThread::run_service() {
_vtime_start = os::elapsedVTime();
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CollectorPolicy* g1_policy = g1h->g1_policy();
while (!_should_terminate) {
while (!should_terminate()) {
// wait until started is set.
sleepBeforeNextCycle();
if (_should_terminate) {
if (should_terminate()) {
_cm->root_regions()->cancel_scan();
break;
}
@ -293,22 +284,6 @@ void ConcurrentMarkThread::run_service() {
}
}
void ConcurrentMarkThread::stop() {
{
MutexLockerEx ml(Terminator_lock);
_should_terminate = true;
}
stop_service();
{
MutexLockerEx ml(Terminator_lock);
while (!_has_terminated) {
Terminator_lock->wait();
}
}
}
void ConcurrentMarkThread::stop_service() {
MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
CGC_lock->notify_all();
@ -320,7 +295,7 @@ void ConcurrentMarkThread::sleepBeforeNextCycle() {
assert(!in_progress(), "should have been cleared");
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
while (!started() && !_should_terminate) {
while (!started() && !should_terminate()) {
CGC_lock->wait(Mutex::_no_safepoint_check_flag);
}

View File

@ -38,13 +38,8 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
double _vtime_start; // Initial virtual time.
double _vtime_accum; // Accumulated virtual time.
double _vtime_mark_accum;
public:
virtual void run();
private:
G1ConcurrentMark* _cm;
enum State {
@ -93,9 +88,6 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
// as the CM thread might take some time to wake up before noticing
// that started() is set and set in_progress().
bool during_cycle() { return !idle(); }
// shutdown
void stop();
};
#endif // SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP

View File

@ -1229,6 +1229,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
ResourceMark rm;
print_heap_before_gc();
print_heap_regions();
trace_heap_before_gc(gc_tracer);
size_t metadata_prev_used = MetaspaceAux::used_bytes();
@ -1447,6 +1448,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
heap_transition.print();
print_heap_after_gc();
print_heap_regions();
trace_heap_after_gc(gc_tracer);
post_full_gc_dump(gc_timer);
@ -2718,6 +2720,14 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
return false; // keep some compilers happy
}
void G1CollectedHeap::print_heap_regions() const {
LogHandle(gc, heap, region) log;
if (log.is_trace()) {
ResourceMark rm;
print_regions_on(log.trace_stream());
}
}
void G1CollectedHeap::print_on(outputStream* st) const {
st->print(" %-20s", "garbage-first heap");
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
@ -2738,11 +2748,7 @@ void G1CollectedHeap::print_on(outputStream* st) const {
MetaspaceAux::print_on(st);
}
void G1CollectedHeap::print_extended_on(outputStream* st) const {
print_on(st);
// Print the per-region information.
st->cr();
void G1CollectedHeap::print_regions_on(outputStream* st) const {
st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
"HS=humongous(starts), HC=humongous(continues), "
"CS=collection set, F=free, A=archive, TS=gc time stamp, "
@ -2752,6 +2758,13 @@ void G1CollectedHeap::print_extended_on(outputStream* st) const {
heap_region_iterate(&blk);
}
void G1CollectedHeap::print_extended_on(outputStream* st) const {
print_on(st);
// Print the per-region information.
print_regions_on(st);
}
void G1CollectedHeap::print_on_error(outputStream* st) const {
this->CollectedHeap::print_on_error(st);
@ -3203,6 +3216,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
wait_for_root_region_scanning();
print_heap_before_gc();
print_heap_regions();
trace_heap_before_gc(_gc_tracer_stw);
_verifier->verify_region_sets_optional();
@ -3535,6 +3549,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
print_heap_after_gc();
print_heap_regions();
trace_heap_after_gc(_gc_tracer_stw);
// We must call G1MonitoringSupport::update_sizes() in the same scoping level

View File

@ -1470,7 +1470,11 @@ public:
G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
// Printing
private:
void print_heap_regions() const;
void print_regions_on(outputStream* st) const;
public:
virtual void print_on(outputStream* st) const;
virtual void print_extended_on(outputStream* st) const;
virtual void print_on_error(outputStream* st) const;

View File

@ -180,18 +180,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
// First make sure that, if either parameter is set, its value is
// reasonable.
if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
if (MaxGCPauseMillis < 1) {
vm_exit_during_initialization("MaxGCPauseMillis should be "
"greater than 0");
}
}
if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
if (GCPauseIntervalMillis < 1) {
vm_exit_during_initialization("GCPauseIntervalMillis should be "
"greater than 0");
}
}
guarantee(MaxGCPauseMillis >= 1, "Range checking for MaxGCPauseMillis should guarantee that value is >= 1");
// Then, if the pause time target parameter was not set, set it to
// the default value.
@ -213,16 +202,8 @@ G1CollectorPolicy::G1CollectorPolicy() :
if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
}
// Finally, make sure that the two parameters are consistent.
if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
char buffer[256];
jio_snprintf(buffer, 256,
"MaxGCPauseMillis (%u) should be less than "
"GCPauseIntervalMillis (%u)",
MaxGCPauseMillis, GCPauseIntervalMillis);
vm_exit_during_initialization(buffer);
}
guarantee(GCPauseIntervalMillis >= 1, "Constraint for GCPauseIntervalMillis should guarantee that value is >= 1");
guarantee(GCPauseIntervalMillis > MaxGCPauseMillis, "Constraint for GCPauseIntervalMillis should guarantee that GCPauseIntervalMillis > MaxGCPauseMillis");
double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
double time_slice = (double) GCPauseIntervalMillis / 1000.0;
@ -238,14 +219,8 @@ G1CollectorPolicy::G1CollectorPolicy() :
"if a user set it to 0");
_gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
uintx reserve_perc = G1ReservePercent;
// Put an artificial ceiling on this so that it's not set to a silly value.
if (reserve_perc > 50) {
reserve_perc = 50;
warning("G1ReservePercent is set to a value that is too large, "
"it's been updated to " UINTX_FORMAT, reserve_perc);
}
_reserve_factor = (double) reserve_perc / 100.0;
guarantee(G1ReservePercent <= 50, "Range checking should not allow values over 50.");
_reserve_factor = (double) G1ReservePercent / 100.0;
// This will be set when the heap is expanded
// for the first time during initialization.
_reserve_regions = 0;
@ -287,9 +262,8 @@ void G1CollectorPolicy::initialize_flags() {
FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
}
if (SurvivorRatio < 1) {
vm_exit_during_initialization("Invalid survivor ratio specified");
}
guarantee(SurvivorRatio >= 1, "Range checking for SurvivorRatio should guarantee that value is >= 1");
CollectorPolicy::initialize_flags();
_young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}
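
The guarantee() calls here replace explicit vm_exit_during_initialization() checks: the command-line flag constraint machinery is now expected to reject out-of-range values before this code runs, so these become assertions on an upstream invariant rather than user-facing errors. A minimal standalone sketch of the idiom (the guarantee macro and flag variables below are simplified stand-ins, not HotSpot's definitions):

#include <cstdio>
#include <cstdlib>

#define guarantee(cond, msg)                                          \
  do {                                                                \
    if (!(cond)) {                                                    \
      std::fprintf(stderr, "guarantee(%s) failed: %s\n", #cond, msg); \
      std::abort();                                                   \
    }                                                                 \
  } while (0)

static unsigned MaxGCPauseMillis      = 200;  // assumed already range-checked upstream
static unsigned GCPauseIntervalMillis = 201;  // assumed already range-checked upstream

int main() {
  guarantee(MaxGCPauseMillis >= 1,
            "flag constraint should have enforced >= 1");
  guarantee(GCPauseIntervalMillis > MaxGCPauseMillis,
            "flag constraint should have enforced interval > pause");
  std::puts("flag invariants hold");
  return 0;
}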

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,7 @@ void G1StringDedup::initialize() {
void G1StringDedup::stop() {
assert(is_enabled(), "String deduplication not enabled");
G1StringDedupThread::stop();
G1StringDedupThread::thread()->stop();
}
bool G1StringDedup::is_candidate_from_mark(oop obj) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -81,11 +81,9 @@ void G1StringDedupThread::deduplicate_shared_strings(G1StringDedupStat& stat) {
StringTable::shared_oops_do(&sharedStringDedup);
}
void G1StringDedupThread::run() {
void G1StringDedupThread::run_service() {
G1StringDedupStat total_stat;
initialize_in_thread();
wait_for_universe_init();
deduplicate_shared_strings(total_stat);
// Main loop
@ -96,7 +94,7 @@ void G1StringDedupThread::run() {
// Wait for the queue to become non-empty
G1StringDedupQueue::wait();
if (_should_terminate) {
if (should_terminate()) {
break;
}
@ -133,23 +131,10 @@ void G1StringDedupThread::run() {
}
}
terminate();
}
void G1StringDedupThread::stop() {
{
MonitorLockerEx ml(Terminator_lock);
_thread->_should_terminate = true;
}
void G1StringDedupThread::stop_service() {
G1StringDedupQueue::cancel_wait();
{
MonitorLockerEx ml(Terminator_lock);
while (!_thread->_has_terminated) {
ml.wait();
}
}
}
void G1StringDedupThread::print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,14 +45,14 @@ private:
void print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
void run_service();
void stop_service();
public:
static void create();
static void stop();
static G1StringDedupThread* thread();
virtual void run();
void deduplicate_shared_strings(G1StringDedupStat& stat);
};

View File

@ -32,32 +32,6 @@
#include "gc/g1/suspendibleThreadSet.hpp"
#include "runtime/mutexLocker.hpp"
void G1YoungRemSetSamplingThread::run() {
initialize_in_thread();
wait_for_universe_init();
run_service();
terminate();
}
void G1YoungRemSetSamplingThread::stop() {
// it is ok to take late safepoints here, if needed
{
MutexLockerEx mu(Terminator_lock);
_should_terminate = true;
}
stop_service();
{
MutexLockerEx mu(Terminator_lock);
while (!_has_terminated) {
Terminator_lock->wait();
}
}
}
G1YoungRemSetSamplingThread::G1YoungRemSetSamplingThread() :
ConcurrentGCThread(),
_monitor(Mutex::nonleaf,
@ -70,7 +44,7 @@ G1YoungRemSetSamplingThread::G1YoungRemSetSamplingThread() :
void G1YoungRemSetSamplingThread::sleep_before_next_cycle() {
MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag);
if (!_should_terminate) {
if (!should_terminate()) {
uintx waitms = G1ConcRefinementServiceIntervalMillis; // 300, really should be?
_monitor.wait(Mutex::_no_safepoint_check_flag, waitms);
}
@ -79,7 +53,7 @@ void G1YoungRemSetSamplingThread::sleep_before_next_cycle() {
void G1YoungRemSetSamplingThread::run_service() {
double vtime_start = os::elapsedVTime();
while (!_should_terminate) {
while (!should_terminate()) {
sample_young_list_rs_lengths();
if (os::supports_vtime()) {

View File

@ -55,9 +55,6 @@ private:
public:
G1YoungRemSetSamplingThread();
double vtime_accum() { return _vtime_accum; }
virtual void run();
void stop();
};
#endif // SHARE_VM_GC_G1_G1YOUNGREMSETSAMPLINGTHREAD_HPP

View File

@ -96,6 +96,9 @@ void CollectorPolicy::initialize_flags() {
}
// Check heap parameter properties
if (MaxHeapSize < 2 * M) {
vm_exit_during_initialization("Too small maximum heap");
}
if (InitialHeapSize < M) {
vm_exit_during_initialization("Too small initial heap");
}

View File

@ -37,12 +37,12 @@ ConcurrentGCThread::ConcurrentGCThread() :
_should_terminate(false), _has_terminated(false) {
};
void ConcurrentGCThread::create_and_start() {
void ConcurrentGCThread::create_and_start(ThreadPriority prio) {
if (os::create_thread(this, os::cgc_thread)) {
// XXX: need to set this to low priority
// unless "aggressive mode" set; priority
// should be just less than that of VMThread.
os::set_priority(this, NearMaxPriority);
os::set_priority(this, prio);
if (!_should_terminate && !DisableStartThread) {
os::start_thread(this);
}
@ -75,6 +75,34 @@ void ConcurrentGCThread::terminate() {
}
}
void ConcurrentGCThread::run() {
initialize_in_thread();
wait_for_universe_init();
run_service();
terminate();
}
void ConcurrentGCThread::stop() {
// it is ok to take late safepoints here, if needed
{
MutexLockerEx mu(Terminator_lock);
assert(!_has_terminated, "stop should only be called once");
assert(!_should_terminate, "stop should only be called once");
_should_terminate = true;
}
stop_service();
{
MutexLockerEx mu(Terminator_lock);
while (!_has_terminated) {
Terminator_lock->wait();
}
}
}
static void _sltLoop(JavaThread* thread, TRAPS) {
SurrogateLockerThread* slt = (SurrogateLockerThread*)thread;
slt->loop();

View File

@ -31,13 +31,9 @@
class ConcurrentGCThread: public NamedThread {
friend class VMStructs;
protected:
bool volatile _should_terminate;
bool _has_terminated;
// Create and start the thread (setting its priority high.)
void create_and_start();
// Do initialization steps in the thread: record stack base and size,
// init thread local storage, set JNI handle block.
void initialize_in_thread();
@ -49,11 +45,29 @@ protected:
// concurrent work.
void terminate();
protected:
// Create and start the thread (setting its priority.)
void create_and_start(ThreadPriority prio = NearMaxPriority);
// Do the specific GC work. Called by run() after initialization is complete.
virtual void run_service() = 0;
// Shut down the specific GC work. Called by stop() as part of the termination protocol.
virtual void stop_service() = 0;
public:
ConcurrentGCThread();
// Tester
bool is_ConcurrentGC_thread() const { return true; }
virtual void run();
// shutdown following termination protocol
virtual void stop();
bool should_terminate() { return _should_terminate; }
bool has_terminated() { return _has_terminated; }
};
// The SurrogateLockerThread is used by concurrent GC threads for
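
Taken together, the two hunks above centralize the thread lifecycle: run() wraps the subclass's run_service() between initialization and terminate(), while stop() sets _should_terminate, calls the subclass's stop_service() to wake a slumbering worker, and then blocks on Terminator_lock until _has_terminated. A self-contained sketch of the same template-method shape, with std::thread primitives standing in for HotSpot's os and Mutex layers (all names below are simplified stand-ins):

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

class ConcurrentGCThreadSketch {
  std::atomic<bool> _should_terminate{false};
  bool _has_terminated = false;
  std::mutex _terminator_lock;               // stands in for Terminator_lock
  std::condition_variable _terminator_cv;
  std::thread _thread;

  void run() {                               // fixed lifecycle, as in the base run()
    run_service();                           // the subclass's concurrent work
    std::lock_guard<std::mutex> g(_terminator_lock);
    _has_terminated = true;                  // the terminate() step
    _terminator_cv.notify_all();
  }

protected:
  virtual void run_service() = 0;            // do the specific GC work
  virtual void stop_service() = 0;           // nudge the loop so it sees the flag

public:
  virtual ~ConcurrentGCThreadSketch() { if (_thread.joinable()) _thread.join(); }
  bool should_terminate() const { return _should_terminate.load(); }
  void create_and_start() { _thread = std::thread([this] { run(); }); }

  void stop() {                              // the hoisted termination protocol
    _should_terminate = true;
    stop_service();                          // wake a worker slumbering in a wait
    std::unique_lock<std::mutex> g(_terminator_lock);
    _terminator_cv.wait(g, [this] { return _has_terminated; });
  }
};

// A minimal "service" thread in the new style: only the two hooks are defined.
class SamplerThreadSketch : public ConcurrentGCThreadSketch {
  std::mutex _m;
  std::condition_variable _cv;
  void run_service() override {
    std::unique_lock<std::mutex> g(_m);
    while (!should_terminate()) {            // poll via the accessor, not the field
      _cv.wait_for(g, std::chrono::milliseconds(50));
    }
  }
  void stop_service() override { _cv.notify_all(); }
};

int main() {
  SamplerThreadSketch t;
  t.create_and_start();
  t.stop();
  std::cout << "worker terminated cleanly" << std::endl;
}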

View File

@ -1281,7 +1281,7 @@ jlong GenCollectedHeap::millis_since_last_gc() {
void GenCollectedHeap::stop() {
#if INCLUDE_ALL_GCS
if (UseConcMarkSweepGC) {
ConcurrentMarkSweepThread::stop();
ConcurrentMarkSweepThread::cmst()->stop();
}
#endif
}

View File

@ -52,6 +52,7 @@
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, heap)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, ihop)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, heap)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, heap, region)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, freelist)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ihop)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, liveness)) \

View File

@ -242,7 +242,7 @@ class ChunkPool: public CHeapObj<mtInternal> {
ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
// Allocate a new chunk from the pool (might expand the pool)
_NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
NOINLINE void* allocate(size_t bytes, AllocFailType alloc_failmode) {
assert(bytes == _size, "bad size");
void* p = NULL;
// No VM lock can be taken inside ThreadCritical lock, so os::malloc

View File

@ -41,18 +41,6 @@
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
// noinline attribute
#ifdef _WINDOWS
#define _NOINLINE_ __declspec(noinline)
#else
#if __GNUC__ < 3 // gcc 2.x does not support noinline attribute
#define _NOINLINE_
#else
#define _NOINLINE_ __attribute__ ((noinline))
#endif
#endif
class AllocFailStrategy {
public:
enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
@ -178,17 +166,17 @@ class NativeCallStack;
template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
public:
_NOINLINE_ void* operator new(size_t size, const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new(size_t size) throw();
_NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant,
NOINLINE void* operator new(size_t size, const NativeCallStack& stack) throw();
NOINLINE void* operator new(size_t size) throw();
NOINLINE void* operator new (size_t size, const std::nothrow_t& nothrow_constant,
const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant)
NOINLINE void* operator new (size_t size, const std::nothrow_t& nothrow_constant)
throw();
_NOINLINE_ void* operator new [](size_t size, const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new [](size_t size) throw();
_NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
NOINLINE void* operator new [](size_t size, const NativeCallStack& stack) throw();
NOINLINE void* operator new [](size_t size) throw();
NOINLINE void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant)
NOINLINE void* operator new [](size_t size, const std::nothrow_t& nothrow_constant)
throw();
void operator delete(void* p);
void operator delete [] (void* p);

View File

@ -1093,19 +1093,19 @@ void Universe::print_heap_at_SIGBREAK() {
void Universe::print_heap_before_gc() {
LogHandle(gc, heap) log;
if (log.is_trace()) {
log.trace("Heap before GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections());
if (log.is_debug()) {
log.debug("Heap before GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections());
ResourceMark rm;
heap()->print_on(log.trace_stream());
heap()->print_on(log.debug_stream());
}
}
void Universe::print_heap_after_gc() {
LogHandle(gc, heap) log;
if (log.is_trace()) {
log.trace("Heap after GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections());
if (log.is_debug()) {
log.debug("Heap after GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections());
ResourceMark rm;
heap()->print_on(log.trace_stream());
heap()->print_on(log.debug_stream());
}
}
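
Beyond the trace-to-debug level change, this hunk shows unified logging's guard-then-print idiom: the level is queried once, so the ResourceMark and the full heap walk are only paid for when output is enabled. A rough standalone sketch of that shape, with a plain struct standing in for HotSpot's LogHandle:

#include <iostream>

// Stand-in for LogHandle(gc, heap): real code queries the unified logging
// framework; here the enabled level is just a flag.
struct LogSketch {
  bool debug_enabled = true;
  bool is_debug() const { return debug_enabled; }
  std::ostream& debug_stream() { return std::cout; }
};

static void print_heap_before_gc(LogSketch& log, unsigned total, unsigned full) {
  if (log.is_debug()) {  // guard first: everything below is skipped when off
    log.debug_stream() << "Heap before GC invocations=" << total
                       << " (full " << full << "):\n";
    // ... the expensive heap()->print_on(...) walk would go here ...
  }
}

int main() {
  LogSketch log;
  print_heap_before_gc(log, 42, 3);
}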

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,16 +36,9 @@
// The iteration over the oops in objects is a hot path in the GC code.
// By force inlining the following functions, we get GC performance similar
// to that of the previous macro-based implementation.
#ifdef TARGET_COMPILER_visCPP
#define INLINE __forceinline
#elif defined(TARGET_COMPILER_sparcWorks)
#define INLINE __attribute__((always_inline))
#else
#define INLINE inline
#endif
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
T* p = (T*)obj->obj_field_addr<T>(map->offset());
T* const end = p + map->count();
@ -56,7 +49,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, Oo
#if INCLUDE_ALL_GCS
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
T* const start = (T*)obj->obj_field_addr<T>(map->offset());
T* p = start + map->count();
@ -68,7 +61,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop
#endif
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
T* p = (T*)obj->obj_field_addr<T>(map->offset());
T* end = p + map->count();
@ -91,7 +84,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop
}
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) {
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) {
OopMapBlock* map = start_of_nonstatic_oop_maps();
OopMapBlock* const end_map = map + nonstatic_oop_map_count();
@ -102,7 +95,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClos
#if INCLUDE_ALL_GCS
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
OopMapBlock* map = start_map + nonstatic_oop_map_count();
@ -114,7 +107,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj,
#endif
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
OopMapBlock* map = start_of_nonstatic_oop_maps();
OopMapBlock* const end_map = map + nonstatic_oop_map_count();
@ -124,7 +117,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj,
}
template <bool nv, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
if (UseCompressedOops) {
oop_oop_iterate_oop_maps_specialized<nv, narrowOop>(obj, closure);
} else {
@ -134,7 +127,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* clo
#if INCLUDE_ALL_GCS
template <bool nv, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
if (UseCompressedOops) {
oop_oop_iterate_oop_maps_specialized_reverse<nv, narrowOop>(obj, closure);
} else {
@ -144,7 +137,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureT
#endif
template <bool nv, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
if (UseCompressedOops) {
oop_oop_iterate_oop_maps_specialized_bounded<nv, narrowOop>(obj, closure, mr);
} else {
@ -153,7 +146,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureT
}
template <bool nv, class OopClosureType>
INLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
ALWAYSINLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
if (Devirtualizer<nv>::do_metadata(closure)) {
Devirtualizer<nv>::do_klass(closure, this);
}
@ -165,7 +158,7 @@ INLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
#if INCLUDE_ALL_GCS
template <bool nv, class OopClosureType>
INLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
ALWAYSINLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
assert(!Devirtualizer<nv>::do_metadata(closure),
"Code to handle metadata is not implemented");
@ -176,7 +169,7 @@ INLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closu
#endif
template <bool nv, class OopClosureType>
INLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
ALWAYSINLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
if (Devirtualizer<nv>::do_metadata(closure)) {
if (mr.contains(obj)) {
Devirtualizer<nv>::do_klass(closure, this);
@ -188,8 +181,6 @@ INLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closu
return size_helper();
}
#undef INLINE
#define ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
OOP_OOP_ITERATE_DEFN( InstanceKlass, OopClosureType, nv_suffix) \
OOP_OOP_ITERATE_DEFN_BOUNDED( InstanceKlass, OopClosureType, nv_suffix) \

View File

@ -44,14 +44,6 @@
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
#define NOINLINE __attribute__((noinline))
#else
#define NOINLINE
#endif
#ifdef DTRACE_ENABLED
// Only bother with this argument setup if dtrace is available
@ -254,7 +246,7 @@ static volatile int InitDone = 0;
// -----------------------------------------------------------------------------
// Enter support
void NOINLINE ObjectMonitor::enter(TRAPS) {
void ObjectMonitor::enter(TRAPS) {
// The following code is ordered to check the most common cases first
// and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
Thread * const Self = THREAD;
@ -431,7 +423,7 @@ int ObjectMonitor::TryLock(Thread * Self) {
#define MAX_RECHECK_INTERVAL 1000
void NOINLINE ObjectMonitor::EnterI(TRAPS) {
void ObjectMonitor::EnterI(TRAPS) {
Thread * const Self = THREAD;
assert(Self->is_Java_thread(), "invariant");
assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
@ -681,7 +673,7 @@ void NOINLINE ObjectMonitor::EnterI(TRAPS) {
// Knob_Reset and Knob_SpinAfterFutile support and restructuring the
// loop accordingly.
void NOINLINE ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
assert(Self != NULL, "invariant");
assert(SelfNode != NULL, "invariant");
assert(SelfNode->_thread == Self, "invariant");
@ -894,7 +886,7 @@ void ObjectMonitor::UnlinkAfterAcquire(Thread *Self, ObjectWaiter *SelfNode) {
// structured the code so the windows are short and the frequency
// of such futile wakeups is low.
void NOINLINE ObjectMonitor::exit(bool not_suspended, TRAPS) {
void ObjectMonitor::exit(bool not_suspended, TRAPS) {
Thread * const Self = THREAD;
if (THREAD != _owner) {
if (THREAD->is_lock_owned((address) _owner)) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,13 +48,6 @@
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"
#if defined(__GNUC__) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
#define NOINLINE __attribute__((noinline))
#else
#define NOINLINE
#endif
// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
@ -1038,7 +1031,7 @@ void ObjectSynchronizer::verifyInUse(Thread *Self) {
assert(free_tally == Self->omFreeCount, "free count off");
}
ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc(Thread * Self) {
ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
// A large MAXPRIVATE value reduces both list lock contention
// and list coherency traffic, but also tends to increase the
// number of objectMonitors in circulation as well as the STW
@ -1313,7 +1306,7 @@ ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
inflate_cause_vm_internal);
}
ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self,
ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
oop object,
const InflateCause cause) {

View File

@ -42,6 +42,12 @@
# include "utilities/globalDefinitions_xlc.hpp"
#endif
#ifndef NOINLINE
#define NOINLINE
#endif
#ifndef ALWAYSINLINE
#define ALWAYSINLINE inline
#endif
#ifndef PRAGMA_DIAG_PUSH
#define PRAGMA_DIAG_PUSH
#endif
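
These #ifndef blocks are the fallback half of the new scheme: each compiler-specific header (gcc, sparcWorks, visCPP, xlc, seen below) defines NOINLINE and ALWAYSINLINE where the toolchain supports them, and the shared header supplies neutral defaults so portable code can use both macros unconditionally. A compilable sketch of the pattern outside HotSpot:

#include <cstdio>

// Compiler-specific half (normally in a per-toolchain header):
#if defined(_MSC_VER)
  #define NOINLINE     __declspec(noinline)
  #define ALWAYSINLINE __forceinline
#elif defined(__GNUC__)
  #define NOINLINE     __attribute__((noinline))
  #define ALWAYSINLINE inline __attribute__((always_inline))
#endif

// Shared fallback half (as in the hunk above): neutral defaults elsewhere.
#ifndef NOINLINE
  #define NOINLINE
#endif
#ifndef ALWAYSINLINE
  #define ALWAYSINLINE inline
#endif

static NOINLINE     int cold_path(int x) { return x * 31; }  // kept out of callers
static ALWAYSINLINE int hot_path(int x)  { return x + 1; }   // folded into callers

int main() {
  std::printf("%d\n", hot_path(cold_path(2)));
  return 0;
}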

View File

@ -322,4 +322,8 @@ inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
#define THREAD_LOCAL_DECL __thread
#endif
// Inlining support
#define NOINLINE __attribute__ ((noinline))
#define ALWAYSINLINE __attribute__ ((always_inline))
#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_GCC_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -277,4 +277,8 @@ inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
#define THREAD_LOCAL_DECL __thread
#endif
// Inlining support
#define NOINLINE
#define ALWAYSINLINE __attribute__((always_inline))
#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_SPARCWORKS_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -240,4 +240,11 @@ inline int vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
#define THREAD_LOCAL_DECL __declspec( thread )
#endif
// Inlining support
// MSVC has '__declspec(noinline)' but according to the official documentation
// it only applies to member functions. There are reports, though, that it
// also works for freestanding functions.
#define NOINLINE __declspec(noinline)
#define ALWAYSINLINE __forceinline
#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_VISCPP_HPP

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -184,4 +184,8 @@ inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
#define THREAD_LOCAL_DECL __thread
#endif
// Inlining support
#define NOINLINE
#define ALWAYSINLINE __attribute__((always_inline))
#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_XLC_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,17 +27,6 @@
#include "utilities/stack.hpp"
// Stack is used by the GC code and in some hot paths a lot of the Stack
// code gets inlined. This is generally good, but when too much code has
// been inlined, no further inlining is allowed by GCC. Therefore we need
// to prevent parts of the slow path in Stack from being inlined, to leave
// room for other code.
#if defined(TARGET_COMPILER_gcc)
#define NOINLINE __attribute__((noinline))
#else
#define NOINLINE
#endif
template <MEMFLAGS F> StackBase<F>::StackBase(size_t segment_size, size_t max_cache_size,
size_t max_size):
_seg_size(segment_size),
@ -151,6 +140,11 @@ void Stack<E, F>::free(E* addr, size_t bytes)
FREE_C_HEAP_ARRAY(char, (char*) addr);
}
// Stack is used by the GC code and in some hot paths a lot of the Stack
// code gets inlined. This is generally good, but when too much code has
// been inlined, no further inlining is allowed by GCC. Therefore we need
// to prevent parts of the slow path in Stack from being inlined, to leave
// room for other code.
template <class E, MEMFLAGS F>
NOINLINE void Stack<E, F>::push_segment()
{
@ -280,6 +274,4 @@ E* StackIterator<E, F>::next_addr()
return _cur_seg + --_cur_seg_size;
}
#undef NOINLINE
#endif // SHARE_VM_UTILITIES_STACK_INLINE_HPP
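
The comment moved next to push_segment() describes an inlining-budget trick: forcing the rare slow path out of line leaves GCC room to inline the hot path and its callers. A toy illustration of the same split, using a NOINLINE definition like the ones in the headers above:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

#if defined(__GNUC__)
  #define NOINLINE __attribute__((noinline))
#else
  #define NOINLINE
#endif

// Toy growable stack: push() is the hot path and stays tiny; the rare
// reallocation is forced out of line so it doesn't eat the inlining budget.
struct IntStack {
  int*   data = nullptr;
  size_t size = 0;
  size_t cap  = 0;

  NOINLINE void grow() {                 // cold slow path, never inlined
    cap = cap ? cap * 2 : 16;
    int* p = static_cast<int*>(std::realloc(data, cap * sizeof(int)));
    if (p == nullptr) std::abort();      // toy code: give up on allocation failure
    data = p;
  }

  void push(int v) {                     // hot fast path, trivially inlinable
    if (size == cap) grow();
    data[size++] = v;
  }
};

int main() {
  IntStack s;
  for (int i = 0; i < 1000; i++) s.push(i);
  std::printf("top=%d size=%zu\n", s.data[s.size - 1], s.size);
  std::free(s.data);
  return 0;
}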

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -112,10 +112,12 @@ class TestMaxHeapSizeTools {
}
private static void checkInvalidMinInitialHeapCombinations(String gcflag) throws Exception {
expectError(new String[] { gcflag, "-XX:InitialHeapSize=1023K", "-version" });
expectError(new String[] { gcflag, "-Xms64M", "-XX:InitialHeapSize=32M", "-version" });
}
private static void checkValidMinInitialHeapCombinations(String gcflag) throws Exception {
expectValid(new String[] { gcflag, "-XX:InitialHeapSize=1024K", "-version" });
expectValid(new String[] { gcflag, "-XX:InitialHeapSize=8M", "-Xms4M", "-version" });
expectValid(new String[] { gcflag, "-Xms4M", "-XX:InitialHeapSize=8M", "-version" });
expectValid(new String[] { gcflag, "-XX:InitialHeapSize=8M", "-Xms8M", "-version" });
@ -124,11 +126,13 @@ class TestMaxHeapSizeTools {
}
private static void checkInvalidInitialMaxHeapCombinations(String gcflag) throws Exception {
expectError(new String[] { gcflag, "-XX:MaxHeapSize=2047K", "-version" });
expectError(new String[] { gcflag, "-XX:MaxHeapSize=4M", "-XX:InitialHeapSize=8M", "-version" });
expectError(new String[] { gcflag, "-XX:InitialHeapSize=8M", "-XX:MaxHeapSize=4M", "-version" });
}
private static void checkValidInitialMaxHeapCombinations(String gcflag) throws Exception {
expectValid(new String[] { gcflag, "-XX:MaxHeapSize=2048K", "-version" });
expectValid(new String[] { gcflag, "-XX:InitialHeapSize=4M", "-XX:MaxHeapSize=8M", "-version" });
expectValid(new String[] { gcflag, "-XX:MaxHeapSize=8M", "-XX:InitialHeapSize=4M", "-version" });
expectValid(new String[] { gcflag, "-XX:MaxHeapSize=4M", "-XX:InitialHeapSize=4M", "-version" });