Merge
commit 7159e1b97a
@@ -91,3 +91,4 @@ c1df968c4527bfab5f97662a89245f15d12d378b jdk7-b113
 27985a5c6e5268014d25d55886e0ecb96af4763d jdk7-b114
 e8ebdf41b9c01a26642848f4134f5504e8fb3233 jdk7-b115
 94e9a1bfba8b8d1fe0bfd43b88629b1f27b02a76 jdk7-b116
+7220e60b097fa027e922f1aeecdd330f3e37409f jdk7-b117
@@ -127,3 +127,5 @@ beef35b96b81129c375d572357fb9548d9020db1 jdk7-b113
 5511edd5d719f3fc9fdd04879482026a3d2c8652 hs20-b01
 bdbc48857210a509b3c50a3291ecb9dd6a72e016 jdk7-b115
 96b3f2a7add0b445b8aa421f6823cff5a2e2fe03 jdk7-b116
+52f19c724d9634af79044a2e0defbe4a5f1adbda hs20-b02
+806d0c037e6bbb88dac0699673f4ba55ee8c02da jdk7-b117
@@ -664,7 +664,7 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
   // Use temps to avoid kills
   LIR_Opr t1 = FrameMap::G1_opr;
   LIR_Opr t2 = FrameMap::G3_opr;
-  LIR_Opr addr = (type == objectType) ? new_register(T_OBJECT) : new_pointer_register();
+  LIR_Opr addr = new_pointer_register();

   // get address of field
   obj.load_item();
@@ -62,3 +62,5 @@ define_pd_global(intx, PreInflateSpin, 40); // Determined by running desi

 define_pd_global(bool, RewriteBytecodes, true);
 define_pd_global(bool, RewriteFrequentPairs, true);
+
+define_pd_global(bool, UseMembar, false);
@@ -499,7 +499,7 @@ void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
   Register new_val_reg = new_val()->as_register();
   __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
   __ jcc(Assembler::equal, _continuation);
-  ce->store_parameter(addr()->as_register(), 0);
+  ce->store_parameter(addr()->as_pointer_register(), 0);
   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
   __ jmp(_continuation);
 }
@@ -765,7 +765,7 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
     ShouldNotReachHere();
   }

-  LIR_Opr addr = (type == objectType) ? new_register(T_OBJECT) : new_pointer_register();
+  LIR_Opr addr = new_pointer_register();
   LIR_Address* a;
   if(offset.result()->is_constant()) {
     a = new LIR_Address(obj.result(),
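Note on the two do_CompareAndSwap hunks and the matching G1 stub fix above: the address being CASed is base + offset, an interior ("derived") pointer into the object rather than an object reference, so holding it in a T_OBJECT register would let a precise GC treat it as an oop. new_pointer_register() / as_pointer_register() keep it typed as a plain pointer. A standalone sketch of the distinction (illustrative C++ only, not HotSpot code; the struct and helper are invented):

#include <cstddef>
#include <cstdint>

struct Obj { int64_t header; int64_t field; };  // stand-in for a heap object

// A CAS on a field operates on the address base + offset. That address is a
// derived interior pointer: fine to compute and dereference, wrong to
// describe to a precise collector as an object reference.
bool cas_field(Obj* base, ptrdiff_t offset, int64_t expect, int64_t update) {
  int64_t* addr = (int64_t*)((char*)base + offset);  // derived pointer
  return __sync_bool_compare_and_swap(addr, expect, update);
}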
@@ -63,3 +63,5 @@ define_pd_global(intx, PreInflateSpin, 10);

 define_pd_global(bool, RewriteBytecodes, true);
 define_pd_global(bool, RewriteFrequentPairs, true);
+
+define_pd_global(bool, UseMembar, false);
@@ -45,3 +45,5 @@ define_pd_global(intx, StackShadowPages, 5 LP64_ONLY(+1) DEBUG_ONLY(+3));

 define_pd_global(bool, RewriteBytecodes, true);
 define_pd_global(bool, RewriteFrequentPairs, true);
+
+define_pd_global(bool, UseMembar, false);
@@ -176,10 +176,10 @@ int LinuxAttachListener::init() {

  int n = snprintf(path, UNIX_PATH_MAX, "%s/.java_pid%d",
                   os::get_temp_directory(), os::current_process_id());
-  if (n <= (int)UNIX_PATH_MAX) {
+  if (n < (int)UNIX_PATH_MAX) {
    n = snprintf(initial_path, UNIX_PATH_MAX, "%s.tmp", path);
  }
-  if (n > (int)UNIX_PATH_MAX) {
+  if (n >= (int)UNIX_PATH_MAX) {
    return -1;
  }

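The two comparisons here were off by one: snprintf returns the number of characters the untruncated result would contain, not counting the terminating '\0', so a return value equal to the buffer size already means truncation. A minimal self-contained illustration of that semantics:

#include <cstdio>

int main() {
  char buf[8];
  // "12345678" needs 8 characters plus the NUL, so it is truncated here.
  int n = snprintf(buf, sizeof(buf), "%s", "12345678");
  // n == 8: the old test (n <= 8) would wrongly accept this result;
  // the corrected test (n < 8) treats it as the truncation it is.
  return (n >= (int)sizeof(buf)) ? 1 : 0;
}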
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- * [remainder of the standard GPLv2 license header, removed with the file]
- */
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -827,8 +827,10 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  switch (thr_type) {
  case os::java_thread:
-    // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
-    if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
+    // Java threads use ThreadStackSize which default value can be
+    // changed with the flag -Xss
+    assert (JavaThread::stack_size_at_create() > 0, "this should be set");
+    stack_size = JavaThread::stack_size_at_create();
    break;
  case os::compiler_thread:
    if (CompilerThreadStackSize > 0) {
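The silent conditional becomes an assert plus an unconditional read: after this change the VM is expected to have seeded the java-thread default stack size during initialization (compare the os_windows hunk further down, which calls JavaThread::set_stack_size_at_create()). A minimal sketch of that invariant, with invented names standing in for the real accessors:

#include <cassert>
#include <cstddef>

// Once VM init has run, the per-thread default stack size is always set,
// so an "if (> 0)" fallback can become a hard assert and a plain read.
static size_t g_stack_size_at_create = 0;

void vm_init(size_t default_stack) { g_stack_size_at_create = default_stack; }

size_t java_thread_stack_size() {
  assert(g_stack_size_at_create > 0 && "this should be set");
  return g_stack_size_at_create;
}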
@@ -3922,12 +3924,21 @@ jint os::init_2(void)
  Linux::signal_sets_init();
  Linux::install_signal_handlers();

+  // Check minimum allowable stack size for thread creation and to initialize
+  // the java system classes, including StackOverflowError - depends on page
+  // size.  Add a page for compiler2 recursion in main thread.
+  // Add in 2*BytesPerWord times page size to account for VM stack during
+  // class initialization depending on 32 or 64 bit VM.
+  os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                    2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::page_size());
+
  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
-      threadStackSizeInBytes < Linux::min_stack_allowed) {
+      threadStackSizeInBytes < os::Linux::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, "
                  "Specify at least %dk",
-                  Linux::min_stack_allowed / K);
+                  os::Linux::min_stack_allowed/ K);
    return JNI_ERR;
  }

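The added block raises os::Linux::min_stack_allowed so no -Xss value can undercut the guard pages (yellow + red + shadow) plus 2*BytesPerWord pages reserved for VM work during class initialization, plus one page when compiler2 is built in. A back-of-the-envelope version of the formula (the flag values here are illustrative defaults, not authoritative):

#include <cstddef>
#include <cstdio>

int main() {
  // Assumed: 4 KB pages, 64-bit VM, StackYellowPages=2, StackRedPages=1,
  // StackShadowPages=6, BytesPerWord=8, compiler2 present (+1 page).
  const size_t page = 4096;
  const size_t yellow = 2, red = 1, shadow = 6, bytes_per_word = 8;
  const size_t compiler2_extra = 1;  // COMPILER2_PRESENT(+1)
  size_t min_stack =
      (yellow + red + shadow + 2 * bytes_per_word + compiler2_extra) * page;
  printf("min stack floor: %zuK\n", min_stack / 1024);  // 26 pages = 104K
  return 0;
}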
@@ -4839,7 +4850,7 @@ void Parker::park(bool isAbsolute, jlong time) {

  // Next, demultiplex/decode time arguments
  timespec absTime;
-  if (time < 0) { // don't wait at all
+  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
    return;
  }
  if (time > 0) {
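Background for the park() change: LockSupport encodes the timeout two ways. When isAbsolute is true, 'time' is a millisecond deadline since the epoch; otherwise it is a relative timeout in nanoseconds. An absolute deadline of 0 is therefore already in the past, and the thread should return immediately instead of falling through to the timed wait (the os_windows hunk below makes the mirror-image fix to its time == 0 branch). A sketch of the decode rule:

#include <cstdint>

// Returns true when park() should not block at all under the convention
// described above: any negative value, or an absolute deadline of 0.
bool should_return_immediately(bool isAbsolute, int64_t time) {
  return time < 0 || (isAbsolute && time == 0);
}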
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- * [remainder of the standard GPLv2 license header, removed with the file]
- */
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- * [remainder of the standard GPLv2 license header, removed with the file]
- */
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -4878,18 +4878,17 @@ jint os::init_2(void) {
  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add a page for compiler2 recursion in main thread.
-  // Add in BytesPerWord times page size to account for VM stack during
+  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
-  guarantee((Solaris::min_stack_allowed >=
-    (StackYellowPages+StackRedPages+StackShadowPages+BytesPerWord
-     COMPILER2_PRESENT(+1)) * page_size),
-    "need to increase Solaris::min_stack_allowed on this platform");
+  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                    2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
-    threadStackSizeInBytes < Solaris::min_stack_allowed) {
+    threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
-                  Solaris::min_stack_allowed/K);
+                  os::Solaris::min_stack_allowed/K);
    return JNI_ERR;
  }

@@ -5837,7 +5836,7 @@ void Parker::park(bool isAbsolute, jlong time) {

  // First, demultiplex/decode time arguments
  timespec absTime;
-  if (time < 0) { // don't wait at all
+  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
    return;
  }
  if (time > 0) {
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- * [remainder of the standard GPLv2 license header, removed with the file]
- */
-
- private:
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- * [remainder of the standard GPLv2 license header, removed with the file]
- */
@@ -3311,7 +3311,6 @@ extern "C" {
  }
 }

-
 // this is called _after_ the global arguments have been parsed
 jint os::init_2(void) {
  // Allocate a single page and mark it as readable for safepoint polling
@@ -3390,6 +3389,21 @@ jint os::init_2(void) {
    actual_reserve_size = default_reserve_size;
  }

+  // Check minimum allowable stack size for thread creation and to initialize
+  // the java system classes, including StackOverflowError - depends on page
+  // size.  Add a page for compiler2 recursion in main thread.
+  // Add in 2*BytesPerWord times page size to account for VM stack during
+  // class initialization depending on 32 or 64 bit VM.
+  size_t min_stack_allowed =
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+            2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
+  if (actual_reserve_size < min_stack_allowed) {
+    tty->print_cr("\nThe stack size specified is too small, "
+                  "Specify at least %dk",
+                  min_stack_allowed / K);
+    return JNI_ERR;
+  }
+
  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard gainst artifical
@@ -3992,7 +4006,7 @@ void Parker::park(bool isAbsolute, jlong time) {
  if (time < 0) { // don't wait
    return;
  }
-  else if (time == 0) {
+  else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  }
  else if (isAbsolute) {
@@ -1350,7 +1350,6 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");
-  assert(addr->type() == T_OBJECT, "addr should point to an object");

  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();
@@ -4309,20 +4309,21 @@ int ClassFileParser::verify_legal_method_signature(symbolHandle name, symbolHand
 }


-// Unqualified names may not contain the characters '.', ';', or '/'.
-// Method names also may not contain the characters '<' or '>', unless <init> or <clinit>.
-// Note that method names may not be <init> or <clinit> in this method.
-// Because these names have been checked as special cases before calling this method
-// in verify_legal_method_name.
-bool ClassFileParser::verify_unqualified_name(char* name, unsigned int length, int type) {
+// Unqualified names may not contain the characters '.', ';', '[', or '/'.
+// Method names also may not contain the characters '<' or '>', unless <init>
+// or <clinit>.  Note that method names may not be <init> or <clinit> in this
+// method.  Because these names have been checked as special cases before
+// calling this method in verify_legal_method_name.
+bool ClassFileParser::verify_unqualified_name(
+    char* name, unsigned int length, int type) {
  jchar ch;

  for (char* p = name; p != name + length; ) {
    ch = *p;
    if (ch < 128) {
      p++;
-      if (ch == '.' || ch == ';') {
-        return false;   // do not permit '.' or ';'
+      if (ch == '.' || ch == ';' || ch == '[' ) {
+        return false;   // do not permit '.', ';', or '['
      }
      if (type != LegalClass && ch == '/') {
        return false;   // do not permit '/' unless it's class name
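This change enforces the JVMS "unqualified name" rule (section 4.2.2) more completely: such names may not contain '.', ';', '[' or '/', with '/' permitted only as the separator inside (qualified) class names. A standalone ASCII-only restatement of the rule (the real parser also steps over multi-byte UTF-8 characters and handles <init>/<clinit> as special cases elsewhere):

#include <cstring>

// Rejects '.', ';', and '[' everywhere; '/' is only legal in class names.
bool is_legal_unqualified_name(const char* name, bool is_class_name) {
  for (const char* p = name; *p != '\0'; ++p) {
    if (*p == '.' || *p == ';' || *p == '[') return false;
    if (*p == '/' && !is_class_name) return false;
  }
  return *name != '\0';  // the empty string is not a legal name
}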
hotspot/src/share/vm/classfile/stackMapTableFormat.hpp (new file, 916 lines)
@ -0,0 +1,916 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// These classes represent the stack-map substructures described in the JVMS
|
||||||
|
// (hence the non-conforming naming scheme).
|
||||||
|
|
||||||
|
// These classes work with the types in their compressed form in-place (as they
|
||||||
|
// would appear in the classfile). No virtual methods or fields allowed.
|
||||||
|
|
||||||
|
class verification_type_info {
|
||||||
|
private:
|
||||||
|
// u1 tag
|
||||||
|
// u2 cpool_index || u2 bci (for ITEM_Object & ITEM_Uninitailized only)
|
||||||
|
|
||||||
|
address tag_addr() const { return (address)this; }
|
||||||
|
address cpool_index_addr() const { return tag_addr() + sizeof(u1); }
|
||||||
|
address bci_addr() const { return cpool_index_addr(); }
|
||||||
|
|
||||||
|
protected:
|
||||||
|
// No constructors - should be 'private', but GCC issues a warning if it is
|
||||||
|
verification_type_info() {}
|
||||||
|
verification_type_info(const verification_type_info&) {}
|
||||||
|
|
||||||
|
public:
|
||||||
|
|
||||||
|
static verification_type_info* at(address addr) {
|
||||||
|
return (verification_type_info*)addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static verification_type_info* create_at(address addr, u1 tag) {
|
||||||
|
verification_type_info* vti = (verification_type_info*)addr;
|
||||||
|
vti->set_tag(tag);
|
||||||
|
return vti;
|
||||||
|
}
|
||||||
|
|
||||||
|
static verification_type_info* create_object_at(address addr, u2 cp_idx) {
|
||||||
|
verification_type_info* vti = (verification_type_info*)addr;
|
||||||
|
vti->set_tag(ITEM_Object);
|
||||||
|
vti->set_cpool_index(cp_idx);
|
||||||
|
return vti;
|
||||||
|
}
|
||||||
|
|
||||||
|
static verification_type_info* create_uninit_at(address addr, u2 bci) {
|
||||||
|
verification_type_info* vti = (verification_type_info*)addr;
|
||||||
|
vti->set_tag(ITEM_Uninitialized);
|
||||||
|
vti->set_bci(bci);
|
||||||
|
return vti;
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t calculate_size(u1 tag) {
|
||||||
|
if (tag == ITEM_Object || tag == ITEM_Uninitialized) {
|
||||||
|
return sizeof(u1) + sizeof(u2);
|
||||||
|
} else {
|
||||||
|
return sizeof(u1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t max_size() { return sizeof(u1) + sizeof(u2); }
|
||||||
|
|
||||||
|
u1 tag() const { return *(u1*)tag_addr(); }
|
||||||
|
void set_tag(u1 tag) { *((u1*)tag_addr()) = tag; }
|
||||||
|
|
||||||
|
bool is_object() const { return tag() == ITEM_Object; }
|
||||||
|
bool is_uninitialized() const { return tag() == ITEM_Uninitialized; }
|
||||||
|
|
||||||
|
u2 cpool_index() const {
|
||||||
|
assert(is_object(), "This type has no cp_index");
|
||||||
|
return Bytes::get_Java_u2(cpool_index_addr());
|
||||||
|
}
|
||||||
|
void set_cpool_index(u2 idx) {
|
||||||
|
assert(is_object(), "This type has no cp_index");
|
||||||
|
Bytes::put_Java_u2(cpool_index_addr(), idx);
|
||||||
|
}
|
||||||
|
|
||||||
|
u2 bci() const {
|
||||||
|
assert(is_uninitialized(), "This type has no bci");
|
||||||
|
return Bytes::get_Java_u2(bci_addr());
|
||||||
|
}
|
||||||
|
|
||||||
|
void set_bci(u2 bci) {
|
||||||
|
assert(is_uninitialized(), "This type has no bci");
|
||||||
|
Bytes::put_Java_u2(bci_addr(), bci);
|
||||||
|
}
|
||||||
|
|
||||||
|
void copy_from(verification_type_info* from) {
|
||||||
|
set_tag(from->tag());
|
||||||
|
if (from->is_object()) {
|
||||||
|
set_cpool_index(from->cpool_index());
|
||||||
|
} else if (from->is_uninitialized()) {
|
||||||
|
set_bci(from->bci());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t size() const {
|
||||||
|
return calculate_size(tag());
|
||||||
|
}
|
||||||
|
|
||||||
|
verification_type_info* next() {
|
||||||
|
return (verification_type_info*)((address)this + size());
|
||||||
|
}
|
||||||
|
|
||||||
|
// This method is used when reading unverified data in order to ensure
|
||||||
|
// that we don't read past a particular memory limit. It returns false
|
||||||
|
// if any part of the data structure is outside the specified memory bounds.
|
||||||
|
bool verify(address start, address end) {
|
||||||
|
return ((address)this >= start &&
|
||||||
|
(address)this < end &&
|
||||||
|
(bci_addr() + sizeof(u2) <= end ||
|
||||||
|
!is_object() && !is_uninitialized()));
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef ASSERT
|
||||||
|
void print_on(outputStream* st) {
|
||||||
|
switch (tag()) {
|
||||||
|
case ITEM_Top: st->print("Top"); break;
|
||||||
|
case ITEM_Integer: st->print("Integer"); break;
|
||||||
|
case ITEM_Float: st->print("Float"); break;
|
||||||
|
case ITEM_Double: st->print("Double"); break;
|
||||||
|
case ITEM_Long: st->print("Long"); break;
|
||||||
|
case ITEM_Null: st->print("Null"); break;
|
||||||
|
case ITEM_UninitializedThis:
|
||||||
|
st->print("UninitializedThis"); break;
|
||||||
|
case ITEM_Uninitialized:
|
||||||
|
st->print("Uninitialized[#%d]", bci()); break;
|
||||||
|
case ITEM_Object:
|
||||||
|
st->print("Object[#%d]", cpool_index()); break;
|
||||||
|
default:
|
||||||
|
assert(false, "Bad verification_type_info");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
#define FOR_EACH_STACKMAP_FRAME_TYPE(macro, arg1, arg2) \
|
||||||
|
macro(same_frame, arg1, arg2) \
|
||||||
|
macro(same_frame_extended, arg1, arg2) \
|
||||||
|
macro(same_frame_1_stack_item_frame, arg1, arg2) \
|
||||||
|
macro(same_frame_1_stack_item_extended, arg1, arg2) \
|
||||||
|
macro(chop_frame, arg1, arg2) \
|
||||||
|
macro(append_frame, arg1, arg2) \
|
||||||
|
macro(full_frame, arg1, arg2)
|
||||||
|
|
||||||
|
#define SM_FORWARD_DECL(type, arg1, arg2) class type;
|
||||||
|
FOR_EACH_STACKMAP_FRAME_TYPE(SM_FORWARD_DECL, x, x)
|
||||||
|
#undef SM_FORWARD_DECL
|
||||||
|
|
||||||
|
class stack_map_frame {
|
||||||
|
protected:
|
||||||
|
address frame_type_addr() const { return (address)this; }
|
||||||
|
|
||||||
|
// No constructors - should be 'private', but GCC issues a warning if it is
|
||||||
|
stack_map_frame() {}
|
||||||
|
stack_map_frame(const stack_map_frame&) {}
|
||||||
|
|
||||||
|
public:
|
||||||
|
|
||||||
|
static stack_map_frame* at(address addr) {
|
||||||
|
return (stack_map_frame*)addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
stack_map_frame* next() const {
|
||||||
|
return at((address)this + size());
|
||||||
|
}
|
||||||
|
|
||||||
|
u1 frame_type() const { return *(u1*)frame_type_addr(); }
|
||||||
|
void set_frame_type(u1 type) { *((u1*)frame_type_addr()) = type; }
|
||||||
|
|
||||||
|
// pseudo-virtual methods
|
||||||
|
inline size_t size() const;
|
||||||
|
inline int offset_delta() const;
|
||||||
|
inline void set_offset_delta(int offset_delta);
|
||||||
|
inline int number_of_types() const; // number of types contained in the frame
|
||||||
|
inline verification_type_info* types() const; // pointer to first type
|
||||||
|
inline bool is_valid_offset(int offset_delta) const;
|
||||||
|
|
||||||
|
// This method must be used when reading unverified data in order to ensure
|
||||||
|
// that we don't read past a particular memory limit. It returns false
|
||||||
|
// if any part of the data structure is outside the specified memory bounds.
|
||||||
|
inline bool verify(address start, address end) const;
|
||||||
|
#ifdef ASSERT
|
||||||
|
inline void print_on(outputStream* st) const;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Create as_xxx and is_xxx methods for the subtypes
|
||||||
|
#define FRAME_TYPE_DECL(stackmap_frame_type, arg1, arg2) \
|
||||||
|
inline stackmap_frame_type* as_##stackmap_frame_type() const; \
|
||||||
|
bool is_##stackmap_frame_type() { \
|
||||||
|
return as_##stackmap_frame_type() != NULL; \
|
||||||
|
}
|
||||||
|
|
||||||
|
FOR_EACH_STACKMAP_FRAME_TYPE(FRAME_TYPE_DECL, x, x)
|
||||||
|
#undef FRAME_TYPE_DECL
|
||||||
|
};
|
||||||
|
|
||||||
|
class same_frame : public stack_map_frame {
|
||||||
|
private:
|
||||||
|
static int frame_type_to_offset_delta(u1 frame_type) {
|
||||||
|
return frame_type + 1; }
|
||||||
|
static u1 offset_delta_to_frame_type(int offset_delta) {
|
||||||
|
return (u1)(offset_delta - 1); }
|
||||||
|
|
||||||
|
public:
|
||||||
|
|
||||||
|
static bool is_frame_type(u1 tag) {
|
||||||
|
return tag < 64;
|
||||||
|
}
|
||||||
|
|
||||||
|
static same_frame* at(address addr) {
|
||||||
|
assert(is_frame_type(*addr), "Wrong frame id");
|
||||||
|
return (same_frame*)addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static same_frame* create_at(address addr, int offset_delta) {
|
||||||
|
same_frame* sm = (same_frame*)addr;
|
||||||
|
sm->set_offset_delta(offset_delta);
|
||||||
|
return sm;
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t calculate_size() { return sizeof(u1); }
|
||||||
|
|
||||||
|
size_t size() const { return calculate_size(); }
|
||||||
|
int offset_delta() const { return frame_type_to_offset_delta(frame_type()); }
|
||||||
|
|
||||||
|
void set_offset_delta(int offset_delta) {
|
||||||
|
assert(offset_delta <= 64, "Offset too large for same_frame");
|
||||||
|
set_frame_type(offset_delta_to_frame_type(offset_delta));
|
||||||
|
}
|
||||||
|
|
||||||
|
int number_of_types() const { return 0; }
|
||||||
|
verification_type_info* types() const { return NULL; }
|
||||||
|
|
||||||
|
bool is_valid_offset(int offset_delta) const {
|
||||||
|
return is_frame_type(offset_delta_to_frame_type(offset_delta));
|
||||||
|
}
|
||||||
|
|
||||||
|
bool verify_subtype(address start, address end) const {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef ASSERT
|
||||||
|
void print_on(outputStream* st) const {
|
||||||
|
st->print("same_frame(%d)", offset_delta());
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
class same_frame_extended : public stack_map_frame {
|
||||||
|
private:
|
||||||
|
enum { _frame_id = 251 };
|
||||||
|
address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
|
||||||
|
|
||||||
|
public:
|
||||||
|
static bool is_frame_type(u1 tag) {
|
||||||
|
return tag == _frame_id;
|
||||||
|
}
|
||||||
|
|
||||||
|
static same_frame_extended* at(address addr) {
|
||||||
|
assert(is_frame_type(*addr), "Wrong frame type");
|
||||||
|
return (same_frame_extended*)addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static same_frame_extended* create_at(address addr, u2 offset_delta) {
|
||||||
|
same_frame_extended* sm = (same_frame_extended*)addr;
|
||||||
|
sm->set_frame_type(_frame_id);
|
||||||
|
sm->set_offset_delta(offset_delta);
|
||||||
|
return sm;
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t calculate_size() { return sizeof(u1) + sizeof(u2); }
|
||||||
|
|
||||||
|
size_t size() const { return calculate_size(); }
|
||||||
|
int offset_delta() const {
|
||||||
|
return Bytes::get_Java_u2(offset_delta_addr()) + 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
void set_offset_delta(int offset_delta) {
|
||||||
|
Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
int number_of_types() const { return 0; }
|
||||||
|
verification_type_info* types() const { return NULL; }
|
||||||
|
bool is_valid_offset(int offset) const { return true; }
|
||||||
|
|
||||||
|
bool verify_subtype(address start, address end) const {
|
||||||
|
return frame_type_addr() + size() <= end;
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef ASSERT
|
||||||
|
void print_on(outputStream* st) const {
|
||||||
|
st->print("same_frame_extended(%d)", offset_delta());
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
class same_frame_1_stack_item_frame : public stack_map_frame {
|
||||||
|
private:
|
||||||
|
address type_addr() const { return frame_type_addr() + sizeof(u1); }
|
||||||
|
|
||||||
|
static int frame_type_to_offset_delta(u1 frame_type) {
|
||||||
|
return frame_type - 63; }
|
||||||
|
static u1 offset_delta_to_frame_type(int offset_delta) {
|
||||||
|
return (u1)(offset_delta + 63); }
|
||||||
|
|
||||||
|
public:
|
||||||
|
static bool is_frame_type(u1 tag) {
|
||||||
|
return tag >= 64 && tag < 128;
|
||||||
|
}
|
||||||
|
|
||||||
|
static same_frame_1_stack_item_frame* at(address addr) {
|
||||||
|
assert(is_frame_type(*addr), "Wrong frame id");
|
||||||
|
return (same_frame_1_stack_item_frame*)addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static same_frame_1_stack_item_frame* create_at(
|
||||||
|
address addr, int offset_delta, verification_type_info* vti) {
|
||||||
|
same_frame_1_stack_item_frame* sm = (same_frame_1_stack_item_frame*)addr;
|
||||||
|
sm->set_offset_delta(offset_delta);
|
||||||
|
if (vti != NULL) {
|
||||||
|
sm->set_type(vti);
|
||||||
|
}
|
||||||
|
return sm;
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t calculate_size(verification_type_info* vti) {
|
||||||
|
return sizeof(u1) + vti->size();
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t max_size() {
|
||||||
|
return sizeof(u1) + verification_type_info::max_size();
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t size() const { return calculate_size(types()); }
|
||||||
|
int offset_delta() const { return frame_type_to_offset_delta(frame_type()); }
|
||||||
|
|
||||||
|
void set_offset_delta(int offset_delta) {
|
||||||
|
assert(offset_delta > 0 && offset_delta <= 64,
|
||||||
|
"Offset too large for this frame type");
|
||||||
|
set_frame_type(offset_delta_to_frame_type(offset_delta));
|
||||||
|
}
|
||||||
|
|
||||||
|
void set_type(verification_type_info* vti) {
|
||||||
|
verification_type_info* cur = types();
|
||||||
|
cur->copy_from(vti);
|
||||||
|
}
|
||||||
|
|
||||||
|
int number_of_types() const { return 1; }
|
||||||
|
verification_type_info* types() const {
|
||||||
|
return verification_type_info::at(type_addr());
|
||||||
|
}
|
||||||
|
|
||||||
|
bool is_valid_offset(int offset_delta) const {
|
||||||
|
return is_frame_type(offset_delta_to_frame_type(offset_delta));
|
||||||
|
}
|
||||||
|
|
||||||
|
bool verify_subtype(address start, address end) const {
|
||||||
|
return types()->verify(start, end);
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef ASSERT
|
||||||
|
void print_on(outputStream* st) const {
|
||||||
|
st->print("same_frame_1_stack_item_frame(%d,", offset_delta());
|
||||||
|
types()->print_on(st);
|
||||||
|
st->print(")");
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
class same_frame_1_stack_item_extended : public stack_map_frame {
|
||||||
|
private:
|
||||||
|
address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
|
||||||
|
address type_addr() const { return offset_delta_addr() + sizeof(u2); }
|
||||||
|
|
||||||
|
enum { _frame_id = 247 };
|
||||||
|
|
||||||
|
public:
|
||||||
|
static bool is_frame_type(u1 tag) {
|
||||||
|
return tag == _frame_id;
|
||||||
|
}
|
||||||
|
|
||||||
|
static same_frame_1_stack_item_extended* at(address addr) {
|
||||||
|
assert(is_frame_type(*addr), "Wrong frame id");
|
||||||
|
return (same_frame_1_stack_item_extended*)addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static same_frame_1_stack_item_extended* create_at(
|
||||||
|
address addr, int offset_delta, verification_type_info* vti) {
|
||||||
|
same_frame_1_stack_item_extended* sm =
|
||||||
|
(same_frame_1_stack_item_extended*)addr;
|
||||||
|
sm->set_frame_type(_frame_id);
|
||||||
|
sm->set_offset_delta(offset_delta);
|
||||||
|
if (vti != NULL) {
|
||||||
|
sm->set_type(vti);
|
||||||
|
}
|
||||||
|
return sm;
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t calculate_size(verification_type_info* vti) {
|
||||||
|
return sizeof(u1) + sizeof(u2) + vti->size();
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t size() const { return calculate_size(types()); }
|
||||||
|
int offset_delta() const {
|
||||||
|
return Bytes::get_Java_u2(offset_delta_addr()) + 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
void set_offset_delta(int offset_delta) {
|
||||||
|
Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
void set_type(verification_type_info* vti) {
|
||||||
|
verification_type_info* cur = types();
|
||||||
|
cur->copy_from(vti);
|
||||||
|
}
|
||||||
|
|
||||||
|
int number_of_types() const { return 1; }
|
||||||
|
verification_type_info* types() const {
|
||||||
|
return verification_type_info::at(type_addr());
|
||||||
|
}
|
||||||
|
bool is_valid_offset(int offset) { return true; }
|
||||||
|
|
||||||
|
bool verify_subtype(address start, address end) const {
|
||||||
|
return type_addr() < end && types()->verify(start, end);
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef ASSERT
|
||||||
|
void print_on(outputStream* st) const {
|
||||||
|
st->print("same_frame_1_stack_item_extended(%d,", offset_delta());
|
||||||
|
types()->print_on(st);
|
||||||
|
st->print(")");
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
class chop_frame : public stack_map_frame {
|
||||||
|
private:
|
||||||
|
address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
|
||||||
|
|
||||||
|
static int frame_type_to_chops(u1 frame_type) {
|
||||||
|
int chop = 251 - frame_type;
|
||||||
|
return chop;
|
||||||
|
}
|
||||||
|
|
||||||
|
static u1 chops_to_frame_type(int chop) {
|
||||||
|
return 251 - chop;
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
static bool is_frame_type(u1 tag) {
|
||||||
|
return frame_type_to_chops(tag) > 0 && frame_type_to_chops(tag) < 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
static chop_frame* at(address addr) {
|
||||||
|
assert(is_frame_type(*addr), "Wrong frame id");
|
||||||
|
return (chop_frame*)addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static chop_frame* create_at(address addr, int offset_delta, int chops) {
|
||||||
|
chop_frame* sm = (chop_frame*)addr;
|
||||||
|
sm->set_chops(chops);
|
||||||
|
sm->set_offset_delta(offset_delta);
|
||||||
|
return sm;
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t calculate_size() {
|
||||||
|
return sizeof(u1) + sizeof(u2);
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t size() const { return calculate_size(); }
|
||||||
|
int offset_delta() const {
|
||||||
|
return Bytes::get_Java_u2(offset_delta_addr()) + 1;
|
||||||
|
}
|
||||||
|
void set_offset_delta(int offset_delta) {
|
||||||
|
Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
int chops() const {
|
||||||
|
int chops = frame_type_to_chops(frame_type());
|
||||||
|
assert(chops > 0 && chops < 4, "Invalid number of chops in frame");
|
||||||
|
return chops;
|
||||||
|
}
|
||||||
|
void set_chops(int chops) {
|
||||||
|
assert(chops > 0 && chops <= 3, "Bad number of chops");
|
||||||
|
set_frame_type(chops_to_frame_type(chops));
|
||||||
|
}
|
||||||
|
|
||||||
|
int number_of_types() const { return 0; }
|
||||||
|
verification_type_info* types() const { return NULL; }
|
||||||
|
bool is_valid_offset(int offset) { return true; }
|
||||||
|
|
||||||
|
bool verify_subtype(address start, address end) const {
|
||||||
|
return frame_type_addr() + size() <= end;
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef ASSERT
|
||||||
|
void print_on(outputStream* st) const {
|
||||||
|
st->print("chop_frame(%d,%d)", offset_delta(), chops());
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
class append_frame : public stack_map_frame {
|
||||||
|
private:
|
||||||
|
address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
|
||||||
|
address types_addr() const { return offset_delta_addr() + sizeof(u2); }
|
||||||
|
|
||||||
|
static int frame_type_to_appends(u1 frame_type) {
|
||||||
|
int append = frame_type - 251;
|
||||||
|
return append;
|
||||||
|
}
|
||||||
|
|
||||||
|
static u1 appends_to_frame_type(int appends) {
|
||||||
|
assert(appends > 0 && appends < 4, "Invalid append amount");
|
||||||
|
return 251 + appends;
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
static bool is_frame_type(u1 tag) {
|
||||||
|
return frame_type_to_appends(tag) > 0 && frame_type_to_appends(tag) < 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
static append_frame* at(address addr) {
|
||||||
|
assert(is_frame_type(*addr), "Wrong frame id");
|
||||||
|
return (append_frame*)addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static append_frame* create_at(
|
||||||
|
address addr, int offset_delta, int appends,
|
||||||
|
verification_type_info* types) {
|
||||||
|
append_frame* sm = (append_frame*)addr;
|
||||||
|
sm->set_appends(appends);
|
||||||
|
sm->set_offset_delta(offset_delta);
|
||||||
|
if (types != NULL) {
|
||||||
|
verification_type_info* cur = sm->types();
|
||||||
|
for (int i = 0; i < appends; ++i) {
|
||||||
|
cur->copy_from(types);
|
||||||
|
cur = cur->next();
|
||||||
|
types = types->next();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return sm;
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t calculate_size(int appends, verification_type_info* types) {
|
||||||
|
size_t sz = sizeof(u1) + sizeof(u2);
|
||||||
|
for (int i = 0; i < appends; ++i) {
|
||||||
|
sz += types->size();
|
||||||
|
types = types->next();
|
||||||
|
}
|
||||||
|
return sz;
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t max_size() {
|
||||||
|
return sizeof(u1) + sizeof(u2) + 3 * verification_type_info::max_size();
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t size() const { return calculate_size(number_of_types(), types()); }
|
||||||
|
int offset_delta() const {
|
||||||
|
return Bytes::get_Java_u2(offset_delta_addr()) + 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
void set_offset_delta(int offset_delta) {
|
||||||
|
Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
void set_appends(int appends) {
|
||||||
|
assert(appends > 0 && appends < 4, "Bad number of appends");
|
||||||
|
set_frame_type(appends_to_frame_type(appends));
|
||||||
|
}
|
||||||
|
|
||||||
|
int number_of_types() const {
|
||||||
|
int appends = frame_type_to_appends(frame_type());
|
||||||
|
assert(appends > 0 && appends < 4, "Invalid number of appends in frame");
|
||||||
|
return appends;
|
||||||
|
}
|
||||||
|
verification_type_info* types() const {
|
||||||
|
return verification_type_info::at(types_addr());
|
||||||
|
}
|
||||||
|
bool is_valid_offset(int offset) const { return true; }
|
||||||
|
|
||||||
|
bool verify_subtype(address start, address end) const {
|
||||||
|
verification_type_info* vti = types();
|
||||||
|
if ((address)vti < end && vti->verify(start, end)) {
|
||||||
|
int nof = number_of_types();
|
||||||
|
vti = vti->next();
|
||||||
|
if (nof < 2 || vti->verify(start, end)) {
|
||||||
|
vti = vti->next();
|
||||||
|
if (nof < 3 || vti->verify(start, end)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef ASSERT
|
||||||
|
void print_on(outputStream* st) const {
|
||||||
|
st->print("append_frame(%d,", offset_delta());
|
||||||
|
verification_type_info* vti = types();
|
||||||
|
for (int i = 0; i < number_of_types(); ++i) {
|
||||||
|
vti->print_on(st);
|
||||||
|
if (i != number_of_types() - 1) {
|
||||||
|
st->print(",");
|
||||||
|
}
|
||||||
|
vti = vti->next();
|
||||||
|
}
|
||||||
|
st->print(")");
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
class full_frame : public stack_map_frame {
|
||||||
|
private:
|
||||||
|
address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
|
||||||
|
address num_locals_addr() const { return offset_delta_addr() + sizeof(u2); }
|
||||||
|
address locals_addr() const { return num_locals_addr() + sizeof(u2); }
|
||||||
|
address stack_slots_addr(address end_of_locals) const {
|
||||||
|
return end_of_locals; }
|
||||||
|
address stack_addr(address end_of_locals) const {
|
||||||
|
return stack_slots_addr(end_of_locals) + sizeof(u2); }
|
||||||
|
|
||||||
|
enum { _frame_id = 255 };
|
||||||
|
|
||||||
|
public:
|
||||||
|
static bool is_frame_type(u1 tag) {
|
||||||
|
return tag == _frame_id;
|
||||||
|
}
|
||||||
|
|
||||||
|
static full_frame* at(address addr) {
|
||||||
|
assert(is_frame_type(*addr), "Wrong frame id");
|
||||||
|
return (full_frame*)addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static full_frame* create_at(
|
||||||
|
address addr, int offset_delta, int num_locals,
|
||||||
|
verification_type_info* locals,
|
||||||
|
int stack_slots, verification_type_info* stack) {
|
||||||
|
full_frame* sm = (full_frame*)addr;
|
||||||
|
sm->set_frame_type(_frame_id);
|
||||||
|
sm->set_offset_delta(offset_delta);
|
||||||
|
sm->set_num_locals(num_locals);
|
||||||
|
if (locals != NULL) {
|
||||||
|
verification_type_info* cur = sm->locals();
|
||||||
|
for (int i = 0; i < num_locals; ++i) {
|
||||||
|
cur->copy_from(locals);
|
||||||
|
cur = cur->next();
|
||||||
|
locals = locals->next();
|
||||||
|
}
|
||||||
|
address end_of_locals = (address)cur;
|
||||||
|
sm->set_stack_slots(end_of_locals, stack_slots);
|
||||||
|
cur = sm->stack(end_of_locals);
|
||||||
|
for (int i = 0; i < stack_slots; ++i) {
|
||||||
|
cur->copy_from(stack);
|
||||||
|
cur = cur->next();
|
||||||
|
stack = stack->next();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return sm;
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t calculate_size(
|
||||||
|
int num_locals, verification_type_info* locals,
|
||||||
|
int stack_slots, verification_type_info* stack) {
|
||||||
|
size_t sz = sizeof(u1) + sizeof(u2) + sizeof(u2) + sizeof(u2);
|
||||||
|
verification_type_info* vti = locals;
|
||||||
|
for (int i = 0; i < num_locals; ++i) {
|
||||||
|
sz += vti->size();
|
||||||
|
vti = vti->next();
|
||||||
|
}
|
||||||
|
vti = stack;
|
||||||
|
for (int i = 0; i < stack_slots; ++i) {
|
||||||
|
sz += vti->size();
|
||||||
|
vti = vti->next();
|
||||||
|
}
|
||||||
|
return sz;
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t max_size(int locals, int stack) {
|
||||||
|
return sizeof(u1) + 3 * sizeof(u2) +
|
||||||
|
(locals + stack) * verification_type_info::max_size();
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t size() const {
|
||||||
|
address eol = end_of_locals();
|
||||||
|
return calculate_size(num_locals(), locals(), stack_slots(eol), stack(eol));
|
||||||
|
}
|
||||||
|
|
||||||
|
int offset_delta() const {
|
||||||
|
return Bytes::get_Java_u2(offset_delta_addr()) + 1;
|
||||||
|
}
|
||||||
|
int num_locals() const { return Bytes::get_Java_u2(num_locals_addr()); }
|
||||||
|
verification_type_info* locals() const {
|
||||||
|
return verification_type_info::at(locals_addr());
|
||||||
|
}
|
||||||
|
address end_of_locals() const {
|
||||||
|
verification_type_info* vti = locals();
|
||||||
|
for (int i = 0; i < num_locals(); ++i) {
|
||||||
|
vti = vti->next();
|
||||||
|
}
|
||||||
|
return (address)vti;
|
||||||
|
}
|
||||||
|
int stack_slots(address end_of_locals) const {
|
||||||
|
return Bytes::get_Java_u2(stack_slots_addr(end_of_locals));
|
||||||
|
}
|
||||||
|
verification_type_info* stack(address end_of_locals) const {
|
||||||
|
return verification_type_info::at(stack_addr(end_of_locals));
|
||||||
|
}
|
||||||
|
|
||||||
|
void set_offset_delta(int offset_delta) {
|
||||||
|
Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
|
||||||
|
}
|
||||||
|
void set_num_locals(int num_locals) {
|
||||||
|
Bytes::put_Java_u2(num_locals_addr(), num_locals);
|
||||||
|
}
|
||||||
|
void set_stack_slots(address end_of_locals, int stack_slots) {
|
||||||
|
Bytes::put_Java_u2(stack_slots_addr(end_of_locals), stack_slots);
|
||||||
|
}
|
||||||
|
|
||||||
|
// These return only the locals. Extra processing is required for stack
|
||||||
|
// types of full frames.
|
||||||
|
int number_of_types() const { return num_locals(); }
|
||||||
|
verification_type_info* types() const { return locals(); }
|
||||||
|
bool is_valid_offset(int offset) { return true; }
|
||||||
|
|
||||||
|
bool verify_subtype(address start, address end) const {
|
||||||
|
verification_type_info* vti = types();
|
||||||
|
if ((address)vti >= end) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
int count = number_of_types();
|
||||||
|
for (int i = 0; i < count; ++i) {
|
||||||
|
if (!vti->verify(start, end)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
vti = vti->next();
|
||||||
|
}
|
||||||
|
address eol = (address)vti;
|
||||||
|
if (eol + sizeof(u2) > end) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
count = stack_slots(eol);
|
||||||
|
vti = stack(eol);
|
||||||
|
for (int i = 0; i < stack_slots(eol); ++i) {
|
||||||
|
if (!vti->verify(start, end)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
vti = vti->next();
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef ASSERT
|
||||||
|
void print_on(outputStream* st) const {
|
||||||
|
st->print("full_frame(%d,{", offset_delta());
|
||||||
|
verification_type_info* vti = locals();
|
||||||
|
for (int i = 0; i < num_locals(); ++i) {
|
||||||
|
vti->print_on(st);
|
||||||
|
if (i != num_locals() - 1) {
|
||||||
|
st->print(",");
|
||||||
|
}
|
||||||
|
vti = vti->next();
|
||||||
|
}
|
||||||
|
st->print("},{");
|
||||||
|
address end_of_locals = (address)vti;
|
||||||
|
vti = stack(end_of_locals);
|
||||||
|
int ss = stack_slots(end_of_locals);
|
||||||
|
for (int i = 0; i < ss; ++i) {
|
||||||
|
vti->print_on(st);
|
||||||
|
if (i != ss - 1) {
|
||||||
|
st->print(",");
|
||||||
|
}
|
||||||
|
vti = vti->next();
|
||||||
|
}
|
||||||
|
st->print("})");
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
#define VIRTUAL_DISPATCH(stack_frame_type, func_name, args) \
|
||||||
|
stack_frame_type* item_##stack_frame_type = as_##stack_frame_type(); \
|
||||||
|
if (item_##stack_frame_type != NULL) { \
|
||||||
|
return item_##stack_frame_type->func_name args; \
|
||||||
|
}
|
||||||
|
|
||||||
|
#define VOID_VIRTUAL_DISPATCH(stack_frame_type, func_name, args) \
|
||||||
|
stack_frame_type* item_##stack_frame_type = as_##stack_frame_type(); \
|
||||||
|
if (item_##stack_frame_type != NULL) { \
|
||||||
|
item_##stack_frame_type->func_name args; \
|
||||||
|
return; \
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t stack_map_frame::size() const {
|
||||||
|
FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, size, ());
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int stack_map_frame::offset_delta() const {
|
||||||
|
FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, offset_delta, ());
|
||||||
|
return 0;
|
+}
+
+void stack_map_frame::set_offset_delta(int offset_delta) {
+  FOR_EACH_STACKMAP_FRAME_TYPE(
+      VOID_VIRTUAL_DISPATCH, set_offset_delta, (offset_delta));
+}
+
+int stack_map_frame::number_of_types() const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, number_of_types, ());
+  return 0;
+}
+
+verification_type_info* stack_map_frame::types() const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, types, ());
+  return NULL;
+}
+
+bool stack_map_frame::is_valid_offset(int offset) const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, is_valid_offset, (offset));
+  return true;
+}
+
+bool stack_map_frame::verify(address start, address end) const {
+  if (frame_type_addr() >= start && frame_type_addr() < end) {
+    FOR_EACH_STACKMAP_FRAME_TYPE(
+        VIRTUAL_DISPATCH, verify_subtype, (start, end));
+  }
+  return false;
+}
+
+#ifdef ASSERT
+void stack_map_frame::print_on(outputStream* st) const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VOID_VIRTUAL_DISPATCH, print_on, (st));
+}
+#endif
+
+#undef VIRTUAL_DISPATCH
+#undef VOID_VIRTUAL_DISPATCH
+
+#define AS_SUBTYPE_DEF(stack_frame_type, arg1, arg2) \
+stack_frame_type* stack_map_frame::as_##stack_frame_type() const { \
+  if (stack_frame_type::is_frame_type(frame_type())) { \
+    return (stack_frame_type*)this; \
+  } else { \
+    return NULL; \
+  } \
+}
+
+FOR_EACH_STACKMAP_FRAME_TYPE(AS_SUBTYPE_DEF, x, x)
+#undef AS_SUBTYPE_DEF
+
+class stack_map_table_attribute {
+ private:
+  address name_index_addr() const {
+      return (address)this; }
+  address attribute_length_addr() const {
+      return name_index_addr() + sizeof(u2); }
+  address number_of_entries_addr() const {
+      return attribute_length_addr() + sizeof(u4); }
+  address entries_addr() const {
+      return number_of_entries_addr() + sizeof(u2); }
+
+ protected:
+  // No constructors - should be 'private', but GCC issues a warning if it is
+  stack_map_table_attribute() {}
+  stack_map_table_attribute(const stack_map_table_attribute&) {}
+
+ public:
+
+  static stack_map_table_attribute* at(address addr) {
+    return (stack_map_table_attribute*)addr;
+  }
+
+  u2 name_index() const {
+      return Bytes::get_Java_u2(name_index_addr()); }
+  u4 attribute_length() const {
+      return Bytes::get_Java_u4(attribute_length_addr()); }
+  u2 number_of_entries() const {
+      return Bytes::get_Java_u2(number_of_entries_addr()); }
+  stack_map_frame* entries() const {
+    return stack_map_frame::at(entries_addr());
+  }
+
+  static size_t header_size() {
+      return sizeof(u2) + sizeof(u4);
+  }
+
+  void set_name_index(u2 idx) {
+    Bytes::put_Java_u2(name_index_addr(), idx);
+  }
+  void set_attribute_length(u4 len) {
+    Bytes::put_Java_u4(attribute_length_addr(), len);
+  }
+  void set_number_of_entries(u2 num) {
+    Bytes::put_Java_u2(number_of_entries_addr(), num);
+  }
+};
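Note on the dispatch style above: stack_map_frame and its subtypes overlay raw class-file bytes, so they carry no vtable; FOR_EACH_STACKMAP_FRAME_TYPE expands a tag test per concrete type instead. A minimal compilable sketch of that pattern follows; the two-type list, tag values, and number_of_types() results are invented for illustration, not taken from the real header.

    // Sketch: X-macro "virtual" dispatch over field-free overlay classes.
    #include <cstdio>

    typedef unsigned char u1;

    class frame_base {
     public:
      u1 frame_type() const { return *(const u1*)this; }  // read the overlaid tag byte
      int number_of_types() const;                        // dispatched via macros below
    };

    class same_frame : public frame_base {
     public:
      static bool is_frame_type(u1 tag) { return tag < 64; }
      int number_of_types() const { return 0; }
    };

    class full_frame : public frame_base {
     public:
      static bool is_frame_type(u1 tag) { return tag == 255; }
      int number_of_types() const { return 2; }  // stand-in value
    };

    // Apply "macro" to every concrete frame type (the real list is much longer).
    #define FOR_EACH_FRAME_TYPE(macro, func, args) \
      macro(same_frame, func, args)                \
      macro(full_frame, func, args)

    // Test the tag, downcast, and forward the call -- no vtable, no object growth.
    #define VIRTUAL_DISPATCH(stack_frame_type, func, args)  \
      if (stack_frame_type::is_frame_type(frame_type())) {  \
        return ((const stack_frame_type*)this)->func args;  \
      }

    int frame_base::number_of_types() const {
      FOR_EACH_FRAME_TYPE(VIRTUAL_DISPATCH, number_of_types, ())
      return 0;  // not reached for well-formed input
    }

    int main() {
      u1 bytes[1] = { 255 };  // a "full_frame" record
      printf("%d\n", ((const frame_base*)bytes)->number_of_types());  // prints 2
      return 0;
    }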
@@ -354,12 +354,8 @@ void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 double CMSStats::time_until_cms_gen_full() const {
   size_t cms_free = _cms_gen->cmsSpace()->free();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  size_t expected_promotion = gch->get_gen(0)->capacity();
-  if (HandlePromotionFailure) {
-    expected_promotion = MIN2(
-        (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
-        expected_promotion);
-  }
+  size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
+      (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
   if (cms_free > expected_promotion) {
     // Start a cms collection if there isn't enough space to promote
     // for the next minor collection. Use the padded average as
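The change above folds the old HandlePromotionFailure branch into one expression: the promotion estimate is always the smaller of the young generation's capacity and the padded average of recent promotions. A small stand-alone illustration of the arithmetic, with made-up sizes:

    #include <cstdio>
    #include <algorithm>

    int main() {
      // Hypothetical sizes in bytes.
      size_t young_capacity     = 64 * 1024 * 1024;  // worst case: whole young gen promotes
      size_t padded_avg_promote = 6 * 1024 * 1024;   // recent history, padded by deviation
      size_t cms_free           = 20 * 1024 * 1024;

      // Mirrors: expected_promotion = MIN2(capacity, padded_average)
      size_t expected_promotion = std::min(young_capacity, padded_avg_promote);

      // A CMS cycle is only projected while free space still covers the estimate.
      if (cms_free > expected_promotion) {
        printf("headroom: %zu bytes\n", cms_free - expected_promotion);
      } else {
        printf("start CMS now\n");
      }
      return 0;
    }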
@@ -865,57 +861,18 @@ size_t ConcurrentMarkSweepGeneration::max_available() const {
   return free() + _virtual_space.uncommitted_size();
 }
 
-bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
-    size_t max_promotion_in_bytes,
-    bool younger_handles_promotion_failure) const {
-
-  // This is the most conservative test.  Full promotion is
-  // guaranteed if this is used. The multiplicative factor is to
-  // account for the worst case "dilatation".
-  double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
-  if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
-    adjusted_max_promo_bytes = (double)max_uintx;
-  }
-  bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);
-
-  if (younger_handles_promotion_failure && !result) {
-    // Full promotion is not guaranteed because fragmentation
-    // of the cms generation can prevent the full promotion.
-    result = (max_available() >= (size_t)adjusted_max_promo_bytes);
-
-    if (!result) {
-      // With promotion failure handling the test for the ability
-      // to support the promotion does not have to be guaranteed.
-      // Use an average of the amount promoted.
-      result = max_available() >= (size_t)
-        gc_stats()->avg_promoted()->padded_average();
-      if (PrintGC && Verbose && result) {
-        gclog_or_tty->print_cr(
-          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-          " max_available: " SIZE_FORMAT
-          " avg_promoted: " SIZE_FORMAT,
-          max_available(), (size_t)
-          gc_stats()->avg_promoted()->padded_average());
-      }
-    } else {
-      if (PrintGC && Verbose) {
-        gclog_or_tty->print_cr(
-          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-          " max_available: " SIZE_FORMAT
-          " adj_max_promo_bytes: " SIZE_FORMAT,
-          max_available(), (size_t)adjusted_max_promo_bytes);
-      }
-    }
-  } else {
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr(
-        "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-        " contiguous_available: " SIZE_FORMAT
-        " adj_max_promo_bytes: " SIZE_FORMAT,
-        max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
-    }
-  }
-  return result;
+bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_available();
+  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
+  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
+  if (PrintGC && Verbose) {
+    gclog_or_tty->print_cr(
+      "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
+      "max_promo("SIZE_FORMAT")",
+      res? "":" not", available, res? ">=":"<",
+      av_promo, max_promotion_in_bytes);
+  }
+  return res;
 }
 
 // At a promotion failure dump information on block layout in heap
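The rewritten predicate drops the dilatation-factor estimate: a young collection is deemed safe when max_available() covers either the recent padded-average promotion volume or the caller's worst-case bound. The same decision as a stand-alone sketch (names and sizes are stand-ins):

    #include <cstdio>

    // Stand-ins for max_available() and the padded promotion average.
    static bool promotion_attempt_is_safe(size_t available,
                                          size_t padded_avg_promoted,
                                          size_t max_promotion_in_bytes) {
      // Safe if history says the promotion will fit, or if even the worst case fits.
      return (available >= padded_avg_promoted) ||
             (available >= max_promotion_in_bytes);
    }

    int main() {
      printf("%d\n", promotion_attempt_is_safe(8 << 20, 6 << 20, 16 << 20)); // 1
      printf("%d\n", promotion_attempt_is_safe(4 << 20, 6 << 20, 16 << 20)); // 0
      return 0;
    }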
@@ -6091,23 +6048,14 @@ void CMSCollector::sweep(bool asynch) {
   assert(_collectorState == Resizing, "Change of collector state to"
     " Resizing must be done under the freelistLocks (plural)");
 
-  // Now that sweeping has been completed, if the GCH's
-  // incremental_collection_will_fail flag is set, clear it,
+  // Now that sweeping has been completed, we clear
+  // the incremental_collection_failed flag,
   // thus inviting a younger gen collection to promote into
   // this generation. If such a promotion may still fail,
   // the flag will be set again when a young collection is
   // attempted.
-  // I think the incremental_collection_will_fail flag's use
-  // is specific to a 2 generation collection policy, so i'll
-  // assert that that's the configuration we are operating within.
-  // The use of the flag can and should be generalized appropriately
-  // in the future to deal with a general n-generation system.
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_two_generation_policy(),
-         "Resetting of incremental_collection_will_fail flag"
-         " may be incorrect otherwise");
-  gch->clear_incremental_collection_will_fail();
+  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
   gch->update_full_collections_completed(_collection_count_start);
 }
@@ -1185,8 +1185,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   virtual void par_promote_alloc_done(int thread_num);
   virtual void par_oop_since_save_marks_iterate_done(int thread_num);
 
-  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
-    bool younger_handles_promotion_failure) const;
+  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
 
   // Inform this (non-young) generation that a promotion failure was
   // encountered during a collection of a younger generation that
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -272,12 +272,16 @@ void ConcurrentMarkSweepThread::desynchronize(bool is_cms_thread) {
   }
 }
 
-// Wait until the next synchronous GC or a timeout, whichever is earlier.
-void ConcurrentMarkSweepThread::wait_on_cms_lock(long t) {
+// Wait until the next synchronous GC, a concurrent full gc request,
+// or a timeout, whichever is earlier.
+void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
   MutexLockerEx x(CGC_lock,
                   Mutex::_no_safepoint_check_flag);
+  if (_should_terminate || _collector->_full_gc_requested) {
+    return;
+  }
   set_CMS_flag(CMS_cms_wants_token);   // to provoke notifies
-  CGC_lock->wait(Mutex::_no_safepoint_check_flag, t);
+  CGC_lock->wait(Mutex::_no_safepoint_check_flag, t_millis);
   clear_CMS_flag(CMS_cms_wants_token);
   assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
          "Should not be set");
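wait_on_cms_lock now returns before parking when a full collection was already requested, so a notify that fired before the CMS thread reached the wait is not lost; per the header comment below, a timeout of 0 means no upper bound. A sketch of the same check-then-wait shape using standard C++ primitives (all names here are stand-ins for the HotSpot ones):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    std::mutex cgc_lock;
    std::condition_variable cgc_cv;
    bool full_gc_requested = false;  // stand-in for _collector->_full_gc_requested
    bool should_terminate  = false;  // stand-in for _should_terminate

    // t_millis == 0 means "no upper bound", matching the documented contract.
    void wait_on_cms_lock(long t_millis) {
      std::unique_lock<std::mutex> x(cgc_lock);
      if (should_terminate || full_gc_requested) {
        return;  // the event we would wait for already happened
      }
      if (t_millis == 0) {
        cgc_cv.wait(x);
      } else {
        cgc_cv.wait_for(x, std::chrono::milliseconds(t_millis));
      }
    }

    int main() {
      wait_on_cms_lock(50);  // waits at most 50ms, since nothing signals here
      return 0;
    }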
@@ -289,7 +293,8 @@ void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
     icms_wait();
     return;
   } else {
-    // Wait until the next synchronous GC or a timeout, whichever is earlier
+    // Wait until the next synchronous GC, a concurrent full gc
+    // request or a timeout, whichever is earlier.
     wait_on_cms_lock(CMSWaitDuration);
   }
   // Check if we should start a CMS collection cycle
@@ -120,8 +120,10 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
   }
 
   // Wait on CMS lock until the next synchronous GC
-  // or given timeout, whichever is earlier.
-  void    wait_on_cms_lock(long t); // milliseconds
+  // or given timeout, whichever is earlier. A timeout value
+  // of 0 indicates that there is no upper bound on the wait time.
+  // A concurrent full gc request terminates the wait.
+  void    wait_on_cms_lock(long t_millis);
 
   // The CMS thread will yield during the work portion of its cycle
   // only when requested to.  Both synchronous and asychronous requests
@@ -2418,6 +2418,8 @@ void ConcurrentMark::clear_marking_state() {
   for (int i = 0; i < (int)_max_task_num; ++i) {
     OopTaskQueue* queue = _task_queues->queue(i);
     queue->set_empty();
+    // Clear any partial regions from the CMTasks
+    _tasks[i]->clear_aborted_region();
   }
 }
@@ -2706,7 +2708,6 @@ void ConcurrentMark::abort() {
   clear_marking_state();
   for (int i = 0; i < (int)_max_task_num; ++i) {
     _tasks[i]->clear_region_fields();
-    _tasks[i]->clear_aborted_region();
   }
   _has_aborted = true;
 
@@ -2985,7 +2986,7 @@ void CMTask::reset(CMBitMap* nextMarkBitMap) {
 
   _nextMarkBitMap                = nextMarkBitMap;
   clear_region_fields();
-  clear_aborted_region();
+  assert(_aborted_region.is_empty(), "should have been cleared");
 
   _calls                         = 0;
   _elapsed_time_ms               = 0.0;
@@ -175,7 +175,7 @@ G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size
   }
   assert(start_card > _array->index_for(_bottom), "Cannot be first card");
   assert(_array->offset_array(start_card-1) <= N_words,
          "Offset card has an unexpected value");
   size_t start_card_for_region = start_card;
   u_char offset = max_jubyte;
   for (int i = 0; i < BlockOffsetArray::N_powers; i++) {
@@ -577,6 +577,16 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
 #endif
 }
 
+void
+G1BlockOffsetArray::set_for_starts_humongous(HeapWord* new_end) {
+  assert(_end == new_end, "_end should have already been updated");
+
+  // The first BOT entry should have offset 0.
+  _array->set_offset_array(_array->index_for(_bottom), 0);
+  // The rest should point to the first one.
+  set_remainder_to_point_to_start(_bottom + N_words, new_end);
+}
+
 //////////////////////////////////////////////////////////////////////
 // G1BlockOffsetArrayContigSpace
 //////////////////////////////////////////////////////////////////////
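set_for_starts_humongous rewrites the block offset table so that a lookup from any card inside the humongous object walks back to its start at _bottom. A toy model of such a back-pointing table; the real BOT uses logarithmic back-skip entries (N_powers), which this linear version deliberately simplifies:

    #include <cassert>
    #include <cstdio>

    // Toy block-offset table: one entry per 512-byte "card".
    // entry == 0 means "the block starts at this card's base";
    // otherwise the entry says how many cards to step back.
    enum { CardSize = 512, NumCards = 16 };
    static unsigned char bot[NumCards];

    void set_for_starts_humongous(int end_card) {
      bot[0] = 0;                    // first entry: offset 0
      for (int c = 1; c < end_card; c++) {
        bot[c] = (unsigned char)c;   // the rest point at the first one
      }
    }

    int block_start_card(int card) {
      return card - bot[card];       // walk back to the block start
    }

    int main() {
      set_for_starts_humongous(NumCards);
      assert(block_start_card(0) == 0);
      assert(block_start_card(7) == 0);  // any interior card resolves to the start
      printf("ok\n");
      return 0;
    }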
@@ -626,3 +636,12 @@ void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
          "Precondition of call");
   _array->set_offset_array(bottom_index, 0);
 }
+
+void
+G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_end) {
+  G1BlockOffsetArray::set_for_starts_humongous(new_end);
+
+  // Make sure _next_offset_threshold and _next_offset_index point to new_end.
+  _next_offset_threshold = new_end;
+  _next_offset_index     = _array->index_for(new_end);
+}
@@ -436,6 +436,8 @@ public:
   }
 
   void check_all_cards(size_t left_card, size_t right_card) const;
+
+  virtual void set_for_starts_humongous(HeapWord* new_end);
 };
 
 // A subtype of BlockOffsetArray that takes advantage of the fact
@@ -484,4 +486,6 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
 
   HeapWord* block_start_unsafe(const void* addr);
   HeapWord* block_start_unsafe_const(const void* addr) const;
+
+  virtual void set_for_starts_humongous(HeapWord* new_end);
 };
@@ -791,7 +791,7 @@ class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
   int _worker_i;
 public:
   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
-    _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
+    _cl(g1->g1_rem_set(), worker_i),
     _worker_i(worker_i),
     _g1h(g1)
   { }
@@ -890,7 +890,7 @@ void G1CollectedHeap::do_collection(bool explicit_gc,
     abandon_cur_alloc_region();
     abandon_gc_alloc_regions();
     assert(_cur_alloc_region == NULL, "Invariant.");
-    g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
+    g1_rem_set()->cleanupHRRS();
     tear_down_region_lists();
     set_used_regions_to_need_zero_fill();
 
@@ -1506,15 +1506,11 @@ jint G1CollectedHeap::initialize() {
   }
 
   // Also create a G1 rem set.
-  if (G1UseHRIntoRS) {
-    if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
-      _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs());
-    } else {
-      vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
-      return JNI_ENOMEM;
-    }
+  if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
+    _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
   } else {
-    _g1_rem_set = new StupidG1RemSet(this);
+    vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
+    return JNI_ENOMEM;
   }
 
   // Carve out the G1 part of the heap.
@@ -2706,8 +2702,7 @@ size_t G1CollectedHeap::max_pending_card_num() {
 }
 
 size_t G1CollectedHeap::cards_scanned() {
-  HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set();
-  return g1_rset->cardsScanned();
+  return g1_rem_set()->cardsScanned();
 }
 
 void
@@ -3850,6 +3845,54 @@ G1ParScanThreadState::print_termination_stats(int i,
                undo_waste() * HeapWordSize / K);
 }
 
+#ifdef ASSERT
+bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
+  assert(ref != NULL, "invariant");
+  assert(UseCompressedOops, "sanity");
+  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
+  oop p = oopDesc::load_decode_heap_oop(ref);
+  assert(_g1h->is_in_g1_reserved(p),
+         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  return true;
+}
+
+bool G1ParScanThreadState::verify_ref(oop* ref) const {
+  assert(ref != NULL, "invariant");
+  if (has_partial_array_mask(ref)) {
+    // Must be in the collection set--it's already been copied.
+    oop p = clear_partial_array_mask(ref);
+    assert(_g1h->obj_in_cs(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  } else {
+    oop p = oopDesc::load_decode_heap_oop(ref);
+    assert(_g1h->is_in_g1_reserved(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  }
+  return true;
+}
+
+bool G1ParScanThreadState::verify_task(StarTask ref) const {
+  if (ref.is_narrow()) {
+    return verify_ref((narrowOop*) ref);
+  } else {
+    return verify_ref((oop*) ref);
+  }
+}
+#endif // ASSERT
+
+void G1ParScanThreadState::trim_queue() {
+  StarTask ref;
+  do {
+    // Drain the overflow stack first, so other threads can steal.
+    while (refs()->pop_overflow(ref)) {
+      deal_with_reference(ref);
+    }
+    while (refs()->pop_local(ref)) {
+      deal_with_reference(ref);
+    }
+  } while (!refs()->is_empty());
+}
+
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
   _par_scan_state(par_scan_state) { }
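trim_queue drains the overflow stack before the local queue: overflow entries are private to the owning worker, while local entries remain visible to thieves, so the invisible work is retired first. A minimal sketch of that drain loop over a hypothetical two-part queue (processing here does not create new work, which the outer loop in the real code guards against):

    #include <cstdio>
    #include <deque>
    #include <vector>

    struct TwoPartQueue {
      std::vector<int> overflow;  // private to the owner, cannot be stolen
      std::deque<int>  local;     // other workers may steal from here
      bool pop_overflow(int& v) {
        if (overflow.empty()) return false;
        v = overflow.back(); overflow.pop_back(); return true;
      }
      bool pop_local(int& v) {
        if (local.empty()) return false;
        v = local.back(); local.pop_back(); return true;
      }
      bool is_empty() const { return overflow.empty() && local.empty(); }
    };

    void trim_queue(TwoPartQueue& q) {
      int task;
      do {
        // Drain the overflow stack first, so other threads can steal from local.
        while (q.pop_overflow(task)) { printf("overflow task %d\n", task); }
        while (q.pop_local(task))    { printf("local task %d\n", task); }
      } while (!q.is_empty());
    }

    int main() {
      TwoPartQueue q;
      q.overflow = {1, 2};
      q.local    = {3, 4};
      trim_queue(q);
      return 0;
    }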
@@ -4052,39 +4095,44 @@ public:
     : _g1h(g1h), _par_scan_state(par_scan_state),
       _queues(queues), _terminator(terminator) {}
 
-  void do_void() {
-    G1ParScanThreadState* pss = par_scan_state();
-    while (true) {
-      pss->trim_queue();
-
-      StarTask stolen_task;
-      if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
-        // slightly paranoid tests; I'm trying to catch potential
-        // problems before we go into push_on_queue to know where the
-        // problem is coming from
-        assert((oop*)stolen_task != NULL, "Error");
-        if (stolen_task.is_narrow()) {
-          assert(UseCompressedOops, "Error");
-          narrowOop* p = (narrowOop*) stolen_task;
-          assert(has_partial_array_mask(p) ||
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "Error");
-          pss->push_on_queue(p);
-        } else {
-          oop* p = (oop*) stolen_task;
-          assert(has_partial_array_mask(p) || _g1h->is_in_g1_reserved(*p), "Error");
-          pss->push_on_queue(p);
-        }
-        continue;
-      }
-      pss->start_term_time();
-      if (terminator()->offer_termination()) break;
-      pss->end_term_time();
-    }
-    pss->end_term_time();
-    pss->retire_alloc_buffers();
-  }
+  void do_void();
+
+private:
+  inline bool offer_termination();
 };
 
+bool G1ParEvacuateFollowersClosure::offer_termination() {
+  G1ParScanThreadState* const pss = par_scan_state();
+  pss->start_term_time();
+  const bool res = terminator()->offer_termination();
+  pss->end_term_time();
+  return res;
+}
+
+void G1ParEvacuateFollowersClosure::do_void() {
+  StarTask stolen_task;
+  G1ParScanThreadState* const pss = par_scan_state();
+  pss->trim_queue();
+
+  do {
+    while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
+      assert(pss->verify_task(stolen_task), "sanity");
+      if (stolen_task.is_narrow()) {
+        pss->deal_with_reference((narrowOop*) stolen_task);
+      } else {
+        pss->deal_with_reference((oop*) stolen_task);
+      }
+
+      // We've just processed a reference and we might have made
+      // available new entries on the queues. So we have to make sure
+      // we drain the queues as necessary.
+      pss->trim_queue();
+    }
+  } while (!offer_termination());
+
+  pss->retire_alloc_buffers();
+}
+
 class G1ParTask : public AbstractGangTask {
 protected:
   G1CollectedHeap* _g1h;
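The refactored do_void separates stealing from termination: a worker steals and re-drains until a steal fails, then offers termination, which succeeds only once all workers are idle. A single-threaded sketch of that control flow; Terminator and Worker below are stubs, not the real ParallelTaskTerminator:

    #include <cstdio>

    struct Terminator {
      int rounds;  // pretend more work shows up a few times
      bool offer_termination() { return --rounds < 0; }
    };

    struct Worker {
      Terminator* term;
      int stolen_left;
      bool steal(int& task) {
        if (stolen_left <= 0) return false;
        task = stolen_left--; return true;
      }
      void trim_queue() { /* drain local work; elided */ }

      bool offer_termination() {
        // In the real code start_term_time()/end_term_time() bracket this call.
        return term->offer_termination();
      }

      void do_void() {
        int task;
        trim_queue();                    // drain our own queues first
        do {
          while (steal(task)) {
            printf("processing stolen task %d\n", task);
            trim_queue();                // stealing may have created local work
          }
        } while (!offer_termination());  // retry until every worker is done
      }
    };

    int main() {
      Terminator t = { 2 };
      Worker w = { &t, 3 };
      w.do_void();
      return 0;
    }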
@@ -4182,8 +4230,7 @@ public:
       pss.print_termination_stats(i);
     }
 
-    assert(pss.refs_to_scan() == 0, "Task queue should be empty");
-    assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
+    assert(pss.refs()->is_empty(), "should be empty");
     double end_time_ms = os::elapsedTime() * 1000.0;
     _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
   }
@@ -1651,49 +1651,17 @@ public:
   size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
   size_t undo_waste() const { return _undo_waste; }
 
-  template <class T> void push_on_queue(T* ref) {
-    assert(ref != NULL, "invariant");
-    assert(has_partial_array_mask(ref) ||
-           _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(ref)), "invariant");
 #ifdef ASSERT
-    if (has_partial_array_mask(ref)) {
-      oop p = clear_partial_array_mask(ref);
-      // Verify that we point into the CS
-      assert(_g1h->obj_in_cs(p), "Should be in CS");
-    }
-#endif
+  bool verify_ref(narrowOop* ref) const;
+  bool verify_ref(oop* ref) const;
+  bool verify_task(StarTask ref) const;
+#endif // ASSERT
+
+  template <class T> void push_on_queue(T* ref) {
+    assert(verify_ref(ref), "sanity");
     refs()->push(ref);
   }
 
-  void pop_from_queue(StarTask& ref) {
-    if (refs()->pop_local(ref)) {
-      assert((oop*)ref != NULL, "pop_local() returned true");
-      assert(UseCompressedOops || !ref.is_narrow(), "Error");
-      assert(has_partial_array_mask((oop*)ref) ||
-             _g1h->is_in_g1_reserved(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
-                                                     : oopDesc::load_decode_heap_oop((oop*)ref)),
-             "invariant");
-    } else {
-      StarTask null_task;
-      ref = null_task;
-    }
-  }
-
-  void pop_from_overflow_queue(StarTask& ref) {
-    StarTask new_ref;
-    refs()->pop_overflow(new_ref);
-    assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
-    assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
-    assert(has_partial_array_mask((oop*)new_ref) ||
-           _g1h->is_in_g1_reserved(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
-                                                       : oopDesc::load_decode_heap_oop((oop*)new_ref)),
-           "invariant");
-    ref = new_ref;
-  }
-
-  int refs_to_scan()            { return (int)refs()->size(); }
-  int overflowed_refs_to_scan() { return (int)refs()->overflow_stack()->size(); }
-
   template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
     if (G1DeferredRSUpdate) {
       deferred_rs_update(from, p, tid);
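The verify_ref/verify_task helpers are declared under #ifdef ASSERT and invoked as assert(verify_ref(ref), "sanity"), so debug builds run the checks while product builds compile the whole expression away. A sketch of the idiom with stand-in names (build with -DASSERT to enable the checks):

    #include <cstdio>
    #include <cstdlib>

    #ifdef ASSERT
    #define my_assert(cond, msg) \
      do { if (!(cond)) { printf("assert failed: %s\n", msg); abort(); } } while (0)
    #else
    #define my_assert(cond, msg) ((void)0)  // argument tokens vanish in product builds
    #endif

    struct Queue {
    #ifdef ASSERT
      // Declared only in debug builds; returns true so it composes with assert.
      bool verify_ref(int* ref) const { return ref != 0; }
    #endif
      void push(int* ref) {
        my_assert(verify_ref(ref), "sanity");  // whole expression elided when !ASSERT
        // ... actual push elided ...
      }
    };

    int main() {
      Queue q;
      int v = 42;
      q.push(&v);
      printf("ok\n");
      return 0;
    }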
@@ -1804,7 +1772,6 @@ public:
     }
   }
 
-private:
   template <class T> void deal_with_reference(T* ref_to_scan) {
     if (has_partial_array_mask(ref_to_scan)) {
       _partial_scan_cl->do_oop_nv(ref_to_scan);
@@ -1818,59 +1785,15 @@ private:
     }
   }
 
-public:
-  void trim_queue() {
-    // I've replicated the loop twice, first to drain the overflow
-    // queue, second to drain the task queue. This is better than
-    // having a single loop, which checks both conditions and, inside
-    // it, either pops the overflow queue or the task queue, as each
-    // loop is tighter. Also, the decision to drain the overflow queue
-    // first is not arbitrary, as the overflow queue is not visible
-    // to the other workers, whereas the task queue is. So, we want to
-    // drain the "invisible" entries first, while allowing the other
-    // workers to potentially steal the "visible" entries.
-
-    while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
-      while (overflowed_refs_to_scan() > 0) {
-        StarTask ref_to_scan;
-        assert((oop*)ref_to_scan == NULL, "Constructed above");
-        pop_from_overflow_queue(ref_to_scan);
-        // We shouldn't have pushed it on the queue if it was not
-        // pointing into the CSet.
-        assert((oop*)ref_to_scan != NULL, "Follows from inner loop invariant");
-        if (ref_to_scan.is_narrow()) {
-          assert(UseCompressedOops, "Error");
-          narrowOop* p = (narrowOop*)ref_to_scan;
-          assert(!has_partial_array_mask(p) &&
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-          deal_with_reference(p);
-        } else {
-          oop* p = (oop*)ref_to_scan;
-          assert((has_partial_array_mask(p) && _g1h->is_in_g1_reserved(clear_partial_array_mask(p))) ||
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-          deal_with_reference(p);
-        }
-      }
-
-      while (refs_to_scan() > 0) {
-        StarTask ref_to_scan;
-        assert((oop*)ref_to_scan == NULL, "Constructed above");
-        pop_from_queue(ref_to_scan);
-        if ((oop*)ref_to_scan != NULL) {
-          if (ref_to_scan.is_narrow()) {
-            assert(UseCompressedOops, "Error");
-            narrowOop* p = (narrowOop*)ref_to_scan;
-            assert(!has_partial_array_mask(p) &&
-                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-            deal_with_reference(p);
-          } else {
-            oop* p = (oop*)ref_to_scan;
-            assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
-                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-            deal_with_reference(p);
-          }
-        }
-      }
+  void deal_with_reference(StarTask ref) {
+    assert(verify_task(ref), "sanity");
+    if (ref.is_narrow()) {
+      deal_with_reference((narrowOop*)ref);
+    } else {
+      deal_with_reference((oop*)ref);
     }
   }
+
+public:
+  void trim_queue();
 };
@@ -25,8 +25,6 @@
 class HeapRegion;
 class G1CollectedHeap;
 class G1RemSet;
-class HRInto_G1RemSet;
-class G1RemSet;
 class ConcurrentMark;
 class DirtyCardToOopClosure;
 class CMBitMap;
@@ -97,13 +97,6 @@ public:
   }
 };
 
-void
-StupidG1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
-                                            int worker_i) {
-  IntoCSRegionClosure rc(_g1, oc);
-  _g1->heap_region_iterate(&rc);
-}
-
 class VerifyRSCleanCardOopClosure: public OopClosure {
   G1CollectedHeap* _g1;
 public:
@@ -119,8 +112,9 @@ public:
   }
 };
 
-HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
-  : G1RemSet(g1), _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
+G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
+  : _g1(g1), _conc_refine_cards(0),
+    _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
     _cg1r(g1->concurrent_g1_refine()),
     _traversal_in_progress(false),
     _cset_rs_update_cl(NULL),
@@ -134,7 +128,7 @@ HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
   }
 }
 
-HRInto_G1RemSet::~HRInto_G1RemSet() {
+G1RemSet::~G1RemSet() {
   delete _seq_task;
   for (uint i = 0; i < n_workers(); i++) {
     assert(_cset_rs_update_cl[i] == NULL, "it should be");
@@ -277,7 +271,7 @@ public:
 // p threads
 // Then thread t will start at region t * floor (n/p)
 
-HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
+HeapRegion* G1RemSet::calculateStartRegion(int worker_i) {
   HeapRegion* result = _g1p->collection_set();
   if (ParallelGCThreads > 0) {
     size_t cs_size = _g1p->collection_set_size();
@@ -290,7 +284,7 @@ HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
   return result;
 }
 
-void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
+void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
   double rs_time_start = os::elapsedTime();
   HeapRegion *startRegion = calculateStartRegion(worker_i);
 
@@ -340,7 +334,7 @@ public:
   }
 };
 
-void HRInto_G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
+void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
   double start = os::elapsedTime();
   // Apply the given closure to all remaining log entries.
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
@@ -439,12 +433,11 @@ public:
   }
 };
 
-void HRInto_G1RemSet::cleanupHRRS() {
+void G1RemSet::cleanupHRRS() {
   HeapRegionRemSet::cleanup();
 }
 
-void
-HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
+void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                              int worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
@@ -508,8 +501,7 @@ HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
   _cset_rs_update_cl[worker_i] = NULL;
 }
 
-void HRInto_G1RemSet::
-prepare_for_oops_into_collection_set_do() {
+void G1RemSet::prepare_for_oops_into_collection_set_do() {
 #if G1_REM_SET_LOGGING
   PrintRSClosure cl;
   _g1->collection_set_iterate(&cl);
@@ -581,7 +573,7 @@ public:
 // RSet updating,
 // * the post-write barrier shouldn't be logging updates to young
 //   regions (but there is a situation where this can happen - see
-//   the comment in HRInto_G1RemSet::concurrentRefineOneCard below -
+//   the comment in G1RemSet::concurrentRefineOneCard below -
 //   that should not be applicable here), and
 // * during actual RSet updating, the filtering of cards in young
 //   regions in HeapRegion::oops_on_card_seq_iterate_careful is
@@ -601,7 +593,7 @@ public:
   }
 };
 
-void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
+void G1RemSet::cleanup_after_oops_into_collection_set_do() {
   guarantee( _cards_scanned != NULL, "invariant" );
   _total_cards_scanned = 0;
   for (uint i = 0; i < n_workers(); ++i)
@@ -692,12 +684,12 @@ public:
   }
 };
 
-void HRInto_G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
+void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
   ScrubRSClosure scrub_cl(region_bm, card_bm);
   _g1->heap_region_iterate(&scrub_cl);
 }
 
-void HRInto_G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
+void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
                          int worker_num, int claim_val) {
   ScrubRSClosure scrub_cl(region_bm, card_bm);
   _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
@@ -741,7 +733,7 @@ public:
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
-bool HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
+bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                             bool check_for_refs_into_cset) {
   // Construct the region representing the card.
   HeapWord* start = _ct_bs->addr_for(card_ptr);
@@ -820,7 +812,7 @@ bool HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i
   return trigger_cl.value();
 }
 
-bool HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
+bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                        bool check_for_refs_into_cset) {
   // If the card is no longer dirty, nothing to do.
   if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
@@ -995,7 +987,7 @@ public:
   }
 };
 
-void HRInto_G1RemSet::print_summary_info() {
+void G1RemSet::print_summary_info() {
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
 
 #if CARD_REPEAT_HISTO
@@ -1029,30 +1021,26 @@ void HRInto_G1RemSet::print_summary_info() {
   g1->concurrent_g1_refine()->threads_do(&p);
   gclog_or_tty->print_cr("");
 
-  if (G1UseHRIntoRS) {
-    HRRSStatsIter blk;
-    g1->heap_region_iterate(&blk);
-    gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
-                           "  Max = " SIZE_FORMAT "K.",
-                           blk.total_mem_sz()/K, blk.max_mem_sz()/K);
-    gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
-                           " free_lists = " SIZE_FORMAT "K.",
-                           HeapRegionRemSet::static_mem_size()/K,
-                           HeapRegionRemSet::fl_mem_size()/K);
-    gclog_or_tty->print_cr("    %d occupied cards represented.",
-                           blk.occupied());
-    gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
-                           ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
-                           blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
-                           (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
-                           (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
-    gclog_or_tty->print_cr("    Did %d coarsenings.",
-                           HeapRegionRemSet::n_coarsenings());
-
-  }
+  HRRSStatsIter blk;
+  g1->heap_region_iterate(&blk);
+  gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
+                         "  Max = " SIZE_FORMAT "K.",
+                         blk.total_mem_sz()/K, blk.max_mem_sz()/K);
+  gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
+                         " free_lists = " SIZE_FORMAT "K.",
+                         HeapRegionRemSet::static_mem_size()/K,
+                         HeapRegionRemSet::fl_mem_size()/K);
+  gclog_or_tty->print_cr("    %d occupied cards represented.",
+                         blk.occupied());
+  gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
+                         ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
+                         blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
+                         (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
+                         (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
+  gclog_or_tty->print_cr("    Did %d coarsenings.", HeapRegionRemSet::n_coarsenings());
 }
 
-void HRInto_G1RemSet::prepare_for_verify() {
+void G1RemSet::prepare_for_verify() {
   if (G1HRRSFlushLogBuffersOnVerify &&
       (VerifyBeforeGC || VerifyAfterGC)
       &&  !_g1->full_collection()) {
@@ -27,107 +27,18 @@
 
 class G1CollectedHeap;
 class CardTableModRefBarrierSet;
-class HRInto_G1RemSet;
 class ConcurrentG1Refine;
 
+// A G1RemSet in which each heap region has a rem set that records the
+// external heap references into it.  Uses a mod ref bs to track updates,
+// so that they can be used to update the individual region remsets.
+
 class G1RemSet: public CHeapObj {
 protected:
   G1CollectedHeap* _g1;
   unsigned _conc_refine_cards;
   size_t n_workers();
 
-public:
-  G1RemSet(G1CollectedHeap* g1) :
-    _g1(g1), _conc_refine_cards(0)
-  {}
-
-  // Invoke "blk->do_oop" on all pointers into the CS in object in regions
-  // outside the CS (having invoked "blk->set_region" to set the "from"
-  // region correctly beforehand.) The "worker_i" param is for the
-  // parallel case where the number of the worker thread calling this
-  // function can be helpful in partitioning the work to be done. It
-  // should be the same as the "i" passed to the calling thread's
-  // work(i) function. In the sequential case this param will be ingored.
-  virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
-                                           int worker_i) = 0;
-
-  // Prepare for and cleanup after an oops_into_collection_set_do
-  // call.  Must call each of these once before and after (in sequential
-  // code) any threads call oops into collection set do. (This offers an
-  // opportunity to sequential setup and teardown of structures needed by a
-  // parallel iteration over the CS's RS.)
-  virtual void prepare_for_oops_into_collection_set_do() = 0;
-  virtual void cleanup_after_oops_into_collection_set_do() = 0;
-
-  // If "this" is of the given subtype, return "this", else "NULL".
-  virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }
-
-  // Record, if necessary, the fact that *p (where "p" is in region "from",
-  // and is, a fortiori, required to be non-NULL) has changed to its new value.
-  virtual void write_ref(HeapRegion* from, oop* p) = 0;
-  virtual void write_ref(HeapRegion* from, narrowOop* p) = 0;
-  virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0;
-  virtual void par_write_ref(HeapRegion* from, narrowOop* p, int tid) = 0;
-
-  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
-  // or card, respectively, such that a region or card with a corresponding
-  // 0 bit contains no part of any live object.  Eliminates any remembered
-  // set entries that correspond to dead heap ranges.
-  virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0;
-  // Like the above, but assumes is called in parallel: "worker_num" is the
-  // parallel thread id of the current thread, and "claim_val" is the
-  // value that should be used to claim heap regions.
-  virtual void scrub_par(BitMap* region_bm, BitMap* card_bm,
-                         int worker_num, int claim_val) = 0;
-
-  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
-  // join and leave around parts that must be atomic wrt GC.  (NULL means
-  // being done at a safepoint.)
-  // With some implementations of this routine, when check_for_refs_into_cset
-  // is true, a true result may be returned if the given card contains oops
-  // that have references into the current collection set.
-  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
-                                       bool check_for_refs_into_cset) {
-    return false;
-  }
-
-  // Print any relevant summary info.
-  virtual void print_summary_info() {}
-
-  // Prepare remebered set for verification.
-  virtual void prepare_for_verify() {};
-};
-
-
-// The simplest possible G1RemSet: iterates over all objects in non-CS
-// regions, searching for pointers into the CS.
-class StupidG1RemSet: public G1RemSet {
-public:
-  StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {}
-
-  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
-                                   int worker_i);
-
-  void prepare_for_oops_into_collection_set_do() {}
-  void cleanup_after_oops_into_collection_set_do() {}
-
-  // Nothing is necessary in the version below.
-  void write_ref(HeapRegion* from, oop* p) {}
-  void write_ref(HeapRegion* from, narrowOop* p) {}
-  void par_write_ref(HeapRegion* from, oop* p, int tid) {}
-  void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {}
-
-  void scrub(BitMap* region_bm, BitMap* card_bm) {}
-  void scrub_par(BitMap* region_bm, BitMap* card_bm,
-                 int worker_num, int claim_val) {}
-
-};
-
-// A G1RemSet in which each heap region has a rem set that records the
-// external heap references into it.  Uses a mod ref bs to track updates,
-// so that they can be used to update the individual region remsets.
-
-class HRInto_G1RemSet: public G1RemSet {
 protected:
   enum SomePrivateConstants {
     UpdateRStoMergeSync  = 0,
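With StupidG1RemSet and the as_HRInto_G1RemSet() downcast gone, the abstract base and its single real implementation collapse into one concrete G1RemSet. The shape of that consolidation in miniature (everything except the G1RemSet name is illustrative):

    // Before (sketch): an abstract base whose only purpose was one subclass.
    //
    //   class RemSetBase { virtual void scan() = 0;
    //                      virtual Impl* as_Impl() { return nullptr; } };
    //   class Impl : public RemSetBase { void scan(); Impl* as_Impl() { return this; } };
    //   ...
    //   heap->rem_set()->as_Impl()->cleanup();  // downcast at every rich call site
    //
    // After (sketch): one concrete class, no virtual dispatch, no downcasts.

    class G1RemSetSketch {
    public:
      void scan()    { /* scan remembered sets */ }
      void cleanup() { /* cleanupHRRS() and friends */ }
    };

    int main() {
      G1RemSetSketch rs;
      rs.scan();
      rs.cleanup();  // g1_rem_set()->cleanupHRRS() now calls directly
      return 0;
    }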
@@ -175,27 +86,31 @@ public:
   // scanned.
   void cleanupHRRS();
 
-  HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
-  ~HRInto_G1RemSet();
+  G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
+  ~G1RemSet();
 
+  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
+  // outside the CS (having invoked "blk->set_region" to set the "from"
+  // region correctly beforehand.) The "worker_i" param is for the
+  // parallel case where the number of the worker thread calling this
+  // function can be helpful in partitioning the work to be done. It
+  // should be the same as the "i" passed to the calling thread's
+  // work(i) function. In the sequential case this param will be ingored.
   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                    int worker_i);
 
+  // Prepare for and cleanup after an oops_into_collection_set_do
+  // call.  Must call each of these once before and after (in sequential
+  // code) any threads call oops_into_collection_set_do.  (This offers an
+  // opportunity to sequential setup and teardown of structures needed by a
+  // parallel iteration over the CS's RS.)
   void prepare_for_oops_into_collection_set_do();
   void cleanup_after_oops_into_collection_set_do();
-  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
-  template <class T> void scanNewRefsRS_work(OopsInHeapRegionClosure* oc, int worker_i);
-  void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i) {
-    if (UseCompressedOops) {
-      scanNewRefsRS_work<narrowOop>(oc, worker_i);
-    } else {
-      scanNewRefsRS_work<oop>(oc, worker_i);
-    }
-  }
-  void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
-  HeapRegion* calculateStartRegion(int i);
 
-  HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; }
+  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
+  void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
+
+  HeapRegion* calculateStartRegion(int i);
 
   CardTableModRefBS* ct_bs() { return _ct_bs; }
   size_t cardsScanned() { return _total_cards_scanned; }
@@ -219,17 +134,31 @@ public:
 
   bool self_forwarded(oop obj);
 
+  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
+  // or card, respectively, such that a region or card with a corresponding
+  // 0 bit contains no part of any live object.  Eliminates any remembered
+  // set entries that correspond to dead heap ranges.
   void scrub(BitMap* region_bm, BitMap* card_bm);
+
+  // Like the above, but assumes is called in parallel: "worker_num" is the
+  // parallel thread id of the current thread, and "claim_val" is the
+  // value that should be used to claim heap regions.
   void scrub_par(BitMap* region_bm, BitMap* card_bm,
                  int worker_num, int claim_val);
 
-  // If check_for_refs_into_cset is true then a true result is returned
-  // if the card contains oops that have references into the current
-  // collection set.
+  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
+  // join and leave around parts that must be atomic wrt GC.  (NULL means
+  // being done at a safepoint.)
+  // If check_for_refs_into_cset is true, a true result is returned
+  // if the given card contains oops that have references into the
+  // current collection set.
   virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                        bool check_for_refs_into_cset);
 
+  // Print any relevant summary info.
   virtual void print_summary_info();
 
+  // Prepare remembered set for verification.
   virtual void prepare_for_verify();
 };
 
@@ -250,13 +179,13 @@ public:
 
 class UpdateRSOopClosure: public OopClosure {
   HeapRegion* _from;
-  HRInto_G1RemSet* _rs;
+  G1RemSet* _rs;
   int _worker_i;
 
   template <class T> void do_oop_work(T* p);
 
 public:
-  UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
+  UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
     _from(NULL), _rs(rs), _worker_i(worker_i) {
     guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
   }
@@ -30,16 +30,18 @@ inline size_t G1RemSet::n_workers() {
   }
 }
 
-template <class T> inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
+template <class T>
+inline void G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
   par_write_ref_nv(from, p, 0);
 }
 
-inline bool HRInto_G1RemSet::self_forwarded(oop obj) {
+inline bool G1RemSet::self_forwarded(oop obj) {
   bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
   return result;
 }
 
-template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
+template <class T>
+inline void G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
   oop obj = oopDesc::load_decode_heap_oop(p);
 #ifdef ASSERT
   // can't do because of races
@@ -77,7 +79,7 @@ template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* fro
       // Deferred updates to the CSet are either discarded (in the normal case),
       // or processed (if an evacuation failure occurs) at the end
       // of the collection.
-      // See HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do().
+      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
     } else {
 #if G1_REM_SET_LOGGING
       gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
@@ -91,12 +93,14 @@ template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* fro
   }
 }
 
-template <class T> inline void UpdateRSOopClosure::do_oop_work(T* p) {
+template <class T>
+inline void UpdateRSOopClosure::do_oop_work(T* p) {
   assert(_from != NULL, "from region must be non-NULL");
   _rs->par_write_ref(_from, p, _worker_i);
 }
 
-template <class T> inline void UpdateRSetImmediate::do_oop_work(T* p) {
+template <class T>
+inline void UpdateRSetImmediate::do_oop_work(T* p) {
   assert(_from->is_in_reserved(p), "paranoia");
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
@@ -40,9 +40,6 @@
   develop(intx, G1PolicyVerbose, 0,                                         \
           "The verbosity level on G1 policy decisions")                     \
                                                                             \
-  develop(bool, G1UseHRIntoRS, true,                                        \
-          "Determines whether the 'advanced' HR Into rem set is used.")     \
-                                                                            \
   develop(intx, G1MarkingVerboseLevel, 0,                                   \
           "Level (0-4) of verboseness of the marking code")                 \
                                                                             \
@@ -377,10 +377,26 @@ void HeapRegion::calc_gc_efficiency() {
 }
 // </PREDICTION>
 
-void HeapRegion::set_startsHumongous() {
+void HeapRegion::set_startsHumongous(HeapWord* new_end) {
+  assert(end() == _orig_end,
+         "Should be normal before the humongous object allocation");
+  assert(top() == bottom(), "should be empty");
+
   _humongous_type = StartsHumongous;
   _humongous_start_region = this;
-  assert(end() == _orig_end, "Should be normal before alloc.");
+
+  set_end(new_end);
+  _offsets.set_for_starts_humongous(new_end);
+}
+
+void HeapRegion::set_continuesHumongous(HeapRegion* start) {
+  assert(end() == _orig_end,
+         "Should be normal before the humongous object allocation");
+  assert(top() == bottom(), "should be empty");
+  assert(start->startsHumongous(), "pre-condition");
+
+  _humongous_type = ContinuesHumongous;
+  _humongous_start_region = start;
 }
 
 bool HeapRegion::claimHeapRegion(jint claimValue) {
@ -500,23 +516,6 @@ CompactibleSpace* HeapRegion::next_compaction_space() const {
|
|||||||
return blk.result();
|
return blk.result();
|
||||||
}
|
}
|
||||||
|
|
||||||
void HeapRegion::set_continuesHumongous(HeapRegion* start) {
|
|
||||||
// The order is important here.
|
|
||||||
start->add_continuingHumongousRegion(this);
|
|
||||||
_humongous_type = ContinuesHumongous;
|
|
||||||
_humongous_start_region = start;
|
|
||||||
}
|
|
||||||
|
|
||||||
void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
|
|
||||||
// Must join the blocks of the current H region seq with the block of the
|
|
||||||
// added region.
|
|
||||||
offsets()->join_blocks(bottom(), cont->bottom());
|
|
||||||
arrayOop obj = (arrayOop)(bottom());
|
|
||||||
obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
|
|
||||||
set_end(cont->end());
|
|
||||||
set_top(cont->end());
|
|
||||||
}
|
|
||||||
|
|
||||||
void HeapRegion::save_marks() {
|
void HeapRegion::save_marks() {
|
||||||
set_saved_mark();
|
set_saved_mark();
|
||||||
}
|
}
|
||||||
@ -395,14 +395,12 @@ class HeapRegion: public G1OffsetTableContigSpace {
 
   // Causes the current region to represent a humongous object spanning "n"
   // regions.
-  virtual void set_startsHumongous();
+  void set_startsHumongous(HeapWord* new_end);
 
   // The regions that continue a humongous sequence should be added using
   // this method, in increasing address order.
   void set_continuesHumongous(HeapRegion* start);
 
-  void add_continuingHumongousRegion(HeapRegion* cont);
-
   // If the region has a remembered set, return a pointer to it.
   HeapRegionRemSet* rem_set() const {
     return _rem_set;
@ -733,13 +731,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
                           FilterOutOfRegionClosure* cl,
                           bool filter_young);
 
-  // The region "mr" is entirely in "this", and starts and ends at block
-  // boundaries. The caller declares that all the contained blocks are
-  // coalesced into one.
-  void declare_filled_region_to_BOT(MemRegion mr) {
-    _offsets.single_block(mr.start(), mr.end());
-  }
-
   // A version of block start that is guaranteed to find *some* block
   // boundary at or before "p", but does not do object iteration, and may
   // therefore be used safely when the heap is unparseable.
@ -1159,9 +1159,7 @@ HeapRegionRemSetIterator() :
   _hrrs(NULL),
   _g1h(G1CollectedHeap::heap()),
   _bosa(NULL),
-  _sparse_iter(size_t(G1CollectedHeap::heap()->reserved_region().start())
-               >> CardTableModRefBS::card_shift)
-{}
+  _sparse_iter() { }
 
 void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
   _hrrs = hrrs;
@ -91,34 +91,118 @@ HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
     }
     if (sumSizes >= word_size) {
       _alloc_search_start = cur;
-      // Mark the allocated regions as allocated.
+
+      // We need to initialize the region(s) we just discovered. This is
+      // a bit tricky given that it can happen concurrently with
+      // refinement threads refining cards on these regions and
+      // potentially wanting to refine the BOT as they are scanning
+      // those cards (this can happen shortly after a cleanup; see CR
+      // 6991377). So we have to set up the region(s) carefully and in
+      // a specific order.
+
+      // Currently, allocs_are_zero_filled() returns false. The zero
+      // filling infrastructure will be going away soon (see CR 6977804).
+      // So no need to do anything else here.
       bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
+      assert(!zf, "not supported");
+
+      // This will be the "starts humongous" region.
       HeapRegion* first_hr = _regions.at(first);
-      for (int i = first; i < cur; i++) {
-        HeapRegion* hr = _regions.at(i);
-        if (zf)
-          hr->ensure_zero_filled();
+      {
+        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
+        first_hr->set_zero_fill_allocated();
+      }
+      // The header of the new object will be placed at the bottom of
+      // the first region.
+      HeapWord* new_obj = first_hr->bottom();
+      // This will be the new end of the first region in the series that
+      // should also match the end of the last region in the series.
+      // (Note: sumSizes = "region size" x "number of regions we found").
+      HeapWord* new_end = new_obj + sumSizes;
+      // This will be the new top of the first region that will reflect
+      // this allocation.
+      HeapWord* new_top = new_obj + word_size;
+
+      // First, we need to zero the header of the space that we will be
+      // allocating. When we update top further down, some refinement
+      // threads might try to scan the region. By zeroing the header we
+      // ensure that any thread that will try to scan the region will
+      // come across the zero klass word and bail out.
+      //
+      // NOTE: It would not have been correct to have used
+      // CollectedHeap::fill_with_object() and make the space look like
+      // an int array. The thread that is doing the allocation will
+      // later update the object header to a potentially different array
+      // type and, for a very short period of time, the klass and length
+      // fields will be inconsistent. This could cause a refinement
+      // thread to calculate the object size incorrectly.
+      Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
+
+      // We will set up the first region as "starts humongous". This
+      // will also update the BOT covering all the regions to reflect
+      // that there is a single object that starts at the bottom of the
+      // first region.
+      first_hr->set_startsHumongous(new_end);
+
+      // Then, if there are any, we will set up the "continues
+      // humongous" regions.
+      HeapRegion* hr = NULL;
+      for (int i = first + 1; i < cur; ++i) {
+        hr = _regions.at(i);
         {
           MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
           hr->set_zero_fill_allocated();
         }
-        size_t sz = hr->capacity() / HeapWordSize;
-        HeapWord* tmp = hr->allocate(sz);
-        assert(tmp != NULL, "Humongous allocation failure");
-        MemRegion mr = MemRegion(tmp, sz);
-        CollectedHeap::fill_with_object(mr);
-        hr->declare_filled_region_to_BOT(mr);
-        if (i == first) {
-          first_hr->set_startsHumongous();
+        hr->set_continuesHumongous(first_hr);
+      }
+      // If we have "continues humongous" regions (hr != NULL), then the
+      // end of the last one should match new_end.
+      assert(hr == NULL || hr->end() == new_end, "sanity");
+
+      // Up to this point no concurrent thread would have been able to
+      // do any scanning on any region in this series. All the top
+      // fields still point to bottom, so the intersection between
+      // [bottom,top] and [card_start,card_end] will be empty. Before we
+      // update the top fields, we'll do a storestore to make sure that
+      // no thread sees the update to top before the zeroing of the
+      // object header and the BOT initialization.
+      OrderAccess::storestore();
+
+      // Now that the BOT and the object header have been initialized,
+      // we can update top of the "starts humongous" region.
+      assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
+             "new_top should be in this region");
+      first_hr->set_top(new_top);
+
+      // Now, we will update the top fields of the "continues humongous"
+      // regions. The reason we need to do this is that, otherwise,
+      // these regions would look empty and this will confuse parts of
+      // G1. For example, the code that looks for a consecutive number
+      // of empty regions will consider them empty and try to
+      // re-allocate them. We can extend is_empty() to also include
+      // !continuesHumongous(), but it is easier to just update the top
+      // fields here.
+      hr = NULL;
+      for (int i = first + 1; i < cur; ++i) {
+        hr = _regions.at(i);
+        if ((i + 1) == cur) {
+          // last continues humongous region
+          assert(hr->bottom() < new_top && new_top <= hr->end(),
+                 "new_top should fall on this region");
+          hr->set_top(new_top);
         } else {
-          assert(i > first, "sanity");
-          hr->set_continuesHumongous(first_hr);
+          // not last one
+          assert(new_top > hr->end(), "new_top should be above this region");
+          hr->set_top(hr->end());
         }
       }
-      HeapWord* first_hr_bot = first_hr->bottom();
-      HeapWord* obj_end = first_hr_bot + word_size;
-      first_hr->set_top(obj_end);
-      return first_hr_bot;
+      // If we have continues humongous regions (hr != NULL), then the
+      // end of the last one should match new_end and its top should
+      // match new_top.
+      assert(hr == NULL ||
+             (hr->end() == new_end && hr->top() == new_top), "sanity");
+
+      return new_obj;
     } else {
       // If we started from the beginning, we want to know why we can't alloc.
       return NULL;
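
The long comment block in the hunk above describes a publication protocol for the humongous series: zero the object's klass word first, fence, and only then update top, so a concurrent refinement thread either sees an empty [bottom, top) intersection or a zero klass word and bails out. Below is a compressed sketch of that ordering, with std::atomic_thread_fence standing in for OrderAccess::storestore(); the types and names are illustrative, not HotSpot's.

    #include <atomic>
    #include <cstdint>
    #include <cstring>

    struct Region {
      uint64_t* bottom;
      std::atomic<uint64_t*> top;  // scanned by concurrent refinement threads
    };

    // Publish a new allocation of word_size words in 'r'. The release fence
    // guarantees no thread observes the new top before the zeroed header.
    void publish_alloc(Region& r, std::size_t header_words, std::size_t word_size) {
      uint64_t* new_obj = r.bottom;
      // 1. Zero the header so a concurrent scanner sees a zero klass word
      //    and bails out rather than parsing a half-built object.
      std::memset(new_obj, 0, header_words * sizeof(uint64_t));
      // 2. storestore: order the zeroing before the top update.
      std::atomic_thread_fence(std::memory_order_release);
      // 3. Only now make the allocation visible.
      r.top.store(new_obj + word_size, std::memory_order_relaxed);
    }
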
@ -308,7 +308,7 @@ void RSHashTable::add_entry(SparsePRTEntry* e) {
   assert(e2->num_valid_cards() > 0, "Postcondition.");
 }
 
-CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
+CardIdx_t RSHashTableIter::find_first_card_in_list() {
   CardIdx_t res;
   while (_bl_ind != RSHashTable::NullEntry) {
     res = _rsht->entry(_bl_ind)->card(0);
@ -322,14 +322,11 @@ CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
   return SparsePRTEntry::NullEntry;
 }
 
-size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
-  return
-    _heap_bot_card_ind
-    + (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion)
-    + ci;
+size_t RSHashTableIter::compute_card_ind(CardIdx_t ci) {
+  return (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion) + ci;
 }
 
-bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) {
+bool RSHashTableIter::has_next(size_t& card_index) {
   _card_ind++;
   CardIdx_t ci;
   if (_card_ind < SparsePRTEntry::cards_num() &&
@ -169,7 +169,6 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
   int   _bl_ind;   // [-1, 0.._rsht->_capacity)
   short _card_ind; // [0..SparsePRTEntry::cards_num())
   RSHashTable* _rsht;
-  size_t _heap_bot_card_ind;
 
   // If the bucket list pointed to by _bl_ind contains a card, sets
   // _bl_ind to the index of that entry, and returns the card.
@ -183,13 +182,11 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
   size_t compute_card_ind(CardIdx_t ci);
 
 public:
-  RSHashTableIter(size_t heap_bot_card_ind) :
+  RSHashTableIter() :
     _tbl_ind(RSHashTable::NullEntry),
     _bl_ind(RSHashTable::NullEntry),
     _card_ind((SparsePRTEntry::cards_num() - 1)),
-    _rsht(NULL),
-    _heap_bot_card_ind(heap_bot_card_ind)
-  {}
+    _rsht(NULL) {}
 
   void init(RSHashTable* rsht) {
     _rsht = rsht;
@ -280,20 +277,11 @@ public:
   bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
     return _next->contains_card(region_id, card_index);
   }
 
-#if 0
-  void verify_is_cleared();
-  void print();
-#endif
 };
 
 
-class SparsePRTIter: public /* RSHashTable:: */RSHashTableIter {
+class SparsePRTIter: public RSHashTableIter {
 public:
-  SparsePRTIter(size_t heap_bot_card_ind) :
-    /* RSHashTable:: */RSHashTableIter(heap_bot_card_ind)
-  {}
-
   void init(const SparsePRT* sprt) {
     RSHashTableIter::init(sprt->cur());
   }
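
With _heap_bot_card_ind gone, compute_card_ind() above now returns a region-table-relative index, r_ind * CardsPerRegion + ci, instead of biasing every result by the card index of the heap's bottom. A worked sketch of the new arithmetic (the CardsPerRegion value here is illustrative, not HotSpot's):

    #include <cstddef>
    #include <iostream>

    int main() {
      const std::size_t CardsPerRegion = 2048; // illustrative, not HotSpot's value
      std::size_t r_ind = 3;  // region index recorded in the sparse entry
      std::size_t ci    = 17; // card offset within that region

      // New form: the index is relative to the start of the region table.
      std::size_t card_ind = r_ind * CardsPerRegion + ci; // 3*2048 + 17 = 6161
      std::cout << card_ind << '\n';
      return 0;
    }
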
@ -310,10 +310,16 @@ heapRegionSeq.hpp heapRegion.hpp
 
 heapRegionSeq.inline.hpp                heapRegionSeq.hpp
 
+instanceKlass.cpp                       g1RemSet.inline.hpp
+
+instanceRefKlass.cpp                    g1RemSet.inline.hpp
+
 klass.hpp                               g1OopClosures.hpp
 
 memoryService.cpp                       g1MemoryPool.hpp
 
+objArrayKlass.cpp                       g1RemSet.inline.hpp
+
 ptrQueue.cpp                            allocation.hpp
 ptrQueue.cpp                            allocation.inline.hpp
 ptrQueue.cpp                            mutex.hpp
@ -846,7 +846,7 @@ void ParNewGeneration::collect(bool full,
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one
     return;
   }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
@ -935,8 +935,6 @@ void ParNewGeneration::collect(bool full,
 
     assert(to()->is_empty(), "to space should be empty now");
   } else {
-    assert(HandlePromotionFailure,
-           "Should only be here if promotion failure handling is on");
     assert(_promo_failure_scan_stack.is_empty(), "post condition");
     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
@ -947,7 +945,7 @@ void ParNewGeneration::collect(bool full,
     // All the spaces are in play for mark-sweep.
     swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
     from()->set_next_compaction_space(to());
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed();
     // Inform the next generation that a promotion failure occurred.
     _next_gen->promotion_failure_occurred();
 
@ -1092,11 +1090,6 @@ oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
                                        old, m, sz);
 
   if (new_obj == NULL) {
-    if (!HandlePromotionFailure) {
-      // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
-      // is incorrectly set. In any case, its seriously wrong to be here!
-      vm_exit_out_of_memory(sz*wordSize, "promotion");
-    }
     // promotion failed, forward to self
     _promotion_failed = true;
     new_obj = old;
@ -1206,12 +1199,6 @@ oop ParNewGeneration::copy_to_survivor_space_with_undo(
                                        old, m, sz);
 
   if (new_obj == NULL) {
-    if (!HandlePromotionFailure) {
-      // A failed promotion likely means the MaxLiveObjectEvacuationRatio
-      // flag is incorrectly set. In any case, its seriously wrong to be
-      // here!
-      vm_exit_out_of_memory(sz*wordSize, "promotion");
-    }
     // promotion failed, forward to self
     forward_ptr = old->forward_to_atomic(old);
     new_obj = old;
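
Both copy-to-survivor paths above now fall directly through to the promotion-failure handling: the object is atomically forwarded to itself and dealt with after the scavenge, where the removed code used to exit the VM when HandlePromotionFailure was off. A toy model of the forward-to-self CAS, loosely patterned on the forward_to_atomic call in the hunk; Obj and the function name are hypothetical:

    #include <atomic>
    #include <cassert>

    struct Obj {
      std::atomic<Obj*> forward{nullptr};
    };

    // Returns the winning forwardee: 'obj' itself if we installed the
    // self-forward (promotion failed here), or the pointer some other
    // thread installed first.
    Obj* forward_to_self_atomic(Obj* obj) {
      Obj* expected = nullptr;
      if (obj->forward.compare_exchange_strong(expected, obj)) {
        return obj;        // we recorded the failure
      }
      return expected;     // lost the race; use the existing forwardee
    }

    int main() {
      Obj o;
      assert(forward_to_self_atomic(&o) == &o);
      assert(forward_to_self_atomic(&o) == &o); // idempotent after failure
      return 0;
    }
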
@ -301,6 +301,7 @@ c1_MacroAssembler.hpp assembler.hpp
 c1_MacroAssembler.hpp                   assembler_<arch>.inline.hpp
 
 c1_MacroAssembler_<arch>.cpp            arrayOop.hpp
+c1_MacroAssembler_<arch>.cpp            basicLock.hpp
 c1_MacroAssembler_<arch>.cpp            biasedLocking.hpp
 c1_MacroAssembler_<arch>.cpp            c1_MacroAssembler.hpp
 c1_MacroAssembler_<arch>.cpp            c1_Runtime1.hpp
@ -309,7 +310,6 @@ c1_MacroAssembler_<arch>.cpp interpreter.hpp
 c1_MacroAssembler_<arch>.cpp            markOop.hpp
 c1_MacroAssembler_<arch>.cpp            os.hpp
 c1_MacroAssembler_<arch>.cpp            stubRoutines.hpp
-c1_MacroAssembler_<arch>.cpp            synchronizer.hpp
 c1_MacroAssembler_<arch>.cpp            systemDictionary.hpp
 
 c1_MacroAssembler_<arch>.hpp            generate_platform_dependent_include
@ -300,10 +300,17 @@ barrierSet.hpp oopsHierarchy.hpp
 barrierSet.inline.hpp                   barrierSet.hpp
 barrierSet.inline.hpp                   cardTableModRefBS.hpp
 
+basicLock.cpp                           basicLock.hpp
+basicLock.cpp                           synchronizer.hpp
+
+basicLock.hpp                           handles.hpp
+basicLock.hpp                           markOop.hpp
+basicLock.hpp                           top.hpp
+
+biasedLocking.cpp                       basicLock.hpp
 biasedLocking.cpp                       biasedLocking.hpp
 biasedLocking.cpp                       klass.inline.hpp
 biasedLocking.cpp                       markOop.hpp
-biasedLocking.cpp                       synchronizer.hpp
 biasedLocking.cpp                       task.hpp
 biasedLocking.cpp                       vframe.hpp
 biasedLocking.cpp                       vmThread.hpp
@ -404,13 +411,13 @@ bytecodeInterpreter_<arch>.cpp vframeArray.hpp
 bytecodeInterpreterWithChecks.cpp       bytecodeInterpreter.cpp
 
 bytecodeInterpreter.hpp                 allocation.hpp
+bytecodeInterpreter.hpp                 basicLock.hpp
 bytecodeInterpreter.hpp                 bytes_<arch>.hpp
 bytecodeInterpreter.hpp                 frame.hpp
 bytecodeInterpreter.hpp                 globalDefinitions.hpp
 bytecodeInterpreter.hpp                 globals.hpp
 bytecodeInterpreter.hpp                 methodDataOop.hpp
 bytecodeInterpreter.hpp                 methodOop.hpp
-bytecodeInterpreter.hpp                 synchronizer.hpp
 
 bytecodeInterpreter.inline.hpp          bytecodeInterpreter.hpp
 bytecodeInterpreter.inline.hpp          stubRoutines.hpp
@ -1667,10 +1674,10 @@ frame.cpp stubRoutines.hpp
 frame.cpp                               universe.inline.hpp
 
 frame.hpp                               assembler.hpp
+frame.hpp                               basicLock.hpp
 frame.hpp                               methodOop.hpp
 frame.hpp                               monitorChunk.hpp
 frame.hpp                               registerMap.hpp
-frame.hpp                               synchronizer.hpp
 frame.hpp                               top.hpp
 
 frame.inline.hpp                        bytecodeInterpreter.hpp
@ -2120,6 +2127,7 @@ interfaceSupport.hpp vmThread.hpp
 interfaceSupport_<os_family>.hpp        generate_platform_dependent_include
 
 interp_masm_<arch_model>.cpp            arrayOop.hpp
+interp_masm_<arch_model>.cpp            basicLock.hpp
 interp_masm_<arch_model>.cpp            biasedLocking.hpp
 interp_masm_<arch_model>.cpp            interp_masm_<arch_model>.hpp
 interp_masm_<arch_model>.cpp            interpreterRuntime.hpp
@ -2131,7 +2139,6 @@ interp_masm_<arch_model>.cpp markOop.hpp
 interp_masm_<arch_model>.cpp            methodDataOop.hpp
 interp_masm_<arch_model>.cpp            methodOop.hpp
 interp_masm_<arch_model>.cpp            sharedRuntime.hpp
-interp_masm_<arch_model>.cpp            synchronizer.hpp
 interp_masm_<arch_model>.cpp            thread_<os_family>.inline.hpp
 
 interp_masm_<arch_model>.hpp            assembler_<arch>.inline.hpp
@ -3094,25 +3101,26 @@ objArrayOop.cpp oop.inline.hpp
 
 objArrayOop.hpp                         arrayOop.hpp
 
+objectMonitor.cpp                       dtrace.hpp
+objectMonitor.cpp                       handles.inline.hpp
+objectMonitor.cpp                       interfaceSupport.hpp
+objectMonitor.cpp                       markOop.hpp
+objectMonitor.cpp                       mutexLocker.hpp
+objectMonitor.cpp                       objectMonitor.hpp
+objectMonitor.cpp                       objectMonitor.inline.hpp
+objectMonitor.cpp                       oop.inline.hpp
+objectMonitor.cpp                       osThread.hpp
+objectMonitor.cpp                       os_<os_family>.inline.hpp
+objectMonitor.cpp                       preserveException.hpp
+objectMonitor.cpp                       resourceArea.hpp
+objectMonitor.cpp                       stubRoutines.hpp
+objectMonitor.cpp                       thread.hpp
+objectMonitor.cpp                       thread_<os_family>.inline.hpp
+objectMonitor.cpp                       threadService.hpp
+objectMonitor.cpp                       vmSymbols.hpp
+
 objectMonitor.hpp                       os.hpp
+objectMonitor.hpp                       perfData.hpp
 
-objectMonitor_<os_family>.cpp           dtrace.hpp
-objectMonitor_<os_family>.cpp           interfaceSupport.hpp
-objectMonitor_<os_family>.cpp           objectMonitor.hpp
-objectMonitor_<os_family>.cpp           objectMonitor.inline.hpp
-objectMonitor_<os_family>.cpp           oop.inline.hpp
-objectMonitor_<os_family>.cpp           osThread.hpp
-objectMonitor_<os_family>.cpp           os_<os_family>.inline.hpp
-objectMonitor_<os_family>.cpp           threadService.hpp
-objectMonitor_<os_family>.cpp           thread_<os_family>.inline.hpp
-objectMonitor_<os_family>.cpp           vmSymbols.hpp
-
-objectMonitor_<os_family>.hpp           generate_platform_dependent_include
-objectMonitor_<os_family>.hpp           os_<os_family>.inline.hpp
-objectMonitor_<os_family>.hpp           thread_<os_family>.inline.hpp
-objectMonitor_<os_family>.hpp           top.hpp
-
-objectMonitor_<os_family>.inline.hpp    generate_platform_dependent_include
-
 oop.cpp                                 copy.hpp
 oop.cpp                                 handles.inline.hpp
@ -3231,6 +3239,7 @@ orderAccess.hpp allocation.hpp
 orderAccess.hpp                         os.hpp
 
 orderAccess_<os_arch>.inline.hpp        orderAccess.hpp
+orderAccess_<os_arch>.inline.hpp        vm_version_<arch>.hpp
 
 os.cpp                                  allocation.inline.hpp
 os.cpp                                  arguments.hpp
@ -3328,7 +3337,6 @@ os_<os_family>.cpp mutex_<os_family>.inline.hpp
 os_<os_family>.cpp                      nativeInst_<arch>.hpp
 os_<os_family>.cpp                      no_precompiled_headers
 os_<os_family>.cpp                      objectMonitor.hpp
-os_<os_family>.cpp                      objectMonitor.inline.hpp
 os_<os_family>.cpp                      oop.inline.hpp
 os_<os_family>.cpp                      osThread.hpp
 os_<os_family>.cpp                      os_share_<os_family>.hpp
@ -3388,6 +3396,12 @@ ostream.cpp xmlstream.hpp
 ostream.hpp                             allocation.hpp
 ostream.hpp                             timer.hpp
 
+// include thread.hpp to prevent cyclic includes
+park.cpp                                thread.hpp
+
+park.hpp                                debug.hpp
+park.hpp                                globalDefinitions.hpp
+
 pcDesc.cpp                              debugInfoRec.hpp
 pcDesc.cpp                              nmethod.hpp
 pcDesc.cpp                              pcDesc.hpp
@ -3600,7 +3614,9 @@ relocInfo_<arch>.hpp generate_platform_dependent_include
 relocator.cpp                           bytecodes.hpp
 relocator.cpp                           handles.inline.hpp
 relocator.cpp                           oop.inline.hpp
+relocator.cpp                           oopFactory.hpp
 relocator.cpp                           relocator.hpp
+relocator.cpp                           stackMapTableFormat.hpp
 relocator.cpp                           universe.inline.hpp
 
 relocator.hpp                           bytecodes.hpp
@ -3907,6 +3923,8 @@ stackMapTable.hpp globalDefinitions.hpp
 stackMapTable.hpp                       methodOop.hpp
 stackMapTable.hpp                       stackMapFrame.hpp
 
+stackMapTableFormat.hpp                 verificationType.hpp
+
 stackValue.cpp                          debugInfo.hpp
 stackValue.cpp                          frame.inline.hpp
 stackValue.cpp                          handles.inline.hpp
@ -4062,10 +4080,10 @@ synchronizer.cpp preserveException.hpp
 synchronizer.cpp                        resourceArea.hpp
 synchronizer.cpp                        stubRoutines.hpp
 synchronizer.cpp                        synchronizer.hpp
-synchronizer.cpp                        threadService.hpp
 synchronizer.cpp                        thread_<os_family>.inline.hpp
 synchronizer.cpp                        vmSymbols.hpp
 
+synchronizer.hpp                        basicLock.hpp
 synchronizer.hpp                        handles.hpp
 synchronizer.hpp                        markOop.hpp
 synchronizer.hpp                        perfData.hpp
@ -4237,7 +4255,6 @@ thread.cpp memprofiler.hpp
 thread.cpp                              mutexLocker.hpp
 thread.cpp                              objArrayOop.hpp
 thread.cpp                              objectMonitor.hpp
-thread.cpp                              objectMonitor.inline.hpp
 thread.cpp                              oop.inline.hpp
 thread.cpp                              oopFactory.hpp
 thread.cpp                              osThread.hpp
@ -4275,6 +4292,7 @@ thread.hpp mutexLocker.hpp
 thread.hpp                              oop.hpp
 thread.hpp                              os.hpp
 thread.hpp                              osThread.hpp
+thread.hpp                              park.hpp
 thread.hpp                              safepoint.hpp
 thread.hpp                              stubRoutines.hpp
 thread.hpp                              threadLocalAllocBuffer.hpp
@ -4586,6 +4604,7 @@ vframeArray.hpp frame.inline.hpp
 vframeArray.hpp                         growableArray.hpp
 vframeArray.hpp                         monitorChunk.hpp
 
+vframe_hp.cpp                           basicLock.hpp
 vframe_hp.cpp                           codeCache.hpp
 vframe_hp.cpp                           debugInfoRec.hpp
 vframe_hp.cpp                           handles.inline.hpp
@ -4599,7 +4618,6 @@ vframe_hp.cpp pcDesc.hpp
 vframe_hp.cpp                           scopeDesc.hpp
 vframe_hp.cpp                           signature.hpp
 vframe_hp.cpp                           stubRoutines.hpp
-vframe_hp.cpp                           synchronizer.hpp
 vframe_hp.cpp                           vframeArray.hpp
 vframe_hp.cpp                           vframe_hp.hpp
 
@ -4751,6 +4769,7 @@ workgroup.cpp os.hpp
 workgroup.cpp                           workgroup.hpp
 
+workgroup.hpp                           taskqueue.hpp
 
 workgroup.hpp                           thread_<os_family>.inline.hpp
 
 xmlstream.cpp                           allocation.hpp
@ -184,6 +184,13 @@ jvmtiImpl.hpp stackValueCollection.hpp
 jvmtiImpl.hpp                           systemDictionary.hpp
 jvmtiImpl.hpp                           vm_operations.hpp
 
+jvmtiRawMonitor.cpp                     interfaceSupport.hpp
+jvmtiRawMonitor.cpp                     jvmtiRawMonitor.hpp
+jvmtiRawMonitor.cpp                     thread.hpp
+
+jvmtiRawMonitor.hpp                     growableArray.hpp
+jvmtiRawMonitor.hpp                     objectMonitor.hpp
+
 jvmtiTagMap.cpp                         biasedLocking.hpp
 jvmtiTagMap.cpp                         javaCalls.hpp
 jvmtiTagMap.cpp                         jniHandles.hpp
@ -35,6 +35,7 @@ jvmtiClassFileReconstituter.hpp jvmtiEnv.hpp
 // jvmtiCodeBlobEvents is jck optional, please put deps in includeDB_features
 
 jvmtiEnter.cpp                          jvmtiEnter.hpp
+jvmtiEnter.cpp                          jvmtiRawMonitor.hpp
 jvmtiEnter.cpp                          jvmtiUtil.hpp
 
 jvmtiEnter.hpp                          interfaceSupport.hpp
@ -44,6 +45,7 @@ jvmtiEnter.hpp resourceArea.hpp
 jvmtiEnter.hpp                          systemDictionary.hpp
 
 jvmtiEnterTrace.cpp                     jvmtiEnter.hpp
+jvmtiEnterTrace.cpp                     jvmtiRawMonitor.hpp
 jvmtiEnterTrace.cpp                     jvmtiUtil.hpp
 
 jvmtiEnv.cpp                            arguments.hpp
@ -66,11 +68,11 @@ jvmtiEnv.cpp jvmtiExtensions.hpp
 jvmtiEnv.cpp                            jvmtiGetLoadedClasses.hpp
 jvmtiEnv.cpp                            jvmtiImpl.hpp
 jvmtiEnv.cpp                            jvmtiManageCapabilities.hpp
+jvmtiEnv.cpp                            jvmtiRawMonitor.hpp
 jvmtiEnv.cpp                            jvmtiRedefineClasses.hpp
 jvmtiEnv.cpp                            jvmtiTagMap.hpp
 jvmtiEnv.cpp                            jvmtiThreadState.inline.hpp
 jvmtiEnv.cpp                            jvmtiUtil.hpp
-jvmtiEnv.cpp                            objectMonitor.inline.hpp
 jvmtiEnv.cpp                            osThread.hpp
 jvmtiEnv.cpp                            preserveException.hpp
 jvmtiEnv.cpp                            reflectionUtils.hpp
@ -178,11 +180,13 @@ jvmtiExport.cpp jvmtiEventController.inline.hpp
 jvmtiExport.cpp                         jvmtiExport.hpp
 jvmtiExport.cpp                         jvmtiImpl.hpp
 jvmtiExport.cpp                         jvmtiManageCapabilities.hpp
+jvmtiExport.cpp                         jvmtiRawMonitor.hpp
 jvmtiExport.cpp                         jvmtiTagMap.hpp
 jvmtiExport.cpp                         jvmtiThreadState.inline.hpp
 jvmtiExport.cpp                         nmethod.hpp
 jvmtiExport.cpp                         objArrayKlass.hpp
 jvmtiExport.cpp                         objArrayOop.hpp
+jvmtiExport.cpp                         objectMonitor.hpp
 jvmtiExport.cpp                         objectMonitor.inline.hpp
 jvmtiExport.cpp                         pcDesc.hpp
 jvmtiExport.cpp                         resourceArea.hpp
@ -210,6 +214,8 @@ jvmtiManageCapabilities.cpp jvmtiManageCapabilities.hpp
 jvmtiManageCapabilities.hpp             allocation.hpp
 jvmtiManageCapabilities.hpp             jvmti.h
 
+// jvmtiRawMonitor is jck optional, please put deps in includeDB_features
+
 jvmtiRedefineClasses.cpp                bitMap.inline.hpp
 jvmtiRedefineClasses.cpp                codeCache.hpp
 jvmtiRedefineClasses.cpp                deoptimization.hpp
@ -659,9 +659,6 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
     }
     return result;  // could be null if we are out of space
   } else if (!gch->incremental_collection_will_fail()) {
-    // The gc_prologues have not executed yet. The value
-    // for incremental_collection_will_fail() is the remanent
-    // of the last collection.
     // Do an incremental collection.
     gch->do_collection(false            /* full */,
                        false            /* clear_all_soft_refs */,
@ -739,9 +736,8 @@ bool GenCollectorPolicy::should_try_older_generation_allocation(
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
   return (word_size > heap_word_size(gen0_capacity))
-      || (GC_locker::is_active_and_needs_gc())
-      || (  gch->last_incremental_collection_failed()
-         && gch->incremental_collection_will_fail());
+      || GC_locker::is_active_and_needs_gc()
+      || gch->incremental_collection_failed();
 }
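
After this change the predicate reads as three simple disjuncts: go straight to the older generation if the request cannot fit in the young generation at all, if the GC locker is holding a pending collection, or if the previous incremental collection failed. A free-function sketch of that logic, with stand-in inputs replacing the GenCollectedHeap and GC_locker queries:

    #include <cstddef>

    // All inputs are stand-ins for heap/locker queries, not HotSpot APIs.
    bool should_try_older_generation_allocation(std::size_t word_size,
                                                std::size_t gen0_capacity_words,
                                                bool gc_locker_needs_gc,
                                                bool incremental_collection_failed) {
      return word_size > gen0_capacity_words    // too big for the young gen
          || gc_locker_needs_gc                 // a GC is pending on the locker
          || incremental_collection_failed;     // last minor collection failed
    }
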
@ -510,7 +510,7 @@ void DefNewGeneration::collect(bool full,
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
     return;
   }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
@ -596,9 +596,8 @@ void DefNewGeneration::collect(bool full,
     if (PrintGC && !PrintGCDetails) {
       gch->print_heap_change(gch_prev_used);
     }
+    assert(!gch->incremental_collection_failed(), "Should be clear");
   } else {
-    assert(HandlePromotionFailure,
-           "Should not be here unless promotion failure handling is on");
     assert(_promo_failure_scan_stack.is_empty(), "post condition");
     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
@ -613,7 +612,7 @@ void DefNewGeneration::collect(bool full,
     // and from-space.
     swap_spaces();  // For uniformity wrt ParNewGeneration.
     from()->set_next_compaction_space(to());
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed();
 
     // Inform the next generation that a promotion failure occurred.
     _next_gen->promotion_failure_occurred();
@ -700,12 +699,6 @@ oop DefNewGeneration::copy_to_survivor_space(oop old) {
   if (obj == NULL) {
     obj = _next_gen->promote(old, s);
     if (obj == NULL) {
-      if (!HandlePromotionFailure) {
-        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
-        // is incorrectly set. In any case, its seriously wrong to be here!
-        vm_exit_out_of_memory(s*wordSize, "promotion");
-      }
-
       handle_promotion_failure(old);
       return old;
     }
@ -812,47 +805,43 @@ bool DefNewGeneration::collection_attempt_is_safe() {
     assert(_next_gen != NULL,
            "This must be the youngest gen, and not the only gen");
   }
-
-  // Decide if there's enough room for a full promotion
-  // When using extremely large edens, we effectively lose a
-  // large amount of old space. Use the "MaxLiveObjectEvacuationRatio"
-  // flag to reduce the minimum evacuation space requirements. If
-  // there is not enough space to evacuate eden during a scavenge,
-  // the VM will immediately exit with an out of memory error.
-  // This flag has not been tested
-  // with collectors other than simple mark & sweep.
-  //
-  // Note that with the addition of promotion failure handling, the
-  // VM will not immediately exit but will undo the young generation
-  // collection. The parameter is left here for compatibility.
-  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;
-
-  // worst_case_evacuation is based on "used()". For the case where this
-  // method is called after a collection, this is still appropriate because
-  // the case that needs to be detected is one in which a full collection
-  // has been done and has overflowed into the young generation. In that
-  // case a minor collection will fail (the overflow of the full collection
-  // means there is no space in the old generation for any promotion).
-  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);
-
-  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
-                                              HandlePromotionFailure);
+  return _next_gen->promotion_attempt_is_safe(used());
 }
 
 void DefNewGeneration::gc_epilogue(bool full) {
+  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
+
+  assert(!GC_locker::is_active(), "We should not be executing here");
   // Check if the heap is approaching full after a collection has
   // been done. Generally the young generation is empty at
   // a minimum at the end of a collection. If it is not, then
   // the heap is approaching full.
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  clear_should_allocate_from_space();
-  if (collection_attempt_is_safe()) {
-    gch->clear_incremental_collection_will_fail();
-  } else {
-    gch->set_incremental_collection_will_fail();
-    if (full) { // we seem to be running out of space
-      set_should_allocate_from_space();
+  if (full) {
+    DEBUG_ONLY(seen_incremental_collection_failed = false;)
+    if (!collection_attempt_is_safe()) {
+      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
+      set_should_allocate_from_space(); // we seem to be running out of space
+    } else {
+      gch->clear_incremental_collection_failed(); // We just did a full collection
+      clear_should_allocate_from_space(); // if set
     }
+  } else {
+#ifdef ASSERT
+    // It is possible that incremental_collection_failed() == true
+    // here, because an attempted scavenge did not succeed. The policy
+    // is normally expected to cause a full collection which should
+    // clear that condition, so we should not be here twice in a row
+    // with incremental_collection_failed() == true without having done
+    // a full collection in between.
+    if (!seen_incremental_collection_failed &&
+        gch->incremental_collection_failed()) {
+      seen_incremental_collection_failed = true;
+    } else if (seen_incremental_collection_failed) {
+      assert(!gch->incremental_collection_failed(), "Twice in a row");
+      seen_incremental_collection_failed = false;
+    }
+#endif // ASSERT
   }
 
   if (ZapUnusedHeapArea) {
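
The ASSERT block added to gc_epilogue() above encodes a temporal property: incremental_collection_failed() may be observed once on a non-full epilogue, but observing it on two consecutive non-full epilogues means the expected intervening full collection never happened. A stripped-down model of that debug-only check (names are stand-ins for the heap flag and the static latch):

    #include <cassert>

    // 'failed' models gch->incremental_collection_failed(); 'seen' models the
    // static latch carried across epilogues in the hunk above.
    void check_not_twice_in_a_row(bool failed, bool& seen) {
      if (!seen && failed) {
        seen = true;                          // first sighting: remember it
      } else if (seen) {
        assert(!failed && "Twice in a row");  // a full GC should have cleared it
        seen = false;
      }
    }

    int main() {
      bool seen = false;
      check_not_twice_in_a_row(true,  seen);  // a scavenge failed once
      check_not_twice_in_a_row(false, seen);  // cleared by the full collection
      return 0;
    }
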
@ -82,12 +82,6 @@ protected:
   Stack<oop>     _objs_with_preserved_marks;
   Stack<markOop> _preserved_marks_of_objs;
 
-  // Returns true if the collection can be safely attempted.
-  // If this method returns false, a collection is not
-  // guaranteed to fail but the system may not be able
-  // to recover from the failure.
-  bool collection_attempt_is_safe();
-
   // Promotion failure handling
   OopClosure *_promo_failure_scan_stack_closure;
   void set_promo_failure_scan_stack_closure(OopClosure *scan_stack_closure) {
@ -304,6 +298,14 @@ protected:
 
   // GC support
   virtual void compute_new_size();
 
+  // Returns true if the collection is likely to be safely
+  // completed. Even if this method returns true, a collection
+  // may not be guaranteed to succeed, and the system should be
+  // able to safely unwind and recover from that failure, albeit
+  // at some additional cost. Override superclass's implementation.
+  virtual bool collection_attempt_is_safe();
+
   virtual void collect(bool   full,
                        bool   clear_all_soft_refs,
                        size_t size,
@ -142,8 +142,7 @@ jint GenCollectedHeap::initialize() {
   }
   _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());
 
-  clear_incremental_collection_will_fail();
-  clear_last_incremental_collection_failed();
+  clear_incremental_collection_failed();
 
 #ifndef SERIALGC
   // If we are running CMS, create the collector responsible
@ -1347,17 +1346,6 @@ class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 };
 
 void GenCollectedHeap::gc_epilogue(bool full) {
-  // Remember if a partial collection of the heap failed, and
-  // we did a complete collection.
-  if (full && incremental_collection_will_fail()) {
-    set_last_incremental_collection_failed();
-  } else {
-    clear_last_incremental_collection_failed();
-  }
-  // Clear the flag, if set; the generation gc_epilogues will set the
-  // flag again if the condition persists despite the collection.
-  clear_incremental_collection_will_fail();
-
 #ifdef COMPILER2
   assert(DerivedPointerTable::is_empty(), "derived pointer present");
   size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
@ -62,11 +62,10 @@ public:
   // The generational collector policy.
   GenCollectorPolicy* _gen_policy;
 
-  // If a generation would bail out of an incremental collection,
-  // it sets this flag. If the flag is set, satisfy_failed_allocation
-  // will attempt allocating in all generations before doing a full GC.
-  bool _incremental_collection_will_fail;
-  bool _last_incremental_collection_failed;
+  // Indicates that the most recent previous incremental collection failed.
+  // The flag is cleared when an action is taken that might clear the
+  // condition that caused that incremental collection to fail.
+  bool _incremental_collection_failed;
 
   // In support of ExplicitGCInvokesConcurrent functionality
   unsigned int _full_collections_completed;
@ -469,26 +468,26 @@ public:
   // call to "save_marks".
   bool no_allocs_since_save_marks(int level);
 
-  // If a generation bails out of an incremental collection,
-  // it sets this flag.
+  // Returns true if an incremental collection is likely to fail.
   bool incremental_collection_will_fail() {
-    return _incremental_collection_will_fail;
-  }
-  void set_incremental_collection_will_fail() {
-    _incremental_collection_will_fail = true;
-  }
-  void clear_incremental_collection_will_fail() {
-    _incremental_collection_will_fail = false;
+    // Assumes a 2-generation system; the first disjunct remembers if an
+    // incremental collection failed, even when we thought (second disjunct)
+    // that it would not.
+    assert(heap()->collector_policy()->is_two_generation_policy(),
+           "the following definition may not be suitable for an n(>2)-generation system");
+    return incremental_collection_failed() || !get_gen(0)->collection_attempt_is_safe();
   }
 
-  bool last_incremental_collection_failed() const {
-    return _last_incremental_collection_failed;
+  // If a generation bails out of an incremental collection,
+  // it sets this flag.
+  bool incremental_collection_failed() const {
+    return _incremental_collection_failed;
   }
-  void set_last_incremental_collection_failed() {
-    _last_incremental_collection_failed = true;
+  void set_incremental_collection_failed() {
+    _incremental_collection_failed = true;
   }
-  void clear_last_incremental_collection_failed() {
-    _last_incremental_collection_failed = false;
+  void clear_incremental_collection_failed() {
+    _incremental_collection_failed = false;
   }
 
   // Promotion of obj into gen failed. Try to promote obj to higher non-perm
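
With this hunk, incremental_collection_will_fail() is no longer a stored flag but a derived, two-generation predicate: either the last incremental collection actually failed, or the young generation itself predicts that a new attempt is unsafe. A sketch of that derivation, with stand-in types replacing the heap and generation classes:

    // Stand-ins for the heap flag and the young generation's own estimate.
    struct YoungGen {
      bool safe; // result the real collection_attempt_is_safe() would compute
      bool collection_attempt_is_safe() const { return safe; }
    };

    // Mirrors the derived predicate: remembered failure OR predicted failure.
    bool incremental_collection_will_fail(bool incremental_collection_failed,
                                          const YoungGen& gen0) {
      return incremental_collection_failed || !gen0.collection_attempt_is_safe();
    }
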
@ -165,15 +165,16 @@ size_t Generation::max_contiguous_available() const {
   return max;
 }
 
-bool Generation::promotion_attempt_is_safe(size_t promotion_in_bytes,
-                                           bool not_used) const {
+bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_contiguous_available();
+  bool   res = (available >= max_promotion_in_bytes);
   if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("Generation::promotion_attempt_is_safe"
-                           " contiguous_available: " SIZE_FORMAT
-                           " promotion_in_bytes: " SIZE_FORMAT,
-                           max_contiguous_available(), promotion_in_bytes);
+    gclog_or_tty->print_cr(
+      "Generation: promo attempt is%s safe: available(" SIZE_FORMAT ") %s max_promo(" SIZE_FORMAT ")",
+      res ? "" : " not", available, res ? ">=" : "<",
+      max_promotion_in_bytes);
   }
-  return max_contiguous_available() >= promotion_in_bytes;
+  return res;
 }
 
 // Ignores "ref" and calls allocate().
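
The base-class policy above is now a single comparison: a promotion attempt is considered safe when the largest contiguous free block covers the worst-case promotion. A sketch with illustrative numbers:

    #include <cstddef>
    #include <iostream>

    // Mirrors Generation::promotion_attempt_is_safe(): one comparison.
    bool promotion_attempt_is_safe(std::size_t available,
                                   std::size_t max_promotion_in_bytes) {
      return available >= max_promotion_in_bytes;
    }

    int main() {
      // Illustrative: 16 MB contiguous free vs. a 12 MB worst-case promotion.
      std::cout << std::boolalpha
                << promotion_attempt_is_safe(16u << 20, 12u << 20) << '\n'; // true
      return 0;
    }
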
@ -173,15 +173,11 @@ class Generation: public CHeapObj {
   // The largest number of contiguous free bytes in this or any higher generation.
   virtual size_t max_contiguous_available() const;
 
-  // Returns true if promotions of the specified amount can
-  // be attempted safely (without a vm failure).
+  // Returns true if promotions of the specified amount are
+  // likely to succeed without a promotion failure.
   // Promotion of the full amount is not guaranteed but
-  // can be attempted.
-  // younger_handles_promotion_failure
-  // is true if the younger generation handles a promotion
-  // failure.
-  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
-                                         bool younger_handles_promotion_failure) const;
+  // might be attempted in the worst case.
+  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;
 
   // For a non-young generation, this interface can be used to inform a
   // generation that a promotion attempt into that generation failed.
@ -358,6 +354,16 @@ class Generation: public CHeapObj {
     return (full || should_allocate(word_size, is_tlab));
   }
 
+  // Returns true if the collection is likely to be safely
+  // completed. Even if this method returns true, a collection
+  // may not be guaranteed to succeed, and the system should be
+  // able to safely unwind and recover from that failure, albeit
+  // at some additional cost.
+  virtual bool collection_attempt_is_safe() {
+    guarantee(false, "Are you sure you want to call this method?");
+    return true;
+  }
+
   // Perform a garbage collection.
   // If full is true attempt a full garbage collection of this generation.
   // Otherwise, attempting to (at least) free enough space to support an
|
@@ -419,29 +419,16 @@ void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
 void TenuredGeneration::verify_alloc_buffers_clean() {}
 #endif // SERIALGC
 
-bool TenuredGeneration::promotion_attempt_is_safe(
-    size_t max_promotion_in_bytes,
-    bool younger_handles_promotion_failure) const {
-
-  bool result = max_contiguous_available() >= max_promotion_in_bytes;
-
-  if (younger_handles_promotion_failure && !result) {
-    result = max_contiguous_available() >=
-      (size_t) gc_stats()->avg_promoted()->padded_average();
-    if (PrintGC && Verbose && result) {
-      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
-        " contiguous_available: " SIZE_FORMAT
-        " avg_promoted: " SIZE_FORMAT,
-        max_contiguous_available(),
-        gc_stats()->avg_promoted()->padded_average());
-    }
-  } else {
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
-        " contiguous_available: " SIZE_FORMAT
-        " promotion_in_bytes: " SIZE_FORMAT,
-        max_contiguous_available(), max_promotion_in_bytes);
-    }
-  }
-  return result;
+bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_contiguous_available();
+  size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
+  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
+  if (PrintGC && Verbose) {
+    gclog_or_tty->print_cr(
+      "Tenured: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
+      "max_promo("SIZE_FORMAT")",
+      res? "":" not", available, res? ">=":"<",
+      av_promo, max_promotion_in_bytes);
+  }
+  return res;
 }
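Stripped of the VM types and logging, the new rule is a pure function of three sizes: a promotion attempt is considered safe when contiguous free space covers either the padded average of recent promotions or the worst-case promotion volume. A minimal standalone C++ sketch of that rule (the inputs are hypothetical stand-ins for max_contiguous_available() and the gc_stats() average, not HotSpot calls):

    #include <cstddef>
    #include <cstdio>

    // Sketch of the new TenuredGeneration rule: safe if free contiguous space
    // covers either the typical (padded-average) promotion or the worst case.
    bool promotion_attempt_is_safe(size_t available, size_t av_promo, size_t max_promo) {
      return (available >= av_promo) || (available >= max_promo);
    }

    int main() {
      // Hypothetical numbers: 8 MB free, 2 MB typical promotion, 16 MB worst case.
      size_t available = 8u << 20, av_promo = 2u << 20, max_promo = 16u << 20;
      std::printf("promo attempt is%s safe\n",
                  promotion_attempt_is_safe(available, av_promo, max_promo) ? "" : " not");
      return 0;
    }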
@@ -101,8 +101,7 @@ class TenuredGeneration: public OneContigSpaceCardGeneration {
 
   virtual void update_gc_stats(int level, bool full);
 
-  virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes,
-    bool younger_handles_promotion_failure) const;
+  virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;
 
   void verify_alloc_buffers_clean();
 };
@@ -247,6 +247,10 @@ class methodOopDesc : public oopDesc {
     return constMethod()->stackmap_data();
   }
 
+  void set_stackmap_data(typeArrayOop sd) {
+    constMethod()->set_stackmap_data(sd);
+  }
+
   // exception handler table
   typeArrayOop exception_table() const
                        { return constMethod()->exception_table(); }
@@ -25,26 +25,6 @@
 # include "incls/_precompiled.incl"
 # include "incls/_jvmtiImpl.cpp.incl"
 
-GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP) GrowableArray<JvmtiRawMonitor*>(1,true);
-
-void JvmtiPendingMonitors::transition_raw_monitors() {
-  assert((Threads::number_of_threads()==1),
-         "Java thread has not created yet or more than one java thread \
-is running. Raw monitor transition will not work");
-  JavaThread *current_java_thread = JavaThread::current();
-  assert(current_java_thread->thread_state() == _thread_in_vm, "Must be in vm");
-  {
-    ThreadBlockInVM __tbivm(current_java_thread);
-    for(int i=0; i< count(); i++) {
-      JvmtiRawMonitor *rmonitor = monitors()->at(i);
-      int r = rmonitor->raw_enter(current_java_thread);
-      assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked");
-    }
-  }
-  // pending monitors are converted to real monitor so delete them all.
-  dispose();
-}
-
 //
 // class JvmtiAgentThread
 //
@@ -216,57 +196,6 @@ void GrowableCache::gc_epilogue() {
   }
 }
 
-
-//
-// class JvmtiRawMonitor
-//
-
-JvmtiRawMonitor::JvmtiRawMonitor(const char *name) {
-#ifdef ASSERT
-  _name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1), name);
-#else
-  _name = NULL;
-#endif
-  _magic = JVMTI_RM_MAGIC;
-}
-
-JvmtiRawMonitor::~JvmtiRawMonitor() {
-#ifdef ASSERT
-  FreeHeap(_name);
-#endif
-  _magic = 0;
-}
-
-
-bool
-JvmtiRawMonitor::is_valid() {
-  int value = 0;
-
-  // This object might not be a JvmtiRawMonitor so we can't assume
-  // the _magic field is properly aligned. Get the value in a safe
-  // way and then check against JVMTI_RM_MAGIC.
-
-  switch (sizeof(_magic)) {
-  case 2:
-    value = Bytes::get_native_u2((address)&_magic);
-    break;
-
-  case 4:
-    value = Bytes::get_native_u4((address)&_magic);
-    break;
-
-  case 8:
-    value = Bytes::get_native_u8((address)&_magic);
-    break;
-
-  default:
-    guarantee(false, "_magic field is an unexpected size");
-  }
-
-  return value == JVMTI_RM_MAGIC;
-}
-
-
 //
 // class JvmtiBreakpoint
 //
@@ -26,7 +26,6 @@
 // Forward Declarations
 //
 
-class JvmtiRawMonitor;
 class JvmtiBreakpoint;
 class JvmtiBreakpoints;
 
@@ -327,76 +326,6 @@ bool JvmtiCurrentBreakpoints::is_breakpoint(address bcp) {
   return false;
 }
 
-
-///////////////////////////////////////////////////////////////
-//
-// class JvmtiRawMonitor
-//
-// Used by JVMTI methods: All RawMonitor methods (CreateRawMonitor, EnterRawMonitor, etc.)
-//
-// Wrapper for ObjectMonitor class that saves the Monitor's name
-//
-
-class JvmtiRawMonitor : public ObjectMonitor {
-private:
-  int           _magic;
-  char *        _name;
-  // JVMTI_RM_MAGIC is set in constructor and unset in destructor.
-  enum { JVMTI_RM_MAGIC = (int)(('T' << 24) | ('I' << 16) | ('R' << 8) | 'M') };
-
-public:
-  JvmtiRawMonitor(const char *name);
-  ~JvmtiRawMonitor();
-  int         magic()      { return _magic; }
-  const char *get_name()   { return _name; }
-  bool        is_valid();
-};
-
-// Onload pending raw monitors
-// Class is used to cache onload or onstart monitor enter
-// which will transition into real monitor when
-// VM is fully initialized.
-class JvmtiPendingMonitors : public AllStatic {
-
-private:
-  static GrowableArray<JvmtiRawMonitor*> *_monitors; // Cache raw monitor enter
-
-  inline static GrowableArray<JvmtiRawMonitor*>* monitors() { return _monitors; }
-
-  static void dispose() {
-    delete monitors();
-  }
-
-public:
-  static void enter(JvmtiRawMonitor *monitor) {
-    monitors()->append(monitor);
-  }
-
-  static int count() {
-    return monitors()->length();
-  }
-
-  static void destroy(JvmtiRawMonitor *monitor) {
-    while (monitors()->contains(monitor)) {
-      monitors()->remove(monitor);
-    }
-  }
-
-  // Return false if monitor is not found in the list.
-  static bool exit(JvmtiRawMonitor *monitor) {
-    if (monitors()->contains(monitor)) {
-      monitors()->remove(monitor);
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  static void transition_raw_monitors();
-};
-
-
-
 ///////////////////////////////////////////////////////////////
 // The get/set local operations must only be done by the VM thread
 // because the interpreter version needs to access oop maps, which can
hotspot/src/share/vm/prims/jvmtiRawMonitor.cpp (new file, 420 lines)
@@ -0,0 +1,420 @@
+/*
+ * Copyright (c) 2003, 2007, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_jvmtiRawMonitor.cpp.incl"
+
+GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP) GrowableArray<JvmtiRawMonitor*>(1,true);
+
+void JvmtiPendingMonitors::transition_raw_monitors() {
+  assert((Threads::number_of_threads()==1),
+         "Java thread has not created yet or more than one java thread \
+is running. Raw monitor transition will not work");
+  JavaThread *current_java_thread = JavaThread::current();
+  assert(current_java_thread->thread_state() == _thread_in_vm, "Must be in vm");
+  {
+    ThreadBlockInVM __tbivm(current_java_thread);
+    for(int i=0; i< count(); i++) {
+      JvmtiRawMonitor *rmonitor = monitors()->at(i);
+      int r = rmonitor->raw_enter(current_java_thread);
+      assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked");
+    }
+  }
+  // pending monitors are converted to real monitor so delete them all.
+  dispose();
+}
+
+//
+// class JvmtiRawMonitor
+//
+
+JvmtiRawMonitor::JvmtiRawMonitor(const char *name) {
+#ifdef ASSERT
+  _name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1), name);
+#else
+  _name = NULL;
+#endif
+  _magic = JVMTI_RM_MAGIC;
+}
+
+JvmtiRawMonitor::~JvmtiRawMonitor() {
+#ifdef ASSERT
+  FreeHeap(_name);
+#endif
+  _magic = 0;
+}
+
+
+bool
+JvmtiRawMonitor::is_valid() {
+  int value = 0;
+
+  // This object might not be a JvmtiRawMonitor so we can't assume
+  // the _magic field is properly aligned. Get the value in a safe
+  // way and then check against JVMTI_RM_MAGIC.
+
+  switch (sizeof(_magic)) {
+  case 2:
+    value = Bytes::get_native_u2((address)&_magic);
+    break;
+
+  case 4:
+    value = Bytes::get_native_u4((address)&_magic);
+    break;
+
+  case 8:
+    value = Bytes::get_native_u8((address)&_magic);
+    break;
+
+  default:
+    guarantee(false, "_magic field is an unexpected size");
+  }
+
+  return value == JVMTI_RM_MAGIC;
+}
+
+// -------------------------------------------------------------------------
+// The raw monitor subsystem is entirely distinct from normal
+// java-synchronization or jni-synchronization.  raw monitors are not
+// associated with objects.  They can be implemented in any manner
+// that makes sense.  The original implementors decided to piggy-back
+// the raw-monitor implementation on the existing Java objectMonitor mechanism.
+// This flaw needs to be fixed.  We should reimplement raw monitors as sui-generis.
+// Specifically, we should not implement raw monitors via java monitors.
+// Time permitting, we should disentangle and deconvolve the two implementations
+// and move the resulting raw monitor implementation over to the JVMTI directories.
+// Ideally, the raw monitor implementation would be built on top of
+// park-unpark and nothing else.
+//
+// raw monitors are used mainly by JVMTI
+// The raw monitor implementation borrows the ObjectMonitor structure,
+// but the operators are degenerate and extremely simple.
+//
+// Mixed use of a single objectMonitor instance -- as both a raw monitor
+// and a normal java monitor -- is not permissible.
+//
+// Note that we use the single RawMonitor_lock to protect queue operations for
+// _all_ raw monitors.  This is a scalability impediment, but since raw monitor usage
+// is deprecated and rare, this is not of concern.  The RawMonitor_lock can not
+// be held indefinitely.  The critical sections must be short and bounded.
+//
+// -------------------------------------------------------------------------
+
|
||||||
|
for (;;) {
|
||||||
|
if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
|
||||||
|
return OS_OK ;
|
||||||
|
}
|
||||||
|
|
||||||
|
ObjectWaiter Node (Self) ;
|
||||||
|
Self->_ParkEvent->reset() ; // strictly optional
|
||||||
|
Node.TState = ObjectWaiter::TS_ENTER ;
|
||||||
|
|
||||||
|
RawMonitor_lock->lock_without_safepoint_check() ;
|
||||||
|
Node._next = _EntryList ;
|
||||||
|
_EntryList = &Node ;
|
||||||
|
OrderAccess::fence() ;
|
||||||
|
if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
|
||||||
|
_EntryList = Node._next ;
|
||||||
|
RawMonitor_lock->unlock() ;
|
||||||
|
return OS_OK ;
|
||||||
|
}
|
||||||
|
RawMonitor_lock->unlock() ;
|
||||||
|
while (Node.TState == ObjectWaiter::TS_ENTER) {
|
||||||
|
Self->_ParkEvent->park() ;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int JvmtiRawMonitor::SimpleExit (Thread * Self) {
|
||||||
|
guarantee (_owner == Self, "invariant") ;
|
||||||
|
OrderAccess::release_store_ptr (&_owner, NULL) ;
|
||||||
|
OrderAccess::fence() ;
|
||||||
|
if (_EntryList == NULL) return OS_OK ;
|
||||||
|
ObjectWaiter * w ;
|
||||||
|
|
||||||
|
RawMonitor_lock->lock_without_safepoint_check() ;
|
||||||
|
w = _EntryList ;
|
||||||
|
if (w != NULL) {
|
||||||
|
_EntryList = w->_next ;
|
||||||
|
}
|
||||||
|
RawMonitor_lock->unlock() ;
|
||||||
|
if (w != NULL) {
|
||||||
|
guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ;
|
||||||
|
ParkEvent * ev = w->_event ;
|
||||||
|
w->TState = ObjectWaiter::TS_RUN ;
|
||||||
|
OrderAccess::fence() ;
|
||||||
|
ev->unpark() ;
|
||||||
|
}
|
||||||
|
return OS_OK ;
|
||||||
|
}
|
||||||
|
|
||||||
|
int JvmtiRawMonitor::SimpleWait (Thread * Self, jlong millis) {
|
||||||
|
guarantee (_owner == Self , "invariant") ;
|
||||||
|
guarantee (_recursions == 0, "invariant") ;
|
||||||
|
|
||||||
|
ObjectWaiter Node (Self) ;
|
||||||
|
Node._notified = 0 ;
|
||||||
|
Node.TState = ObjectWaiter::TS_WAIT ;
|
||||||
|
|
||||||
|
RawMonitor_lock->lock_without_safepoint_check() ;
|
||||||
|
Node._next = _WaitSet ;
|
||||||
|
_WaitSet = &Node ;
|
||||||
|
RawMonitor_lock->unlock() ;
|
||||||
|
|
||||||
|
SimpleExit (Self) ;
|
||||||
|
guarantee (_owner != Self, "invariant") ;
|
||||||
|
|
||||||
|
int ret = OS_OK ;
|
||||||
|
if (millis <= 0) {
|
||||||
|
Self->_ParkEvent->park();
|
||||||
|
} else {
|
||||||
|
ret = Self->_ParkEvent->park(millis);
|
||||||
|
}
|
||||||
|
|
||||||
|
// If thread still resides on the waitset then unlink it.
|
||||||
|
// Double-checked locking -- the usage is safe in this context
|
||||||
|
// as we TState is volatile and the lock-unlock operators are
|
||||||
|
// serializing (barrier-equivalent).
|
||||||
|
|
||||||
|
if (Node.TState == ObjectWaiter::TS_WAIT) {
|
||||||
|
RawMonitor_lock->lock_without_safepoint_check() ;
|
||||||
|
if (Node.TState == ObjectWaiter::TS_WAIT) {
|
||||||
|
// Simple O(n) unlink, but performance isn't critical here.
|
||||||
|
ObjectWaiter * p ;
|
||||||
|
ObjectWaiter * q = NULL ;
|
||||||
|
for (p = _WaitSet ; p != &Node; p = p->_next) {
|
||||||
|
q = p ;
|
||||||
|
}
|
||||||
|
guarantee (p == &Node, "invariant") ;
|
||||||
|
if (q == NULL) {
|
||||||
|
guarantee (p == _WaitSet, "invariant") ;
|
||||||
|
_WaitSet = p->_next ;
|
||||||
|
} else {
|
||||||
|
guarantee (p == q->_next, "invariant") ;
|
||||||
|
q->_next = p->_next ;
|
||||||
|
}
|
||||||
|
Node.TState = ObjectWaiter::TS_RUN ;
|
||||||
|
}
|
||||||
|
RawMonitor_lock->unlock() ;
|
||||||
|
}
|
||||||
|
|
||||||
|
guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
|
||||||
|
SimpleEnter (Self) ;
|
||||||
|
|
||||||
|
guarantee (_owner == Self, "invariant") ;
|
||||||
|
guarantee (_recursions == 0, "invariant") ;
|
||||||
|
return ret ;
|
||||||
|
}
|
||||||
|
|
||||||
|
int JvmtiRawMonitor::SimpleNotify (Thread * Self, bool All) {
|
||||||
|
guarantee (_owner == Self, "invariant") ;
|
||||||
|
if (_WaitSet == NULL) return OS_OK ;
|
||||||
|
|
||||||
|
// We have two options:
|
||||||
|
// A. Transfer the threads from the WaitSet to the EntryList
|
||||||
|
// B. Remove the thread from the WaitSet and unpark() it.
|
||||||
|
//
|
||||||
|
// We use (B), which is crude and results in lots of futile
|
||||||
|
// context switching. In particular (B) induces lots of contention.
|
||||||
|
|
||||||
|
ParkEvent * ev = NULL ; // consider using a small auto array ...
|
||||||
|
RawMonitor_lock->lock_without_safepoint_check() ;
|
||||||
|
for (;;) {
|
||||||
|
ObjectWaiter * w = _WaitSet ;
|
||||||
|
if (w == NULL) break ;
|
||||||
|
_WaitSet = w->_next ;
|
||||||
|
if (ev != NULL) { ev->unpark(); ev = NULL; }
|
||||||
|
ev = w->_event ;
|
||||||
|
OrderAccess::loadstore() ;
|
||||||
|
w->TState = ObjectWaiter::TS_RUN ;
|
||||||
|
OrderAccess::storeload();
|
||||||
|
if (!All) break ;
|
||||||
|
}
|
||||||
|
RawMonitor_lock->unlock() ;
|
||||||
|
if (ev != NULL) ev->unpark();
|
||||||
|
return OS_OK ;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Any JavaThread will enter here with state _thread_blocked
|
||||||
|
int JvmtiRawMonitor::raw_enter(TRAPS) {
|
||||||
|
TEVENT (raw_enter) ;
|
||||||
|
void * Contended ;
|
||||||
|
|
||||||
|
// don't enter raw monitor if thread is being externally suspended, it will
|
||||||
|
// surprise the suspender if a "suspended" thread can still enter monitor
|
||||||
|
JavaThread * jt = (JavaThread *)THREAD;
|
||||||
|
if (THREAD->is_Java_thread()) {
|
||||||
|
jt->SR_lock()->lock_without_safepoint_check();
|
||||||
|
while (jt->is_external_suspend()) {
|
||||||
|
jt->SR_lock()->unlock();
|
||||||
|
jt->java_suspend_self();
|
||||||
|
jt->SR_lock()->lock_without_safepoint_check();
|
||||||
|
}
|
||||||
|
// guarded by SR_lock to avoid racing with new external suspend requests.
|
||||||
|
Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
|
||||||
|
jt->SR_lock()->unlock();
|
||||||
|
} else {
|
||||||
|
Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Contended == THREAD) {
|
||||||
|
_recursions ++ ;
|
||||||
|
return OM_OK ;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Contended == NULL) {
|
||||||
|
guarantee (_owner == THREAD, "invariant") ;
|
||||||
|
guarantee (_recursions == 0, "invariant") ;
|
||||||
|
return OM_OK ;
|
||||||
|
}
|
||||||
|
|
||||||
|
THREAD->set_current_pending_monitor(this);
|
||||||
|
|
||||||
|
if (!THREAD->is_Java_thread()) {
|
||||||
|
// No other non-Java threads besides VM thread would acquire
|
||||||
|
// a raw monitor.
|
||||||
|
assert(THREAD->is_VM_thread(), "must be VM thread");
|
||||||
|
SimpleEnter (THREAD) ;
|
||||||
|
} else {
|
||||||
|
guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
|
||||||
|
for (;;) {
|
||||||
|
jt->set_suspend_equivalent();
|
||||||
|
// cleared by handle_special_suspend_equivalent_condition() or
|
||||||
|
// java_suspend_self()
|
||||||
|
SimpleEnter (THREAD) ;
|
||||||
|
|
||||||
|
// were we externally suspended while we were waiting?
|
||||||
|
if (!jt->handle_special_suspend_equivalent_condition()) break ;
|
||||||
|
|
||||||
|
// This thread was externally suspended
|
||||||
|
//
|
||||||
|
// This logic isn't needed for JVMTI raw monitors,
|
||||||
|
// but doesn't hurt just in case the suspend rules change. This
|
||||||
|
// logic is needed for the JvmtiRawMonitor.wait() reentry phase.
|
||||||
|
// We have reentered the contended monitor, but while we were
|
||||||
|
// waiting another thread suspended us. We don't want to reenter
|
||||||
|
// the monitor while suspended because that would surprise the
|
||||||
|
// thread that suspended us.
|
||||||
|
//
|
||||||
|
// Drop the lock -
|
||||||
|
SimpleExit (THREAD) ;
|
||||||
|
|
||||||
|
jt->java_suspend_self();
|
||||||
|
}
|
||||||
|
|
||||||
|
assert(_owner == THREAD, "Fatal error with monitor owner!");
|
||||||
|
assert(_recursions == 0, "Fatal error with monitor recursions!");
|
||||||
|
}
|
||||||
|
|
||||||
|
THREAD->set_current_pending_monitor(NULL);
|
||||||
|
guarantee (_recursions == 0, "invariant") ;
|
||||||
|
return OM_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Used mainly for JVMTI raw monitor implementation
|
||||||
|
// Also used for JvmtiRawMonitor::wait().
|
||||||
|
int JvmtiRawMonitor::raw_exit(TRAPS) {
|
||||||
|
TEVENT (raw_exit) ;
|
||||||
|
if (THREAD != _owner) {
|
||||||
|
return OM_ILLEGAL_MONITOR_STATE;
|
||||||
|
}
|
||||||
|
if (_recursions > 0) {
|
||||||
|
--_recursions ;
|
||||||
|
return OM_OK ;
|
||||||
|
}
|
||||||
|
|
||||||
|
void * List = _EntryList ;
|
||||||
|
SimpleExit (THREAD) ;
|
||||||
|
|
||||||
|
return OM_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Used for JVMTI raw monitor implementation.
|
||||||
|
// All JavaThreads will enter here with state _thread_blocked
|
||||||
|
|
||||||
|
int JvmtiRawMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
|
||||||
|
TEVENT (raw_wait) ;
|
||||||
|
if (THREAD != _owner) {
|
||||||
|
return OM_ILLEGAL_MONITOR_STATE;
|
||||||
|
}
|
||||||
|
|
||||||
|
// To avoid spurious wakeups we reset the parkevent -- This is strictly optional.
|
||||||
|
// The caller must be able to tolerate spurious returns from raw_wait().
|
||||||
|
THREAD->_ParkEvent->reset() ;
|
||||||
|
OrderAccess::fence() ;
|
||||||
|
|
||||||
|
// check interrupt event
|
||||||
|
if (interruptible && Thread::is_interrupted(THREAD, true)) {
|
||||||
|
return OM_INTERRUPTED;
|
||||||
|
}
|
||||||
|
|
||||||
|
intptr_t save = _recursions ;
|
||||||
|
_recursions = 0 ;
|
||||||
|
_waiters ++ ;
|
||||||
|
if (THREAD->is_Java_thread()) {
|
||||||
|
guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
|
||||||
|
((JavaThread *)THREAD)->set_suspend_equivalent();
|
||||||
|
}
|
||||||
|
int rv = SimpleWait (THREAD, millis) ;
|
||||||
|
_recursions = save ;
|
||||||
|
_waiters -- ;
|
||||||
|
|
||||||
|
guarantee (THREAD == _owner, "invariant") ;
|
||||||
|
if (THREAD->is_Java_thread()) {
|
||||||
|
JavaThread * jSelf = (JavaThread *) THREAD ;
|
||||||
|
for (;;) {
|
||||||
|
if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
|
||||||
|
SimpleExit (THREAD) ;
|
||||||
|
jSelf->java_suspend_self();
|
||||||
|
SimpleEnter (THREAD) ;
|
||||||
|
jSelf->set_suspend_equivalent() ;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
guarantee (THREAD == _owner, "invariant") ;
|
||||||
|
|
||||||
|
if (interruptible && Thread::is_interrupted(THREAD, true)) {
|
||||||
|
return OM_INTERRUPTED;
|
||||||
|
}
|
||||||
|
return OM_OK ;
|
||||||
|
}
|
||||||
|
|
||||||
|
int JvmtiRawMonitor::raw_notify(TRAPS) {
|
||||||
|
TEVENT (raw_notify) ;
|
||||||
|
if (THREAD != _owner) {
|
||||||
|
return OM_ILLEGAL_MONITOR_STATE;
|
||||||
|
}
|
||||||
|
SimpleNotify (THREAD, false) ;
|
||||||
|
return OM_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
int JvmtiRawMonitor::raw_notifyAll(TRAPS) {
|
||||||
|
TEVENT (raw_notifyAll) ;
|
||||||
|
if (THREAD != _owner) {
|
||||||
|
return OM_ILLEGAL_MONITOR_STATE;
|
||||||
|
}
|
||||||
|
SimpleNotify (THREAD, true) ;
|
||||||
|
return OM_OK;
|
||||||
|
}
|
||||||
|
|
hotspot/src/share/vm/prims/jvmtiRawMonitor.hpp (new file, 99 lines)
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+//
+// class JvmtiRawMonitor
+//
+// Used by JVMTI methods: All RawMonitor methods (CreateRawMonitor, EnterRawMonitor, etc.)
+//
+// Wrapper for ObjectMonitor class that saves the Monitor's name
+//
+
+class JvmtiRawMonitor : public ObjectMonitor {
+private:
+  int           _magic;
+  char *        _name;
+  // JVMTI_RM_MAGIC is set in constructor and unset in destructor.
+  enum { JVMTI_RM_MAGIC = (int)(('T' << 24) | ('I' << 16) | ('R' << 8) | 'M') };
+
+  int       SimpleEnter (Thread * Self) ;
+  int       SimpleExit  (Thread * Self) ;
+  int       SimpleWait  (Thread * Self, jlong millis) ;
+  int       SimpleNotify (Thread * Self, bool All) ;
+
+public:
+  JvmtiRawMonitor(const char *name);
+  ~JvmtiRawMonitor();
+  int       raw_enter(TRAPS);
+  int       raw_exit(TRAPS);
+  int       raw_wait(jlong millis, bool interruptable, TRAPS);
+  int       raw_notify(TRAPS);
+  int       raw_notifyAll(TRAPS);
+  int         magic()      { return _magic; }
+  const char *get_name()   { return _name; }
+  bool        is_valid();
+};
+
+// Onload pending raw monitors
+// Class is used to cache onload or onstart monitor enter
+// which will transition into real monitor when
+// VM is fully initialized.
+class JvmtiPendingMonitors : public AllStatic {
+
+private:
+  static GrowableArray<JvmtiRawMonitor*> *_monitors; // Cache raw monitor enter
+
+  inline static GrowableArray<JvmtiRawMonitor*>* monitors() { return _monitors; }
+
+  static void dispose() {
+    delete monitors();
+  }
+
+public:
+  static void enter(JvmtiRawMonitor *monitor) {
+    monitors()->append(monitor);
+  }
+
+  static int count() {
+    return monitors()->length();
+  }
+
+  static void destroy(JvmtiRawMonitor *monitor) {
+    while (monitors()->contains(monitor)) {
+      monitors()->remove(monitor);
+    }
+  }
+
+  // Return false if monitor is not found in the list.
+  static bool exit(JvmtiRawMonitor *monitor) {
+    if (monitors()->contains(monitor)) {
+      monitors()->remove(monitor);
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  static void transition_raw_monitors();
+};
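JvmtiPendingMonitors is a record-now, replay-later cache: raw-monitor enters that happen before the VM can really block (during OnLoad/OnStart) are logged, then replayed as real acquisitions once a Java thread exists. A standalone model of that pattern — std::vector in place of GrowableArray; DemoMonitor and its raw_enter are hypothetical names for illustration:

    #include <algorithm>
    #include <vector>

    // Model of JvmtiPendingMonitors: a static log of "enters" performed before
    // the real locking machinery exists, replayed once at startup.
    template <typename Monitor>
    class PendingMonitors {
      static std::vector<Monitor*>& monitors() {
        static std::vector<Monitor*> v;   // lazily constructed cache
        return v;
      }
     public:
      static void enter(Monitor* m) { monitors().push_back(m); }

      // Mirrors JvmtiPendingMonitors::exit(): false if m was never cached.
      static bool exit(Monitor* m) {
        auto& v = monitors();
        auto it = std::find(v.begin(), v.end(), m);
        if (it == v.end()) return false;
        v.erase(it);
        return true;
      }

      // Replay every cached enter against the now-working monitor, then drop the cache.
      static void transition() {
        for (Monitor* m : monitors()) m->raw_enter();
        monitors().clear();
      }
    };

    struct DemoMonitor { int enters = 0; void raw_enter() { ++enters; } };

    int main() {
      DemoMonitor m1, m2;
      PendingMonitors<DemoMonitor>::enter(&m1);
      PendingMonitors<DemoMonitor>::enter(&m2);
      PendingMonitors<DemoMonitor>::transition();   // replays both enters
      return (m1.enters == 1 && m2.enters == 1) ? 0 : 1;
    }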
@@ -119,11 +119,8 @@ void Arguments::init_system_properties() {
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.version", "1.0", false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.name",
                                                            "Java Virtual Machine Specification", false));
-  PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.vendor",
-        JDK_Version::is_gte_jdk17x_version() ? "Oracle Corporation" : "Sun Microsystems Inc.", false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.version", VM_Version::vm_release(), false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.name", VM_Version::vm_name(), false));
-  PropertyList_add(&_system_properties, new SystemProperty("java.vm.vendor", VM_Version::vm_vendor(), false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.info", VM_Version::vm_info_string(), true));
 
   // following are JVMTI agent writeable properties.
@@ -151,6 +148,14 @@ void Arguments::init_system_properties() {
   os::init_system_properties_values();
 }
 
+
+// Update/Initialize System properties after JDK version number is known
+void Arguments::init_version_specific_system_properties() {
+  PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.vendor",
+        JDK_Version::is_gte_jdk17x_version() ? "Oracle Corporation" : "Sun Microsystems Inc.", false));
+  PropertyList_add(&_system_properties, new SystemProperty("java.vm.vendor", VM_Version::vm_vendor(), false));
+}
+
 /**
  * Provide a slightly more user-friendly way of eliminating -XX flags.
  * When a flag is eliminated, it can be added to this list in order to
@@ -185,6 +190,10 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
                            JDK_Version::jdk_update(6,18), JDK_Version::jdk(7) },
   { "UseDepthFirstScavengeOrder",
                            JDK_Version::jdk_update(6,22), JDK_Version::jdk(7) },
+  { "HandlePromotionFailure",
+                           JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
+  { "MaxLiveObjectEvacuationRatio",
+                           JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
   { NULL, JDK_Version(0), JDK_Version(0) }
 };
 
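Each row pairs a flag name with the release where it became obsolete and the release where it stops being accepted, so startup can warn-and-ignore inside that window. A hedged sketch of the lookup — plain integers replace the real JDK_Version machinery, and the table contents are illustrative:

    #include <cstdio>
    #include <cstring>

    // Illustrative stand-in for the ObsoleteFlag rows above: a flag is tolerated
    // with a warning from 'obsoleted_in' onward and rejected from 'expired_in'.
    struct ObsoleteFlag {
      const char* name;
      int obsoleted_in;   // simplified version numbers, not JDK_Version
      int expired_in;
    };

    static const ObsoleteFlag kObsolete[] = {
      { "HandlePromotionFailure",       6, 8 },
      { "MaxLiveObjectEvacuationRatio", 6, 8 },
      { nullptr, 0, 0 }
    };

    // Returns true if the flag should be accepted-with-warning.
    bool is_obsolete_but_tolerated(const char* flag, int current) {
      for (const ObsoleteFlag* f = kObsolete; f->name != nullptr; ++f) {
        if (std::strcmp(f->name, flag) == 0 &&
            current >= f->obsoleted_in && current < f->expired_in) {
          std::printf("warning: ignoring obsolete flag -XX:%s\n", flag);
          return true;
        }
      }
      return false;
    }

    int main() {
      return is_obsolete_but_tolerated("HandlePromotionFailure", 7) ? 0 : 1;
    }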
@@ -948,26 +957,65 @@ static void no_shared_spaces() {
   }
 }
 
+void Arguments::check_compressed_oops_compat() {
+#ifdef _LP64
+  assert(UseCompressedOops, "Precondition");
+#  if defined(COMPILER1) && !defined(TIERED)
+  // Until c1 supports compressed oops turn them off.
+  FLAG_SET_DEFAULT(UseCompressedOops, false);
+#  else
+  // Is it on by default or set on ergonomically
+  bool is_on_by_default = FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops);
+
+  // Tiered currently doesn't work with compressed oops
+  if (TieredCompilation) {
+    if (is_on_by_default) {
+      FLAG_SET_DEFAULT(UseCompressedOops, false);
+      return;
+    } else {
+      vm_exit_during_initialization(
+        "Tiered compilation is not supported with compressed oops yet", NULL);
+    }
+  }
+
+  // XXX JSR 292 currently does not support compressed oops
+  if (EnableMethodHandles) {
+    if (is_on_by_default) {
+      FLAG_SET_DEFAULT(UseCompressedOops, false);
+      return;
+    } else {
+      vm_exit_during_initialization(
+        "JSR292 is not supported with compressed oops yet", NULL);
+    }
+  }
+
+  // If dumping an archive or forcing its use, disable compressed oops if possible
+  if (DumpSharedSpaces || RequireSharedSpaces) {
+    if (is_on_by_default) {
+      FLAG_SET_DEFAULT(UseCompressedOops, false);
+      return;
+    } else {
+      vm_exit_during_initialization(
+        "Class Data Sharing is not supported with compressed oops yet", NULL);
+    }
+  } else if (UseSharedSpaces) {
+    // UseSharedSpaces is on by default. With compressed oops, we turn it off.
+    FLAG_SET_DEFAULT(UseSharedSpaces, false);
+  }
+
+#  endif // defined(COMPILER1) && !defined(TIERED)
+#endif // _LP64
+}
+
 void Arguments::set_tiered_flags() {
   if (FLAG_IS_DEFAULT(CompilationPolicyChoice)) {
     FLAG_SET_DEFAULT(CompilationPolicyChoice, 2);
   }
 
   if (CompilationPolicyChoice < 2) {
     vm_exit_during_initialization(
       "Incompatible compilation policy selected", NULL);
   }
-
-#ifdef _LP64
-  if (FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops)) {
-    UseCompressedOops = false;
-  }
-  if (UseCompressedOops) {
-    vm_exit_during_initialization(
-      "Tiered compilation is not supported with compressed oops yet", NULL);
-  }
-#endif
-  // Increase the code cache size - tiered compiles a lot more.
+  // Increase the code cache size - tiered compiles a lot more.
   if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
     FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 2);
   }
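check_compressed_oops_compat applies one rule three times: if the conflicting feature is requested while compressed oops are only on by default (or by ergonomics), turn them off quietly; if the user asked for them explicitly, fail fast. A minimal sketch of that policy with hypothetical flag names:

    #include <cstdio>
    #include <cstdlib>

    // Hypothetical flag state: a value plus whether the user set it explicitly.
    struct Flag { bool value; bool set_by_user; };

    // The recurring pattern from check_compressed_oops_compat(): default-on
    // settings yield silently, explicit settings turn the conflict into an error.
    void resolve_conflict(Flag& compressed_oops, bool feature_on, const char* feature) {
      if (!feature_on || !compressed_oops.value) return;
      if (!compressed_oops.set_by_user) {
        compressed_oops.value = false;          // quietly back off the default
      } else {
        std::fprintf(stderr, "%s is not supported with compressed oops yet\n", feature);
        std::exit(1);                           // user asked for both: fail fast
      }
    }

    int main() {
      Flag use_compressed_oops = { true, false };   // on by default, not user-set
      resolve_conflict(use_compressed_oops, true, "Tiered compilation");
      std::printf("UseCompressedOops = %s\n", use_compressed_oops.value ? "true" : "false");
      return 0;
    }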
@@ -1676,7 +1724,8 @@ bool Arguments::check_stack_pages()
   bool status = true;
   status = status && verify_min_value(StackYellowPages, 1, "StackYellowPages");
   status = status && verify_min_value(StackRedPages, 1, "StackRedPages");
-  status = status && verify_min_value(StackShadowPages, 1, "StackShadowPages");
+  // greater stack shadow pages can't generate instruction to bang stack
+  status = status && verify_interval(StackShadowPages, 1, 50, "StackShadowPages");
   return status;
 }
@@ -1722,8 +1771,6 @@ bool Arguments::check_vm_args_consistency() {
     status = false;
   }
 
-  status = status && verify_percentage(MaxLiveObjectEvacuationRatio,
-                              "MaxLiveObjectEvacuationRatio");
   status = status && verify_percentage(AdaptiveSizePolicyWeight,
                               "AdaptiveSizePolicyWeight");
   status = status && verify_percentage(AdaptivePermSizeWeight, "AdaptivePermSizeWeight");
@@ -2827,6 +2874,7 @@ jint Arguments::parse_options_environment_variable(const char* name, SysClassPat
   return JNI_OK;
 }
 
+
 // Parse entry point called from JNI_CreateJavaVM
 
 jint Arguments::parse(const JavaVMInitArgs* args) {
@@ -2969,10 +3017,6 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
     PrintGC = true;
   }
 
-#if defined(_LP64) && defined(COMPILER1) && !defined(TIERED)
-  UseCompressedOops = false;
-#endif
-
   // Set object alignment values.
   set_object_alignment();
 
@@ -2987,13 +3031,10 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
   set_ergonomics_flags();
 
 #ifdef _LP64
-  // XXX JSR 292 currently does not support compressed oops.
-  if (EnableMethodHandles && UseCompressedOops) {
-    if (FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops)) {
-      UseCompressedOops = false;
-    }
+  if (UseCompressedOops) {
+    check_compressed_oops_compat();
   }
-#endif // _LP64
+#endif
 
   // Check the GC selections again.
   if (!check_gc_consistency()) {
@@ -291,6 +291,8 @@ class Arguments : AllStatic {
 
   // Tiered
   static void set_tiered_flags();
+  // Check compressed oops compatibility with other flags
+  static void check_compressed_oops_compat();
   // CMS/ParNew garbage collectors
   static void set_parnew_gc_flags();
   static void set_cms_and_parnew_gc_flags();
@@ -484,6 +486,9 @@ class Arguments : AllStatic {
   // System properties
   static void init_system_properties();
 
+  // Update/Initialize System properties after JDK version number is known
+  static void init_version_specific_system_properties();
+
   // Property List manipulation
   static void PropertyList_add(SystemProperty** plist, SystemProperty *element);
   static void PropertyList_add(SystemProperty** plist, const char* k, char* v);
hotspot/src/share/vm/runtime/basicLock.cpp (new file, 76 lines)
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_basicLock.cpp.incl"
+
+void BasicLock::print_on(outputStream* st) const {
+  st->print("monitor");
+}
+
+void BasicLock::move_to(oop obj, BasicLock* dest) {
+  // Check to see if we need to inflate the lock. This is only needed
+  // if an object is locked using "this" lightweight monitor. In that
+  // case, the displaced_header() is unlocked, because the
+  // displaced_header() contains the header for the originally unlocked
+  // object. However the object could have already been inflated. But it
+  // does not matter, the inflation will just be a no-op. For other cases,
+  // the displaced header will be either 0x0 or 0x3, which are location
+  // independent, therefore the BasicLock is free to move.
+  //
+  // During OSR we may need to relocate a BasicLock (which contains a
+  // displaced word) from a location in an interpreter frame to a
+  // new location in a compiled frame.  "this" refers to the source
+  // basiclock in the interpreter frame.  "dest" refers to the destination
+  // basiclock in the new compiled frame.  We *always* inflate in move_to().
+  // The always-inflate policy works properly, but in 1.5.0 it can sometimes
+  // cause performance problems in code that makes heavy use of a small # of
+  // uncontended locks.  (We'd inflate during OSR, and then sync performance
+  // would subsequently plummet because the thread would be forced thru the slow-path).
+  // This problem has been made largely moot on IA32 by inlining the inflated fast-path
+  // operations in Fast_Lock and Fast_Unlock in i486.ad.
+  //
+  // Note that there is a way to safely swing the object's markword from
+  // one stack location to another.  This avoids inflation.  Obviously,
+  // we need to ensure that both locations refer to the current thread's stack.
+  // There are some subtle concurrency issues, however, and since the benefit
+  // is small (given the support for inflated fast-path locking in the fast_lock, etc)
+  // we'll leave that optimization for another time.
+
+  if (displaced_header()->is_neutral()) {
+    ObjectSynchronizer::inflate_helper(obj);
+    // WARNING: We can not put check here, because the inflation
+    // will not update the displaced header. Once BasicLock is inflated,
+    // no one should ever look at its content.
+  } else {
+    // Typically the displaced header will be 0 (recursive stack lock) or
+    // unused_mark.  Naively we'd like to assert that the displaced mark
+    // value is either 0, neutral, or 3.  But with the advent of the
+    // store-before-CAS avoidance in fast_lock/compiler_lock_object
+    // we can find any flavor mark in the displaced mark.
+  }
+  // [RGV] The next line appears to do nothing!
+  intptr_t dh = (intptr_t) displaced_header();
+  dest->set_displaced_header(displaced_header());
+}
hotspot/src/share/vm/runtime/basicLock.hpp (new file, 72 lines)
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class BasicLock VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+ private:
+  volatile markOop _displaced_header;
+ public:
+  markOop displaced_header() const               { return _displaced_header; }
+  void    set_displaced_header(markOop header)   { _displaced_header = header; }
+
+  void print_on(outputStream* st) const;
+
+  // move a basic lock (used during deoptimization)
+  void move_to(oop obj, BasicLock* dest);
+
+  static int displaced_header_offset_in_bytes()  { return offset_of(BasicLock, _displaced_header); }
+};
+
+// A BasicObjectLock associates a specific Java object with a BasicLock.
+// It is currently embedded in an interpreter frame.
+
+// Because some machines have alignment restrictions on the control stack,
+// the actual space allocated by the interpreter may include padding words
+// after the end of the BasicObjectLock.  Also, in order to guarantee
+// alignment of the embedded BasicLock objects on such machines, we
+// put the embedded BasicLock at the beginning of the struct.
+
+class BasicObjectLock VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+ private:
+  BasicLock _lock;  // the lock, must be double word aligned
+  oop       _obj;   // object that holds the lock
+
+ public:
+  // Manipulation
+  oop        obj() const          { return _obj; }
+  void       set_obj(oop obj)     { _obj = obj; }
+  BasicLock* lock()               { return &_lock; }
+
+  // Note: Use frame::interpreter_frame_monitor_size() for the size of BasicObjectLocks
+  //       in interpreter activation frames since it includes machine-specific padding.
+  static int size()               { return sizeof(BasicObjectLock)/wordSize; }
+
+  // GC support
+  void oops_do(OopClosure* f)     { f->do_oop(&_obj); }
+
+  static int obj_offset_in_bytes()   { return offset_of(BasicObjectLock, _obj); }
+  static int lock_offset_in_bytes()  { return offset_of(BasicObjectLock, _lock); }
+};
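BasicLock is the stack-side half of thin locking: the object's header word is displaced into a stack-allocated lock record and the header is repointed at that record, which is why a neutral displaced header forces inflation before the record can safely move. A toy standalone model of the displacement step — std::atomic<uintptr_t> stands in for markOop, the neutral value 0x1 is illustrative, and there is no inflation or protection against misuse:

    #include <atomic>
    #include <cstdint>

    // Toy model of thin locking with a displaced header. The whole word either
    // holds the "neutral" header value or a pointer to the on-stack record
    // that displaced it (real markwords encode the state in the low bits).
    struct BasicLockModel {
      uintptr_t displaced_header;   // saved copy of the object's header
    };

    struct ObjectModel {
      std::atomic<uintptr_t> mark{0x1};   // hypothetical neutral header value
    };

    // Try to stack-lock: save the current header in the lock record, then CAS
    // the header to point at the record. Fails if already locked.
    bool fast_lock(ObjectModel& o, BasicLockModel& rec) {
      uintptr_t h = o.mark.load();
      if (h != 0x1) return false;               // not neutral: already locked
      rec.displaced_header = h;
      return o.mark.compare_exchange_strong(h, reinterpret_cast<uintptr_t>(&rec));
    }

    void fast_unlock(ObjectModel& o, BasicLockModel& rec) {
      uintptr_t me = reinterpret_cast<uintptr_t>(&rec);
      o.mark.compare_exchange_strong(me, rec.displaced_header);  // restore header
    }

    int main() {
      ObjectModel obj;
      BasicLockModel rec;
      if (!fast_lock(obj, rec)) return 1;   // header now points at rec
      fast_unlock(obj, rec);                // header restored to neutral
      return obj.mark.load() == 0x1 ? 0 : 1;
    }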
@@ -327,10 +327,10 @@ class CommandLineFlags {
   /* UseMembar is theoretically a temp flag used for memory barrier         \
    * removal testing.  It was supposed to be removed before FCS but has     \
    * been re-added (see 6401008) */                                         \
-  product(bool, UseMembar, false,                                           \
+  product_pd(bool, UseMembar,                                               \
           "(Unstable) Issues membars on thread state transitions")          \
                                                                             \
   /* Temporary: See 6948537 */                                              \
   experimental(bool, UseMemSetInBOT, true,                                  \
           "(Unstable) uses memset in BOT updates in GC code")               \
                                                                             \
@@ -822,6 +822,9 @@ class CommandLineFlags {
   develop(bool, PrintJVMWarnings, false,                                    \
           "Prints warnings for unimplemented JVM functions")                \
                                                                             \
+  product(bool, PrintWarnings, true,                                        \
+          "Prints JVM warnings to output stream")                           \
+                                                                            \
   notproduct(uintx, WarnOnStalledSpinLock, 0,                               \
           "Prints warnings for stalled SpinLocks")                          \
                                                                             \
@@ -1585,7 +1588,7 @@ class CommandLineFlags {
           "(Temporary, subject to experimentation)"                         \
           "Nominal minimum work per abortable preclean iteration")          \
                                                                             \
-  product(intx, CMSAbortablePrecleanWaitMillis, 100,                        \
+  manageable(intx, CMSAbortablePrecleanWaitMillis, 100,                     \
           "(Temporary, subject to experimentation)"                         \
           " Time that we sleep between iterations when not given"           \
           " enough work per iteration")                                     \
@@ -1677,7 +1680,7 @@ class CommandLineFlags {
   product(uintx, CMSWorkQueueDrainThreshold, 10,                            \
           "Don't drain below this size per parallel worker/thief")          \
                                                                             \
-  product(intx, CMSWaitDuration, 2000,                                      \
+  manageable(intx, CMSWaitDuration, 2000,                                   \
           "Time in milliseconds that CMS thread waits for young GC")        \
                                                                             \
   product(bool, CMSYield, true,                                             \
@@ -1786,10 +1789,6 @@ class CommandLineFlags {
   notproduct(bool, GCALotAtAllSafepoints, false,                            \
           "Enforce ScavengeALot/GCALot at all potential safepoints")        \
                                                                             \
-  product(bool, HandlePromotionFailure, true,                               \
-          "The youngest generation collection does not require "            \
-          "a guarantee of full promotion of all live objects.")             \
-                                                                            \
   product(bool, PrintPromotionFailure, false,                               \
           "Print additional diagnostic information following "              \
           " promotion failure")                                             \
@@ -3003,9 +3002,6 @@ class CommandLineFlags {
   product(intx, NewRatio, 2,                                                \
           "Ratio of new/old generation sizes")                              \
                                                                             \
-  product(uintx, MaxLiveObjectEvacuationRatio, 100,                         \
-          "Max percent of eden objects that will be live at scavenge")      \
-                                                                            \
   product_pd(uintx, NewSizeThreadIncrease,                                  \
           "Additional size added to desired new generation size per "       \
           "non-daemon thread (in bytes)")                                   \
@@ -3542,7 +3538,7 @@ class CommandLineFlags {
   product(uintx, SharedDummyBlockSize, 512*M,                               \
           "Size of dummy block used to shift heap addresses (in bytes)")    \
                                                                             \
-  product(uintx, SharedReadWriteSize, 12*M,                                 \
+  product(uintx, SharedReadWriteSize, NOT_LP64(12*M) LP64_ONLY(13*M),       \
           "Size of read-write space in permanent generation (in bytes)")    \
                                                                             \
   product(uintx, SharedReadOnlySize, 10*M,                                  \
@@ -265,48 +265,3 @@ class Mutex : public Monitor {      // degenerate Monitor
   }
 };
 
-/*
- * Per-thread blocking support for JSR166. See the Java-level
- * Documentation for rationale. Basically, park acts like wait, unpark
- * like notify.
- *
- * 6271289 --
- * To avoid errors where an os thread expires but the JavaThread still
- * exists, Parkers are immortal (type-stable) and are recycled across
- * new threads.  This parallels the ParkEvent implementation.
- * Because park-unpark allow spurious wakeups it is harmless if an
- * unpark call unparks a new thread using the old Parker reference.
- *
- * In the future we'll want to think about eliminating Parker and using
- * ParkEvent instead.  There's considerable duplication between the two
- * services.
- *
- */
-
-class Parker : public os::PlatformParker {
-private:
-  volatile int _counter ;
-  Parker * FreeNext ;
-  JavaThread * AssociatedWith ; // Current association
-
-public:
-  Parker() : PlatformParker() {
-    _counter       = 0 ;
-    FreeNext       = NULL ;
-    AssociatedWith = NULL ;
-  }
-protected:
-  ~Parker() { ShouldNotReachHere(); }
-public:
-  // For simplicity of interface with Java, all forms of park (indefinite,
-  // relative, and absolute) are multiplexed into one call.
-  void park(bool isAbsolute, jlong time);
-  void unpark();
-
-  // Lifecycle operators
-  static Parker * Allocate (JavaThread * t) ;
-  static void Release (Parker * e) ;
-private:
-  static Parker * volatile FreeList ;
-  static volatile int ListLock ;
-};
-
hotspot/src/share/vm/runtime/objectMonitor.cpp (new file, 2421 lines; diff suppressed because it is too large)
@ -22,6 +22,32 @@
  *
  */
 
+
+// ObjectWaiter serves as a "proxy" or surrogate thread.
+// TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
+// ParkEvent instead.  Beware, however, that the JVMTI code
+// knows about ObjectWaiters, so we'll have to reconcile that code.
+// See next_waiter(), first_waiter(), etc.
+
+class ObjectWaiter : public StackObj {
+ public:
+  enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ;
+  enum Sorted  { PREPEND, APPEND, SORTED } ;
+  ObjectWaiter * volatile _next;
+  ObjectWaiter * volatile _prev;
+  Thread*       _thread;
+  ParkEvent *   _event;
+  volatile int  _notified ;
+  volatile TStates TState ;
+  Sorted        _Sorted ;           // List placement disposition
+  bool          _active ;           // Contention monitoring is enabled
+ public:
+  ObjectWaiter(Thread* thread);
+
+  void wait_reenter_begin(ObjectMonitor *mon);
+  void wait_reenter_end(ObjectMonitor *mon);
+};
+
 // WARNING:
 //   This is a very sensitive and fragile class. DO NOT make any
 // change unless you are fully aware of the underlying semantics.
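The ObjectWaiter added above is the node type for the monitor's wait set and cxq, threaded through _next/_prev; HotSpot keeps the wait set as a circular doubly-linked list. A minimal standalone sketch of that list discipline -- illustrative only, since the real AddWaiter()/DequeueWaiter() bodies live in objectMonitor.cpp, whose diff is suppressed above; WaiterNode and append_waiter are hypothetical names:

// Sketch of a circular doubly-linked wait set, in the spirit of the
// _next/_prev links declared on ObjectWaiter above (APPEND disposition).
struct WaiterNode {
  WaiterNode* _next;
  WaiterNode* _prev;
};

void append_waiter(WaiterNode*& wait_set, WaiterNode* node) {
  if (wait_set == nullptr) {
    node->_next = node;               // first waiter: a one-element circle
    node->_prev = node;
    wait_set = node;
  } else {
    WaiterNode* head = wait_set;
    WaiterNode* tail = head->_prev;   // tail sits just before the head
    tail->_next = node;               // splice the new node at the tail
    node->_prev = tail;
    node->_next = head;
    head->_prev = node;
  }
}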
@ -38,8 +64,6 @@
 // It is also used as RawMonitor by the JVMTI
 
 
-class ObjectWaiter;
-
 class ObjectMonitor {
  public:
   enum {
@ -74,13 +98,16 @@ class ObjectMonitor {
 
 
  public:
-  ObjectMonitor();
-  ~ObjectMonitor();
-
   markOop   header() const;
   void      set_header(markOop hdr);
 
-  intptr_t  is_busy() const;
+  intptr_t  is_busy() const {
+    // TODO-FIXME: merge _count and _waiters.
+    // TODO-FIXME: assert _owner == null implies _recursions = 0
+    // TODO-FIXME: assert _WaitSet != null implies _count > 0
+    return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ;
+  }
 
   intptr_t  is_entered(Thread* current) const;
 
   void*     owner() const;
@ -91,13 +118,58 @@ class ObjectMonitor {
   intptr_t  count() const;
   void      set_count(intptr_t count);
   intptr_t  contentions() const ;
+  intptr_t  recursions() const { return _recursions; }
 
   // JVM/DI GetMonitorInfo() needs this
-  Thread *  thread_of_waiter (ObjectWaiter *) ;
-  ObjectWaiter * first_waiter () ;
-  ObjectWaiter * next_waiter(ObjectWaiter* o);
+  ObjectWaiter* first_waiter()               { return _WaitSet; }
+  ObjectWaiter* next_waiter(ObjectWaiter* o) { return o->_next; }
+  Thread* thread_of_waiter(ObjectWaiter* o)  { return o->_thread; }
 
-  intptr_t  recursions() const { return _recursions; }
+  // initialize the monitor, exception the semaphore, all other fields
+  // are simple integers or pointers
+  ObjectMonitor() {
+    _header       = NULL;
+    _count        = 0;
+    _waiters      = 0,
+    _recursions   = 0;
+    _object       = NULL;
+    _owner        = NULL;
+    _WaitSet      = NULL;
+    _WaitSetLock  = 0 ;
+    _Responsible  = NULL ;
+    _succ         = NULL ;
+    _cxq          = NULL ;
+    FreeNext      = NULL ;
+    _EntryList    = NULL ;
+    _SpinFreq     = 0 ;
+    _SpinClock    = 0 ;
+    OwnerIsThread = 0 ;
+  }
+
+  ~ObjectMonitor() {
+    // TODO: Add asserts ...
+    // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
+    // _count == 0 _EntryList == NULL etc
+  }
+
+private:
+  void Recycle () {
+    // TODO: add stronger asserts ...
+    // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
+    // _count == 0 EntryList == NULL
+    // _recursions == 0 _WaitSet == NULL
+    // TODO: assert (is_busy()|_recursions) == 0
+    _succ          = NULL ;
+    _EntryList     = NULL ;
+    _cxq           = NULL ;
+    _WaitSet       = NULL ;
+    _recursions    = 0 ;
+    _SpinFreq      = 0 ;
+    _SpinClock     = 0 ;
+    OwnerIsThread  = 0 ;
+  }
+
+public:
 
   void*     object() const;
   void*     object_addr();
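The inlined is_busy() above uses a branch-free test: OR-ing the monitor's hot words is zero exactly when every one of them is zero/NULL, so one comparison answers "is anything using this monitor?". A standalone sketch of the same trick (illustrative parameter names, not HotSpot code):

#include <stdint.h>

// Zero iff count, waiters, owner, cxq and the entry list are all
// zero/NULL -- the same OR-reduction is_busy() performs on its fields.
inline bool is_busy_sketch(intptr_t count, intptr_t waiters,
                           void* owner, void* cxq, void* entry_list) {
  intptr_t busy = count | waiters |
                  (intptr_t)owner | (intptr_t)cxq | (intptr_t)entry_list;
  return busy != 0;
}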
@ -122,22 +194,9 @@ class ObjectMonitor {
   intptr_t  complete_exit(TRAPS);
   void      reenter(intptr_t recursions, TRAPS);
 
-  int       raw_enter(TRAPS);
-  int       raw_exit(TRAPS);
-  int       raw_wait(jlong millis, bool interruptable, TRAPS);
-  int       raw_notify(TRAPS);
-  int       raw_notifyAll(TRAPS);
-
  private:
-  // JVMTI support -- remove ASAP
-  int       SimpleEnter (Thread * Self) ;
-  int       SimpleExit  (Thread * Self) ;
-  int       SimpleWait  (Thread * Self, jlong millis) ;
-  int       SimpleNotify (Thread * Self, bool All) ;
-
- private:
-  void      Recycle () ;
   void      AddWaiter (ObjectWaiter * waiter) ;
+  static    void DeferredInitialize();
 
   ObjectWaiter * DequeueWaiter () ;
   void      DequeueSpecificWaiter (ObjectWaiter * waiter) ;
@ -172,13 +231,17 @@ class ObjectMonitor {
   // The VM assumes write ordering wrt these fields, which can be
   // read from other threads.
 
+ protected:                         // protected for jvmtiRawMonitor
   void *  volatile _owner;          // pointer to owning thread OR BasicLock
   volatile intptr_t  _recursions;   // recursion count, 0 for first entry
+ private:
   int OwnerIsThread ;               // _owner is (Thread *) vs SP/BasicLock
   ObjectWaiter * volatile _cxq ;    // LL of recently-arrived threads blocked on entry.
                                     // The list is actually composed of WaitNodes, acting
                                     // as proxies for Threads.
+ protected:
   ObjectWaiter * volatile _EntryList ;     // Threads blocked on entry or reentry.
+ private:
   Thread * volatile _succ ;         // Heir presumptive thread - used for futile wakeup throttling
   Thread * volatile _Responsible ;
   int _PromptDrain ;                // rqst to drain cxq into EntryList ASAP
@ -196,8 +259,12 @@ class ObjectMonitor {
   volatile intptr_t  _count;        // reference count to prevent reclaimation/deflation
                                     // at stop-the-world time.  See deflate_idle_monitors().
                                     // _count is approximately |_WaitSet| + |_EntryList|
+ protected:
   volatile intptr_t  _waiters;      // number of waiting threads
+ private:
+ protected:
   ObjectWaiter * volatile _WaitSet; // LL of threads wait()ing on the monitor
+ private:
   volatile int _WaitSetLock;        // protects Wait Queue - simple spinlock
 
  public:
@ -205,4 +272,37 @@ class ObjectMonitor {
   ObjectMonitor * FreeNext ;        // Free list linkage
   intptr_t StatA, StatsB ;
 
+ public:
+  static void Initialize () ;
+  static PerfCounter * _sync_ContendedLockAttempts ;
+  static PerfCounter * _sync_FutileWakeups ;
+  static PerfCounter * _sync_Parks ;
+  static PerfCounter * _sync_EmptyNotifications ;
+  static PerfCounter * _sync_Notifications ;
+  static PerfCounter * _sync_SlowEnter ;
+  static PerfCounter * _sync_SlowExit ;
+  static PerfCounter * _sync_SlowNotify ;
+  static PerfCounter * _sync_SlowNotifyAll ;
+  static PerfCounter * _sync_FailedSpins ;
+  static PerfCounter * _sync_SuccessfulSpins ;
+  static PerfCounter * _sync_PrivateA ;
+  static PerfCounter * _sync_PrivateB ;
+  static PerfCounter * _sync_MonInCirculation ;
+  static PerfCounter * _sync_MonScavenged ;
+  static PerfCounter * _sync_Inflations ;
+  static PerfCounter * _sync_Deflations ;
+  static PerfLongVariable * _sync_MonExtant ;
+
+ public:
+  static int Knob_Verbose;
+  static int Knob_SpinLimit;
 };
+
+#undef TEVENT
+#define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); }
+
+#define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}
+
+#undef  TEVENT
+#define TEVENT(nom) {;}
+
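The FEVENT macro above throttles its own output: (v & (v - 1)) == 0 holds only when v is a power of two, so an event that fires a million times prints about twenty lines rather than a million. A minimal standalone rendering of the same trick as a function (sketch only; the macro form above is what the code actually uses):

#include <cstdio>

void fevent_sketch(const char* nom) {
  static volatile int ctr = 0;
  int v = ++ctr;                  // like FEVENT, not atomic -- diagnostic only
  if ((v & (v - 1)) == 0) {       // true exactly when v is a power of two
    std::printf("%s : %d\n", nom, v);
    std::fflush(stdout);
  }
}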
@ -104,7 +104,3 @@ inline void ObjectMonitor::set_owner(void* owner) {
   _count = 0;
 }
 
-
-// here are the platform-dependent bodies:
-
-# include "incls/_objectMonitor_pd.inline.hpp.incl"
hotspot/src/share/vm/runtime/park.cpp (new file, 237 lines)
@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+# include "incls/_precompiled.incl"
+# include "incls/_park.cpp.incl"
+
+
+// Lifecycle management for TSM ParkEvents.
+// ParkEvents are type-stable (TSM).
+// In our particular implementation they happen to be immortal.
+//
+// We manage concurrency on the FreeList with a CAS-based
+// detach-modify-reattach idiom that avoids the ABA problems
+// that would otherwise be present in a simple CAS-based
+// push-pop implementation.   (push-one and pop-all)
+//
+// Caveat: Allocate() and Release() may be called from threads
+// other than the thread associated with the Event!
+// If we need to call Allocate() when running as the thread in
+// question then look for the PD calls to initialize native TLS.
+// Native TLS (Win32/Linux/Solaris) can only be initialized or
+// accessed by the associated thread.
+// See also pd_initialize().
+//
+// Note that we could defer associating a ParkEvent with a thread
+// until the 1st time the thread calls park().  unpark() calls to
+// an unprovisioned thread would be ignored.  The first park() call
+// for a thread would allocate and associate a ParkEvent and return
+// immediately.
+
+volatile int ParkEvent::ListLock = 0 ;
+ParkEvent * volatile ParkEvent::FreeList = NULL ;
+
+ParkEvent * ParkEvent::Allocate (Thread * t) {
+  // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
+  ParkEvent * ev ;
+
+  // Start by trying to recycle an existing but unassociated
+  // ParkEvent from the global free list.
+  for (;;) {
+    ev = FreeList ;
+    if (ev == NULL) break ;
+    // 1: Detach - sequester or privatize the list
+    // Tantamount to ev = Swap (&FreeList, NULL)
+    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
+       continue ;
+    }
+
+    // We've detached the list.  The list in-hand is now
+    // local to this thread.   This thread can operate on the
+    // list without risk of interference from other threads.
+    // 2: Extract -- pop the 1st element from the list.
+    ParkEvent * List = ev->FreeNext ;
+    if (List == NULL) break ;
+    for (;;) {
+        // 3: Try to reattach the residual list
+        guarantee (List != NULL, "invariant") ;
+        ParkEvent * Arv = (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
+        if (Arv == NULL) break ;
+
+        // New nodes arrived.  Try to detach the recent arrivals.
+        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
+            continue ;
+        }
+        guarantee (Arv != NULL, "invariant") ;
+        // 4: Merge Arv into List
+        ParkEvent * Tail = List ;
+        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
+        Tail->FreeNext = Arv ;
+    }
+    break ;
+  }
+
+  if (ev != NULL) {
+    guarantee (ev->AssociatedWith == NULL, "invariant") ;
+  } else {
+    // Do this the hard way -- materialize a new ParkEvent.
+    // In rare cases an allocating thread might detach a long list --
+    // installing null into FreeList -- and then stall or be obstructed.
+    // A 2nd thread calling Allocate() would see FreeList == null.
+    // The list held privately by the 1st thread is unavailable to the 2nd thread.
+    // In that case the 2nd thread would have to materialize a new ParkEvent,
+    // even though free ParkEvents existed in the system.  In this case we end up
+    // with more ParkEvents in circulation than we need, but the race is
+    // rare and the outcome is benign.  Ideally, the # of extant ParkEvents
+    // is equal to the maximum # of threads that existed at any one time.
+    // Because of the race mentioned above, segments of the freelist
+    // can be transiently inaccessible.  At worst we may end up with the
+    // # of ParkEvents in circulation slightly above the ideal.
+    // Note that if we didn't have the TSM/immortal constraint, then
+    // when reattaching, above, we could trim the list.
+    ev = new ParkEvent () ;
+    guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
+  }
+  ev->reset() ;                     // courtesy to caller
+  ev->AssociatedWith = t ;          // Associate ev with t
+  ev->FreeNext       = NULL ;
+  return ev ;
+}
+
+void ParkEvent::Release (ParkEvent * ev) {
+  if (ev == NULL) return ;
+  guarantee (ev->FreeNext == NULL , "invariant") ;
+  ev->AssociatedWith = NULL ;
+  for (;;) {
+    // Push ev onto FreeList
+    // The mechanism is "half" lock-free.
+    ParkEvent * List = FreeList ;
+    ev->FreeNext = List ;
+    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
+  }
+}
+
+// Override operator new and delete so we can ensure that the
+// least significant byte of ParkEvent addresses is 0.
+// Beware that excessive address alignment is undesirable
+// as it can result in D$ index usage imbalance as
+// well as bank access imbalance on Niagara-like platforms,
+// although Niagara's hash function should help.
+
+void * ParkEvent::operator new (size_t sz) {
+  return (void *) ((intptr_t (CHeapObj::operator new (sz + 256)) + 256) & -256) ;
+}
+
+void ParkEvent::operator delete (void * a) {
+  // ParkEvents are type-stable and immortal ...
+  ShouldNotReachHere();
+}
+
+
+// 6399321 As a temporary measure we copied & modified the ParkEvent::
+// allocate() and release() code for use by Parkers.  The Parker:: forms
+// will eventually be removed as we consolide and shift over to ParkEvents
+// for both builtin synchronization and JSR166 operations.
+
+volatile int Parker::ListLock = 0 ;
+Parker * volatile Parker::FreeList = NULL ;
+
+Parker * Parker::Allocate (JavaThread * t) {
+  guarantee (t != NULL, "invariant") ;
+  Parker * p ;
+
+  // Start by trying to recycle an existing but unassociated
+  // Parker from the global free list.
+  for (;;) {
+    p = FreeList ;
+    if (p == NULL) break ;
+    // 1: Detach
+    // Tantamount to p = Swap (&FreeList, NULL)
+    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
+       continue ;
+    }
+
+    // We've detached the list.  The list in-hand is now
+    // local to this thread.   This thread can operate on the
+    // list without risk of interference from other threads.
+    // 2: Extract -- pop the 1st element from the list.
+    Parker * List = p->FreeNext ;
+    if (List == NULL) break ;
+    for (;;) {
+        // 3: Try to reattach the residual list
+        guarantee (List != NULL, "invariant") ;
+        Parker * Arv = (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
+        if (Arv == NULL) break ;
+
+        // New nodes arrived.  Try to detach the recent arrivals.
+        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
+            continue ;
+        }
+        guarantee (Arv != NULL, "invariant") ;
+        // 4: Merge Arv into List
+        Parker * Tail = List ;
+        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
+        Tail->FreeNext = Arv ;
+    }
+    break ;
+  }
+
+  if (p != NULL) {
+    guarantee (p->AssociatedWith == NULL, "invariant") ;
+  } else {
+    // Do this the hard way -- materialize a new Parker..
+    // In rare cases an allocating thread might detach
+    // a long list -- installing null into FreeList --and
+    // then stall.  Another thread calling Allocate() would see
+    // FreeList == null and then invoke the ctor.  In this case we
+    // end up with more Parkers in circulation than we need, but
+    // the race is rare and the outcome is benign.
+    // Ideally, the # of extant Parkers is equal to the
+    // maximum # of threads that existed at any one time.
+    // Because of the race mentioned above, segments of the
+    // freelist can be transiently inaccessible.  At worst
+    // we may end up with the # of Parkers in circulation
+    // slightly above the ideal.
+    p = new Parker() ;
+  }
+  p->AssociatedWith = t ;          // Associate p with t
+  p->FreeNext       = NULL ;
+  return p ;
+}
+
+
+void Parker::Release (Parker * p) {
+  if (p == NULL) return ;
+  guarantee (p->AssociatedWith != NULL, "invariant") ;
+  guarantee (p->FreeNext == NULL , "invariant") ;
+  p->AssociatedWith = NULL ;
+  for (;;) {
+    // Push p onto FreeList
+    Parker * List = FreeList ;
+    p->FreeNext = List ;
+    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
+  }
+}
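Allocate() and Release() above are the "push-one, pop-all" freelist the header comment describes: the consumer detaches the whole list with one atomic swap, so interior links are only touched under private ownership and the classic ABA hazard of a CAS-based pop-one stack never arises. A condensed standalone sketch of the idiom, written with GCC/Clang __atomic builtins rather than HotSpot's Atomic::cmpxchg_ptr (an assumption made so the example compiles on its own):

// Sketch only -- not HotSpot code.  'Node' and 'free_list' are
// illustrative stand-ins for ParkEvent and ParkEvent::FreeList.
struct Node { Node* next; };
static Node* free_list = nullptr;

// pop-all: detach the entire chain in one swap ("Tantamount to
// Swap(&FreeList, NULL)" in the comments above).  The detached chain
// is private to the caller, so walking or splitting it is ABA-free.
static Node* detach_all() {
  return __atomic_exchange_n(&free_list, (Node*)nullptr, __ATOMIC_ACQ_REL);
}

// push-one: a plain CAS loop is safe because we only ever push a node
// we exclusively own; interior links are never re-written in place.
static void push_one(Node* n) {
  Node* head = __atomic_load_n(&free_list, __ATOMIC_RELAXED);
  do {
    n->next = head;   // 'head' is refreshed by the failed CAS below
  } while (!__atomic_compare_exchange_n(&free_list, &head, n,
                                        true /* weak */,
                                        __ATOMIC_RELEASE, __ATOMIC_RELAXED));
}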
hotspot/src/share/vm/runtime/park.hpp (new file, 169 lines)
@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+/*
+ * Per-thread blocking support for JSR166. See the Java-level
+ * Documentation for rationale. Basically, park acts like wait, unpark
+ * like notify.
+ *
+ * 6271289 --
+ * To avoid errors where an os thread expires but the JavaThread still
+ * exists, Parkers are immortal (type-stable) and are recycled across
+ * new threads.  This parallels the ParkEvent implementation.
+ * Because park-unpark allow spurious wakeups it is harmless if an
+ * unpark call unparks a new thread using the old Parker reference.
+ *
+ * In the future we'll want to think about eliminating Parker and using
+ * ParkEvent instead.  There's considerable duplication between the two
+ * services.
+ *
+ */
+
+class Parker : public os::PlatformParker {
+private:
+  volatile int _counter ;
+  Parker * FreeNext ;
+  JavaThread * AssociatedWith ; // Current association
+
+public:
+  Parker() : PlatformParker() {
+    _counter       = 0 ;
+    FreeNext       = NULL ;
+    AssociatedWith = NULL ;
+  }
+protected:
+  ~Parker() { ShouldNotReachHere(); }
+public:
+  // For simplicity of interface with Java, all forms of park (indefinite,
+  // relative, and absolute) are multiplexed into one call.
+  void park(bool isAbsolute, jlong time);
+  void unpark();
+
+  // Lifecycle operators
+  static Parker * Allocate (JavaThread * t) ;
+  static void Release (Parker * e) ;
+private:
+  static Parker * volatile FreeList ;
+  static volatile int ListLock ;
+
+};
+
+/////////////////////////////////////////////////////////////
+//
+// ParkEvents are type-stable and immortal.
+//
+// Lifecycle: Once a ParkEvent is associated with a thread that ParkEvent remains
+// associated with the thread for the thread's entire lifetime - the relationship is
+// stable. A thread will be associated at most one ParkEvent.  When the thread
+// expires, the ParkEvent moves to the EventFreeList.  New threads attempt to allocate from
+// the EventFreeList before creating a new Event.  Type-stability frees us from
+// worrying about stale Event or Thread references in the objectMonitor subsystem.
+// (A reference to ParkEvent is always valid, even though the event may no longer be associated
+// with the desired or expected thread.  A key aspect of this design is that the callers of
+// park, unpark, etc must tolerate stale references and spurious wakeups).
+//
+// Only the "associated" thread can block (park) on the ParkEvent, although
+// any other thread can unpark a reachable parkevent.  Park() is allowed to
+// return spuriously.  In fact park-unpark a really just an optimization to
+// avoid unbounded spinning and surrender the CPU to be a polite system citizen.
+// A degenerate albeit "impolite" park-unpark implementation could simply return.
+// See http://blogs.sun.com/dave for more details.
+//
+// Eventually I'd like to eliminate Events and ObjectWaiters, both of which serve as
+// thread proxies, and simply make the THREAD structure type-stable and persistent.
+// Currently, we unpark events associated with threads, but ideally we'd just
+// unpark threads.
+//
+// The base-class, PlatformEvent, is platform-specific while the ParkEvent is
+// platform-independent.  PlatformEvent provides park(), unpark(), etc., and
+// is abstract -- that is, a PlatformEvent should never be instantiated except
+// as part of a ParkEvent.
+// Equivalently we could have defined a platform-independent base-class that
+// exported Allocate(), Release(), etc.  The platform-specific class would extend
+// that base-class, adding park(), unpark(), etc.
+//
+// A word of caution: The JVM uses 2 very similar constructs:
+// 1. ParkEvent are used for Java-level "monitor" synchronization.
+// 2. Parkers are used by JSR166-JUC park-unpark.
+//
+// We'll want to eventually merge these redundant facilities and use ParkEvent.
+
+
+class ParkEvent : public os::PlatformEvent {
+  private:
+    ParkEvent * FreeNext ;
+
+    // Current association
+    Thread * AssociatedWith ;
+    intptr_t RawThreadIdentity ;        // LWPID etc
+    volatile int Incarnation ;
+
+    // diagnostic : keep track of last thread to wake this thread.
+    // this is useful for construction of dependency graphs.
+    void * LastWaker ;
+
+  public:
+    // MCS-CLH list linkage and Native Mutex/Monitor
+    ParkEvent * volatile ListNext ;
+    ParkEvent * volatile ListPrev ;
+    volatile intptr_t OnList ;
+    volatile int TState ;
+    volatile int Notified ;             // for native monitor construct
+    volatile int IsWaiting ;            // Enqueued on WaitSet
+
+
+  private:
+    static ParkEvent * volatile FreeList ;
+    static volatile int ListLock ;
+
+    // It's prudent to mark the dtor as "private"
+    // ensuring that it's not visible outside the package.
+    // Unfortunately gcc warns about such usage, so
+    // we revert to the less desirable "protected" visibility.
+    // The other compilers accept private dtors.
+
+  protected:        // Ensure dtor is never invoked
+    ~ParkEvent() { guarantee (0, "invariant") ; }
+
+    ParkEvent() : PlatformEvent() {
+       AssociatedWith = NULL ;
+       FreeNext       = NULL ;
+       ListNext       = NULL ;
+       ListPrev       = NULL ;
+       OnList         = 0 ;
+       TState         = 0 ;
+       Notified       = 0 ;
+       IsWaiting      = 0 ;
+    }
+
+    // We use placement-new to force ParkEvent instances to be
+    // aligned on 256-byte address boundaries.  This ensures that the least
+    // significant byte of a ParkEvent address is always 0.
+
+    void * operator new (size_t sz) ;
+    void operator delete (void * a) ;
+
+  public:
+    static ParkEvent * Allocate (Thread * t) ;
+    static void Release (ParkEvent * e) ;
+} ;
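Because Parker::park() may return spuriously (the comment block above is explicit that callers "must tolerate stale references and spurious wakeups"), a correct caller re-tests its own condition in a loop rather than trusting the return from park(). A hypothetical caller-side sketch, assuming only the Parker API declared above; 'flag' and the helper names are illustrative:

// Hypothetical usage sketch of the Parker API declared above.
// park() returning proves nothing by itself; the caller's own
// condition ('flag' here, an illustrative stand-in) is re-checked.
void wait_until_set(volatile int* flag, Parker* self) {
  while (*flag == 0) {
    self->park(false /* relative, not absolute */, 0 /* no timeout */);
  }
}

void signal_set(volatile int* flag, Parker* waiter) {
  *flag = 1;          // publish the condition first ...
  waiter->unpark();   // ... then unpark; an early or stale unpark is harmless
}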
@ -435,6 +435,120 @@ void Relocator::adjust_local_var_table(int bci, int delta) {
     }
   }
 }
 
+// Create a new array, copying the src array but adding a hole at
+// the specified location
+static typeArrayOop insert_hole_at(
+    size_t where, int hole_sz, typeArrayOop src) {
+  Thread* THREAD = Thread::current();
+  Handle src_hnd(THREAD, src);
+  typeArrayOop dst =
+      oopFactory::new_permanent_byteArray(src->length() + hole_sz, CHECK_NULL);
+  src = (typeArrayOop)src_hnd();
+
+  address src_addr = (address)src->byte_at_addr(0);
+  address dst_addr = (address)dst->byte_at_addr(0);
+
+  memcpy(dst_addr, src_addr, where);
+  memcpy(dst_addr + where + hole_sz,
+         src_addr + where, src->length() - where);
+  return dst;
+}
+
+// The width of instruction at "bci" is changing by "delta".  Adjust the stack
+// map frames.
+void Relocator::adjust_stack_map_table(int bci, int delta) {
+  if (method()->has_stackmap_table()) {
+    typeArrayOop data = method()->stackmap_data();
+    // The data in the array is a classfile representation of the stackmap
+    // table attribute, less the initial u2 tag and u4 attribute_length fields.
+    stack_map_table_attribute* attr = stack_map_table_attribute::at(
+        (address)data->byte_at_addr(0) - (sizeof(u2) + sizeof(u4)));
+
+    int count = attr->number_of_entries();
+    stack_map_frame* frame = attr->entries();
+    int bci_iter = -1;
+    bool offset_adjusted = false; // only need to adjust one offset
+
+    for (int i = 0; i < count; ++i) {
+      int offset_delta = frame->offset_delta();
+      bci_iter += offset_delta;
+
+      if (!offset_adjusted && bci_iter > bci) {
+        int new_offset_delta = offset_delta + delta;
+
+        if (frame->is_valid_offset(new_offset_delta)) {
+          frame->set_offset_delta(new_offset_delta);
+        } else {
+          assert(frame->is_same_frame() ||
+                 frame->is_same_frame_1_stack_item_frame(),
+                 "Frame must be one of the compressed forms");
+          // The new delta exceeds the capacity of the 'same_frame' or
+          // 'same_frame_1_stack_item_frame' frame types.  We need to
+          // convert these frames to the extended versions, but the extended
+          // version is bigger and requires more room.  So we allocate a
+          // new array and copy the data, being sure to leave u2-sized hole
+          // right after the 'frame_type' for the new offset field.
+          //
+          // We can safely ignore the reverse situation as a small delta
+          // can still be used in an extended version of the frame.
+
+          size_t frame_offset = (address)frame - (address)data->byte_at_addr(0);
+
+          data = insert_hole_at(frame_offset + 1, 2, data);
+          if (data == NULL) {
+            return; // out-of-memory?
+          }
+
+          address frame_addr = (address)(data->byte_at_addr(0) + frame_offset);
+          frame = stack_map_frame::at(frame_addr);
+
+
+          // Now convert the frames in place
+          if (frame->is_same_frame()) {
+            same_frame_extended::create_at(frame_addr, new_offset_delta);
+          } else {
+            same_frame_1_stack_item_extended::create_at(
+                frame_addr, new_offset_delta, NULL);
+            // the verification_info_type should already be at the right spot
+          }
+        }
+        offset_adjusted = true; // needs to be done only once, since subsequent
+                                // values are offsets from the current
+      }
+
+      // The stack map frame may contain verification types, if so we need to
+      // check and update any Uninitialized type's bci (no matter where it is).
+      int number_of_types = frame->number_of_types();
+      verification_type_info* types = frame->types();
+
+      for (int i = 0; i < number_of_types; ++i) {
+        if (types->is_uninitialized() && types->bci() > bci) {
+          types->set_bci(types->bci() + delta);
+        }
+        types = types->next();
+      }
+
+      // Full frame has stack values too
+      full_frame* ff = frame->as_full_frame();
+      if (ff != NULL) {
+        address eol = (address)types;
+        number_of_types = ff->stack_slots(eol);
+        types = ff->stack(eol);
+        for (int i = 0; i < number_of_types; ++i) {
+          if (types->is_uninitialized() && types->bci() > bci) {
+            types->set_bci(types->bci() + delta);
+          }
+          types = types->next();
+        }
+      }
+
+      frame = frame->next();
+    }
+
+    method()->set_stackmap_data(data); // in case it has changed
+  }
+}
+
+
 bool Relocator::expand_code_array(int delta) {
   int length = MAX2(code_length() + delta, code_length() * (100+code_slop_pct()) / 100);
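insert_hole_at() above carves the u2 hole with two memcpy calls: bytes in [0, where) keep their offsets, and bytes in [where, length) move up by hole_sz, leaving a zeroed gap right after the one-byte frame_type. The same arithmetic on a plain byte buffer, as a hypothetical standalone helper (names illustrative):

#include <cstring>
#include <vector>

// Standalone rendering of the hole-insertion arithmetic used by
// insert_hole_at() above; operates on a std::vector instead of a
// typeArrayOop.  The new bytes are value-initialized to zero.
std::vector<unsigned char> insert_hole_sketch(
    const std::vector<unsigned char>& src, size_t where, size_t hole_sz) {
  std::vector<unsigned char> dst(src.size() + hole_sz, 0);
  std::memcpy(dst.data(), src.data(), where);              // unshifted prefix
  std::memcpy(dst.data() + where + hole_sz,                // shifted suffix
              src.data() + where, src.size() - where);
  return dst;
}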
@ -499,6 +613,9 @@ bool Relocator::relocate_code(int bci, int ilen, int delta) {
   // And local variable table...
   adjust_local_var_table(bci, delta);
 
+  // Adjust stack maps
+  adjust_stack_map_table(bci, delta);
+
   // Relocate the pending change stack...
   for (int j = 0; j < _changes->length(); j++) {
     ChangeItem* ci = _changes->at(j);
@ -641,6 +758,7 @@ bool Relocator::handle_switch_pad(int bci, int old_pad, bool is_lookup_switch) {
       memmove(addr_at(bci +1 + new_pad),
               addr_at(bci +1 + old_pad),
               len * 4);
+      memset(addr_at(bci + 1), 0, new_pad); // pad must be 0
     }
   }
   return true;
@ -105,6 +105,7 @@ class Relocator : public ResourceObj {
   void adjust_exception_table(int bci, int delta);
   void adjust_line_no_table (int bci, int delta);
   void adjust_local_var_table(int bci, int delta);
+  void adjust_stack_map_table(int bci, int delta);
   int  get_orig_switch_pad  (int bci, bool is_lookup_switch);
   int  rc_instr_len         (int bci);
   bool expand_code_array    (int delta);
|
|||||||
return (f <= (double)0.0) ? (double)0.0 - f : f;
|
return (f <= (double)0.0) ? (double)0.0 - f : f;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if defined(__SOFTFP__) || defined(PPC)
|
||||||
double SharedRuntime::dsqrt(double f) {
|
double SharedRuntime::dsqrt(double f) {
|
||||||
return sqrt(f);
|
return sqrt(f);
|
||||||
}
|
}
|
||||||
|
@ -116,6 +116,9 @@ class SharedRuntime: AllStatic {
 
 #if defined(__SOFTFP__) || defined(E500V2)
   static double dabs(double f);
+#endif
+
+#if defined(__SOFTFP__) || defined(PPC)
   static double dsqrt(double f);
 #endif
 
(file diff suppressed because it is too large)
@ -22,53 +22,6 @@
  *
  */
 
-class BasicLock VALUE_OBJ_CLASS_SPEC {
-  friend class VMStructs;
- private:
-  volatile markOop _displaced_header;
- public:
-  markOop      displaced_header() const               { return _displaced_header; }
-  void         set_displaced_header(markOop header)   { _displaced_header = header; }
-
-  void         print_on(outputStream* st) const;
-
-  // move a basic lock (used during deoptimization
-  void         move_to(oop obj, BasicLock* dest);
-
-  static int displaced_header_offset_in_bytes()       { return offset_of(BasicLock, _displaced_header); }
-};
-
-// A BasicObjectLock associates a specific Java object with a BasicLock.
-// It is currently embedded in an interpreter frame.
-
-// Because some machines have alignment restrictions on the control stack,
-// the actual space allocated by the interpreter may include padding words
-// after the end of the BasicObjectLock.  Also, in order to guarantee
-// alignment of the embedded BasicLock objects on such machines, we
-// put the embedded BasicLock at the beginning of the struct.
-
-class BasicObjectLock VALUE_OBJ_CLASS_SPEC {
-  friend class VMStructs;
- private:
-  BasicLock _lock;  // the lock, must be double word aligned
-  oop       _obj;   // object holds the lock;
-
- public:
-  // Manipulation
-  oop      obj() const                                { return _obj;  }
-  void set_obj(oop obj)                               { _obj = obj; }
-  BasicLock* lock()                                   { return &_lock; }
-
-  // Note: Use frame::interpreter_frame_monitor_size() for the size of BasicObjectLocks
-  //       in interpreter activation frames since it includes machine-specific padding.
-  static int size()                                   { return sizeof(BasicObjectLock)/wordSize; }
-
-  // GC support
-  void oops_do(OopClosure* f) { f->do_oop(&_obj); }
-
-  static int obj_offset_in_bytes()                    { return offset_of(BasicObjectLock, _obj);  }
-  static int lock_offset_in_bytes()                   { return offset_of(BasicObjectLock, _lock); }
-};
-
 class ObjectMonitor;
 
@ -163,6 +116,8 @@ class ObjectSynchronizer : AllStatic {
   static void verify() PRODUCT_RETURN;
   static int  verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
 
+  static void RegisterSpinCallback (int (*)(intptr_t, int), intptr_t) ;
+
  private:
   enum { _BLOCKSIZE = 128 };
   static ObjectMonitor* gBlockList;
@ -170,30 +125,6 @@ class ObjectSynchronizer : AllStatic {
   static ObjectMonitor * volatile gOmInUseList; // for moribund thread, so monitors they inflated still get scanned
   static int gOmInUseCount;
 
- public:
-  static void Initialize () ;
-  static PerfCounter * _sync_ContendedLockAttempts ;
-  static PerfCounter * _sync_FutileWakeups ;
-  static PerfCounter * _sync_Parks ;
-  static PerfCounter * _sync_EmptyNotifications ;
-  static PerfCounter * _sync_Notifications ;
-  static PerfCounter * _sync_SlowEnter ;
-  static PerfCounter * _sync_SlowExit ;
-  static PerfCounter * _sync_SlowNotify ;
-  static PerfCounter * _sync_SlowNotifyAll ;
-  static PerfCounter * _sync_FailedSpins ;
-  static PerfCounter * _sync_SuccessfulSpins ;
-  static PerfCounter * _sync_PrivateA ;
-  static PerfCounter * _sync_PrivateB ;
-  static PerfCounter * _sync_MonInCirculation ;
-  static PerfCounter * _sync_MonScavenged ;
-  static PerfCounter * _sync_Inflations ;
-  static PerfCounter * _sync_Deflations ;
-  static PerfLongVariable * _sync_MonExtant ;
-
- public:
-  static void RegisterSpinCallback (int (*)(intptr_t, int), intptr_t) ;
-
 };
 
 // ObjectLocker enforced balanced locking and can never thrown an
@ -2921,6 +2921,9 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
   // So that JDK version can be used as a discrimintor when parsing arguments
   JDK_Version_init();
 
+  // Update/Initialize System properties after JDK version number is known
+  Arguments::init_version_specific_system_properties();
+
   // Parse arguments
   jint parse_result = Arguments::parse(args);
   if (parse_result != JNI_OK) return parse_result;
@ -2992,8 +2995,8 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
   // crash Linux VM, see notes in os_linux.cpp.
   main_thread->create_stack_guard_pages();
 
-  // Initialize Java-Leve synchronization subsystem
-  ObjectSynchronizer::Initialize() ;
+  // Initialize Java-Level synchronization subsystem
+  ObjectMonitor::Initialize() ;
 
   // Initialize global modules
   jint status = init_globals();
@ -3962,215 +3965,272 @@ void Threads::print_on_error(outputStream* st, Thread* current, char* buf, int b
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Internal SpinLock and Mutex
|
||||||
|
// Based on ParkEvent
|
||||||
|
|
||||||
// Lifecycle management for TSM ParkEvents.
|
// Ad-hoc mutual exclusion primitives: SpinLock and Mux
|
||||||
// ParkEvents are type-stable (TSM).
|
|
||||||
// In our particular implementation they happen to be immortal.
|
|
||||||
//
|
//
|
||||||
// We manage concurrency on the FreeList with a CAS-based
|
// We employ SpinLocks _only for low-contention, fixed-length
|
||||||
// detach-modify-reattach idiom that avoids the ABA problems
|
// short-duration critical sections where we're concerned
|
||||||
// that would otherwise be present in a simple CAS-based
|
// about native mutex_t or HotSpot Mutex:: latency.
|
||||||
// push-pop implementation. (push-one and pop-all)
|
// The mux construct provides a spin-then-block mutual exclusion
|
||||||
|
// mechanism.
|
||||||
//
|
//
|
||||||
// Caveat: Allocate() and Release() may be called from threads
|
// Testing has shown that contention on the ListLock guarding gFreeList
|
||||||
// other than the thread associated with the Event!
|
// is common. If we implement ListLock as a simple SpinLock it's common
|
||||||
// If we need to call Allocate() when running as the thread in
|
// for the JVM to devolve to yielding with little progress. This is true
|
||||||
// question then look for the PD calls to initialize native TLS.
|
// despite the fact that the critical sections protected by ListLock are
|
||||||
// Native TLS (Win32/Linux/Solaris) can only be initialized or
|
// extremely short.
|
||||||
// accessed by the associated thread.
|
|
||||||
// See also pd_initialize().
|
|
||||||
//
|
//
|
||||||
// Note that we could defer associating a ParkEvent with a thread
|
// TODO-FIXME: ListLock should be of type SpinLock.
|
||||||
// until the 1st time the thread calls park(). unpark() calls to
|
// We should make this a 1st-class type, integrated into the lock
|
||||||
// an unprovisioned thread would be ignored. The first park() call
|
// hierarchy as leaf-locks. Critically, the SpinLock structure
|
||||||
// for a thread would allocate and associate a ParkEvent and return
|
// should have sufficient padding to avoid false-sharing and excessive
|
||||||
// immediately.
|
// cache-coherency traffic.
|
||||||
|
|
||||||
volatile int ParkEvent::ListLock = 0 ;
|
|
||||||
ParkEvent * volatile ParkEvent::FreeList = NULL ;
|
|
||||||
|
|
||||||
ParkEvent * ParkEvent::Allocate (Thread * t) {
|
typedef volatile int SpinLockT ;
|
||||||
// In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
|
|
||||||
ParkEvent * ev ;
|
|
||||||
|
|
||||||
// Start by trying to recycle an existing but unassociated
|
void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
|
||||||
// ParkEvent from the global free list.
|
if (Atomic::cmpxchg (1, adr, 0) == 0) {
|
||||||
|
return ; // normal fast-path return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
|
||||||
|
TEVENT (SpinAcquire - ctx) ;
|
||||||
|
int ctr = 0 ;
|
||||||
|
int Yields = 0 ;
|
||||||
for (;;) {
|
for (;;) {
|
||||||
ev = FreeList ;
|
while (*adr != 0) {
|
||||||
if (ev == NULL) break ;
|
++ctr ;
|
||||||
// 1: Detach - sequester or privatize the list
|
if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
|
||||||
// Tantamount to ev = Swap (&FreeList, NULL)
|
if (Yields > 5) {
|
||||||
if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
|
// Consider using a simple NakedSleep() instead.
|
||||||
continue ;
|
// Then SpinAcquire could be called by non-JVM threads
|
||||||
}
|
Thread::current()->_ParkEvent->park(1) ;
|
||||||
|
} else {
|
||||||
// We've detached the list. The list in-hand is now
|
os::NakedYield() ;
|
||||||
// local to this thread. This thread can operate on the
|
++Yields ;
|
||||||
// list without risk of interference from other threads.
|
}
|
||||||
// 2: Extract -- pop the 1st element from the list.
|
} else {
|
||||||
ParkEvent * List = ev->FreeNext ;
|
SpinPause() ;
|
||||||
if (List == NULL) break ;
|
|
||||||
for (;;) {
|
|
||||||
// 3: Try to reattach the residual list
|
|
||||||
guarantee (List != NULL, "invariant") ;
|
|
||||||
ParkEvent * Arv = (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
|
|
||||||
if (Arv == NULL) break ;
|
|
||||||
|
|
||||||
// New nodes arrived. Try to detach the recent arrivals.
|
|
||||||
if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
|
|
||||||
continue ;
|
|
||||||
}
|
}
|
||||||
guarantee (Arv != NULL, "invariant") ;
|
}
|
||||||
// 4: Merge Arv into List
|
if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
|
||||||
ParkEvent * Tail = List ;
|
|
||||||
while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
|
|
||||||
Tail->FreeNext = Arv ;
|
|
||||||
}
|
|
||||||
break ;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ev != NULL) {
|
|
||||||
guarantee (ev->AssociatedWith == NULL, "invariant") ;
|
|
||||||
} else {
|
|
||||||
// Do this the hard way -- materialize a new ParkEvent.
|
|
||||||
// In rare cases an allocating thread might detach a long list --
|
|
||||||
// installing null into FreeList -- and then stall or be obstructed.
|
|
||||||
// A 2nd thread calling Allocate() would see FreeList == null.
|
|
||||||
// The list held privately by the 1st thread is unavailable to the 2nd thread.
|
|
||||||
// In that case the 2nd thread would have to materialize a new ParkEvent,
|
|
||||||
// even though free ParkEvents existed in the system. In this case we end up
|
|
||||||
// with more ParkEvents in circulation than we need, but the race is
|
|
||||||
// rare and the outcome is benign. Ideally, the # of extant ParkEvents
|
|
||||||
// is equal to the maximum # of threads that existed at any one time.
|
|
||||||
// Because of the race mentioned above, segments of the freelist
|
|
||||||
// can be transiently inaccessible. At worst we may end up with the
|
|
||||||
// # of ParkEvents in circulation slightly above the ideal.
|
|
||||||
// Note that if we didn't have the TSM/immortal constraint, then
|
|
||||||
// when reattaching, above, we could trim the list.
|
|
||||||
ev = new ParkEvent () ;
|
|
||||||
guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
|
|
||||||
}
|
|
||||||
ev->reset() ; // courtesy to caller
|
|
||||||
ev->AssociatedWith = t ; // Associate ev with t
|
|
||||||
ev->FreeNext = NULL ;
|
|
||||||
return ev ;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void ParkEvent::Release (ParkEvent * ev) {
|
void Thread::SpinRelease (volatile int * adr) {
|
||||||
if (ev == NULL) return ;
|
assert (*adr != 0, "invariant") ;
|
||||||
guarantee (ev->FreeNext == NULL , "invariant") ;
|
OrderAccess::fence() ; // guarantee at least release consistency.
|
||||||
ev->AssociatedWith = NULL ;
|
// Roach-motel semantics.
|
||||||
|
// It's safe if subsequent LDs and STs float "up" into the critical section,
|
||||||
|
// but prior LDs and STs within the critical section can't be allowed
|
||||||
|
// to reorder or float past the ST that releases the lock.
|
||||||
|
*adr = 0 ;
|
||||||
|
}
|
||||||
|
|
||||||
|
// muxAcquire and muxRelease:
|
||||||
|
//
|
||||||
|
// * muxAcquire and muxRelease support a single-word lock-word construct.
|
||||||
|
// The LSB of the word is set IFF the lock is held.
|
||||||
|
// The remainder of the word points to the head of a singly-linked list
|
||||||
|
// of threads blocked on the lock.
|
||||||
|
//
|
||||||
|
// * The current implementation of muxAcquire-muxRelease uses its own
|
||||||
|
// dedicated Thread._MuxEvent instance. If we're interested in
|
||||||
|
// minimizing the peak number of extant ParkEvent instances then
|
||||||
|
// we could eliminate _MuxEvent and "borrow" _ParkEvent as long
|
||||||
|
// as certain invariants were satisfied. Specifically, care would need
|
||||||
|
// to be taken with regards to consuming unpark() "permits".
|
||||||
|
// A safe rule of thumb is that a thread would never call muxAcquire()
|
||||||
|
// if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
|
||||||
|
// park(). Otherwise the _ParkEvent park() operation in muxAcquire() could
|
||||||
|
// consume an unpark() permit intended for monitorenter, for instance.
|
||||||
|
// One way around this would be to widen the restricted-range semaphore
|
||||||
|
// implemented in park(). Another alternative would be to provide
|
||||||
|
// multiple instances of the PlatformEvent() for each thread. One
|
||||||
|
// instance would be dedicated to muxAcquire-muxRelease, for instance.
|
||||||
|
//
|
||||||
|
// * Usage:
|
||||||
|
// -- Only as leaf locks
|
||||||
|
// -- for short-term locking only as muxAcquire does not perform
|
||||||
|
// thread state transitions.
|
||||||
|
//
|
||||||
|
// Alternatives:
|
||||||
|
// * We could implement muxAcquire and muxRelease with MCS or CLH locks
|
||||||
|
// but with parking or spin-then-park instead of pure spinning.
|
||||||
|
// * Use Taura-Oyama-Yonenzawa locks.
|
||||||
|
// * It's possible to construct a 1-0 lock if we encode the lockword as
|
||||||
|
// (List,LockByte). Acquire will CAS the full lockword while Release
|
||||||
|
// will STB 0 into the LockByte. The 1-0 scheme admits stranding, so
|
||||||
|
// acquiring threads use timers (ParkTimed) to detect and recover from
|
||||||
|
// the stranding window. Thread/Node structures must be aligned on 256-byte
|
||||||
|
// boundaries by using placement-new.
|
||||||
|
// * Augment MCS with advisory back-link fields maintained with CAS().
|
||||||
|
// Pictorially: LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
|
||||||
|
// The validity of the backlinks must be ratified before we trust the value.
|
||||||
|
// If the backlinks are invalid the exiting thread must back-track through the
|
||||||
|
// the forward links, which are always trustworthy.
|
||||||
|
// * Add a successor indication. The LockWord is currently encoded as
|
||||||
|
// (List, LOCKBIT:1). We could also add a SUCCBIT or an explicit _succ variable
|
||||||
|
// to provide the usual futile-wakeup optimization.
|
||||||
|
// See RTStt for details.
|
||||||
|
// * Consider schedctl.sc_nopreempt to cover the critical section.
|
||||||
|
//
|
||||||
|
|
||||||
|
|
||||||
|
typedef volatile intptr_t MutexT ; // Mux Lock-word
|
||||||
|
enum MuxBits { LOCKBIT = 1 } ;
|
||||||
|
|
||||||
|
void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
|
||||||
|
intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
|
||||||
|
if (w == 0) return ;
|
||||||
|
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
|
||||||
|
return ;
|
||||||
|
}
|
||||||
|
|
||||||
|
TEVENT (muxAcquire - Contention) ;
|
||||||
|
ParkEvent * const Self = Thread::current()->_MuxEvent ;
|
||||||
|
assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
|
||||||
  for (;;) {
     int its = (os::is_MP() ? 100 : 0) + 1 ;

     // Optional spin phase: spin-then-park strategy
     while (--its >= 0) {
       w = *Lock ;
       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
          return ;
       }
     }

     Self->reset() ;
     Self->OnList = intptr_t(Lock) ;
     // The following fence() isn't _strictly necessary as the subsequent
     // CAS() both serializes execution and ratifies the fetched *Lock value.
     OrderAccess::fence();
     for (;;) {
        w = *Lock ;
        if ((w & LOCKBIT) == 0) {
            if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
                Self->OnList = 0 ;   // hygiene - allows stronger asserts
                return ;
            }
            continue ;   // Interference -- *Lock changed -- Just retry
        }
        assert (w & LOCKBIT, "invariant") ;
        Self->ListNext = (ParkEvent *) (w & ~LOCKBIT) ;
        if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
     }

     while (Self->OnList != 0) {
        Self->park() ;
     }
  }
}
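A hypothetical usage sketch (the lock word and function name below are illustrative and do not appear in this changeset): callers treat the word as a leaf-level lock and keep the critical section short, since muxAcquire performs no thread state transitions:

static volatile intptr_t ExampleLock = 0 ;   // illustrative lock word

void example_critical_section() {
  Thread::muxAcquire (&ExampleLock, "ExampleLock") ;
  // ... short, non-blocking work only: never block, allocate, or
  // reach a safepoint while holding a mux lock ...
  Thread::muxRelease (&ExampleLock) ;
}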

-void ParkEvent::Release (ParkEvent * ev) {
-  if (ev == NULL) return ;
-  guarantee (ev->FreeNext == NULL, "invariant") ;
-  ev->AssociatedWith = NULL ;
-  for (;;) {
-    // Push ev onto FreeList
-    // The mechanism is "half" lock-free.
-    ParkEvent * List = FreeList ;
-    ev->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
-  }
-}
-
-// Override operator new and delete so we can ensure that the
-// least significant byte of ParkEvent addresses is 0.
-// Beware that excessive address alignment is undesirable
-// as it can result in D$ index usage imbalance as
-// well as bank access imbalance on Niagara-like platforms,
-// although Niagara's hash function should help.
-
-void * ParkEvent::operator new (size_t sz) {
-  return (void *) ((intptr_t (CHeapObj::operator new (sz + 256)) + 256) & -256) ;
-}
-
-void ParkEvent::operator delete (void * a) {
-  // ParkEvents are type-stable and immortal ...
-  ShouldNotReachHere();
-}
-
-// 6399321 As a temporary measure we copied & modified the ParkEvent::
-// allocate() and release() code for use by Parkers.  The Parker:: forms
-// will eventually be removed as we consolidate and shift over to ParkEvents
-// for both builtin synchronization and JSR166 operations.
-
-volatile int Parker::ListLock = 0 ;
-Parker * volatile Parker::FreeList = NULL ;
-
-Parker * Parker::Allocate (JavaThread * t) {
-  guarantee (t != NULL, "invariant") ;
-  Parker * p ;
-
-  // Start by trying to recycle an existing but unassociated
-  // Parker from the global free list.
-  for (;;) {
-    p = FreeList ;
-    if (p == NULL) break ;
-    // 1: Detach
-    // Tantamount to p = Swap (&FreeList, NULL)
-    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
-      continue ;
-    }
-
-    // We've detached the list.  The list in-hand is now
-    // local to this thread.  This thread can operate on the
-    // list without risk of interference from other threads.
-    // 2: Extract -- pop the 1st element from the list.
-    Parker * List = p->FreeNext ;
-    if (List == NULL) break ;
-    for (;;) {
-      // 3: Try to reattach the residual list
-      guarantee (List != NULL, "invariant") ;
-      Parker * Arv = (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
-      if (Arv == NULL) break ;
-
-      // New nodes arrived.  Try to detach the recent arrivals.
-      if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
-        continue ;
-      }
-      guarantee (Arv != NULL, "invariant") ;
-      // 4: Merge Arv into List
-      Parker * Tail = List ;
-      while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
-      Tail->FreeNext = Arv ;
-    }
-    break ;
-  }
-
-  if (p != NULL) {
-    guarantee (p->AssociatedWith == NULL, "invariant") ;
-  } else {
-    // Do this the hard way -- materialize a new Parker..
-    // In rare cases an allocating thread might detach
-    // a long list -- installing null into FreeList -- and
-    // then stall.  Another thread calling Allocate() would see
-    // FreeList == null and then invoke the ctor.  In this case we
-    // end up with more Parkers in circulation than we need, but
-    // the race is rare and the outcome is benign.
-    // Ideally, the # of extant Parkers is equal to the
-    // maximum # of threads that existed at any one time.
-    // Because of the race mentioned above, segments of the
-    // freelist can be transiently inaccessible.  At worst
-    // we may end up with the # of Parkers in circulation
-    // slightly above the ideal.
-    p = new Parker() ;
-  }
-  p->AssociatedWith = t ;   // Associate p with t
-  p->FreeNext       = NULL ;
-  return p ;
-}
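The numbered detach / extract / reattach / merge steps above generalize beyond HotSpot. Below is a hedged, self-contained C++11 rendition of the same discipline, assuming type-stable nodes; std::atomic stands in for Atomic::cmpxchg_ptr, and Node, push and pop are names invented for this sketch:

#include <atomic>

struct Node { Node* free_next = nullptr; };
std::atomic<Node*> free_list{nullptr};

// The lock-free half: push with a classic CAS loop onto the head.
void push(Node* n) {
  Node* head = free_list.load();
  do { n->free_next = head; } while (!free_list.compare_exchange_weak(head, n));
}

Node* pop() {
  for (;;) {
    Node* p = free_list.load();
    if (p == nullptr) return nullptr;
    if (!free_list.compare_exchange_strong(p, nullptr)) continue;  // 1: detach
    Node* rest = p->free_next;                                     // 2: extract
    while (rest != nullptr) {                                      // 3: reattach
      Node* expected = nullptr;
      if (free_list.compare_exchange_strong(expected, rest)) break;
      Node* arv = expected;            // new nodes arrived; detach exactly them
      if (!free_list.compare_exchange_strong(arv, nullptr)) continue;
      Node* tail = rest;               // 4: merge arrivals onto the residual
      while (tail->free_next != nullptr) tail = tail->free_next;
      tail->free_next = expected;
    }
    return p;
  }
}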

void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
  if (w == 0) return ;
  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
    return ;
  }

  TEVENT (muxAcquire - Contention) ;
  ParkEvent * ReleaseAfter = NULL ;
  if (ev == NULL) {
    ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
  }
  assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
  for (;;) {
    guarantee (ev->OnList == 0, "invariant") ;
    int its = (os::is_MP() ? 100 : 0) + 1 ;

    // Optional spin phase: spin-then-park strategy
    while (--its >= 0) {
      w = *Lock ;
      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
        if (ReleaseAfter != NULL) {
          ParkEvent::Release (ReleaseAfter) ;
        }
        return ;
      }
    }

    ev->reset() ;
    ev->OnList = intptr_t(Lock) ;
    // The following fence() isn't _strictly necessary as the subsequent
    // CAS() both serializes execution and ratifies the fetched *Lock value.
    OrderAccess::fence();
    for (;;) {
      w = *Lock ;
      if ((w & LOCKBIT) == 0) {
        if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
          ev->OnList = 0 ;
          // We call ::Release while holding the outer lock, thus
          // artificially lengthening the critical section.
          // Consider deferring the ::Release() until the subsequent unlock(),
          // after we've dropped the outer lock.
          if (ReleaseAfter != NULL) {
            ParkEvent::Release (ReleaseAfter) ;
          }
          return ;
        }
        continue ;   // Interference -- *Lock changed -- Just retry
      }
      assert (w & LOCKBIT, "invariant") ;
      ev->ListNext = (ParkEvent *) (w & ~LOCKBIT) ;
      if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
    }

    while (ev->OnList != 0) {
      ev->park() ;
    }
  }
}

-void Parker::Release (Parker * p) {
-  if (p == NULL) return ;
-  guarantee (p->AssociatedWith != NULL, "invariant") ;
-  guarantee (p->FreeNext == NULL , "invariant") ;
-  p->AssociatedWith = NULL ;
-  for (;;) {
-    // Push p onto FreeList
-    Parker * List = FreeList ;
-    p->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
-  }
-}

// Release() must extract a successor from the list and then wake that thread.
// It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
// similar to that used by ParkEvent::Allocate() and ::Release().  DMR-based
// Release() would:
// (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
// (B) Extract a successor from the private list "in-hand".
// (C) Attempt to CAS() the residual back into *Lock over null.
//     If there were any newly arrived threads the CAS() would fail.
//     In that case Release() would detach the RATs, re-merge the list in-hand
//     with the RATs and repeat as needed.  Alternately, Release() might
//     detach and extract a successor, but then pass the residual list to the wakee.
//     The wakee would be responsible for reattaching and remerging before it
//     competed for the lock.
//
// Both "pop" and DMR are immune from ABA corruption -- there can be
// multiple concurrent pushers, but only one popper or detacher.
// This implementation pops from the head of the list.  This is unfair,
// but tends to provide excellent throughput as hot threads remain hot.
// (We wake recently run threads first.)

void Thread::muxRelease (volatile intptr_t * Lock) {
  for (;;) {
    const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
    assert (w & LOCKBIT, "invariant") ;
    if (w == LOCKBIT) return ;
    ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
    assert (List != NULL, "invariant") ;
    assert (List->OnList == intptr_t(Lock), "invariant") ;
    ParkEvent * nxt = List->ListNext ;

    // The following CAS() releases the lock and pops the head element.
    if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
      continue ;
    }
    List->OnList = 0 ;
    OrderAccess::fence() ;
    List->unpark () ;
    return ;
  }
}
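For contrast with the pop-based muxRelease above, here is a rough, hedged sketch of the DMR-style Release() the comment describes. It is illustrative only -- portable C++11 with an invented Node type -- and it simplifies the RAT handling by preserving the lock bit while detaching:

#include <atomic>
#include <cstdint>

struct Node { Node* next = nullptr; void unpark() { /* wake the waiter */ } };
constexpr uintptr_t kLockBit = 1;

void dmr_release(std::atomic<uintptr_t>& lock_word) {
  // (A) We hold the lock: swapping in 0 releases it and detaches the list.
  uintptr_t w = lock_word.exchange(0);
  Node* list = reinterpret_cast<Node*>(w & ~kLockBit);
  if (list == nullptr) return;                 // no waiters to wake
  Node* succ = list;                           // (B) extract a successor
  Node* residual = succ->next;
  while (residual != nullptr) {                // (C) reattach the residual
    uintptr_t cur = lock_word.load();
    Node* rats = reinterpret_cast<Node*>(cur & ~kLockBit);
    if (rats != nullptr) {
      // Recently-arrived threads (RATs): detach them, preserving the
      // lock bit, then merge them ahead of the residual and retry.
      if (!lock_word.compare_exchange_strong(cur, cur & kLockBit)) continue;
      Node* tail = rats;
      while (tail->next != nullptr) tail = tail->next;
      tail->next = residual;
      residual = rats;
      continue;
    }
    if (lock_word.compare_exchange_strong(
            cur, reinterpret_cast<uintptr_t>(residual) | (cur & kLockBit)))
      break;
  }
  succ->unpark();
}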

void Threads::verify() {
  ALL_JAVA_THREADS(p) {
    p->verify();
@@ -30,6 +30,7 @@ class JvmtiGetLoadedClassesClosure;
 class ThreadStatistics;
 class ConcurrentLocksDump;
 class ParkEvent ;
+class Parker;

 class ciEnv;
 class CompileThread;
@@ -544,7 +545,6 @@ public:
   static void muxAcquire  (volatile intptr_t * Lock, const char * Name) ;
   static void muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) ;
   static void muxRelease  (volatile intptr_t * Lock) ;

 };

 // Inline implementation of Thread::current()
@@ -1769,100 +1769,3 @@ public:
   }
 };

-// ParkEvents are type-stable and immortal.
-//
-// Lifecycle: Once a ParkEvent is associated with a thread that ParkEvent remains
-// associated with the thread for the thread's entire lifetime - the relationship is
-// stable.  A thread will be associated with at most one ParkEvent.  When the thread
-// expires, the ParkEvent moves to the EventFreeList.  New threads attempt to allocate from
-// the EventFreeList before creating a new Event.  Type-stability frees us from
-// worrying about stale Event or Thread references in the objectMonitor subsystem.
-// (A reference to ParkEvent is always valid, even though the event may no longer be associated
-// with the desired or expected thread.  A key aspect of this design is that the callers of
-// park, unpark, etc must tolerate stale references and spurious wakeups).
-//
-// Only the "associated" thread can block (park) on the ParkEvent, although
-// any other thread can unpark a reachable parkevent.  Park() is allowed to
-// return spuriously.  In fact park-unpark is really just an optimization to
-// avoid unbounded spinning and surrender the CPU to be a polite system citizen.
-// A degenerate albeit "impolite" park-unpark implementation could simply return.
-// See http://blogs.sun.com/dave for more details.
-//
-// Eventually I'd like to eliminate Events and ObjectWaiters, both of which serve as
-// thread proxies, and simply make the THREAD structure type-stable and persistent.
-// Currently, we unpark events associated with threads, but ideally we'd just
-// unpark threads.
-//
-// The base-class, PlatformEvent, is platform-specific while the ParkEvent is
-// platform-independent.  PlatformEvent provides park(), unpark(), etc., and
-// is abstract -- that is, a PlatformEvent should never be instantiated except
-// as part of a ParkEvent.
-// Equivalently we could have defined a platform-independent base-class that
-// exported Allocate(), Release(), etc.  The platform-specific class would extend
-// that base-class, adding park(), unpark(), etc.
-//
-// A word of caution: The JVM uses 2 very similar constructs:
-// 1. ParkEvents are used for Java-level "monitor" synchronization.
-// 2. Parkers are used by JSR166-JUC park-unpark.
-//
-// We'll want to eventually merge these redundant facilities and use ParkEvent.
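The spurious-wakeup caveat above is why every park() in this subsystem sits inside a guarded loop. A minimal sketch of the pattern, where 'ready' stands in for the real wait condition:

volatile bool ready = false ;         // illustrative condition, not HotSpot code

void wait_until_ready() {
  ParkEvent * ev = Thread::current()->_MuxEvent ;
  ev->reset() ;
  OrderAccess::fence() ;              // publish the wait state before checking
  while (!ready) {                    // tolerate stale references and
    ev->park() ;                      // spurious returns: always re-check
  }
}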
-class ParkEvent : public os::PlatformEvent {
-  private:
-    ParkEvent * FreeNext ;
-
-    // Current association
-    Thread * AssociatedWith ;
-    intptr_t RawThreadIdentity ;        // LWPID etc
-    volatile int Incarnation ;
-
-    // diagnostic : keep track of last thread to wake this thread.
-    // this is useful for construction of dependency graphs.
-    void * LastWaker ;
-
-  public:
-    // MCS-CLH list linkage and Native Mutex/Monitor
-    ParkEvent * volatile ListNext ;
-    ParkEvent * volatile ListPrev ;
-    volatile intptr_t OnList ;
-    volatile int TState ;
-    volatile int Notified ;             // for native monitor construct
-    volatile int IsWaiting ;            // Enqueued on WaitSet
-
-  private:
-    static ParkEvent * volatile FreeList ;
-    static volatile int ListLock ;
-
-    // It's prudent to mark the dtor as "private",
-    // ensuring that it's not visible outside the package.
-    // Unfortunately gcc warns about such usage, so
-    // we revert to the less desirable "protected" visibility.
-    // The other compilers accept private dtors.
-
-  protected:      // Ensure dtor is never invoked
-    ~ParkEvent() { guarantee (0, "invariant") ; }
-
-    ParkEvent() : PlatformEvent() {
-      AssociatedWith = NULL ;
-      FreeNext       = NULL ;
-      ListNext       = NULL ;
-      ListPrev       = NULL ;
-      OnList         = 0 ;
-      TState         = 0 ;
-      Notified       = 0 ;
-      IsWaiting      = 0 ;
-    }
-
-    // We use placement-new to force ParkEvent instances to be
-    // aligned on 256-byte address boundaries.  This ensures that the least
-    // significant byte of a ParkEvent address is always 0.
-
-    void * operator new (size_t sz) ;
-    void operator delete (void * a) ;
-
-  public:
-    static ParkEvent * Allocate (Thread * t) ;
-    static void Release (ParkEvent * e) ;
-} ;
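The 256-byte placement-new contract declared above can be sanity-checked in isolation. This standalone snippet -- not HotSpot code -- mirrors the ((addr + 256) & -256) rounding performed by ParkEvent::operator new:

#include <cassert>
#include <cstdint>
#include <cstdlib>

int main() {
  void* raw = std::malloc(1024 + 256);       // over-allocate by 256 bytes
  assert(raw != NULL);
  uintptr_t p = reinterpret_cast<uintptr_t>(raw);
  uintptr_t aligned = (p + 256) & ~uintptr_t(255);   // same as & -256
  assert((aligned & 0xFF) == 0);             // least significant byte is 0
  assert(aligned > p && aligned - p <= 256); // stays inside the allocation
  std::free(raw);                            // real ParkEvents are immortal
  return 0;
}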
@@ -51,14 +51,16 @@

 void warning(const char* format, ...) {
+  if (PrintWarnings) {
   // In case error happens before init or during shutdown
   if (tty == NULL) ostream_init();

   tty->print("%s warning: ", VM_Version::vm_name());
   va_list ap;
   va_start(ap, format);
   tty->vprint_cr(format, ap);
   va_end(ap);
+  }
   if (BreakAtWarning) BREAKPOINT;
 }

@@ -61,6 +61,18 @@ bool Exceptions::special_exception(Thread* thread, const char* file, int line, H
     ShouldNotReachHere();
   }

+#ifdef ASSERT
+  // Check for trying to throw stack overflow before initialization is complete
+  // to prevent infinite recursion trying to initialize stack overflow without
+  // adequate stack space.
+  // This can happen with stress testing a large value of StackShadowPages
+  if (h_exception()->klass() == SystemDictionary::StackOverflowError_klass()) {
+    instanceKlass* ik = instanceKlass::cast(h_exception->klass());
+    assert(ik->is_initialized(),
+           "need to increase min_stack_allowed calculation");
+  }
+#endif // ASSERT
+
   if (thread->is_VM_thread()
       || thread->is_Compiler_thread() ) {
     // We do not care what kind of exception we get for the vm-thread or a thread which
@@ -91,7 +103,6 @@ bool Exceptions::special_exception(Thread* thread, const char* file, int line, s
     thread->set_pending_exception(Universe::vm_exception(), file, line);
     return true;
   }

   return false;
 }
@@ -193,6 +204,7 @@ void Exceptions::throw_stack_overflow_exception(Thread* THREAD, const char* file
   klassOop k = SystemDictionary::StackOverflowError_klass();
   oop e = instanceKlass::cast(k)->allocate_instance(CHECK);
   exception = Handle(THREAD, e);  // fill_in_stack trace does gc
+  assert(instanceKlass::cast(k)->is_initialized(), "need to increase min_stack_allowed calculation");
   if (StackTraceInThrowable) {
     java_lang_Throwable::fill_in_stack_trace(exception);
   }
@@ -91,3 +91,4 @@ b53f226b1d91473ac54184afa827be07b87e0319 jdk7-b112
 e250cef36ea05e627e7e6f7d75e5e19f529e2ba3 jdk7-b114
 449bad8d67b5808ecf0f927683acc0a5940f8c85 jdk7-b115
 1657ed4e1d86c8aa2028ab5a41f9da1ac4a369f8 jdk7-b116
+3e6726bbf80a4254ecd01051c8ed77ee19325e46 jdk7-b117
@@ -413,6 +413,7 @@ JAVA_JAVA_java = \
     java/io/FilePermission.java \
     java/io/Serializable.java \
     java/io/Externalizable.java \
+    java/io/SerialCallbackContext.java \
     java/io/Bits.java \
     java/io/ObjectInput.java \
     java/io/ObjectInputStream.java \
@@ -148,14 +148,9 @@ include $(BUILDDIR)/common/Library.gmk
 #
 ifeq ($(PLATFORM), windows)

-STATIC_LIBRARY_DIR = $(OBJDIR)/static
-STATIC_LIBRARY_NAME = $(LIBPREFIX)$(LIBRARY).lib
-STATIC_LIBRARY = $(STATIC_LIBRARY_DIR)/$(STATIC_LIBRARY_NAME)
+STATIC_LIBRARY = $(OBJDIR)/static/$(LIBPREFIX)$(LIBRARY).lib

-$(STATIC_LIBRARY_DIR): $(OBJDIR)
-	@$(MKDIR) $(STATIC_LIBRARY_DIR)
-
-$(STATIC_LIBRARY): $(STATIC_LIBRARY_DIR)
+$(STATIC_LIBRARY): $(FILES_o)
 	@$(prep-target)
 	$(LIBEXE) -nologo -out:$@ $(FILES_o)

@@ -33,7 +33,6 @@ FILES_src = \
     java/nio/channels/AsynchronousByteChannel.java \
     java/nio/channels/AsynchronousChannel.java \
     java/nio/channels/AsynchronousChannelGroup.java \
-    java/nio/channels/AsynchronousDatagramChannel.java \
     java/nio/channels/AsynchronousFileChannel.java \
     java/nio/channels/AsynchronousServerSocketChannel.java \
    java/nio/channels/AsynchronousSocketChannel.java \
@@ -207,7 +206,6 @@ FILES_src = \
     sun/nio/ch/SelChImpl.java \
     sun/nio/ch/ServerSocketAdaptor.java \
     sun/nio/ch/ServerSocketChannelImpl.java \
-    sun/nio/ch/SimpleAsynchronousDatagramChannelImpl.java \
     sun/nio/ch/SinkChannelImpl.java \
     sun/nio/ch/SocketAdaptor.java \
     sun/nio/ch/SocketChannelImpl.java \
@@ -53,7 +53,7 @@ FILES_export = \
 #
 # Extra cc/linker flags.
 #
-LDLIBS += dsound.lib winmm.lib user32.lib
+LDLIBS += dsound.lib winmm.lib user32.lib ole32.lib
 CPPFLAGS += \
 	-DUSE_DAUDIO=TRUE \
 	-I$(SHARE_SRC)/native/com/sun/media/sound \
@@ -48,8 +48,8 @@ jar.jmx.name = jmx.jar
 jar.jmx.sealed = true
 jar.jmx.spec.title = JSR 003, 160, 255 - JMX API
 jar.jmx.spec.version = ${project.spec.version}
-jar.jmx.spec.vendor = Sun Microsystems, Inc.
+jar.jmx.spec.vendor = Oracle Corporation
 jar.jmx.impl.title = JSR 003, 160, 255 - OpenJDK 7 JMX API
 jar.jmx.impl.vendor = Project OpenJDK

 javadoc.options=-J-Xmx256m
@@ -21,4 +21,4 @@
 # or visit www.oracle.com if you need additional information or have any
 # questions.
 #
-tzdata2010l
+tzdata2010o
@@ -569,8 +569,8 @@ Rule HK 1953 only - Nov 1 3:30 0 -
 Rule HK 1954 1964 - Mar Sun>=18 3:30 1:00 S
 Rule HK 1954 only - Oct 31 3:30 0 -
 Rule HK 1955 1964 - Nov Sun>=1 3:30 0 -
-Rule HK 1965 1977 - Apr Sun>=16 3:30 1:00 S
-Rule HK 1965 1977 - Oct Sun>=16 3:30 0 -
+Rule HK 1965 1976 - Apr Sun>=16 3:30 1:00 S
+Rule HK 1965 1976 - Oct Sun>=16 3:30 0 -
 Rule HK 1973 only - Dec 30 3:30 1:00 S
 Rule HK 1979 only - May Sun>=8 3:30 1:00 S
 Rule HK 1979 only - Oct Sun>=16 3:30 0 -
@@ -306,13 +306,26 @@ Zone Indian/Cocos 6:27:40 - LMT 1900
 # http://www.timeanddate.com/news/time/fiji-dst-ends-march-2010.html
 # </a>

+# From Alexander Krivenyshev (2010-10-24):
+# According to Radio Fiji and Fiji Times online, Fiji will end DST 3
+# weeks earlier than expected - on March 6, 2011, not March 27, 2011...
+# Here is confirmation from Government of the Republic of the Fiji Islands,
+# Ministry of Information (fiji.gov.fj) web site:
+# <a href="http://www.fiji.gov.fj/index.php?option=com_content&view=article&id=2608:daylight-savings&catid=71:press-releases&Itemid=155">
+# http://www.fiji.gov.fj/index.php?option=com_content&view=article&id=2608:daylight-savings&catid=71:press-releases&Itemid=155
+# </a>
+# or
+# <a href="http://www.worldtimezone.com/dst_news/dst_news_fiji04.html">
+# http://www.worldtimezone.com/dst_news/dst_news_fiji04.html
+# </a>
+
 # Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
 Rule Fiji 1998 1999 - Nov Sun>=1 2:00 1:00 S
 Rule Fiji 1999 2000 - Feb lastSun 3:00 0 -
 Rule Fiji 2009 only - Nov 29 2:00 1:00 S
 Rule Fiji 2010 only - Mar lastSun 3:00 0 -
 Rule Fiji 2010 only - Oct 24 2:00 1:00 S
-Rule Fiji 2011 only - Mar lastSun 3:00 0 -
+Rule Fiji 2011 only - Mar Sun>=1 3:00 0 -
 # Zone NAME GMTOFF RULES FORMAT [UNTIL]
 Zone Pacific/Fiji 11:53:40 - LMT 1915 Oct 26 # Suva
 	12:00 Fiji FJ%sT # Fiji Time
@@ -509,11 +522,21 @@ Zone Pacific/Pago_Pago 12:37:12 - LMT 1879 Jul 5
 # http://www.parliament.gov.ws/documents/acts/Daylight%20Saving%20Act%20%202009%20%28English%29%20-%20Final%207-7-091.pdf
 # </a>

+# From Raymond Hughes (2010-10-07):
+# Please see
+# <a href="http://www.mcil.gov.ws">
+# http://www.mcil.gov.ws
+# </a>,
+# the Ministry of Commerce, Industry and Labour (sideframe) "Last Sunday
+# September 2010 (26/09/10) - adjust clocks forward from 12:00 midnight
+# to 01:00am and First Sunday April 2011 (03/04/11) - adjust clocks
+# backwards from 1:00am to 12:00am"
+
 Zone Pacific/Apia 12:33:04 - LMT 1879 Jul 5
 	-11:26:56 - LMT 1911
 	-11:30 - SAMT 1950 # Samoa Time
 	-11:00 - WST 2010 Sep 26
-	-11:00 1:00 WSDT 2011 Apr 3
+	-11:00 1:00 WSDT 2011 Apr 3 1:00
 	-11:00 - WST

 # Solomon Is
@@ -63,7 +63,7 @@ AQ -6448-06406 Antarctica/Palmer Palmer Station, Anvers Island
 AQ -6736+06253 Antarctica/Mawson Mawson Station, Holme Bay
 AQ -6835+07758 Antarctica/Davis Davis Station, Vestfold Hills
 AQ -6617+11031 Antarctica/Casey Casey Station, Bailey Peninsula
-AQ -7824+10654 Antarctica/Vostok Vostok Station, S Magnetic Pole
+AQ -7824+10654 Antarctica/Vostok Vostok Station, Lake Vostok
 AQ -6640+14001 Antarctica/DumontDUrville Dumont-d'Urville Station, Terre Adelie
 AQ -690022+0393524 Antarctica/Syowa Syowa Station, E Ongul I
 AQ -5430+15857 Antarctica/Macquarie Macquarie Island Station, Macquarie Island
@@ -355,7 +355,6 @@ JavaMain(void * _args)

     JavaVM *vm = 0;
     JNIEnv *env = 0;
-    jstring mainClassName;
     jclass mainClass;
     jmethodID mainID;
     jobjectArray mainArgs;
@@ -72,7 +72,7 @@ inflate_file(int fd, zentry *entry, int *size_out)
     if (entry->how == STORED) {
         *(char *)((size_t)in + entry->csize) = '\0';
         if (size_out) {
-            *size_out = entry->csize;
+            *size_out = (int)entry->csize;
         }
         return (in);
     } else if (entry->how == DEFLATED) {
@@ -103,7 +103,7 @@ inflate_file(int fd, zentry *entry, int *size_out)
             return (NULL);
         }
         if (size_out) {
-            *size_out = entry->isize;
+            *size_out = (int)entry->isize;
         }
         return (out);
     } else
@@ -317,7 +317,7 @@ find_file(int fd, zentry *entry, const char *file_name)
          * manifest. If so, build the entry record from the data found in
          * the header located and return success.
          */
-        if (CENNAM(p) == JLI_StrLen(file_name) &&
+        if ((size_t)CENNAM(p) == JLI_StrLen(file_name) &&
             memcmp((p + CENHDR), file_name, JLI_StrLen(file_name)) == 0) {
             if (lseek(fd, base_offset + CENOFF(p), SEEK_SET) < (off_t)0) {
                 free(buffer);
@@ -606,8 +606,5 @@ JLI_ManifestIterate(const char *jarfile, attribute_closure ac, void *user_data)
     }
     free(mp);
     close(fd);
-    if (rc == 0)
-        return (0);
-    else
-        return (-2);
+    return (rc == 0) ? 0 : -2;
 }
@@ -290,12 +290,12 @@ FileList_join(FileList fl, char sep)
     char *path;
     char *p;
     for (i = 0, size = 1; i < fl->size; i++)
-        size += JLI_StrLen(fl->files[i]) + 1;
+        size += (int)JLI_StrLen(fl->files[i]) + 1;

     path = JLI_MemAlloc(size);

     for (i = 0, p = path; i < fl->size; i++) {
-        int len = JLI_StrLen(fl->files[i]);
+        int len = (int)JLI_StrLen(fl->files[i]);
         if (i > 0) *p++ = sep;
         memcpy(p, fl->files[i], len);
         p += len;
@@ -309,7 +309,7 @@ static FileList
 FileList_split(const char *path, char sep)
 {
     const char *p, *q;
-    int len = JLI_StrLen(path);
+    int len = (int)JLI_StrLen(path);
     int count;
     FileList fl;
     for (count = 1, p = path; p < path + len; p++)
@@ -330,7 +330,7 @@ FileList_split(const char *path, char sep)
 static int
 isJarFileName(const char *filename)
 {
-    int len = JLI_StrLen(filename);
+    int len = (int)JLI_StrLen(filename);
     return (len >= 4) &&
         (filename[len - 4] == '.') &&
         (equal(filename + len - 3, "jar") ||
@@ -342,8 +342,8 @@ isJarFileName(const char *filename)
 static char *
 wildcardConcat(const char *wildcard, const char *basename)
 {
-    int wildlen = JLI_StrLen(wildcard);
-    int baselen = JLI_StrLen(basename);
+    int wildlen = (int)JLI_StrLen(wildcard);
+    int baselen = (int)JLI_StrLen(basename);
     char *filename = (char *) JLI_MemAlloc(wildlen + baselen);
     /* Replace the trailing '*' with basename */
     memcpy(filename, wildcard, wildlen-1);
@@ -369,7 +369,7 @@ wildcardFileList(const char *wildcard)
 static int
 isWildcard(const char *filename)
 {
-    int len = JLI_StrLen(filename);
+    int len = (int)JLI_StrLen(filename);
     return (len > 0) &&
         (filename[len - 1] == '*') &&
         (len == 1 || IS_FILE_SEPARATOR(filename[len - 2])) &&
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -253,7 +253,8 @@ final class AESCrypt extends SymmetricCipher implements AESConstants
             for (j = 0; j < 8; j++) {
                 if (AA[i][j] != 0) {
                     AA[i][j] = (byte)
-                        alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255];
+                        alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF])
+                            % 255];
                 }
             }
             for (t = 0; t < 4; t++) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -92,7 +92,8 @@ public final class ARCFOURCipher extends CipherSpi {
     }

     // core crypt code. OFB style, so works for both encryption and decryption
-    private void crypt(byte[] in, int inOfs, int inLen, byte[] out, int outOfs) {
+    private void crypt(byte[] in, int inOfs, int inLen, byte[] out,
+            int outOfs) {
         if (is < 0) {
             // doFinal() was called, need to reset the cipher to initial state
             init(lastKey);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -31,8 +31,8 @@ import javax.crypto.*;
 import javax.crypto.spec.*;

 /**
- * This class implements the DESede algorithm (DES-EDE, tripleDES) in its various
- * modes (<code>ECB</code>, <code>CFB</code>, <code>OFB</code>,
+ * This class implements the DESede algorithm (DES-EDE, tripleDES) in
+ * its various modes (<code>ECB</code>, <code>CFB</code>, <code>OFB</code>,
  * <code>CBC</code>, <code>PCBC</code>) and padding schemes
  * (<code>PKCS5Padding</code>, <code>NoPadding</code>,
  * <code>ISO10126Padding</code>).
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -190,7 +190,8 @@ javax.crypto.interfaces.DHPrivateKey, Serializable {
             ike.initCause(e);
             throw ike;
         } catch (IOException e) {
-            InvalidKeyException ike = new InvalidKeyException("Error parsing key encoding: " + e.getMessage());
+            InvalidKeyException ike = new InvalidKeyException(
+                "Error parsing key encoding: " + e.getMessage());
             ike.initCause(e);
             throw ike;
         }

@@ -300,7 +301,8 @@ javax.crypto.interfaces.DHPrivateKey, Serializable {
             DerInputStream in = new DerInputStream(this.key);
             this.x = in.getBigInteger();
         } catch (IOException e) {
-            InvalidKeyException ike = new InvalidKeyException("Error parsing key encoding: " + e.getMessage());
+            InvalidKeyException ike = new InvalidKeyException(
+                "Error parsing key encoding: " + e.getMessage());
             ike.initCause(e);
             throw ike;
         }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -180,7 +180,8 @@ javax.crypto.interfaces.DHPublicKey, Serializable {
             throw new InvalidKeyException("Private-value length too big");

         } catch (IOException e) {
-            throw new InvalidKeyException("Error parsing key encoding: " + e.toString());
+            throw new InvalidKeyException(
+                "Error parsing key encoding: " + e.toString());
         }
     }

@@ -281,7 +282,8 @@ javax.crypto.interfaces.DHPublicKey, Serializable {
             DerInputStream in = new DerInputStream(this.key);
             this.y = in.getBigInteger();
         } catch (IOException e) {
-            throw new InvalidKeyException("Error parsing key encoding: " + e.toString());
+            throw new InvalidKeyException(
+                "Error parsing key encoding: " + e.toString());
         }
     }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -764,7 +764,8 @@ public final class JceKeyStore extends KeyStoreSpi {
                 cf = (CertificateFactory)cfs.get(certType);
             } else {
                 // create new certificate factory
-                cf = CertificateFactory.getInstance(certType);
+                cf = CertificateFactory.getInstance(
+                    certType);
                 // store the certificate factory so we can
                 // reuse it later
                 cfs.put(certType, cf);

@@ -863,8 +864,9 @@ public final class JceKeyStore extends KeyStoreSpi {
             dis.readFully(actual);
             for (int i = 0; i < computed.length; i++) {
                 if (computed[i] != actual[i]) {
-                    throw new IOException("Keystore was tampered with, or "
-                        + "password was incorrect");
+                    throw new IOException(
+                        "Keystore was tampered with, or "
+                        + "password was incorrect");
                 }
             }
         }