Daniel D. Daugherty 2014-10-31 18:18:58 +00:00
commit c6b9495b55
460 changed files with 7872 additions and 4569 deletions
.hgtags
.hgtags-top-repo
common/autoconf
corba
hotspot
.hgtags
make/solaris/makefiles
src
test
jaxp
.hgtags
src/java.xml/share/classes/com/sun/org/apache/xerces/internal/impl/io
jaxws
jdk

@ -279,3 +279,4 @@ f0c5e4b732da823bdaa4184133675f384e7cd68d jdk9-b33
9618201c5df28a460631577fad1f61e96f775c34 jdk9-b34
a137992d750c72f6f944f341aa19b0d0d96afe0c jdk9-b35
41df50e7303daf73c0d661ef601c4fe250915de5 jdk9-b36
b409bc51bc23cfd51f2bd04ea919ec83535af9d0 jdk9-b37

@ -279,3 +279,4 @@ e4ba01b726e263953ae129be37c94de6ed145b1d jdk9-b33
087b23f35631e68e950496a36fce8ccca612966a jdk9-b34
c173ba994245380fb11ef077d1e59823386840eb jdk9-b35
201d4e235d597a25a2d3ee1404394789ba386119 jdk9-b36
723a67b0c442391447b1d8aad8b249d06d1032e8 jdk9-b37

@ -136,8 +136,8 @@ AC_DEFUN_ONCE([FLAGS_SETUP_INIT_FLAGS],
SYSROOT_CFLAGS="-isysroot \"$SYSROOT\" -iframework\"$SYSROOT/System/Library/Frameworks\""
SYSROOT_LDFLAGS=$SYSROOT_CFLAGS
elif test "x$TOOLCHAIN_TYPE" = xgcc; then
SYSROOT_CFLAGS="--sysroot=\"$SYSROOT\""
SYSROOT_LDFLAGS="--sysroot=\"$SYSROOT\""
SYSROOT_CFLAGS="--sysroot=$SYSROOT"
SYSROOT_LDFLAGS="--sysroot=$SYSROOT"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
SYSROOT_CFLAGS="-isysroot \"$SYSROOT\""
SYSROOT_LDFLAGS="-isysroot \"$SYSROOT\""

@ -4328,7 +4328,7 @@ TOOLCHAIN_DESCRIPTION_xlc="IBM XL C/C++"
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1413533532
DATE_WHEN_GENERATED=1414663067
###############################################################################
#
@ -41681,8 +41681,8 @@ $as_echo "$tool_specified" >&6; }
SYSROOT_CFLAGS="-isysroot \"$SYSROOT\" -iframework\"$SYSROOT/System/Library/Frameworks\""
SYSROOT_LDFLAGS=$SYSROOT_CFLAGS
elif test "x$TOOLCHAIN_TYPE" = xgcc; then
SYSROOT_CFLAGS="--sysroot=\"$SYSROOT\""
SYSROOT_LDFLAGS="--sysroot=\"$SYSROOT\""
SYSROOT_CFLAGS="--sysroot=$SYSROOT"
SYSROOT_LDFLAGS="--sysroot=$SYSROOT"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
SYSROOT_CFLAGS="-isysroot \"$SYSROOT\""
SYSROOT_LDFLAGS="-isysroot \"$SYSROOT\""
@ -44037,17 +44037,6 @@ fi
-R$OPENWIN_HOME/lib$OPENJDK_TARGET_CPU_ISADIR"
fi
#
# Weird Sol10 something check...TODO change to try compile
#
if test "x${OPENJDK_TARGET_OS}" = xsolaris; then
if test "`uname -r`" = "5.10"; then
if test "`${EGREP} -c XLinearGradient ${OPENWIN_HOME}/share/include/X11/extensions/Xrender.h`" = "0"; then
X_CFLAGS="${X_CFLAGS} -DSOLARIS10_NO_XRENDER_STRUCTS"
fi
fi
fi
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
@ -44055,7 +44044,7 @@ ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $
ac_compiler_gnu=$ac_cv_c_compiler_gnu
OLD_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $X_CFLAGS"
CFLAGS="$CFLAGS $SYSROOT_CFLAGS $X_CFLAGS"
# Need to include Xlib.h and Xutil.h to avoid "present but cannot be compiled" warnings on Solaris 10
for ac_header in X11/extensions/shape.h X11/extensions/Xrender.h X11/extensions/XTest.h X11/Intrinsic.h
@ -44079,6 +44068,31 @@ fi
done
# If XLinearGradient isn't available in Xrender.h, signal that it needs to be
# defined in libawt_xawt.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if XLinearGradient is defined in Xrender.h" >&5
$as_echo_n "checking if XLinearGradient is defined in Xrender.h... " >&6; }
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
#include <X11/extensions/Xrender.h>
int
main ()
{
XLinearGradient x;
;
return 0;
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
X_CFLAGS="$X_CFLAGS -DSOLARIS10_NO_XRENDER_STRUCTS"
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
CFLAGS="$OLD_CFLAGS"
ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'

@ -139,20 +139,9 @@ AC_DEFUN_ONCE([LIB_SETUP_X11],
-R$OPENWIN_HOME/lib$OPENJDK_TARGET_CPU_ISADIR"
fi
#
# Weird Sol10 something check...TODO change to try compile
#
if test "x${OPENJDK_TARGET_OS}" = xsolaris; then
if test "`uname -r`" = "5.10"; then
if test "`${EGREP} -c XLinearGradient ${OPENWIN_HOME}/share/include/X11/extensions/Xrender.h`" = "0"; then
X_CFLAGS="${X_CFLAGS} -DSOLARIS10_NO_XRENDER_STRUCTS"
fi
fi
fi
AC_LANG_PUSH(C)
OLD_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $X_CFLAGS"
CFLAGS="$CFLAGS $SYSROOT_CFLAGS $X_CFLAGS"
# Need to include Xlib.h and Xutil.h to avoid "present but cannot be compiled" warnings on Solaris 10
AC_CHECK_HEADERS([X11/extensions/shape.h X11/extensions/Xrender.h X11/extensions/XTest.h X11/Intrinsic.h],
@ -164,6 +153,16 @@ AC_DEFUN_ONCE([LIB_SETUP_X11],
]
)
# If XLinearGradient isn't available in Xrender.h, signal that it needs to be
# defined in libawt_xawt.
AC_MSG_CHECKING([if XLinearGradient is defined in Xrender.h])
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM([[#include <X11/extensions/Xrender.h>]],
[[XLinearGradient x;]])],
[AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])
X_CFLAGS="$X_CFLAGS -DSOLARIS10_NO_XRENDER_STRUCTS"])
CFLAGS="$OLD_CFLAGS"
AC_LANG_POP(C)

@ -279,3 +279,4 @@ cfdac5887952c2dd73c73a1d8d9aa880d0539bbf jdk9-b33
24a0bad5910f775bb4002d1dacf8b3af87c63cd8 jdk9-b34
9bc2dbd3dfb8c9fa88e00056b8b93a81ee6d306e jdk9-b35
ffd90c81d4ef9d94d880fc852e2fc482ecd9b374 jdk9-b36
7e9add74ad50841fb39dae75db56374aefa1de4c jdk9-b37

@ -439,3 +439,4 @@ af46576a8d7cb4003028b8ee8bf408cfe227315b jdk9-b32
821164b0131a47ca065697c7d27d8f215e608c8d jdk9-b34
438cb613151c4bd290bb732697517cba1cafcb04 jdk9-b35
464ab653fbb17eb518d8ef60f8df301de7ef00d0 jdk9-b36
b1c2dd843f247a1db19e1e85eb62ca405f72dc26 jdk9-b37

@ -143,7 +143,7 @@ else
LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc -ldemangle
endif # sparcWorks
LIBS += -lkstat -lpicl
LIBS += -lkstat
# By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM

@ -865,14 +865,19 @@ void VM_Version::get_processor_features() {
if (supports_bmi1()) {
// tzcnt does not require VEX prefix
if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
UseCountTrailingZerosInstruction = true;
if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
// Don't use tzcnt if BMI1 is switched off on command line.
UseCountTrailingZerosInstruction = false;
} else {
UseCountTrailingZerosInstruction = true;
}
}
} else if (UseCountTrailingZerosInstruction) {
warning("tzcnt instruction is not available on this CPU");
FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
}
// BMI instructions use an encoding with VEX prefix.
// BMI instructions (except tzcnt) use an encoding with VEX prefix.
// VEX prefix is generated only when AVX > 0.
if (supports_bmi1() && supports_avx()) {
if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {

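For illustration, a minimal standalone sketch of the flag interaction above (plain booleans stand in for HotSpot's flag machinery and FLAG_IS_DEFAULT; this is not VM code): tzcnt stays off when BMI1 was explicitly disabled on the command line, even on a CPU that supports it.

#include <cstdio>

// Hypothetical stand-in for a HotSpot flag: value plus "still at default?".
struct Flag { bool value; bool is_default; };

static bool use_count_trailing_zeros(bool cpu_supports_bmi1,
                                     Flag use_bmi1, Flag use_tzcnt) {
  if (cpu_supports_bmi1) {
    if (use_tzcnt.is_default) {
      // Respect an explicit -XX:-UseBMI1Instructions: don't enable tzcnt.
      if (!use_bmi1.value && !use_bmi1.is_default) return false;
      return true;
    }
    return use_tzcnt.value;
  }
  return false; // CPU lacks the instruction; the VM would also warn here.
}

int main() {
  Flag bmi1_off_explicit = { false, false };
  Flag tzcnt_default     = { true,  true  };
  printf("%d\n", use_count_trailing_zeros(true, bmi1_off_explicit, tzcnt_default)); // 0
  return 0;
}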
@ -33,18 +33,51 @@
#include <sys/systeminfo.h>
#include <kstat.h>
#include <picl.h>
#include <dlfcn.h>
#include <link.h>
extern "C" static int PICL_get_l1_data_cache_line_size_helper(picl_nodehdl_t nodeh, void *result);
extern "C" static int PICL_get_l2_cache_line_size_helper(picl_nodehdl_t nodeh, void *result);
// Functions from the library we need (signatures should match those in picl.h)
extern "C" {
typedef int (*picl_initialize_func_t)(void);
typedef int (*picl_shutdown_func_t)(void);
typedef int (*picl_get_root_func_t)(picl_nodehdl_t *nodehandle);
typedef int (*picl_walk_tree_by_class_func_t)(picl_nodehdl_t rooth,
const char *classname, void *c_args,
int (*callback_fn)(picl_nodehdl_t hdl, void *args));
typedef int (*picl_get_prop_by_name_func_t)(picl_nodehdl_t nodeh, const char *nm,
picl_prophdl_t *ph);
typedef int (*picl_get_propval_func_t)(picl_prophdl_t proph, void *valbuf, size_t sz);
typedef int (*picl_get_propinfo_func_t)(picl_prophdl_t proph, picl_propinfo_t *pi);
}
class PICL {
// Pointers to functions in the library
picl_initialize_func_t _picl_initialize;
picl_shutdown_func_t _picl_shutdown;
picl_get_root_func_t _picl_get_root;
picl_walk_tree_by_class_func_t _picl_walk_tree_by_class;
picl_get_prop_by_name_func_t _picl_get_prop_by_name;
picl_get_propval_func_t _picl_get_propval;
picl_get_propinfo_func_t _picl_get_propinfo;
// Handle to the library that is returned by dlopen
void *_dl_handle;
bool open_library();
void close_library();
template<typename FuncType> bool bind(FuncType& func, const char* name);
bool bind_library_functions();
// Get a value of the integer property. The value in the tree can be either 32 or 64 bit
// depending on the platform. The result is converted to int.
static int get_int_property(picl_nodehdl_t nodeh, const char* name, int* result) {
int get_int_property(picl_nodehdl_t nodeh, const char* name, int* result) {
picl_propinfo_t pinfo;
picl_prophdl_t proph;
if (picl_get_prop_by_name(nodeh, name, &proph) != PICL_SUCCESS ||
picl_get_propinfo(proph, &pinfo) != PICL_SUCCESS) {
if (_picl_get_prop_by_name(nodeh, name, &proph) != PICL_SUCCESS ||
_picl_get_propinfo(proph, &pinfo) != PICL_SUCCESS) {
return PICL_FAILURE;
}
@ -54,13 +87,13 @@ class PICL {
}
if (pinfo.size == sizeof(int64_t)) {
int64_t val;
if (picl_get_propval(proph, &val, sizeof(int64_t)) != PICL_SUCCESS) {
if (_picl_get_propval(proph, &val, sizeof(int64_t)) != PICL_SUCCESS) {
return PICL_FAILURE;
}
*result = static_cast<int>(val);
} else if (pinfo.size == sizeof(int32_t)) {
int32_t val;
if (picl_get_propval(proph, &val, sizeof(int32_t)) != PICL_SUCCESS) {
if (_picl_get_propval(proph, &val, sizeof(int32_t)) != PICL_SUCCESS) {
return PICL_FAILURE;
}
*result = static_cast<int>(val);
@ -74,6 +107,7 @@ class PICL {
// Visitor and a state machine that visits integer properties and verifies that the
// values are the same. Stores the unique value observed.
class UniqueValueVisitor {
PICL *_picl;
enum {
INITIAL, // Start state, no assignments happened
ASSIGNED, // Assigned a value
@ -81,7 +115,7 @@ class PICL {
} _state;
int _value;
public:
UniqueValueVisitor() : _state(INITIAL) { }
UniqueValueVisitor(PICL* picl) : _picl(picl), _state(INITIAL) { }
int value() {
assert(_state == ASSIGNED, "Precondition");
return _value;
@ -98,9 +132,10 @@ class PICL {
static int visit(picl_nodehdl_t nodeh, const char* name, void *arg) {
UniqueValueVisitor *state = static_cast<UniqueValueVisitor*>(arg);
PICL* picl = state->_picl;
assert(!state->is_inconsistent(), "Precondition");
int curr;
if (PICL::get_int_property(nodeh, name, &curr) == PICL_SUCCESS) {
if (picl->get_int_property(nodeh, name, &curr) == PICL_SUCCESS) {
if (!state->is_assigned()) { // first iteration
state->set_value(curr);
} else if (curr != state->value()) { // following iterations
@ -124,32 +159,36 @@ public:
return UniqueValueVisitor::visit(nodeh, "l2-cache-line-size", state);
}
PICL() : _L1_data_cache_line_size(0), _L2_cache_line_size(0) {
if (picl_initialize() == PICL_SUCCESS) {
PICL() : _L1_data_cache_line_size(0), _L2_cache_line_size(0), _dl_handle(NULL) {
if (!open_library()) {
return;
}
if (_picl_initialize() == PICL_SUCCESS) {
picl_nodehdl_t rooth;
if (picl_get_root(&rooth) == PICL_SUCCESS) {
UniqueValueVisitor L1_state;
if (_picl_get_root(&rooth) == PICL_SUCCESS) {
UniqueValueVisitor L1_state(this);
// Visit all "cpu" class instances
picl_walk_tree_by_class(rooth, "cpu", &L1_state, PICL_get_l1_data_cache_line_size_helper);
_picl_walk_tree_by_class(rooth, "cpu", &L1_state, PICL_get_l1_data_cache_line_size_helper);
if (L1_state.is_initial()) { // Still initial, iteration found no values
// Try walk all "core" class instances, it might be a Fujitsu machine
picl_walk_tree_by_class(rooth, "core", &L1_state, PICL_get_l1_data_cache_line_size_helper);
_picl_walk_tree_by_class(rooth, "core", &L1_state, PICL_get_l1_data_cache_line_size_helper);
}
if (L1_state.is_assigned()) { // Is there a value?
_L1_data_cache_line_size = L1_state.value();
}
UniqueValueVisitor L2_state;
picl_walk_tree_by_class(rooth, "cpu", &L2_state, PICL_get_l2_cache_line_size_helper);
UniqueValueVisitor L2_state(this);
_picl_walk_tree_by_class(rooth, "cpu", &L2_state, PICL_get_l2_cache_line_size_helper);
if (L2_state.is_initial()) {
picl_walk_tree_by_class(rooth, "core", &L2_state, PICL_get_l2_cache_line_size_helper);
_picl_walk_tree_by_class(rooth, "core", &L2_state, PICL_get_l2_cache_line_size_helper);
}
if (L2_state.is_assigned()) {
_L2_cache_line_size = L2_state.value();
}
}
picl_shutdown();
_picl_shutdown();
}
close_library();
}
unsigned int L1_data_cache_line_size() const { return _L1_data_cache_line_size; }
@ -163,6 +202,43 @@ extern "C" static int PICL_get_l2_cache_line_size_helper(picl_nodehdl_t nodeh, v
return PICL::get_l2_cache_line_size(nodeh, result);
}
template<typename FuncType>
bool PICL::bind(FuncType& func, const char* name) {
func = reinterpret_cast<FuncType>(dlsym(_dl_handle, name));
return func != NULL;
}
bool PICL::bind_library_functions() {
assert(_dl_handle != NULL, "library should be open");
return bind(_picl_initialize, "picl_initialize" ) &&
bind(_picl_shutdown, "picl_shutdown" ) &&
bind(_picl_get_root, "picl_get_root" ) &&
bind(_picl_walk_tree_by_class, "picl_walk_tree_by_class") &&
bind(_picl_get_prop_by_name, "picl_get_prop_by_name" ) &&
bind(_picl_get_propval, "picl_get_propval" ) &&
bind(_picl_get_propinfo, "picl_get_propinfo" );
}
bool PICL::open_library() {
_dl_handle = dlopen("libpicl.so.1", RTLD_LAZY);
if (_dl_handle == NULL) {
warning("PICL (libpicl.so.1) is missing. Performance will not be optimal.");
return false;
}
if (!bind_library_functions()) {
assert(false, "unexpected PICL API change");
close_library();
return false;
}
return true;
}
void PICL::close_library() {
assert(_dl_handle != NULL, "library should be open");
dlclose(_dl_handle);
_dl_handle = NULL;
}
// We need to keep these here as long as we have to build on Solaris
// versions before 10.
#ifndef SI_ARCHITECTURE_32

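The PICL change above replaces a hard link-time dependency with dlopen/dlsym binding. The same pattern, reduced to a self-contained sketch (libm's cos is a stand-in for the libpicl entry points; the error handling mirrors open_library/bind_library_functions):

#include <dlfcn.h>
#include <cstdio>

typedef double (*cos_func_t)(double);

// Bind one symbol from an already-open library; false if it is missing.
template<typename FuncType>
static bool bind(void* handle, FuncType& func, const char* name) {
  func = reinterpret_cast<FuncType>(dlsym(handle, name));
  return func != NULL;
}

int main() {
  // Open lazily and fail soft if the library is absent, as PICL now does.
  void* handle = dlopen("libm.so.6", RTLD_LAZY);
  if (handle == NULL) { fprintf(stderr, "libm missing\n"); return 1; }
  cos_func_t my_cos;
  if (!bind(handle, my_cos, "cos")) { dlclose(handle); return 1; }
  printf("cos(0) = %f\n", my_cos(0.0));
  dlclose(handle);
  return 0;
}

Compile with -ldl; the dlclose at the end corresponds to close_library().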
@ -1093,9 +1093,8 @@ void ciEnv::register_method(ciMethod* target,
// JVMTI -- compiled method notification (must be done outside lock)
nm->post_compiled_method_load_event();
} else {
// The CodeCache is full. Print out warning and disable compilation.
// The CodeCache is full.
record_failure("code cache is full");
CompileBroker::handle_full_code_cache(CodeCache::get_code_blob_type(comp_level));
}
}

@ -229,8 +229,8 @@ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
return blob;
}
void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
return CodeCache::allocate(size, CodeBlobType::NonNMethod, is_critical);
void* BufferBlob::operator new(size_t s, unsigned size) throw() {
return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}
void BufferBlob::free(BufferBlob *blob) {
@ -260,10 +260,7 @@ AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// The parameter 'true' indicates a critical memory allocation.
// This means that CodeCacheMinimumFreeSpace is used, if necessary
const bool is_critical = true;
blob = new (size, is_critical) AdapterBlob(size, cb);
blob = new (size) AdapterBlob(size, cb);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
@ -285,10 +282,7 @@ MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
size += round_to(buffer_size, oopSize);
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// The parameter 'true' indicates a critical memory allocation.
// This means that CodeCacheMinimumFreeSpace is used, if necessary
const bool is_critical = true;
blob = new (size, is_critical) MethodHandlesAdapterBlob(size);
blob = new (size) MethodHandlesAdapterBlob(size);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
@ -336,14 +330,14 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod, true);
void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
}
// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod, true);
void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
}

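The new (size) AdapterBlob(...) calls above rely on a class-scoped placement operator new; a minimal sketch of that idiom follows (the fixed arena is hypothetical, standing in for CodeCache::allocate, and the throw() specification is what lets the new-expression yield NULL without running the constructor):

#include <cstddef>

alignas(8) static char arena[1024]; // stand-in for the code cache
static size_t arena_top = 0;

struct Blob {
  unsigned _size;
  Blob(unsigned size) : _size(size) {}
  // The extra 'size' argument is forwarded from the 'new (size) Blob(...)' site.
  void* operator new(size_t /*header*/, unsigned size) throw() {
    if (arena_top + size > sizeof(arena)) return NULL;
    void* p = arena + arena_top;
    arena_top += size;
    return p;
  }
};

int main() {
  Blob* b = new (128u) Blob(128); // carves 128 bytes out of the arena
  return b == NULL;
}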
@ -221,7 +221,7 @@ class BufferBlob: public CodeBlob {
BufferBlob(const char* name, int size);
BufferBlob(const char* name, int size, CodeBuffer* cb);
void* operator new(size_t s, unsigned size, bool is_critical = false) throw();
void* operator new(size_t s, unsigned size) throw();
public:
// Creation

@ -44,6 +44,7 @@
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/compilationPolicy.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
@ -192,16 +193,16 @@ void CodeCache::initialize_heaps() {
}
// Make sure we have enough space for VM internal code
uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
}
guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
// Align reserved sizes of CodeHeaps
size_t non_method_size = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
size_t profiled_size = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
size_t non_method_size = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
size_t profiled_size = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
// Compute initial sizes of CodeHeaps
size_t init_non_method_size = MIN2(InitialCodeCacheSize, non_method_size);
@ -267,6 +268,22 @@ bool CodeCache::heap_available(int code_blob_type) {
}
}
const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
switch(code_blob_type) {
case CodeBlobType::NonNMethod:
return "NonNMethodCodeHeapSize";
break;
case CodeBlobType::MethodNonProfiled:
return "NonProfiledCodeHeapSize";
break;
case CodeBlobType::MethodProfiled:
return "ProfiledCodeHeapSize";
break;
}
ShouldNotReachHere();
return NULL;
}
void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
// Check if heap is needed
if (!heap_available(code_blob_type)) {
@ -332,14 +349,18 @@ CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
return next_blob(get_code_heap(cb), cb);
}
CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
// Do not seize the CodeCache lock here--if the caller has not
// already done so, we are going to lose bigtime, since the code
// cache will contain a garbage CodeBlob until the caller can
// run the constructor for the CodeBlob subclass he is busy
// instantiating.
/**
* Do not seize the CodeCache lock here--if the caller has not
* already done so, we are going to lose bigtime, since the code
* cache will contain a garbage CodeBlob until the caller can
* run the constructor for the CodeBlob subclass he is busy
* instantiating.
*/
CodeBlob* CodeCache::allocate(int size, int code_blob_type) {
// Possibly wakes up the sweeper thread.
NMethodSweeper::notify(code_blob_type);
assert_locked_or_safepoint(CodeCache_lock);
assert(size > 0, "allocation request must be reasonable");
assert(size > 0, err_msg_res("Code cache allocation request must be > 0 but is %d", size));
if (size <= 0) {
return NULL;
}
@ -350,14 +371,18 @@ CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
assert(heap != NULL, "heap is null");
while (true) {
cb = (CodeBlob*)heap->allocate(size, is_critical);
cb = (CodeBlob*)heap->allocate(size);
if (cb != NULL) break;
if (!heap->expand_by(CodeCacheExpansionSize)) {
// Expansion failed
if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
// Fallback solution: Store non-nmethod code in the non-profiled code heap
return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
// Fallback solution: Store non-nmethod code in the non-profiled code heap.
// Note that in the sweeper we check the reverse_free_ratio of the non-profiled
// code heap and force stack scanning if less than 10% of the code heap is free.
return allocate(size, CodeBlobType::MethodNonProfiled);
}
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CompileBroker::handle_full_code_cache(code_blob_type);
return NULL;
}
if (PrintCodeCacheExtension) {
@ -754,19 +779,6 @@ size_t CodeCache::max_capacity() {
return max_cap;
}
/**
* Returns true if a CodeHeap is full and sets code_blob_type accordingly.
*/
bool CodeCache::is_full(int* code_blob_type) {
FOR_ALL_HEAPS(heap) {
if ((*heap)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
*code_blob_type = (*heap)->code_blob_type();
return true;
}
}
return false;
}
/**
* Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
* is free, reverse_free_ratio() returns 4.
@ -776,9 +788,13 @@ double CodeCache::reverse_free_ratio(int code_blob_type) {
if (heap == NULL) {
return 0;
}
double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0
double max_capacity = (double)heap->max_capacity();
return max_capacity / unallocated_capacity;
double result = max_capacity / unallocated_capacity;
assert (max_capacity >= unallocated_capacity, "Must be");
assert (result >= 1.0, err_msg_res("reverse_free_ratio must be at least 1. It is %f", result));
return result;
}
size_t CodeCache::bytes_allocated_in_freelists() {
@ -1011,9 +1027,8 @@ void CodeCache::report_codemem_full(int code_blob_type, bool print) {
// Not yet reported for this heap, report
heap->report_full();
if (SegmentedCodeCache) {
warning("%s is full. Compiler has been disabled.", CodeCache::get_code_heap_name(code_blob_type));
warning("Try increasing the code heap size using -XX:%s=",
(code_blob_type == CodeBlobType::MethodNonProfiled) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize");
warning("%s is full. Compiler has been disabled.", get_code_heap_name(code_blob_type));
warning("Try increasing the code heap size using -XX:%s=", get_code_heap_flag_name(code_blob_type));
} else {
warning("CodeCache is full. Compiler has been disabled.");
warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");

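Worked numbers for the reverse_free_ratio change above, as a sketch (not the HotSpot function): with a 100 MB heap and 25 MB unallocated the ratio is 4, and the MAX2 guard keeps a completely full heap from dividing by zero.

#include <algorithm>
#include <cstdio>

static double reverse_free_ratio(double max_capacity, double unallocated) {
  unallocated = std::max(unallocated, 1.0); // avoid division by zero
  return max_capacity / unallocated;
}

int main() {
  printf("%f\n", reverse_free_ratio(100.0, 25.0)); // 4.0: 1/4 of the heap is free
  printf("%f\n", reverse_free_ratio(100.0, 0.0));  // 100.0 instead of infinity
  return 0;
}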
@ -100,6 +100,8 @@ class CodeCache : AllStatic {
static void add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type);
static CodeHeap* get_code_heap(CodeBlob* cb); // Returns the CodeHeap for the given CodeBlob
static CodeHeap* get_code_heap(int code_blob_type); // Returns the CodeHeap for the given CodeBlobType
// Returns the name of the VM option to set the size of the corresponding CodeHeap
static const char* get_code_heap_flag_name(int code_blob_type);
static bool heap_available(int code_blob_type); // Returns true if an own CodeHeap for the given CodeBlobType is available
static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one continuous chunk of memory for the CodeHeaps
@ -118,16 +120,16 @@ class CodeCache : AllStatic {
static void initialize();
// Allocation/administration
static CodeBlob* allocate(int size, int code_blob_type, bool is_critical = false); // allocates a new CodeBlob
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
static int alignment_unit(); // guaranteed alignment of all CodeBlobs
static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
static void free(CodeBlob* cb); // frees a CodeBlob
static bool contains(void *p); // returns whether p is included
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
static CodeBlob* allocate(int size, int code_blob_type); // allocates a new CodeBlob
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
static int alignment_unit(); // guaranteed alignment of all CodeBlobs
static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
static void free(CodeBlob* cb); // frees a CodeBlob
static bool contains(void *p); // returns whether p is included
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
// Lookup
static CodeBlob* find_blob(void* start); // Returns the CodeBlob containing the given address
@ -180,7 +182,6 @@ class CodeCache : AllStatic {
static size_t unallocated_capacity();
static size_t max_capacity();
static bool is_full(int* code_blob_type);
static double reverse_free_ratio(int code_blob_type);
static bool needs_cache_clean() { return _needs_cache_clean; }

@ -804,10 +804,7 @@ nmethod::nmethod(
#endif // def HAVE_DTRACE_H
void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
// With a SegmentedCodeCache, nmethods are allocated on separate heaps and therefore do not share memory
// with critical CodeBlobs. We define the allocation as critical to make sure all code heap memory is used.
bool is_critical = SegmentedCodeCache;
return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level), is_critical);
return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
}
nmethod::nmethod(

@ -63,7 +63,6 @@ void* VtableStub::operator new(size_t size, int code_size) throw() {
// If changing the name, update the other file accordingly.
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
if (blob == NULL) {
CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return NULL;
}
_chunk = blob->content_begin();

@ -156,8 +156,6 @@ long CompileBroker::_peak_compilation_time = 0;
CompileQueue* CompileBroker::_c2_compile_queue = NULL;
CompileQueue* CompileBroker::_c1_compile_queue = NULL;
GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;
class CompilationLog : public StringEventLog {
public:
@ -657,13 +655,10 @@ void CompileQueue::free_all() {
lock()->notify_all();
}
// ------------------------------------------------------------------
// CompileQueue::get
//
// Get the next CompileTask from a CompileQueue
/**
* Get the next CompileTask from a CompileQueue
*/
CompileTask* CompileQueue::get() {
NMethodSweeper::possibly_sweep();
MutexLocker locker(lock());
// If _first is NULL we have no more compile jobs. There are two reasons for
// having no compile jobs: First, we compiled everything we wanted. Second,
@ -676,35 +671,16 @@ CompileTask* CompileQueue::get() {
return NULL;
}
if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
// Wait a certain amount of time to possibly do another sweep.
// We must wait until stack scanning has happened so that we can
// transition a method's state from 'not_entrant' to 'zombie'.
long wait_time = NmethodSweepCheckInterval * 1000;
if (FLAG_IS_DEFAULT(NmethodSweepCheckInterval)) {
// Only one thread at a time can do sweeping. Scale the
// wait time according to the number of compiler threads.
// As a result, the next sweep is likely to happen every 100ms
// with an arbitrary number of threads that do sweeping.
wait_time = 100 * CICompilerCount;
}
bool timeout = lock()->wait(!Mutex::_no_safepoint_check_flag, wait_time);
if (timeout) {
MutexUnlocker ul(lock());
NMethodSweeper::possibly_sweep();
}
} else {
// If there are no compilation tasks and we can compile new jobs
// (i.e., there is enough free space in the code cache) there is
// no need to invoke the sweeper. As a result, the hotness of methods
// remains unchanged. This behavior is desired, since we want to keep
// the stable state, i.e., we do not want to evict methods from the
// code cache if it is unnecessary.
// We need a timed wait here, since compiler threads can exit if compilation
// is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
// is not critical and we do not want idle compiler threads to wake up too often.
lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
}
// If there are no compilation tasks and we can compile new jobs
// (i.e., there is enough free space in the code cache) there is
// no need to invoke the sweeper. As a result, the hotness of methods
// remains unchanged. This behavior is desired, since we want to keep
// the stable state, i.e., we do not want to evict methods from the
// code cache if it is unnecessary.
// We need a timed wait here, since compiler threads can exit if compilation
// is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
// is not critical and we do not want idle compiler threads to wake up too often.
lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
}
if (CompileBroker::is_compilation_disabled_forever()) {
@ -894,8 +870,8 @@ void CompileBroker::compilation_init() {
_compilers[1] = new SharkCompiler();
#endif // SHARK
// Start the CompilerThreads
init_compiler_threads(c1_count, c2_count);
// Start the compiler thread(s) and the sweeper thread
init_compiler_sweeper_threads(c1_count, c2_count);
// totalTime performance counter is always created as it is required
// by the implementation of java.lang.management.CompilationMBean.
{
@ -999,13 +975,10 @@ void CompileBroker::compilation_init() {
}
CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
AbstractCompiler* comp, TRAPS) {
CompilerThread* compiler_thread = NULL;
Klass* k =
SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(),
true, CHECK_0);
JavaThread* CompileBroker::make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
AbstractCompiler* comp, bool compiler_thread, TRAPS) {
JavaThread* thread = NULL;
Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK_0);
instanceKlassHandle klass (THREAD, k);
instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_0);
Handle string = java_lang_String::create_from_str(name, CHECK_0);
@ -1023,7 +996,11 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
{
MutexLocker mu(Threads_lock, THREAD);
compiler_thread = new CompilerThread(queue, counters);
if (compiler_thread) {
thread = new CompilerThread(queue, counters);
} else {
thread = new CodeCacheSweeperThread();
}
// At this point the new CompilerThread data-races with this startup
thread (which I believe is the primordial thread and NOT the VM
// thread). This means Java bytecodes being executed at startup can
@ -1036,12 +1013,12 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
// in that case. However, since this must work and we do not allow
// exceptions anyway, check and abort if this fails.
if (compiler_thread == NULL || compiler_thread->osthread() == NULL){
if (thread == NULL || thread->osthread() == NULL) {
vm_exit_during_initialization("java.lang.OutOfMemoryError",
os::native_thread_creation_failed_msg());
}
java_lang_Thread::set_thread(thread_oop(), compiler_thread);
java_lang_Thread::set_thread(thread_oop(), thread);
// Note that this only sets the JavaThread _priority field, which by
// definition is limited to Java priorities and not OS priorities.
@ -1062,24 +1039,26 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
native_prio = os::java_to_os_priority[NearMaxPriority];
}
}
os::set_native_priority(compiler_thread, native_prio);
os::set_native_priority(thread, native_prio);
java_lang_Thread::set_daemon(thread_oop());
compiler_thread->set_threadObj(thread_oop());
compiler_thread->set_compiler(comp);
Threads::add(compiler_thread);
Thread::start(compiler_thread);
thread->set_threadObj(thread_oop());
if (compiler_thread) {
thread->as_CompilerThread()->set_compiler(comp);
}
Threads::add(thread);
Thread::start(thread);
}
// Let go of Threads_lock before yielding
os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
return compiler_thread;
return thread;
}
void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
void CompileBroker::init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count) {
EXCEPTION_MARK;
#if !defined(ZERO) && !defined(SHARK)
assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
@ -1096,17 +1075,14 @@ void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler
int compiler_count = c1_compiler_count + c2_compiler_count;
_compiler_threads =
new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<CompilerThread*>(compiler_count, true);
char name_buffer[256];
const bool compiler_thread = true;
for (int i = 0; i < c2_compiler_count; i++) {
// Create a name for our thread.
sprintf(name_buffer, "C2 CompilerThread%d", i);
CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
// Shark and C2
CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], CHECK);
_compiler_threads->append(new_thread);
make_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], compiler_thread, CHECK);
}
for (int i = c2_compiler_count; i < compiler_count; i++) {
@ -1114,13 +1090,17 @@ void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler
sprintf(name_buffer, "C1 CompilerThread%d", i);
CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
// C1
CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], CHECK);
_compiler_threads->append(new_thread);
make_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], compiler_thread, CHECK);
}
if (UsePerfData) {
PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, compiler_count, CHECK);
}
if (MethodFlushing) {
// Initialize the sweeper thread
make_thread("Sweeper thread", NULL, NULL, NULL, false, CHECK);
}
}
@ -1767,13 +1747,6 @@ void CompileBroker::compiler_thread_loop() {
// We need this HandleMark to avoid leaking VM handles.
HandleMark hm(thread);
// Check if the CodeCache is full
int code_blob_type = 0;
if (CodeCache::is_full(&code_blob_type)) {
// The CodeHeap for code_blob_type is really full
handle_full_code_cache(code_blob_type);
}
CompileTask* task = queue->get();
if (task == NULL) {
continue;
@ -1781,8 +1754,9 @@ void CompileBroker::compiler_thread_loop() {
// Give compiler threads an extra quanta. They tend to be bursty and
// this helps the compiler to finish up the job.
if( CompilerThreadHintNoPreempt )
if (CompilerThreadHintNoPreempt) {
os::hint_no_preempt();
}
// trace per thread time and compile statistics
CompilerCounters* counters = ((CompilerThread*)thread)->counters();
@ -2094,8 +2068,10 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
}
/**
* The CodeCache is full. Print out warning and disable compilation
* or try code cache cleaning so compilation can continue later.
* The CodeCache is full. Print warning and disable compilation.
* Schedule code cache cleaning so compilation can continue later.
* This function needs to be called only from CodeCache::allocate(),
* since we currently handle a full code cache uniformly.
*/
void CompileBroker::handle_full_code_cache(int code_blob_type) {
UseInterpreter = true;
@ -2127,10 +2103,6 @@ void CompileBroker::handle_full_code_cache(int code_blob_type) {
if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
NMethodSweeper::log_sweep("disable_compiler");
}
// Switch to 'vm_state'. This ensures that possibly_sweep() can be called
// without having to consider the state in which the current thread is.
ThreadInVMfromUnknown in_vm;
NMethodSweeper::possibly_sweep();
} else {
disable_compilation_forever();
}

@ -290,8 +290,6 @@ class CompileBroker: AllStatic {
static CompileQueue* _c2_compile_queue;
static CompileQueue* _c1_compile_queue;
static GrowableArray<CompilerThread*>* _compiler_threads;
// performance counters
static PerfCounter* _perf_total_compilation;
static PerfCounter* _perf_native_compilation;
@ -339,8 +337,8 @@ class CompileBroker: AllStatic {
static volatile jint _print_compilation_warning;
static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS);
static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
static JavaThread* make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, bool compiler_thread, TRAPS);
static void init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count);
static bool compilation_is_complete (methodHandle method, int osr_bci, int comp_level);
static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
static bool is_compile_blocking();

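The make_thread refactor above folds compiler-thread and sweeper-thread creation into one factory keyed by a bool. The shape, sketched with hypothetical minimal types:

struct JavaThread { virtual ~JavaThread() {} };
struct CompilerThread : public JavaThread { /* compile queue, counters ... */ };
struct CodeCacheSweeperThread : public JavaThread {};

// Single creation path selected by a bool, mirroring make_thread above.
static JavaThread* make_thread(bool compiler_thread) {
  return compiler_thread ? static_cast<JavaThread*>(new CompilerThread())
                         : static_cast<JavaThread*>(new CodeCacheSweeperThread());
}

int main() {
  JavaThread* sweeper = make_thread(false);
  delete sweeper;
  return 0;
}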
@ -127,41 +127,6 @@ public:
};
class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
size_t _num_processed;
CardTableModRefBS* _ctbs;
int _histo[256];
public:
ClearLoggedCardTableEntryClosure() :
_num_processed(0), _ctbs(G1CollectedHeap::heap()->g1_barrier_set())
{
for (int i = 0; i < 256; i++) _histo[i] = 0;
}
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
unsigned char* ujb = (unsigned char*)card_ptr;
int ind = (int)(*ujb);
_histo[ind]++;
*card_ptr = (jbyte)CardTableModRefBS::clean_card_val();
_num_processed++;
return true;
}
size_t num_processed() { return _num_processed; }
void print_histo() {
gclog_or_tty->print_cr("Card table value histogram:");
for (int i = 0; i < 256; i++) {
if (_histo[i] != 0) {
gclog_or_tty->print_cr(" %d: %d", i, _histo[i]);
}
}
}
};
class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
private:
size_t _num_processed;
@ -475,48 +440,6 @@ bool G1CollectedHeap::is_scavengable(const void* p) {
return !hr->is_humongous();
}
void G1CollectedHeap::check_ct_logs_at_safepoint() {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
CardTableModRefBS* ct_bs = g1_barrier_set();
// Count the dirty cards at the start.
CountNonCleanMemRegionClosure count1(this);
ct_bs->mod_card_iterate(&count1);
int orig_count = count1.n();
// First clear the logged cards.
ClearLoggedCardTableEntryClosure clear;
dcqs.apply_closure_to_all_completed_buffers(&clear);
dcqs.iterate_closure_all_threads(&clear, false);
clear.print_histo();
// Now ensure that there's no dirty cards.
CountNonCleanMemRegionClosure count2(this);
ct_bs->mod_card_iterate(&count2);
if (count2.n() != 0) {
gclog_or_tty->print_cr("Card table has %d entries; %d originally",
count2.n(), orig_count);
}
guarantee(count2.n() == 0, "Card table should be clean.");
RedirtyLoggedCardTableEntryClosure redirty;
dcqs.apply_closure_to_all_completed_buffers(&redirty);
dcqs.iterate_closure_all_threads(&redirty, false);
gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
clear.num_processed(), orig_count);
guarantee(redirty.num_processed() == clear.num_processed(),
err_msg("Redirtied "SIZE_FORMAT" cards, bug cleared "SIZE_FORMAT,
redirty.num_processed(), clear.num_processed()));
CountNonCleanMemRegionClosure count3(this);
ct_bs->mod_card_iterate(&count3);
if (count3.n() != orig_count) {
gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
orig_count, count3.n());
guarantee(count3.n() >= orig_count, "Should have restored them all.");
}
}
// Private class members.
G1CollectedHeap* G1CollectedHeap::_g1h;
@ -5760,14 +5683,10 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
// not copied during the pause.
process_discovered_references(n_workers);
// Weak root processing.
{
if (G1StringDedup::is_enabled()) {
G1STWIsAliveClosure is_alive(this);
G1KeepAliveClosure keep_alive(this);
JNIHandles::weak_oops_do(&is_alive, &keep_alive);
if (G1StringDedup::is_enabled()) {
G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
}
G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
}
_allocator->release_gc_alloc_regions(n_workers, evacuation_info);

@ -797,9 +797,6 @@ protected:
// The closure used to refine a single card.
RefineCardTableEntryClosure* _refine_cte_cl;
// A function to check the consistency of dirty card logs.
void check_ct_logs_at_safepoint();
// A DirtyCardQueueSet that is used to hold cards that contain
// references into the current collection set. This is used to
// update the remembered sets of the regions in the collection

@ -1077,7 +1077,6 @@ IRT_END
address SignatureHandlerLibrary::set_handler_blob() {
BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
if (handler_blob == NULL) {
CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return NULL;
}
address handler = handler_blob->code_begin();

@ -171,13 +171,13 @@ void CodeHeap::clear() {
}
void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
void* CodeHeap::allocate(size_t instance_size) {
size_t number_of_segments = size_to_segments(instance_size + header_size());
assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
// First check if we can satisfy request from freelist
NOT_PRODUCT(verify());
HeapBlock* block = search_freelist(number_of_segments, is_critical);
HeapBlock* block = search_freelist(number_of_segments);
NOT_PRODUCT(verify());
if (block != NULL) {
@ -191,15 +191,6 @@ void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
// Ensure minimum size for allocation to the heap.
number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);
if (!is_critical) {
// Make sure the allocation fits in the unallocated heap without using
// the CodeCacheMinimumFreeSpace that is reserved for critical allocations.
if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
// Fail allocation
return NULL;
}
}
if (_next_segment + number_of_segments <= _number_of_committed_segments) {
mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
HeapBlock* b = block_at(_next_segment);
@ -427,24 +418,17 @@ void CodeHeap::add_to_freelist(HeapBlock* a) {
* Search freelist for an entry on the list with the best fit.
* @return NULL, if no one was found
*/
FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
FreeBlock* CodeHeap::search_freelist(size_t length) {
FreeBlock* found_block = NULL;
FreeBlock* found_prev = NULL;
size_t found_length = 0;
FreeBlock* prev = NULL;
FreeBlock* cur = _freelist;
const size_t critical_boundary = (size_t)high_boundary() - CodeCacheMinimumFreeSpace;
// Search for first block that fits
while(cur != NULL) {
if (cur->length() >= length) {
// Non critical allocations are not allowed to use the last part of the code heap.
// Make sure the end of the allocation doesn't cross into the last part of the code heap.
if (!is_critical && (((size_t)cur + length) > critical_boundary)) {
// The freelist is sorted by address - if one fails, all consecutive will also fail.
break;
}
// Remember block, its previous element, and its length
found_block = cur;
found_prev = prev;

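With the critical-boundary special case gone, search_freelist above is a plain best-fit scan. A compact sketch under the same assumptions (singly linked, address-sorted list; unlinking of the found block is omitted here):

#include <cstddef>

struct FreeBlock { size_t length; FreeBlock* next; };

// Returns the smallest block with length >= request, or NULL.
static FreeBlock* search_freelist(FreeBlock* head, size_t request) {
  FreeBlock* best = NULL;
  for (FreeBlock* cur = head; cur != NULL; cur = cur->next) {
    if (cur->length >= request &&
        (best == NULL || cur->length < best->length)) {
      best = cur;
      if (best->length == request) break; // exact fit, stop early
    }
  }
  return best;
}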
@ -120,7 +120,7 @@ class CodeHeap : public CHeapObj<mtCode> {
// Toplevel freelist management
void add_to_freelist(HeapBlock* b);
FreeBlock* search_freelist(size_t length, bool is_critical);
FreeBlock* search_freelist(size_t length);
// Iteration helpers
void* next_free(HeapBlock* b) const;
@ -140,8 +140,8 @@ class CodeHeap : public CHeapObj<mtCode> {
bool expand_by(size_t size); // expands committed memory by size
// Memory allocation
void* allocate (size_t size, bool is_critical); // allocates a block of size or returns NULL
void deallocate(void* p); // deallocates a block
void* allocate (size_t size); // Allocate 'size' bytes in the code cache or return NULL
void deallocate(void* p); // Deallocate memory
// Attributes
char* low_boundary() const { return _memory.low_boundary (); }

@ -2792,19 +2792,18 @@ void InstanceKlass::adjust_default_methods(Method** old_methods, Method** new_me
// On-stack replacement stuff
void InstanceKlass::add_osr_nmethod(nmethod* n) {
// only one compilation can be active
NEEDS_CLEANUP
// This is a short non-blocking critical region, so the no safepoint check is ok.
OsrList_lock->lock_without_safepoint_check();
assert(n->is_osr_method(), "wrong kind of nmethod");
n->set_osr_link(osr_nmethods_head());
set_osr_nmethods_head(n);
// Raise the highest osr level if necessary
if (TieredCompilation) {
Method* m = n->method();
m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
{
// This is a short non-blocking critical region, so the no safepoint check is ok.
MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
assert(n->is_osr_method(), "wrong kind of nmethod");
n->set_osr_link(osr_nmethods_head());
set_osr_nmethods_head(n);
// Raise the highest osr level if necessary
if (TieredCompilation) {
Method* m = n->method();
m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
}
}
// Remember to unlock again
OsrList_lock->unlock();
// Get rid of the osr methods for the same bci that have lower levels.
if (TieredCompilation) {
@ -2820,7 +2819,7 @@ void InstanceKlass::add_osr_nmethod(nmethod* n) {
void InstanceKlass::remove_osr_nmethod(nmethod* n) {
// This is a short non-blocking critical region, so the no safepoint check is ok.
OsrList_lock->lock_without_safepoint_check();
MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
assert(n->is_osr_method(), "wrong kind of nmethod");
nmethod* last = NULL;
nmethod* cur = osr_nmethods_head();
@ -2857,13 +2856,27 @@ void InstanceKlass::remove_osr_nmethod(nmethod* n) {
}
m->set_highest_osr_comp_level(max_level);
}
// Remember to unlock again
OsrList_lock->unlock();
}
int InstanceKlass::mark_osr_nmethods(const Method* m) {
// This is a short non-blocking critical region, so the no safepoint check is ok.
MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
nmethod* osr = osr_nmethods_head();
int found = 0;
while (osr != NULL) {
assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
if (osr->method() == m) {
osr->mark_for_deoptimization();
found++;
}
osr = osr->osr_link();
}
return found;
}
nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_level, bool match_level) const {
// This is a short non-blocking critical region, so the no safepoint check is ok.
OsrList_lock->lock_without_safepoint_check();
MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
nmethod* osr = osr_nmethods_head();
nmethod* best = NULL;
while (osr != NULL) {
@ -2879,14 +2892,12 @@ nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_le
if (match_level) {
if (osr->comp_level() == comp_level) {
// Found a match - return it.
OsrList_lock->unlock();
return osr;
}
} else {
if (best == NULL || (osr->comp_level() > best->comp_level())) {
if (osr->comp_level() == CompLevel_highest_tier) {
// Found the best possible - return it.
OsrList_lock->unlock();
return osr;
}
best = osr;
@ -2895,7 +2906,6 @@ nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_le
}
osr = osr->osr_link();
}
OsrList_lock->unlock();
if (best != NULL && best->comp_level() >= comp_level && match_level == false) {
return best;
}

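The MutexLockerEx conversion above replaces manual lock()/unlock() pairs with RAII, so every early return releases the lock. The same shape in standard C++ (std::mutex and a toy nmethod list standing in for OsrList_lock and the OSR chain):

#include <mutex>

static std::mutex osr_list_lock;
struct nmethod { nmethod* next; int level; };
static nmethod* osr_head = nullptr;

static nmethod* lookup_best(int comp_level) {
  std::lock_guard<std::mutex> guard(osr_list_lock); // unlocks on any return
  for (nmethod* cur = osr_head; cur != nullptr; cur = cur->next) {
    if (cur->level == comp_level) {
      return cur; // early return no longer needs an explicit unlock
    }
  }
  return nullptr;
}

int main() { return lookup_best(4) != nullptr; }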
@ -754,6 +754,7 @@ class InstanceKlass: public Klass {
void set_osr_nmethods_head(nmethod* h) { _osr_nmethods_head = h; };
void add_osr_nmethod(nmethod* n);
void remove_osr_nmethod(nmethod* n);
int mark_osr_nmethods(const Method* m);
nmethod* lookup_osr_nmethod(const Method* m, int bci, int level, bool match_level) const;
// Breakpoint support (see methods on Method* for details)

@ -813,6 +813,10 @@ class Method : public Metadata {
return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
}
int mark_osr_nmethods() {
return method_holder()->mark_osr_nmethods(this);
}
nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
}

@ -45,9 +45,10 @@ class objArrayOopDesc : public arrayOopDesc {
private:
// Give size of objArrayOop in HeapWords minus the header
static int array_size(int length) {
const int OopsPerHeapWord = HeapWordSize/heapOopSize;
const uint OopsPerHeapWord = HeapWordSize/heapOopSize;
assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0),
"Else the following (new) computation would be in error");
uint res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
#ifdef ASSERT
// The old code is left in for sanity-checking; it'll
// go away pretty soon. XXX
@ -55,16 +56,15 @@ private:
// oop->length() * HeapWordsPerOop;
// With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer.
// The oop elements are aligned up to wordSize
const int HeapWordsPerOop = heapOopSize/HeapWordSize;
int old_res;
const uint HeapWordsPerOop = heapOopSize/HeapWordSize;
uint old_res;
if (HeapWordsPerOop > 0) {
old_res = length * HeapWordsPerOop;
} else {
old_res = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
old_res = align_size_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord;
}
#endif // ASSERT
int res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
assert(res == old_res, "Inconsistency between old and new.");
#endif // ASSERT
return res;
}

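Worked example of the unsigned array_size arithmetic above: with compressed oops (heapOopSize = 4, HeapWordSize = 8), OopsPerHeapWord is 2, so a 3-element array rounds up to 2 heap words. A sketch with the constants spelled out:

#include <cstdio>

int main() {
  const unsigned HeapWordSize = 8, heapOopSize = 4; // compressed oops
  const unsigned OopsPerHeapWord = HeapWordSize / heapOopSize; // 2
  int length = 3;
  unsigned res = ((unsigned)length + OopsPerHeapWord - 1) / OopsPerHeapWord;
  printf("%u\n", res); // 2 heap words for 3 compressed oops
  return 0;
}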
@ -150,7 +150,7 @@ class typeArrayOopDesc : public arrayOopDesc {
DEBUG_ONLY(BasicType etype = Klass::layout_helper_element_type(lh));
assert(length <= arrayOopDesc::max_array_length(etype), "no overflow");
julong size_in_bytes = length;
julong size_in_bytes = (juint)length;
size_in_bytes <<= element_shift;
size_in_bytes += instance_header_size;
julong size_in_words = ((size_in_bytes + (HeapWordSize-1)) >> LogHeapWordSize);

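The (juint)length cast above matters when length carries a bad (negative) value: converting int straight to a 64-bit unsigned sign-extends, while going through a 32-bit unsigned first zero-extends. A minimal illustration with standard types:

#include <cstdint>
#include <cstdio>

int main() {
  int length = -1;
  uint64_t sign_extended = length;            // 0xFFFFFFFFFFFFFFFF
  uint64_t zero_extended = (uint32_t)length;  // 0x00000000FFFFFFFF
  printf("%llx\n%llx\n",
         (unsigned long long)sign_extended,
         (unsigned long long)zero_extended);
  return 0;
}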
@ -476,6 +476,9 @@
product(bool, DoEscapeAnalysis, true, \
"Perform escape analysis") \
\
product(double, EscapeAnalysisTimeout, 20. DEBUG_ONLY(+40.), \
"Abort EA when it reaches time limit (in sec)") \
\
develop(bool, ExitEscapeAnalysisOnTimeout, true, \
"Exit or throw assert in EA when it reaches time limit") \
\

@ -939,7 +939,8 @@ int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
if (!(call->req() > TypeFunc::Parms &&
call->in(TypeFunc::Parms) != NULL &&
call->in(TypeFunc::Parms)->is_Con())) {
call->in(TypeFunc::Parms)->is_Con() &&
call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
assert(in_dump() != 0, "OK if dumping");
tty->print("[bad uncommon trap]");
return 0;

@ -281,9 +281,11 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
Block *pred = _phc._cfg.get_block_for_node(b->pred(j));
Node *copy;
assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
// Rematerialize constants instead of copying them
if( m->is_Mach() && m->as_Mach()->is_Con() &&
m->as_Mach()->rematerialize() ) {
// Rematerialize constants instead of copying them.
// We do this only for immediate constants, we avoid constant table loads
// because that will unsafely extend the live range of the constant table base.
if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() &&
m->as_Mach()->rematerialize()) {
copy = m->clone();
// Insert the copy in the predecessor basic block
pred->add_inst(copy);
@ -317,8 +319,8 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
// At this point it is unsafe to extend live ranges (6550579).
// Rematerialize only constants as we do for Phi above.
if(m->is_Mach() && m->as_Mach()->is_Con() &&
m->as_Mach()->rematerialize()) {
if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() &&
m->as_Mach()->rematerialize()) {
copy = m->clone();
// Insert the copy in the basic block, just before us
b->insert_node(copy, l++);

@ -535,7 +535,6 @@ void Compile::init_scratch_buffer_blob(int const_size) {
if (scratch_buffer_blob() == NULL) {
// Let CompilerBroker disable further compilations.
record_failure("Not enough space for scratch buffer in CodeCache");
CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return;
}
}

@ -38,6 +38,8 @@
ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
_nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
_in_worklist(C->comp_arena()),
_next_pidx(0),
_collecting(true),
_verify(false),
_compile(C),
@ -125,13 +127,19 @@ bool ConnectionGraph::compute_escape() {
if (C->root() != NULL) {
ideal_nodes.push(C->root());
}
// Processed ideal nodes are unique on ideal_nodes list
// but several ideal nodes are mapped to the phantom_obj.
// To avoid duplicated entries on the following worklists
// add the phantom_obj only once to them.
ptnodes_worklist.append(phantom_obj);
java_objects_worklist.append(phantom_obj);
for( uint next = 0; next < ideal_nodes.size(); ++next ) {
Node* n = ideal_nodes.at(next);
// Create PointsTo nodes and add them to Connection Graph. Called
// only once per ideal node since ideal_nodes is a Unique_Node list.
add_node_to_connection_graph(n, &delayed_worklist);
PointsToNode* ptn = ptnode_adr(n->_idx);
if (ptn != NULL) {
if (ptn != NULL && ptn != phantom_obj) {
ptnodes_worklist.append(ptn);
if (ptn->is_JavaObject()) {
java_objects_worklist.append(ptn->as_JavaObject());
@ -415,7 +423,7 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de
}
case Op_CreateEx: {
// assume that all exception objects globally escape
add_java_object(n, PointsToNode::GlobalEscape);
map_ideal_node(n, phantom_obj);
break;
}
case Op_LoadKlass:
@ -1074,13 +1082,8 @@ bool ConnectionGraph::complete_connection_graph(
// on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
// Set limit to 20 to catch situation when something did go wrong and
// bailout Escape Analysis.
// Also limit build time to 30 sec (60 in debug VM).
// Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
#define CG_BUILD_ITER_LIMIT 20
#ifdef ASSERT
#define CG_BUILD_TIME_LIMIT 60.0
#else
#define CG_BUILD_TIME_LIMIT 30.0
#endif
// Propagate GlobalEscape and ArgEscape escape states and check that
// we still have non-escaping objects. The method pushes on _worklist
@ -1091,12 +1094,13 @@ bool ConnectionGraph::complete_connection_graph(
// Now propagate references to all JavaObject nodes.
int java_objects_length = java_objects_worklist.length();
elapsedTimer time;
bool timeout = false;
int new_edges = 1;
int iterations = 0;
do {
while ((new_edges > 0) &&
(iterations++ < CG_BUILD_ITER_LIMIT) &&
(time.seconds() < CG_BUILD_TIME_LIMIT)) {
(iterations++ < CG_BUILD_ITER_LIMIT)) {
double start_time = time.seconds();
time.start();
new_edges = 0;
// Propagate references to phantom_object for nodes pushed on _worklist
@ -1105,7 +1109,26 @@ bool ConnectionGraph::complete_connection_graph(
for (int next = 0; next < java_objects_length; ++next) {
JavaObjectNode* ptn = java_objects_worklist.at(next);
new_edges += add_java_object_edges(ptn, true);
#define SAMPLE_SIZE 4
if ((next % SAMPLE_SIZE) == 0) {
// Every 4 iterations, estimate how much time it will take
// to complete graph construction.
time.stop();
double stop_time = time.seconds();
double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
double time_until_end = time_per_iter * (double)(java_objects_length - next);
if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
timeout = true;
break; // Timeout
}
start_time = stop_time;
time.start();
}
#undef SAMPLE_SIZE
}
if (timeout) break;
if (new_edges > 0) {
// Update escape states on each iteration if graph was updated.
if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
@ -1113,9 +1136,12 @@ bool ConnectionGraph::complete_connection_graph(
}
}
time.stop();
if (time.seconds() >= EscapeAnalysisTimeout) {
timeout = true;
break;
}
}
if ((iterations < CG_BUILD_ITER_LIMIT) &&
(time.seconds() < CG_BUILD_TIME_LIMIT)) {
if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
time.start();
// Find fields which have unknown value.
int fields_length = oop_fields_worklist.length();
@ -1128,18 +1154,21 @@ bool ConnectionGraph::complete_connection_graph(
}
}
time.stop();
if (time.seconds() >= EscapeAnalysisTimeout) {
timeout = true;
break;
}
} else {
new_edges = 0; // Bailout
}
} while (new_edges > 0);
// Bailout if passed limits.
if ((iterations >= CG_BUILD_ITER_LIMIT) ||
(time.seconds() >= CG_BUILD_TIME_LIMIT)) {
if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
Compile* C = _compile;
if (C->log() != NULL) {
C->log()->begin_elem("connectionGraph_bailout reason='reached ");
C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time");
C->log()->text("%s", timeout ? "time" : "iterations");
C->log()->end_elem(" limit'");
}
assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
@ -1156,7 +1185,6 @@ bool ConnectionGraph::complete_connection_graph(
#endif
#undef CG_BUILD_ITER_LIMIT
#undef CG_BUILD_TIME_LIMIT
// Find fields initialized by NULL for non-escaping Allocations.
int non_escaped_length = non_escaped_worklist.length();
@ -1280,8 +1308,8 @@ int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_w
}
}
}
while(_worklist.length() > 0) {
PointsToNode* use = _worklist.pop();
for (int l = 0; l < _worklist.length(); l++) {
PointsToNode* use = _worklist.at(l);
if (PointsToNode::is_base_use(use)) {
// Add reference from jobj to field and from field to jobj (field's base).
use = PointsToNode::get_use_node(use)->as_Field();
@ -1328,6 +1356,8 @@ int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_w
add_field_uses_to_worklist(use->as_Field());
}
}
_worklist.clear();
_in_worklist.Reset();
return new_edges;
}
@ -1906,7 +1936,7 @@ void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
return;
}
Compile* C = _compile;
ptadr = new (C->comp_arena()) LocalVarNode(C, n, es);
ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
_nodes.at_put(n->_idx, ptadr);
}
@ -1917,7 +1947,7 @@ void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
return;
}
Compile* C = _compile;
ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es);
ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
_nodes.at_put(n->_idx, ptadr);
}
@ -1933,7 +1963,7 @@ void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offse
es = PointsToNode::GlobalEscape;
}
Compile* C = _compile;
FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop);
FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
_nodes.at_put(n->_idx, field);
}
@ -1947,7 +1977,7 @@ void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
return;
}
Compile* C = _compile;
ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es);
ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
_nodes.at_put(n->_idx, ptadr);
// Add edge from arraycopy node to source object.
(void)add_edge(ptadr, src);
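The new timeout handling in complete_connection_graph samples elapsed time every SAMPLE_SIZE iterations and extrapolates whether the remaining java_objects_worklist entries fit inside EscapeAnalysisTimeout. A minimal standalone sketch of that sampling pattern, with std::chrono standing in for HotSpot's elapsedTimer and a generic worklist replacing the PointsTo structures:

#include <chrono>
#include <cstddef>
#include <vector>

// Sketch: abort a worklist pass once the extrapolated completion time
// exceeds a budget. Every SAMPLE iterations, time the last SAMPLE items
// and project that cost over the items still left.
bool process_with_budget(std::vector<int>& work, double budget_seconds) {
    using clock = std::chrono::steady_clock;
    const std::size_t SAMPLE = 4;            // mirrors SAMPLE_SIZE above
    const clock::time_point pass_start = clock::now();
    clock::time_point sample_start = pass_start;
    for (std::size_t next = 0; next < work.size(); ++next) {
        work[next] += 1;                     // one unit of work
        if (next > 0 && (next % SAMPLE) == 0) {
            clock::time_point now = clock::now();
            double elapsed  = std::chrono::duration<double>(now - pass_start).count();
            double per_iter = std::chrono::duration<double>(now - sample_start).count()
                              / (double)SAMPLE;
            double projected = elapsed + per_iter * (double)(work.size() - next);
            if (projected >= budget_seconds) {
                return false;                // timeout: bail out like the EA build
            }
            sample_start = now;
        }
    }
    return true;
}

int main() {
    std::vector<int> work(1000, 0);
    return process_with_budget(work, 1.0) ? 0 : 1;
}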

@ -125,6 +125,8 @@ class LocalVarNode;
class FieldNode;
class ArraycopyNode;
class ConnectionGraph;
// ConnectionGraph nodes
class PointsToNode : public ResourceObj {
GrowableArray<PointsToNode*> _edges; // List of nodes this node points to
@ -137,6 +139,7 @@ class PointsToNode : public ResourceObj {
Node* const _node; // Ideal node corresponding to this PointsTo node.
const int _idx; // Cached ideal node's _idx
const uint _pidx; // Index of this node
public:
typedef enum {
@ -165,17 +168,9 @@ public:
} NodeFlags;
PointsToNode(Compile *C, Node* n, EscapeState es, NodeType type):
_edges(C->comp_arena(), 2, 0, NULL),
_uses (C->comp_arena(), 2, 0, NULL),
_node(n),
_idx(n->_idx),
_type((u1)type),
_escape((u1)es),
_fields_escape((u1)es),
_flags(ScalarReplaceable) {
assert(n != NULL && es != UnknownEscape, "sanity");
}
inline PointsToNode(ConnectionGraph* CG, Node* n, EscapeState es, NodeType type);
uint pidx() const { return _pidx; }
Node* ideal_node() const { return _node; }
int idx() const { return _idx; }
@ -243,14 +238,14 @@ public:
class LocalVarNode: public PointsToNode {
public:
LocalVarNode(Compile *C, Node* n, EscapeState es):
PointsToNode(C, n, es, LocalVar) {}
LocalVarNode(ConnectionGraph *CG, Node* n, EscapeState es):
PointsToNode(CG, n, es, LocalVar) {}
};
class JavaObjectNode: public PointsToNode {
public:
JavaObjectNode(Compile *C, Node* n, EscapeState es):
PointsToNode(C, n, es, JavaObject) {
JavaObjectNode(ConnectionGraph *CG, Node* n, EscapeState es):
PointsToNode(CG, n, es, JavaObject) {
if (es > NoEscape)
set_scalar_replaceable(false);
}
@ -262,8 +257,8 @@ class FieldNode: public PointsToNode {
const bool _is_oop; // Field points to object
bool _has_unknown_base; // Has phantom_object base
public:
FieldNode(Compile *C, Node* n, EscapeState es, int offs, bool is_oop):
PointsToNode(C, n, es, Field),
FieldNode(ConnectionGraph *CG, Node* n, EscapeState es, int offs, bool is_oop):
PointsToNode(CG, n, es, Field),
_offset(offs), _is_oop(is_oop),
_has_unknown_base(false) {}
@ -284,8 +279,8 @@ public:
class ArraycopyNode: public PointsToNode {
public:
ArraycopyNode(Compile *C, Node* n, EscapeState es):
PointsToNode(C, n, es, Arraycopy) {}
ArraycopyNode(ConnectionGraph *CG, Node* n, EscapeState es):
PointsToNode(CG, n, es, Arraycopy) {}
};
// Iterators for PointsTo node's edges:
@ -323,11 +318,14 @@ public:
class ConnectionGraph: public ResourceObj {
friend class PointsToNode;
private:
GrowableArray<PointsToNode*> _nodes; // Map from ideal nodes to
// ConnectionGraph nodes.
GrowableArray<PointsToNode*> _worklist; // Nodes to be processed
VectorSet _in_worklist;
uint _next_pidx;
bool _collecting; // Indicates whether escape information
// is still being collected. If false,
@ -353,6 +351,8 @@ private:
}
uint nodes_size() const { return _nodes.length(); }
uint next_pidx() { return _next_pidx++; }
// Add nodes to ConnectionGraph.
void add_local_var(Node* n, PointsToNode::EscapeState es);
void add_java_object(Node* n, PointsToNode::EscapeState es);
@ -396,15 +396,26 @@ private:
int add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist);
// Put node on worklist if it is (or was) not there.
void add_to_worklist(PointsToNode* pt) {
_worklist.push(pt);
return;
inline void add_to_worklist(PointsToNode* pt) {
PointsToNode* ptf = pt;
uint pidx_bias = 0;
if (PointsToNode::is_base_use(pt)) {
// Create a separate entry in _in_worklist for a marked base edge
// because _worklist may have an entry for a normal edge pointing
// to the same node. To separate them use _next_pidx as bias.
ptf = PointsToNode::get_use_node(pt)->as_Field();
pidx_bias = _next_pidx;
}
if (!_in_worklist.test_set(ptf->pidx() + pidx_bias)) {
_worklist.append(pt);
}
}
// Put on worklist all uses of this node.
void add_uses_to_worklist(PointsToNode* pt) {
for (UseIterator i(pt); i.has_next(); i.next())
_worklist.push(i.get());
inline void add_uses_to_worklist(PointsToNode* pt) {
for (UseIterator i(pt); i.has_next(); i.next()) {
add_to_worklist(i.get());
}
}
// Put on worklist all field's uses and related field nodes.
@ -517,8 +528,8 @@ private:
}
// Helper functions
bool is_oop_field(Node* n, int offset, bool* unsafe);
static Node* get_addp_base(Node *addp);
static Node* find_second_addp(Node* addp, Node* n);
static Node* get_addp_base(Node *addp);
static Node* find_second_addp(Node* addp, Node* n);
// offset of a field reference
int address_offset(Node* adr, PhaseTransform *phase);
@ -587,4 +598,17 @@ public:
#endif
};
inline PointsToNode::PointsToNode(ConnectionGraph *CG, Node* n, EscapeState es, NodeType type):
_edges(CG->_compile->comp_arena(), 2, 0, NULL),
_uses (CG->_compile->comp_arena(), 2, 0, NULL),
_node(n),
_idx(n->_idx),
_pidx(CG->next_pidx()),
_type((u1)type),
_escape((u1)es),
_fields_escape((u1)es),
_flags(ScalarReplaceable) {
assert(n != NULL && es != UnknownEscape, "sanity");
}
#endif // SHARE_VM_OPTO_ESCAPE_HPP
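The reworked add_to_worklist deduplicates via the _in_worklist VectorSet keyed by pidx, biasing marked base-use entries by _next_pidx so a tagged and an untagged reference to the same node occupy distinct slots. A hedged sketch of the same trick with standard containers (names here are illustrative, not HotSpot's):

#include <cstdint>
#include <cstdio>
#include <vector>

// Worklist that stores ids with a tag bit (bit 0), like the base-use mark
// on PointsToNode uses. Tagged entries get a dedup slot biased by max_id,
// mirroring the pidx_bias trick, so a tagged and a plain entry for the
// same node can each be queued exactly once.
struct DedupWorklist {
    std::vector<uint32_t> items;
    std::vector<bool>     seen;
    uint32_t              max_id;

    explicit DedupWorklist(uint32_t n) : seen(2 * n, false), max_id(n) {}

    static bool     is_tagged(uint32_t v) { return (v & 1u) != 0; }
    static uint32_t id_of(uint32_t v)     { return v >> 1; }

    void push(uint32_t v) {
        uint32_t slot = id_of(v) + (is_tagged(v) ? max_id : 0);
        if (!seen[slot]) {                   // test-and-set, like VectorSet
            seen[slot] = true;
            items.push_back(v);
        }
    }
};

int main() {
    DedupWorklist wl(8);
    wl.push(3u << 1);                        // node 3, plain use
    wl.push(3u << 1);                        // duplicate: dropped
    wl.push((3u << 1) | 1u);                 // node 3, marked base use: kept
    std::printf("%zu entries\n", wl.items.size());  // prints: 2 entries
    return 0;
}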

@ -1257,6 +1257,16 @@ Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
result = new ConvI2LNode(phase->transform(result));
}
#endif
// Boxing/unboxing can be done from signed & unsigned loads (e.g. LoadUB -> ... -> LoadB pair).
// Need to preserve unboxing load type if it is unsigned.
switch (this->Opcode()) {
case Op_LoadUB:
result = new AndINode(phase->transform(result), phase->intcon(0xFF));
break;
case Op_LoadUS:
result = new AndINode(phase->transform(result), phase->intcon(0xFFFF));
break;
}
return result;
}
}
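The inserted AndI nodes preserve unsigned semantics when eliminate_autobox rewires a LoadUB/LoadUS to a signed load: without the mask, the narrower value would be sign-extended. A small C++ illustration of the underlying arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
    int8_t b = -1;                 // in-memory byte 0xFF
    int widened  = b;              // sign-extended load: -1
    int masked_b = b & 0xFF;       // what the inserted AndI restores: 255
    int16_t s = -1;                // in-memory short 0xFFFF
    int masked_s = s & 0xFFFF;     // 65535
    std::printf("%d %d %d\n", widened, masked_b, masked_s);
    return 0;
}

These are exactly the values the UnsignedLoads jtreg test added later in this changeset asserts (255 and 65535).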

@ -1166,7 +1166,6 @@ CodeBuffer* Compile::init_buffer(uint* blk_starts) {
// Have we run out of code space?
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return NULL;
}
// Configure the code buffer.
@ -1491,7 +1490,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return;
}
@ -1648,7 +1646,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// One last check for failed CodeBuffer::expand:
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return;
}

@ -36,6 +36,7 @@
#include "runtime/reflection.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/exceptions.hpp"
/*
@ -55,26 +56,30 @@
bool MethodHandles::_enabled = false; // set true after successful native linkage
MethodHandlesAdapterBlob* MethodHandles::_adapter_code = NULL;
//------------------------------------------------------------------------------
// MethodHandles::generate_adapters
//
void MethodHandles::generate_adapters() {
if (SystemDictionary::MethodHandle_klass() == NULL) return;
/**
* Generates method handle adapters. Returns 'false' if memory allocation
* failed and 'true' otherwise.
*/
bool MethodHandles::generate_adapters() {
if (SystemDictionary::MethodHandle_klass() == NULL) {
return true;
}
assert(_adapter_code == NULL, "generate only once");
ResourceMark rm;
TraceTime timer("MethodHandles adapters generation", TraceStartupTime);
_adapter_code = MethodHandlesAdapterBlob::create(adapter_code_size);
if (_adapter_code == NULL)
vm_exit_out_of_memory(adapter_code_size, OOM_MALLOC_ERROR,
"CodeCache: no room for MethodHandles adapters");
{
CodeBuffer code(_adapter_code);
MethodHandlesAdapterGenerator g(&code);
g.generate();
code.log_section_sizes("MethodHandlesAdapterBlob");
if (_adapter_code == NULL) {
return false;
}
CodeBuffer code(_adapter_code);
MethodHandlesAdapterGenerator g(&code);
g.generate();
code.log_section_sizes("MethodHandlesAdapterBlob");
return true;
}
//------------------------------------------------------------------------------
@ -1401,7 +1406,9 @@ JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class))
}
if (enable_MH) {
MethodHandles::generate_adapters();
if (MethodHandles::generate_adapters() == false) {
THROW_MSG(vmSymbols::java_lang_VirtualMachineError(), "Out of space in CodeCache for method handle adapters");
}
MethodHandles::set_enabled(true);
}
}
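generate_adapters now reports allocation failure to its caller instead of calling vm_exit_out_of_memory, and JVM_RegisterMethodHandleMethods turns that into a VirtualMachineError. A generic sketch of this error-propagation shape (all names below are stand-ins, not the HotSpot API):

#include <cstddef>
#include <cstdlib>
#include <stdexcept>

struct Blob { unsigned char bytes[1]; };

static Blob* adapter_code = nullptr;

// Stand-in allocator for MethodHandlesAdapterBlob::create(); may fail.
static Blob* allocate_blob(std::size_t size) {
    return static_cast<Blob*>(std::malloc(size));
}

// Returns false on allocation failure instead of exiting the VM,
// leaving the policy decision (throw, disable the feature) to the caller.
static bool generate_adapters(std::size_t size) {
    adapter_code = allocate_blob(size);
    if (adapter_code == nullptr) {
        return false;
    }
    // ... fill the blob with generated adapter code ...
    return true;
}

void enable_method_handles(std::size_t size) {
    if (!generate_adapters(size)) {
        // Plays the role of THROW_MSG(java_lang_VirtualMachineError, ...).
        throw std::runtime_error("Out of space in CodeCache for method handle adapters");
    }
}

int main() { enable_method_handles(1024); return 0; }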

@ -69,7 +69,7 @@ class MethodHandles: AllStatic {
enum { _suppress_defc = 1, _suppress_name = 2, _suppress_type = 4 };
// Generate MethodHandles adapters.
static void generate_adapters();
static bool generate_adapters();
// Called from MethodHandlesAdapterGenerator.
static address generate_method_handle_interpreter_entry(MacroAssembler* _masm, vmIntrinsics::ID iid);

@ -386,19 +386,10 @@ WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method, jbool
CHECK_JNI_EXCEPTION_(env, result);
MutexLockerEx mu(Compile_lock);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
nmethod* code;
if (is_osr) {
int bci = InvocationEntryBci;
while ((code = mh->lookup_osr_nmethod_for(bci, CompLevel_none, false)) != NULL) {
code->mark_for_deoptimization();
++result;
bci = code->osr_entry_bci() + 1;
}
} else {
code = mh->code();
}
if (code != NULL) {
code->mark_for_deoptimization();
result += mh->mark_osr_nmethods();
} else if (mh->code() != NULL) {
mh->code()->mark_for_deoptimization();
++result;
}
result += CodeCache::mark_for_deoptimization(mh());
@ -566,13 +557,13 @@ WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
WB_END
template <typename T>
static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, bool (*TAt)(const char*, T*)) {
static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, bool (*TAt)(const char*, T*, bool, bool)) {
if (name == NULL) {
return false;
}
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
const char* flag_name = env->GetStringUTFChars(name, NULL);
bool result = (*TAt)(flag_name, value);
bool result = (*TAt)(flag_name, value, true, true);
env->ReleaseStringUTFChars(name, flag_name);
return result;
}
@ -619,6 +610,24 @@ static jobject doubleBox(JavaThread* thread, JNIEnv* env, jdouble value) {
return box(thread, env, vmSymbols::java_lang_Double(), vmSymbols::Double_valueOf_signature(), value);
}
static Flag* getVMFlag(JavaThread* thread, JNIEnv* env, jstring name) {
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
const char* flag_name = env->GetStringUTFChars(name, NULL);
Flag* result = Flag::find_flag(flag_name, strlen(flag_name), true, true);
env->ReleaseStringUTFChars(name, flag_name);
return result;
}
WB_ENTRY(jboolean, WB_IsConstantVMFlag(JNIEnv* env, jobject o, jstring name))
Flag* flag = getVMFlag(thread, env, name);
return (flag != NULL) && flag->is_constant_in_binary();
WB_END
WB_ENTRY(jboolean, WB_IsLockedVMFlag(JNIEnv* env, jobject o, jstring name))
Flag* flag = getVMFlag(thread, env, name);
return (flag != NULL) && !(flag->is_unlocked() || flag->is_unlocker());
WB_END
WB_ENTRY(jobject, WB_GetBooleanVMFlag(JNIEnv* env, jobject o, jstring name))
bool result;
if (GetVMFlag <bool> (thread, env, name, &result, &CommandLineFlags::boolAt)) {
@ -1018,6 +1027,8 @@ static JNINativeMethod methods[] = {
CC"(Ljava/lang/reflect/Executable;II)Z", (void*)&WB_EnqueueMethodForCompilation},
{CC"clearMethodState",
CC"(Ljava/lang/reflect/Executable;)V", (void*)&WB_ClearMethodState},
{CC"isConstantVMFlag", CC"(Ljava/lang/String;)Z", (void*)&WB_IsConstantVMFlag},
{CC"isLockedVMFlag", CC"(Ljava/lang/String;)Z", (void*)&WB_IsLockedVMFlag},
{CC"setBooleanVMFlag", CC"(Ljava/lang/String;Z)V",(void*)&WB_SetBooleanVMFlag},
{CC"setIntxVMFlag", CC"(Ljava/lang/String;J)V",(void*)&WB_SetIntxVMFlag},
{CC"setUintxVMFlag", CC"(Ljava/lang/String;J)V",(void*)&WB_SetUintxVMFlag},

@ -306,6 +306,9 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
{ "ReflectionWrapResolutionErrors",JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "VerifyReflectionBytecodes", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "AutoShutdownNMT", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "NmethodSweepFraction", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "NmethodSweepCheckInterval", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "CodeCacheMinimumFreeSpace", JDK_Version::jdk(9), JDK_Version::jdk(10) },
#ifndef ZERO
{ "UseFastAccessorMethods", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "UseFastEmptyMethods", JDK_Version::jdk(9), JDK_Version::jdk(10) },
@ -2528,7 +2531,7 @@ bool Arguments::check_vm_args_consistency() {
// Check lower bounds of the code cache
// Template Interpreter code is approximately 3X larger in debug builds.
uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
if (InitialCodeCacheSize < (uintx)os::vm_page_size()) {
jio_fprintf(defaultStream::error_stream(),
"Invalid InitialCodeCacheSize=%dK. Must be at least %dK.\n", InitialCodeCacheSize/K,
@ -2564,10 +2567,11 @@ bool Arguments::check_vm_args_consistency() {
status = false;
}
status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction");
status &= verify_interval(NmethodSweepActivity, 0, 2000, "NmethodSweepActivity");
status &= verify_interval(CodeCacheMinBlockLength, 1, 100, "CodeCacheMinBlockLength");
status &= verify_interval(CodeCacheSegmentSize, 1, 1024, "CodeCacheSegmentSize");
status &= verify_interval(StartAggressiveSweepingAt, 0, 100, "StartAggressiveSweepingAt");
int min_number_of_compiler_threads = get_min_number_of_compiler_threads();
// The default CICompilerCount's value is CI_COMPILER_COUNT.
@ -3992,12 +3996,6 @@ jint Arguments::apply_ergo() {
#endif
#endif
// Set NmethodSweepFraction after the size of the code cache is adapted (in case of tiered)
if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M));
}
// Set heap size based on available physical memory
set_heap_size();
@ -4065,13 +4063,6 @@ jint Arguments::apply_ergo() {
}
#ifndef PRODUCT
if (CompileTheWorld) {
// Force NmethodSweeper to sweep whole CodeCache each time.
if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
NmethodSweepFraction = 1;
}
}
if (!LogVMOutput && FLAG_IS_DEFAULT(LogVMOutput)) {
if (use_vm_log()) {
LogVMOutput = true;

@ -634,8 +634,8 @@ static void trace_flag_changed(const char* name, const T old_value, const T new_
e.commit();
}
bool CommandLineFlags::boolAt(const char* name, size_t len, bool* value) {
Flag* result = Flag::find_flag(name, len);
bool CommandLineFlags::boolAt(const char* name, size_t len, bool* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_bool()) return false;
*value = result->get_bool();
@ -662,8 +662,8 @@ void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, Fla
faddr->set_origin(origin);
}
bool CommandLineFlags::intxAt(const char* name, size_t len, intx* value) {
Flag* result = Flag::find_flag(name, len);
bool CommandLineFlags::intxAt(const char* name, size_t len, intx* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_intx()) return false;
*value = result->get_intx();
@ -690,8 +690,8 @@ void CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, Fla
faddr->set_origin(origin);
}
bool CommandLineFlags::uintxAt(const char* name, size_t len, uintx* value) {
Flag* result = Flag::find_flag(name, len);
bool CommandLineFlags::uintxAt(const char* name, size_t len, uintx* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_uintx()) return false;
*value = result->get_uintx();
@ -718,8 +718,8 @@ void CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, F
faddr->set_origin(origin);
}
bool CommandLineFlags::uint64_tAt(const char* name, size_t len, uint64_t* value) {
Flag* result = Flag::find_flag(name, len);
bool CommandLineFlags::uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_uint64_t()) return false;
*value = result->get_uint64_t();
@ -746,8 +746,8 @@ void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t va
faddr->set_origin(origin);
}
bool CommandLineFlags::size_tAt(const char* name, size_t len, size_t* value) {
Flag* result = Flag::find_flag(name, len);
bool CommandLineFlags::size_tAt(const char* name, size_t len, size_t* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_size_t()) return false;
*value = result->get_size_t();
@ -774,8 +774,8 @@ void CommandLineFlagsEx::size_tAtPut(CommandLineFlagWithType flag, size_t value,
faddr->set_origin(origin);
}
bool CommandLineFlags::doubleAt(const char* name, size_t len, double* value) {
Flag* result = Flag::find_flag(name, len);
bool CommandLineFlags::doubleAt(const char* name, size_t len, double* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_double()) return false;
*value = result->get_double();
@ -802,8 +802,8 @@ void CommandLineFlagsEx::doubleAtPut(CommandLineFlagWithType flag, double value,
faddr->set_origin(origin);
}
bool CommandLineFlags::ccstrAt(const char* name, size_t len, ccstr* value) {
Flag* result = Flag::find_flag(name, len);
bool CommandLineFlags::ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_ccstr()) return false;
*value = result->get_ccstr();

@ -379,38 +379,38 @@ class SizeTFlagSetting {
class CommandLineFlags {
public:
static bool boolAt(const char* name, size_t len, bool* value);
static bool boolAt(const char* name, bool* value) { return boolAt(name, strlen(name), value); }
static bool boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false);
static bool boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false) { return boolAt(name, strlen(name), value, allow_locked, return_flag); }
static bool boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin);
static bool boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); }
static bool intxAt(const char* name, size_t len, intx* value);
static bool intxAt(const char* name, intx* value) { return intxAt(name, strlen(name), value); }
static bool intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false);
static bool intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, return_flag); }
static bool intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin);
static bool intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); }
static bool uintxAt(const char* name, size_t len, uintx* value);
static bool uintxAt(const char* name, uintx* value) { return uintxAt(name, strlen(name), value); }
static bool uintxAt(const char* name, size_t len, uintx* value, bool allow_locked = false, bool return_flag = false);
static bool uintxAt(const char* name, uintx* value, bool allow_locked = false, bool return_flag = false) { return uintxAt(name, strlen(name), value, allow_locked, return_flag); }
static bool uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin);
static bool uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }
static bool size_tAt(const char* name, size_t len, size_t* value);
static bool size_tAt(const char* name, size_t* value) { return size_tAt(name, strlen(name), value); }
static bool size_tAt(const char* name, size_t len, size_t* value, bool allow_locked = false, bool return_flag = false);
static bool size_tAt(const char* name, size_t* value, bool allow_locked = false, bool return_flag = false) { return size_tAt(name, strlen(name), value, allow_locked, return_flag); }
static bool size_tAtPut(const char* name, size_t len, size_t* value, Flag::Flags origin);
static bool size_tAtPut(const char* name, size_t* value, Flag::Flags origin) { return size_tAtPut(name, strlen(name), value, origin); }
static bool uint64_tAt(const char* name, size_t len, uint64_t* value);
static bool uint64_tAt(const char* name, uint64_t* value) { return uint64_tAt(name, strlen(name), value); }
static bool uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked = false, bool return_flag = false);
static bool uint64_tAt(const char* name, uint64_t* value, bool allow_locked = false, bool return_flag = false) { return uint64_tAt(name, strlen(name), value, allow_locked, return_flag); }
static bool uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin);
static bool uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
static bool doubleAt(const char* name, size_t len, double* value);
static bool doubleAt(const char* name, double* value) { return doubleAt(name, strlen(name), value); }
static bool doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false);
static bool doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false) { return doubleAt(name, strlen(name), value, allow_locked, return_flag); }
static bool doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin);
static bool doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); }
static bool ccstrAt(const char* name, size_t len, ccstr* value);
static bool ccstrAt(const char* name, ccstr* value) { return ccstrAt(name, strlen(name), value); }
static bool ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false);
static bool ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false) { return ccstrAt(name, strlen(name), value, allow_locked, return_flag); }
// Contract: Flag will make private copy of the incoming value.
// Outgoing value is always malloc-ed, and caller MUST call free.
static bool ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin);
@ -2984,12 +2984,6 @@ class CommandLineFlags {
product(intx, SafepointTimeoutDelay, 10000, \
"Delay in milliseconds for option SafepointTimeout") \
\
product(intx, NmethodSweepFraction, 16, \
"Number of invocations of sweeper to cover all nmethods") \
\
product(intx, NmethodSweepCheckInterval, 5, \
"Compilers wake up every n seconds to possibly sweep nmethods") \
\
product(intx, NmethodSweepActivity, 10, \
"Removes cold nmethods from code cache if > 0. Higher values " \
"result in more aggressive sweeping") \
@ -3378,9 +3372,6 @@ class CommandLineFlags {
product_pd(uintx, NonNMethodCodeHeapSize, \
"Size of code heap with non-nmethods (in bytes)") \
\
product(uintx, CodeCacheMinimumFreeSpace, 500*K, \
"When less than X space left, we stop compiling") \
\
product_pd(uintx, CodeCacheExpansionSize, \
"Code cache expansion size (in bytes)") \
\
@ -3393,6 +3384,11 @@ class CommandLineFlags {
product(bool, UseCodeCacheFlushing, true, \
"Remove cold/old nmethods from the code cache") \
\
product(uintx, StartAggressiveSweepingAt, 10, \
"Start aggressive sweeping if X[%] of the code cache is free." \
"Segmented code cache: X[%] of the non-profiled heap." \
"Non-segmented code cache: X[%] of the total code cache") \
\
/* interpreter debugging */ \
develop(intx, BinarySwitchThreshold, 5, \
"Minimal number of lookupswitch entries for rewriting to binary " \

@ -61,7 +61,7 @@ Mutex* SymbolTable_lock = NULL;
Mutex* StringTable_lock = NULL;
Monitor* StringDedupQueue_lock = NULL;
Mutex* StringDedupTable_lock = NULL;
Mutex* CodeCache_lock = NULL;
Monitor* CodeCache_lock = NULL;
Mutex* MethodData_lock = NULL;
Mutex* RetData_lock = NULL;
Monitor* VMOperationQueue_lock = NULL;
@ -205,7 +205,7 @@ void mutex_init() {
}
def(ParGCRareEvent_lock , Mutex , leaf , true );
def(DerivedPointerTableGC_lock , Mutex, leaf, true );
def(CodeCache_lock , Mutex , special, true );
def(CodeCache_lock , Monitor, special, true );
def(Interrupt_lock , Monitor, special, true ); // used for interrupt processing
def(RawMonitor_lock , Mutex, special, true );
def(OopMapCacheAlloc_lock , Mutex, leaf, true ); // used for oop_map_cache allocation.

@ -53,7 +53,7 @@ extern Mutex* SymbolTable_lock; // a lock on the symbol table
extern Mutex* StringTable_lock; // a lock on the interned string table
extern Monitor* StringDedupQueue_lock; // a lock on the string deduplication queue
extern Mutex* StringDedupTable_lock; // a lock on the string deduplication table
extern Mutex* CodeCache_lock; // a lock on the CodeCache, rank is special, use MutexLockerEx
extern Monitor* CodeCache_lock; // a lock on the CodeCache, rank is special, use MutexLockerEx
extern Mutex* MethodData_lock; // a lock on installation of method data
extern Mutex* RetData_lock; // a lock on installation of RetData inside method data
extern Mutex* DerivedPointerTableGC_lock; // a lock to protect the derived pointer table
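CodeCache_lock becomes a Monitor because the dedicated sweeper thread now blocks on it (sweeper_loop) and allocation paths wake it (NMethodSweeper::notify). The same wait/notify shape, sketched with the standard library rather than HotSpot's Monitor:

#include <chrono>
#include <condition_variable>
#include <mutex>

std::mutex              codecache_mutex;   // plays the role of CodeCache_lock
std::condition_variable codecache_cv;      // the Monitor part of the change
bool                    sweep_requested = false;

// Sweeper side: block until notified (or a very long timeout), then sweep.
void sweeper_wait_and_sweep_once() {
    std::unique_lock<std::mutex> lock(codecache_mutex);
    codecache_cv.wait_for(lock, std::chrono::hours(24),
                          [] { return sweep_requested; });
    sweep_requested = false;
    // ... the possibly_sweep() equivalent would run here ...
}

// Allocation side: wake the sweeper once free space crosses the threshold.
void notify_sweeper() {
    {
        std::lock_guard<std::mutex> lock(codecache_mutex);
        sweep_requested = true;
    }
    codecache_cv.notify_one();
}

int main() {
    notify_sweeper();              // request is pending ...
    sweeper_wait_and_sweep_once(); // ... so the wait returns immediately
    return 0;
}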

@ -2421,8 +2421,6 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
// CodeCache is full, disable compilation
// Ought to log this but compile log is only per compile thread
// and we're some non descript Java thread.
MutexUnlocker mu(AdapterHandlerLibrary_lock);
CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return NULL; // Out of CodeCache space
}
entry->relocate(new_adapter->content_begin());
@ -2594,9 +2592,6 @@ void AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
CompileTask::print_compilation(tty, nm, method->is_static() ? "(static)" : "");
}
nm->post_compiled_method_load_event();
} else {
// CodeCache is full, disable compilation
CompileBroker::handle_full_code_cache(CodeBlobType::MethodNonProfiled);
}
}

@ -52,7 +52,6 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
class SweeperRecord {
public:
int traversal;
int invocation;
int compile_id;
long traversal_mark;
int state;
@ -62,10 +61,9 @@ class SweeperRecord {
int line;
void print() {
tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
PTR_FORMAT " state = %d traversal_mark %d line = %d",
traversal,
invocation,
compile_id,
kind == NULL ? "" : kind,
uep,
@ -117,7 +115,6 @@ void NMethodSweeper::record_sweep(nmethod* nm, int line) {
if (_records != NULL) {
_records[_sweep_index].traversal = _traversals;
_records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
_records[_sweep_index].invocation = _sweep_fractions_left;
_records[_sweep_index].compile_id = nm->compile_id();
_records[_sweep_index].kind = nm->compile_kind();
_records[_sweep_index].state = nm->_state;
@ -127,6 +124,14 @@ void NMethodSweeper::record_sweep(nmethod* nm, int line) {
_sweep_index = (_sweep_index + 1) % SweeperLogEntries;
}
}
void NMethodSweeper::init_sweeper_log() {
if (LogSweeper && _records == NULL) {
// Create the ring buffer for the logging code
_records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
}
}
#else
#define SWEEP(nm)
#endif
@ -142,8 +147,6 @@ int NMethodSweeper::_zombified_count = 0; // Nof. nmethods
int NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep
volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper
volatile int NMethodSweeper::_sweep_fractions_left = 0; // Nof. invocations left until we are completed with this pass
volatile int NMethodSweeper::_sweep_started = 0; // Flag to control conc sweeper
volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from:
// 1) alive -> not_entrant
// 2) not_entrant -> zombie
@ -190,13 +193,15 @@ int NMethodSweeper::hotness_counter_reset_val() {
}
return _hotness_counter_reset_val;
}
bool NMethodSweeper::sweep_in_progress() {
return !_current.end();
bool NMethodSweeper::wait_for_stack_scanning() {
return _current.end();
}
// Scans the stacks of all Java threads and marks activations of not-entrant methods.
// No need to synchronize access, since 'mark_active_nmethods' is always executed at a
// safepoint.
/**
* Scans the stacks of all Java threads and marks activations of not-entrant methods.
* No need to synchronize access, since 'mark_active_nmethods' is always executed at a
* safepoint.
*/
void NMethodSweeper::mark_active_nmethods() {
assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
// If we do not want to reclaim not-entrant or zombie methods there is no need
@ -210,9 +215,8 @@ void NMethodSweeper::mark_active_nmethods() {
// Check for restart
assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
if (!sweep_in_progress()) {
if (wait_for_stack_scanning()) {
_seen = 0;
_sweep_fractions_left = NmethodSweepFraction;
_current = NMethodIterator();
// Initialize to first nmethod
_current.next();
@ -231,6 +235,64 @@ void NMethodSweeper::mark_active_nmethods() {
OrderAccess::storestore();
}
/**
* This function triggers a VM operation that does stack scanning of active
* methods. Stack scanning is mandatory for the sweeper to make progress.
*/
void NMethodSweeper::do_stack_scanning() {
assert(!CodeCache_lock->owned_by_self(), "just checking");
if (wait_for_stack_scanning()) {
VM_MarkActiveNMethods op;
VMThread::execute(&op);
_should_sweep = true;
}
}
void NMethodSweeper::sweeper_loop() {
bool timeout;
while (true) {
{
ThreadBlockInVM tbivm(JavaThread::current());
MutexLockerEx waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
const long wait_time = 60*60*24 * 1000; // wait at most 24 hours between sweeps
timeout = CodeCache_lock->wait(Mutex::_no_safepoint_check_flag, wait_time);
}
if (!timeout) {
possibly_sweep();
}
}
}
/**
* Wakes up the sweeper thread to possibly sweep.
*/
void NMethodSweeper::notify(int code_blob_type) {
// Makes sure that we do not invoke the sweeper too often during startup.
double start_threshold = 100.0 / (double)StartAggressiveSweepingAt;
double aggressive_sweep_threshold = MIN2(start_threshold, 1.1);
if (CodeCache::reverse_free_ratio(code_blob_type) >= aggressive_sweep_threshold) {
assert_locked_or_safepoint(CodeCache_lock);
CodeCache_lock->notify();
}
}
/**
* Handle a safepoint request
*/
void NMethodSweeper::handle_safepoint_request() {
if (SafepointSynchronize::is_synchronizing()) {
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Sweep at %d out of %d, yielding to safepoint", _seen, CodeCache::nof_nmethods());
}
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
JavaThread* thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
thread->java_suspend_self();
}
}
/**
* This function invokes the sweeper if at least one of the three conditions is met:
* (1) The code cache is getting full
@ -239,11 +301,6 @@ void NMethodSweeper::mark_active_nmethods() {
*/
void NMethodSweeper::possibly_sweep() {
assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
// Only compiler threads are allowed to sweep
if (!MethodFlushing || !sweep_in_progress() || !Thread::current()->is_Compiler_thread()) {
return;
}
// If there was no state change while nmethod sweeping, 'should_sweep' will be false.
// This is one of the two places where should_sweep can be set to true. The general
// idea is as follows: If there is enough free space in the code cache, there is no
@ -280,46 +337,37 @@ void NMethodSweeper::possibly_sweep() {
}
}
if (_should_sweep && _sweep_fractions_left > 0) {
// Only one thread at a time will sweep
jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
if (old != 0) {
return;
}
#ifdef ASSERT
if (LogSweeper && _records == NULL) {
// Create the ring buffer for the logging code
_records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
}
#endif
// Force stack scanning if there is only 10% free space in the code cache.
// We force stack scanning only if the non-profiled code heap gets full, since
// critical allocations go to the non-profiled heap and we must make sure that
// there is enough space.
double free_percent = 1 / CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled) * 100;
if (free_percent <= StartAggressiveSweepingAt) {
do_stack_scanning();
}
if (_sweep_fractions_left > 0) {
sweep_code_cache();
_sweep_fractions_left--;
}
if (_should_sweep) {
init_sweeper_log();
sweep_code_cache();
}
// We are done with sweeping the code cache once.
if (_sweep_fractions_left == 0) {
_total_nof_code_cache_sweeps++;
_last_sweep = _time_counter;
// Reset flag; temporarily disables sweeper
_should_sweep = false;
// If there was enough state change, 'possibly_enable_sweeper()'
// sets '_should_sweep' to true
possibly_enable_sweeper();
// Reset _bytes_changed only if there was enough state change. _bytes_changed
// can further increase by calls to 'report_state_change'.
if (_should_sweep) {
_bytes_changed = 0;
}
}
// Release work, because another compiler thread could continue.
OrderAccess::release_store((int*)&_sweep_started, 0);
// We are done with sweeping the code cache once.
_total_nof_code_cache_sweeps++;
_last_sweep = _time_counter;
// Reset flag; temporarily disables sweeper
_should_sweep = false;
// If there was enough state change, 'possibly_enable_sweeper()'
// sets '_should_sweep' to true
possibly_enable_sweeper();
// Reset _bytes_changed only if there was enough state change. _bytes_changed
// can further increase by calls to 'report_state_change'.
if (_should_sweep) {
_bytes_changed = 0;
}
}
void NMethodSweeper::sweep_code_cache() {
ResourceMark rm;
Ticks sweep_start_counter = Ticks::now();
_flushed_count = 0;
@ -327,25 +375,10 @@ void NMethodSweeper::sweep_code_cache() {
_marked_for_reclamation_count = 0;
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nof_nmethods());
}
if (!CompileBroker::should_compile_new_jobs()) {
// If we have turned off compilations we might as well do full sweeps
// in order to reach the clean state faster. Otherwise the sleeping compiler
// threads will slow down sweeping.
_sweep_fractions_left = 1;
}
// We want to visit all nmethods after NmethodSweepFraction
// invocations so divide the remaining number of nmethods by the
// remaining number of invocations. This is only an estimate since
// the number of nmethods changes during the sweep so the final
// stage must iterate until there are no more nmethods.
int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
int swept_count = 0;
assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
assert(!CodeCache_lock->owned_by_self(), "just checking");
@ -354,19 +387,9 @@ void NMethodSweeper::sweep_code_cache() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// Iterate until there are no more nmethods
while ((swept_count < todo || _sweep_fractions_left == 1) && !_current.end()) {
while (!_current.end()) {
swept_count++;
if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
}
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
assert(Thread::current()->is_Java_thread(), "should be java thread");
JavaThread* thread = (JavaThread*)Thread::current();
ThreadBlockInVM tbivm(thread);
thread->java_suspend_self();
}
handle_safepoint_request();
// Since we will give up the CodeCache_lock, always skip ahead
// to the next nmethod. Other blobs can be deleted by other
// threads but nmethods are only reclaimed by the sweeper.
@ -382,7 +405,7 @@ void NMethodSweeper::sweep_code_cache() {
}
}
assert(_sweep_fractions_left > 1 || _current.end(), "must have scanned the whole cache");
assert(_current.end(), "must have scanned the whole cache");
const Ticks sweep_end_counter = Ticks::now();
const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
@ -397,7 +420,6 @@ void NMethodSweeper::sweep_code_cache() {
event.set_starttime(sweep_start_counter);
event.set_endtime(sweep_end_counter);
event.set_sweepIndex(_traversals);
event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
event.set_sweptCount(swept_count);
event.set_flushedCount(_flushed_count);
event.set_markedCount(_marked_for_reclamation_count);
@ -407,15 +429,12 @@ void NMethodSweeper::sweep_code_cache() {
#ifdef ASSERT
if(PrintMethodFlushing) {
tty->print_cr("### sweeper: sweep time(%d): "
INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time.value());
tty->print_cr("### sweeper: sweep time(%d): ", (jlong)sweep_time.value());
}
#endif
if (_sweep_fractions_left == 1) {
_peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
log_sweep("finished");
}
_peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
log_sweep("finished");
// Sweeper is the only case where memory is released, check here if it
// is time to restart the compiler. Only checking if there is a certain
@ -459,10 +478,12 @@ void NMethodSweeper::possibly_enable_sweeper() {
class NMethodMarker: public StackObj {
private:
CompilerThread* _thread;
CodeCacheSweeperThread* _thread;
public:
NMethodMarker(nmethod* nm) {
_thread = CompilerThread::current();
JavaThread* current = JavaThread::current();
assert(current->is_Code_cache_sweeper_thread(), "Must be");
_thread = (CodeCacheSweeperThread*)current;
if (!nm->is_zombie() && !nm->is_unloaded()) {
// Only expose live nmethods for scanning
_thread->set_scanned_nmethod(nm);
@ -473,7 +494,7 @@ class NMethodMarker: public StackObj {
}
};
void NMethodSweeper::release_nmethod(nmethod *nm) {
void NMethodSweeper::release_nmethod(nmethod* nm) {
// Clean up any CompiledICHolders
{
ResourceMark rm;
@ -490,7 +511,7 @@ void NMethodSweeper::release_nmethod(nmethod *nm) {
nm->flush();
}
int NMethodSweeper::process_nmethod(nmethod *nm) {
int NMethodSweeper::process_nmethod(nmethod* nm) {
assert(!CodeCache_lock->owned_by_self(), "just checking");
int freed_memory = 0;
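The two sweeper thresholds in this file are inverses of each other: notify() compares CodeCache::reverse_free_ratio against 100 / StartAggressiveSweepingAt, while possibly_sweep() converts the ratio back into a free percentage before comparing with StartAggressiveSweepingAt. Assuming reverse_free_ratio is roughly capacity divided by free space, a small numeric check:

#include <cassert>

int main() {
    // Assume reverse_free_ratio ~= capacity / free space.
    double capacity = 100.0, free_space = 5.0;        // 5% of the cache free
    double start_aggressive_at = 10.0;                // StartAggressiveSweepingAt

    double reverse_free_ratio = capacity / free_space;            // 20.0
    double start_threshold    = 100.0 / start_aggressive_at;      // 10.0
    double free_percent       = 1.0 / reverse_free_ratio * 100.0; // 5.0

    // notify() fires when the ratio crosses 100/StartAggressiveSweepingAt ...
    assert(reverse_free_ratio >= start_threshold);
    // ... the same condition as free_percent <= StartAggressiveSweepingAt.
    assert(free_percent <= start_aggressive_at);
    return 0;
}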

@ -49,9 +49,7 @@
// remove the nmethod, all inline caches (IC) that point to the nmethod must be
// cleared. After that, the nmethod can be evicted from the code cache. Each nmethod's
// state change happens during separate sweeps. It may take at least 3 sweeps before an
// nmethod's space is freed. Sweeping is currently done by compiler threads between
// compilations or at least each 5 sec (NmethodSweepCheckInterval) when the code cache
// is full.
// nmethod's space is freed.
class NMethodSweeper : public AllStatic {
static long _traversals; // Stack scan count, also sweep ID.
@ -64,7 +62,6 @@ class NMethodSweeper : public AllStatic {
static int _zombified_count; // Nof. nmethods made zombie in current sweep
static int _marked_for_reclamation_count; // Nof. nmethods marked for reclaim in current sweep
static volatile int _sweep_fractions_left; // Nof. invocations left until we are completed with this pass
static volatile int _sweep_started; // Flag to control conc sweeper
static volatile bool _should_sweep; // Indicates if we should invoke the sweeper
static volatile int _bytes_changed; // Counts the total nmethod size if the nmethod changed from:
@ -85,8 +82,12 @@ class NMethodSweeper : public AllStatic {
static int process_nmethod(nmethod *nm);
static void release_nmethod(nmethod* nm);
static bool sweep_in_progress();
static void init_sweeper_log() NOT_DEBUG_RETURN;
static bool wait_for_stack_scanning();
static void sweep_code_cache();
static void handle_safepoint_request();
static void do_stack_scanning();
static void possibly_sweep();
public:
static long traversal_count() { return _traversals; }
@ -106,7 +107,8 @@ class NMethodSweeper : public AllStatic {
#endif
static void mark_active_nmethods(); // Invoked at the end of each safepoint
static void possibly_sweep(); // Compiler threads call this to sweep
static void sweeper_loop();
static void notify(int code_blob_type); // Possibly start the sweeper thread.
static int hotness_counter_reset_val();
static void report_state_change(nmethod* nm);

@ -66,6 +66,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/task.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
@ -1553,6 +1554,7 @@ void JavaThread::block_if_vm_exited() {
// Remove this ifdef when C1 is ported to the compiler interface.
static void compiler_thread_entry(JavaThread* thread, TRAPS);
static void sweeper_thread_entry(JavaThread* thread, TRAPS);
JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
Thread()
@ -3172,6 +3174,10 @@ static void compiler_thread_entry(JavaThread* thread, TRAPS) {
CompileBroker::compiler_thread_loop();
}
static void sweeper_thread_entry(JavaThread* thread, TRAPS) {
NMethodSweeper::sweeper_loop();
}
// Create a CompilerThread
CompilerThread::CompilerThread(CompileQueue* queue,
CompilerCounters* counters)
@ -3182,7 +3188,6 @@ CompilerThread::CompilerThread(CompileQueue* queue,
_queue = queue;
_counters = counters;
_buffer_blob = NULL;
_scanned_nmethod = NULL;
_compiler = NULL;
#ifndef PRODUCT
@ -3190,7 +3195,12 @@ CompilerThread::CompilerThread(CompileQueue* queue,
#endif
}
void CompilerThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
// Create sweeper thread
CodeCacheSweeperThread::CodeCacheSweeperThread()
: JavaThread(&sweeper_thread_entry) {
_scanned_nmethod = NULL;
}
void CodeCacheSweeperThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
JavaThread::oops_do(f, cld_f, cf);
if (_scanned_nmethod != NULL && cf != NULL) {
// Safepoints can occur when the sweeper is scanning an nmethod so

@ -311,6 +311,7 @@ class Thread: public ThreadShadow {
virtual bool is_VM_thread() const { return false; }
virtual bool is_Java_thread() const { return false; }
virtual bool is_Compiler_thread() const { return false; }
virtual bool is_Code_cache_sweeper_thread() const { return false; }
virtual bool is_hidden_from_external_view() const { return false; }
virtual bool is_jvmti_agent_thread() const { return false; }
// True iff the thread can perform GC operations at a safepoint.
@ -1755,6 +1756,27 @@ inline CompilerThread* JavaThread::as_CompilerThread() {
return (CompilerThread*)this;
}
// Dedicated thread to sweep the code cache
class CodeCacheSweeperThread : public JavaThread {
nmethod* _scanned_nmethod; // nmethod being scanned by the sweeper
public:
CodeCacheSweeperThread();
// Track the nmethod currently being scanned by the sweeper
void set_scanned_nmethod(nmethod* nm) {
assert(_scanned_nmethod == NULL || nm == NULL, "should reset to NULL before writing a new value");
_scanned_nmethod = nm;
}
// Hide sweeper thread from external view.
bool is_hidden_from_external_view() const { return true; }
bool is_Code_cache_sweeper_thread() const { return true; }
// GC support
// Apply "f->do_oop" to all root oops in "this".
// Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
};
// A thread used for Compilation.
class CompilerThread : public JavaThread {
friend class VMStructs;
@ -1767,7 +1789,6 @@ class CompilerThread : public JavaThread {
CompileQueue* _queue;
BufferBlob* _buffer_blob;
nmethod* _scanned_nmethod; // nmethod being scanned by the sweeper
AbstractCompiler* _compiler;
public:
@ -1801,28 +1822,17 @@ class CompilerThread : public JavaThread {
_log = log;
}
// GC support
// Apply "f->do_oop" to all root oops in "this".
// Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
#ifndef PRODUCT
private:
IdealGraphPrinter *_ideal_graph_printer;
public:
IdealGraphPrinter *ideal_graph_printer() { return _ideal_graph_printer; }
void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; }
IdealGraphPrinter *ideal_graph_printer() { return _ideal_graph_printer; }
void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; }
#endif
// Get/set the thread's current task
CompileTask* task() { return _task; }
void set_task(CompileTask* task) { _task = task; }
// Track the nmethod currently being scanned by the sweeper
void set_scanned_nmethod(nmethod* nm) {
assert(_scanned_nmethod == NULL || nm == NULL, "should reset to NULL before writing a new value");
_scanned_nmethod = nm;
}
CompileTask* task() { return _task; }
void set_task(CompileTask* task) { _task = task; }
};
inline CompilerThread* CompilerThread::current() {

@ -111,6 +111,9 @@ void VM_Deoptimize::doit() {
CodeCache::make_marked_nmethods_zombies();
}
void VM_MarkActiveNMethods::doit() {
NMethodSweeper::mark_active_nmethods();
}
VM_DeoptimizeFrame::VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id) {
_thread = thread;

@ -100,6 +100,7 @@
template(RotateGCLog) \
template(WhiteBoxOperation) \
template(ClassLoaderStatsOperation) \
template(MarkActiveNMethods) \
template(PrintCompileQueue) \
template(PrintCodeList) \
template(PrintCodeCache) \
@ -252,6 +253,13 @@ class VM_Deoptimize: public VM_Operation {
bool allow_nested_vm_operations() const { return true; }
};
class VM_MarkActiveNMethods: public VM_Operation {
public:
VM_MarkActiveNMethods() {}
VMOp_Type type() const { return VMOp_MarkActiveNMethods; }
void doit();
bool allow_nested_vm_operations() const { return true; }
};
// Deopt helper that can deoptimize frames in threads other than the
// current thread. Only used through Deoptimization::deoptimize_frame.
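VM_MarkActiveNMethods follows the standard VM-operation shape: a small object whose doit() the VMThread runs at a safepoint, here triggering the stack scan the sweeper depends on. A skeletal sketch of that shape (illustrative classes, not the HotSpot ones):

// Skeletal VM-operation shape: the requesting thread constructs the
// operation and hands it to the VM thread, which runs doit() at a
// safepoint. Names below are illustrative, not the HotSpot classes.
class VMOperation {
public:
    virtual ~VMOperation() {}
    virtual const char* name() const = 0;
    virtual void doit() = 0;                 // runs in the VM thread
    virtual bool allow_nested_vm_operations() const { return false; }
};

class MarkActiveNMethodsOp : public VMOperation {
public:
    const char* name() const override { return "MarkActiveNMethods"; }
    void doit() override {
        // Would call NMethodSweeper::mark_active_nmethods(): scan Java
        // stacks and mark activations of not-entrant methods.
    }
    bool allow_nested_vm_operations() const override { return true; }
};

int main() {
    MarkActiveNMethodsOp op;
    op.doit();  // in HotSpot this would run in the VMThread at a safepoint
    return 0;
}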

@ -383,7 +383,6 @@ Declares a structure type that can be used in other events.
<event id="SweepCodeCache" path="vm/code_sweeper/sweep" label="Sweep Code Cache"
has_thread="true" is_requestable="false" is_constant="false">
<value type="INTEGER" field="sweepIndex" label="Sweep Index" relation="SWEEP_ID"/>
<value type="USHORT" field="sweepFractionIndex" label="Fraction Index"/>
<value type="UINT" field="sweptCount" label="Methods Swept"/>
<value type="UINT" field="flushedCount" label="Methods Flushed"/>
<value type="UINT" field="markedCount" label="Methods Reclaimed"/>

@ -198,7 +198,8 @@ compact2_minimal = \
# Tests that require compact2 API's
#
needs_compact2 =
needs_compact2 = \
compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java
# All tests that run on the most minimal configuration: Minimal VM on Compact 1
compact1_minimal = \
@ -443,6 +444,7 @@ hotspot_compiler_3 = \
compiler/arraycopy/TestMissingControl.java \
compiler/ciReplay/TestVM_no_comp_level.sh \
compiler/classUnloading/anonymousClass/TestAnonymousClassUnloading.java \
compiler/codecache/CheckSegmentedCodeCache.java \
compiler/codecache/CheckUpperLimit.java \
compiler/codegen/ \
compiler/cpuflags/RestoreMXCSR.java \
@ -479,7 +481,6 @@ hotspot_compiler_3 = \
compiler/intrinsics/unsafe/UnsafeGetAddressTest.java \
compiler/jsr292/ConcurrentClassLoadingTest.java \
compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java \
compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java \
compiler/loopopts/TestLogSum.java \
compiler/macronodes/TestEliminateAllocationPhi.java \
compiler/membars/TestMemBarAcquire.java \
@ -602,3 +603,14 @@ hotspot_all = \
:hotspot_gc \
:hotspot_runtime \
:hotspot_serviceability
# All tests that depend on the nashorn extension.
#
needs_nashorn = \
  compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java
# All tests that do not depend on the nashorn extension.
#
not_needs_nashorn = \
  :jdk \
  -:needs_nashorn

@ -0,0 +1,63 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @library /testlibrary
* @run main/othervm -Xbatch -XX:+EliminateAutoBox
* -XX:CompileOnly=::valueOf,::byteValue,::shortValue,::testUnsignedByte,::testUnsignedShort
* UnsignedLoads
*/
import static com.oracle.java.testlibrary.Asserts.assertEQ;
public class UnsignedLoads {
public static int testUnsignedByte() {
byte[] bytes = new byte[] {-1};
int res = 0;
for (int i = 0; i < 100000; i++) {
for (Byte b : bytes) {
res = b & 0xff;
}
}
return res;
}
public static int testUnsignedShort() {
int res = 0;
short[] shorts = new short[] {-1};
for (int i = 0; i < 100000; i++) {
for (Short s : shorts) {
res = s & 0xffff;
}
}
return res;
}
public static void main(String[] args) {
assertEQ(testUnsignedByte(), 255);
assertEQ(testUnsignedShort(), 65535);
System.out.println("TEST PASSED");
}
}

@ -22,15 +22,20 @@
*/
import com.oracle.java.testlibrary.*;
import sun.hotspot.WhiteBox;
/*
* @test CheckSegmentedCodeCache
* @bug 8015774
* @library /testlibrary /testlibrary/whitebox
* @summary Checks VM options related to the segmented code cache
* @library /testlibrary
* @run main/othervm CheckSegmentedCodeCache
* @build CheckSegmentedCodeCache
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI CheckSegmentedCodeCache
*/
public class CheckSegmentedCodeCache {
private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
// Code heap names
private static final String NON_METHOD = "CodeHeap 'non-nmethods'";
private static final String PROFILED = "CodeHeap 'profiled nmethods'";
@ -133,8 +138,11 @@ public class CheckSegmentedCodeCache {
failsWith(pb, "Invalid code heap sizes");
// Fails if not enough space for VM internal code
long minUseSpace = WHITE_BOX.getUintxVMFlag("CodeCacheMinimumUseSpace");
// minimum size: CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)
long minSize = (Platform.isDebugBuild() ? 3 : 1) * minUseSpace;
pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
"-XX:ReservedCodeCacheSize=1700K",
"-XX:ReservedCodeCacheSize=" + minSize,
"-XX:InitialCodeCacheSize=100K");
failsWith(pb, "Not enough space in non-nmethod code heap to run VM");
}

@ -0,0 +1,39 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @run main/othervm -Xbatch -XX:-TieredCompilation
* -XX:+IgnoreUnrecognizedVMOptions -XX:+TraceIterativeGVN
* TraceIterativeGVN
*/
public class TraceIterativeGVN {
public static void main(String[] args) {
for (int i = 0; i < 100_000; i++) {
Byte.valueOf((byte)0);
}
System.out.println("TEST PASSED");
}
}

@ -70,7 +70,7 @@ public class CatchInlineExceptions {
if (counter1 != 0) {
throw new RuntimeException("Failed: counter1(" + counter1 + ") != 0");
}
if (counter2 != counter) {
if (counter2 != counter0) {
throw new RuntimeException("Failed: counter2(" + counter2 + ") != counter0(" + counter0 + ")");
}
if (counter2 != counter) {

@ -41,14 +41,14 @@ public class TestAndnI {
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
System.out.println("CPU does not support bmi1 feature. "+
"Test skipped.");
return;
System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(AndnIExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(AndnICommutativeExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}

@ -41,14 +41,14 @@ public class TestAndnL {
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
System.out.println("CPU does not support bmi1 feature. " +
"Test skipped.");
return;
System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(AndnLExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(AndnLCommutativeExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}

@ -41,14 +41,14 @@ public class TestBlsiI {
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
System.out.println("CPU does not support bmi1 feature. " +
"Test skipped.");
return;
System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(BlsiIExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(BlsiICommutativeExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}

@ -41,14 +41,14 @@ public class TestBlsiL {
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
System.out.println("CPU does not support bmi1 feature. " +
"Test skipped.");
return;
System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(BlsiLExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(BlsiLCommutativeExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}

@ -41,14 +41,14 @@ public class TestBlsmskI {
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
System.out.println("CPU does not support bmi1 feature. " +
"Test skipped.");
return;
System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(BlsmskIExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(BlsmskICommutativeExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}

@ -41,14 +41,14 @@ public class TestBlsmskL {
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
System.out.println("CPU does not support bmi1 feature. " +
"Test skipped.");
return;
System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(BlsmskLExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(BlsmskLCommutativeExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}

@ -41,14 +41,14 @@ public class TestBlsrI {
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
System.out.println("CPU does not support bmi1 feature. " +
"Test skipped.");
return;
System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(BlsrIExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(BlsrICommutativeExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}

@ -41,14 +41,14 @@ public class TestBlsrL {
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
System.out.println("CPU does not support bmi1 feature. " +
"Test skipped.");
return;
System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(BlsrLExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(BlsrLCommutativeExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}

@ -41,12 +41,11 @@ public class TestLzcntI {
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("lzcnt")) {
System.out.println("CPU does not support lzcnt feature. " +
"Test skipped.");
return;
System.out.println("INFO: CPU does not support lzcnt feature.");
}
BMITestRunner.runTests(LzcntIExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseCountLeadingZerosInstruction");
}

@ -41,12 +41,11 @@ public class TestLzcntL {
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("lzcnt")) {
System.out.println("CPU does not support lzcnt feature. " +
"Test skipped.");
return;
System.out.println("INFO: CPU does not support lzcnt feature.");
}
BMITestRunner.runTests(LzcntLExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseCountLeadingZerosInstruction");
}

@ -41,12 +41,11 @@ public class TestTzcntI {
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
System.out.println("CPU does not support bmi1 feature. " +
"Test skipped.");
return;
System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(TzcntIExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseCountTrailingZerosInstruction");
}

@ -41,12 +41,11 @@ public class TestTzcntL {
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
System.out.println("CPU does not support bmi1 feature. " +
"Test skipped.");
return;
System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(TzcntLExpr.class, args,
"-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseCountTrailingZerosInstruction");
}

@ -27,10 +27,20 @@
* @summary Test ensures that there is no crash if there is not enough ReservedCodeCacheSize
* to initialize all compiler threads. The option -Xcomp gives the VM more time to
* trigger the old bug.
* @run main/othervm -XX:ReservedCodeCacheSize=3m -XX:CICompilerCount=64 -Xcomp SmallCodeCacheStartup
* @library /testlibrary
*/
import com.oracle.java.testlibrary.*;
public class SmallCodeCacheStartup {
public static void main(String[] args) throws Exception {
try {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=3m",
"-XX:CICompilerCount=64",
"-Xcomp",
"SmallCodeCacheStartup");
pb.start();
} catch (VirtualMachineError e) {}
System.out.println("TEST PASSED");
}
}

@ -73,8 +73,6 @@ public abstract class CompilerWhiteBoxTest {
protected static final int THRESHOLD;
/** invocation count to trigger OSR compilation */
protected static final long BACKEDGE_THRESHOLD;
/** invocation count to warm up method before triggering OSR compilation */
protected static final long OSR_WARMUP = 2000;
/** Value of {@code java.vm.info} (interpreted|mixed|comp mode) */
protected static final String MODE = System.getProperty("java.vm.info");
@ -197,7 +195,6 @@ public abstract class CompilerWhiteBoxTest {
* is compiled, or if {@linkplain #method} has a non-zero
* compilation level.
*/
protected final void checkNotCompiled(int compLevel) {
if (WHITE_BOX.isMethodQueuedForCompilation(method)) {
throw new RuntimeException(method + " must not be in queue");
@ -218,24 +215,30 @@ public abstract class CompilerWhiteBoxTest {
* compilation level.
*/
protected final void checkNotCompiled() {
if (WHITE_BOX.isMethodCompiled(method, false)) {
throw new RuntimeException(method + " must be not compiled");
}
if (WHITE_BOX.getMethodCompilationLevel(method, false) != 0) {
throw new RuntimeException(method + " comp_level must be == 0");
}
checkNotOsrCompiled();
checkNotCompiled(true);
checkNotCompiled(false);
}
protected final void checkNotOsrCompiled() {
/**
* Checks that {@linkplain #method} is not (OSR-)compiled.
*
* @param isOsr Check for OSR compilation if true
* @throws RuntimeException if {@linkplain #method} is in compiler queue or
* is compiled, or if {@linkplain #method} has a non-zero
* compilation level.
*/
protected final void checkNotCompiled(boolean isOsr) {
waitBackgroundCompilation();
if (WHITE_BOX.isMethodQueuedForCompilation(method)) {
throw new RuntimeException(method + " must not be in queue");
}
if (WHITE_BOX.isMethodCompiled(method, true)) {
throw new RuntimeException(method + " must be not osr_compiled");
if (WHITE_BOX.isMethodCompiled(method, isOsr)) {
throw new RuntimeException(method + " must not be " +
(isOsr ? "osr_" : "") + "compiled");
}
if (WHITE_BOX.getMethodCompilationLevel(method, true) != 0) {
throw new RuntimeException(method + " osr_comp_level must be == 0");
if (WHITE_BOX.getMethodCompilationLevel(method, isOsr) != 0) {
throw new RuntimeException(method + (isOsr ? " osr_" : " ") +
"comp_level must be == 0");
}
}
@ -498,8 +501,7 @@ enum SimpleTestCase implements CompilerWhiteBoxTest.TestCase {
= new Callable<Integer>() {
@Override
public Integer call() throws Exception {
int result = warmup(OSR_CONSTRUCTOR);
return result + new Helper(null, CompilerWhiteBoxTest.BACKEDGE_THRESHOLD).hashCode();
return new Helper(null, CompilerWhiteBoxTest.BACKEDGE_THRESHOLD).hashCode();
}
};
@ -509,8 +511,7 @@ enum SimpleTestCase implements CompilerWhiteBoxTest.TestCase {
@Override
public Integer call() throws Exception {
int result = warmup(OSR_METHOD);
return result + helper.osrMethod(CompilerWhiteBoxTest.BACKEDGE_THRESHOLD);
return helper.osrMethod(CompilerWhiteBoxTest.BACKEDGE_THRESHOLD);
}
};
@ -518,66 +519,10 @@ enum SimpleTestCase implements CompilerWhiteBoxTest.TestCase {
= new Callable<Integer>() {
@Override
public Integer call() throws Exception {
int result = warmup(OSR_STATIC);
return result + osrStaticMethod(CompilerWhiteBoxTest.BACKEDGE_THRESHOLD);
return osrStaticMethod(CompilerWhiteBoxTest.BACKEDGE_THRESHOLD);
}
};
/**
* Deoptimizes all non-osr versions of the given executable after
* compilation finished.
*
* @param e Executable
* @throws Exception
*/
private static void waitAndDeoptimize(Executable e) throws Exception {
CompilerWhiteBoxTest.waitBackgroundCompilation(e);
if (WhiteBox.getWhiteBox().isMethodQueuedForCompilation(e)) {
throw new RuntimeException(e + " must not be in queue");
}
// Deoptimize non-osr versions of executable
WhiteBox.getWhiteBox().deoptimizeMethod(e, false);
}
/**
* Executes the method multiple times to make sure we have
* enough profiling information before triggering an OSR
* compilation. Otherwise the C2 compiler may add uncommon traps.
*
* @param m Method to be executed
* @return Number of times the method was executed
* @throws Exception
*/
private static int warmup(Method m) throws Exception {
Helper helper = new Helper();
int result = 0;
for (long i = 0; i < CompilerWhiteBoxTest.OSR_WARMUP; ++i) {
result += (int)m.invoke(helper, 1);
}
// Deoptimize non-osr versions
waitAndDeoptimize(m);
return result;
}
/**
* Executes the constructor multiple times to make sure we
* have enough profiling information before triggering an OSR
* compilation. Otherwise the C2 compiler may add uncommon traps.
*
* @param c Constructor to be executed
* @return Number of times the constructor was executed
* @throws Exception
*/
private static int warmup(Constructor c) throws Exception {
int result = 0;
for (long i = 0; i < CompilerWhiteBoxTest.OSR_WARMUP; ++i) {
result += c.newInstance(null, 1).hashCode();
}
// Deoptimize non-osr versions
waitAndDeoptimize(c);
return result;
}
private static final Constructor CONSTRUCTOR;
private static final Constructor OSR_CONSTRUCTOR;
private static final Method METHOD;
@ -622,16 +567,83 @@ enum SimpleTestCase implements CompilerWhiteBoxTest.TestCase {
return 42;
}
private static int osrStaticMethod(long limit) {
/**
* Deoptimizes all non-osr versions of the given executable after
* compilation finished.
*
* @param e Executable
* @throws Exception
*/
private static void waitAndDeoptimize(Executable e) {
CompilerWhiteBoxTest.waitBackgroundCompilation(e);
if (WhiteBox.getWhiteBox().isMethodQueuedForCompilation(e)) {
throw new RuntimeException(e + " must not be in queue");
}
// Deoptimize non-osr versions of executable
WhiteBox.getWhiteBox().deoptimizeMethod(e, false);
}
/**
* Executes the method multiple times to make sure we have
* enough profiling information before triggering an OSR
* compilation. Otherwise the C2 compiler may add uncommon traps.
*
* @param m Method to be executed
* @return Number of times the method was executed
* @throws Exception
*/
private static int warmup(Method m) throws Exception {
waitAndDeoptimize(m);
Helper helper = new Helper();
int result = 0;
for (long i = 0; i < CompilerWhiteBoxTest.THRESHOLD; ++i) {
result += (int)m.invoke(helper, 1);
}
// Wait to make sure OSR compilation is not blocked by
// non-OSR compilation in the compile queue
CompilerWhiteBoxTest.waitBackgroundCompilation(m);
return result;
}
/**
* Executes the constructor multiple times to make sure we
* have enough profiling information before triggering an OSR
* compilation. Otherwise the C2 compiler may add uncommon traps.
*
* @param c Constructor to be executed
* @return Number of times the constructor was executed
* @throws Exception
*/
private static int warmup(Constructor c) throws Exception {
waitAndDeoptimize(c);
int result = 0;
for (long i = 0; i < CompilerWhiteBoxTest.THRESHOLD; ++i) {
result += c.newInstance(null, 1).hashCode();
}
// Wait to make sure OSR compilation is not blocked by
// non-OSR compilation in the compile queue
CompilerWhiteBoxTest.waitBackgroundCompilation(c);
return result;
}
private static int osrStaticMethod(long limit) throws Exception {
int result = 0;
if (limit != 1) {
result = warmup(OSR_STATIC);
}
// Trigger osr compilation
for (long i = 0; i < limit; ++i) {
result += staticMethod();
}
return result;
}
private int osrMethod(long limit) {
private int osrMethod(long limit) throws Exception {
int result = 0;
if (limit != 1) {
result = warmup(OSR_METHOD);
}
// Trigger osr compilation
for (long i = 0; i < limit; ++i) {
result += method();
}
@ -646,8 +658,12 @@ enum SimpleTestCase implements CompilerWhiteBoxTest.TestCase {
}
// for OSR constructor test case
private Helper(Object o, long limit) {
private Helper(Object o, long limit) throws Exception {
int result = 0;
if (limit != 1) {
result = warmup(OSR_CONSTRUCTOR);
}
// Trigger osr compilation
for (long i = 0; i < limit; ++i) {
result += method();
}

@ -0,0 +1,95 @@
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import sun.hotspot.WhiteBox;
import java.lang.reflect.Executable;
import java.lang.reflect.Method;
/*
* @test DeoptimizeMultipleOSRTest
* @bug 8061817
* @library /testlibrary /testlibrary/whitebox
* @build DeoptimizeMultipleOSRTest
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,DeoptimizeMultipleOSRTest::triggerOSR DeoptimizeMultipleOSRTest
* @summary testing of WB::deoptimizeMethod()
*/
public class DeoptimizeMultipleOSRTest {
private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
private static final long BACKEDGE_THRESHOLD = 150000;
private Method method;
private int counter = 0;
public static void main(String[] args) throws Exception {
DeoptimizeMultipleOSRTest test = new DeoptimizeMultipleOSRTest();
test.test();
}
/**
* Triggers two different OSR compilations for the same method and
* checks if WhiteBox.deoptimizeMethod() deoptimizes both.
*
* @throws Exception
*/
public void test() throws Exception {
method = DeoptimizeMultipleOSRTest.class.getDeclaredMethod("triggerOSR", boolean.class, long.class);
// Trigger two OSR compiled versions
triggerOSR(true, BACKEDGE_THRESHOLD);
triggerOSR(false, BACKEDGE_THRESHOLD);
// Wait for compilation
CompilerWhiteBoxTest.waitBackgroundCompilation(method);
// Deoptimize
WHITE_BOX.deoptimizeMethod(method, true);
if (WHITE_BOX.isMethodCompiled(method, true)) {
throw new AssertionError("Not all OSR compiled versions were deoptimized");
}
}
/**
* Triggers OSR compilations by executing loops.
*
* @param first Determines which loop to execute
* @param limit The number of loop iterations
*/
public void triggerOSR(boolean first, long limit) {
if (limit != 1) {
// Warmup method to avoid uncommon traps
for (int i = 0; i < limit; ++i) {
triggerOSR(first, 1);
}
CompilerWhiteBoxTest.waitBackgroundCompilation(method);
}
if (first) {
// Trigger OSR compilation 1
for (int i = 0; i < limit; ++i) {
counter++;
}
} else {
// Trigger OSR compilation 2
for (int i = 0; i < limit; ++i) {
counter++;
}
}
}
}

@ -132,14 +132,15 @@ public class MakeMethodNotCompilableTest extends CompilerWhiteBoxTest {
throw new RuntimeException(method
+ " is not compilable after clearMethodState()");
}
// Make method not (OSR-)compilable (depending on testCase.isOsr())
makeNotCompilable();
if (isCompilable()) {
throw new RuntimeException(method + " must be not compilable");
}
// Try to (OSR-)compile method
compile();
checkNotOsrCompiled();
// Method should not be (OSR-)compiled
checkNotCompiled(testCase.isOsr());
if (isCompilable()) {
throw new RuntimeException(method + " must be not compilable");
}

@ -0,0 +1,60 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test TestUseNUMAInterleaving
* @summary Tests that UseNUMAInterleaving is enabled for all collectors by
* ergonomics on all platforms when the UseNUMA feature is enabled.
* @bug 8059614
* @key gc
* @library /testlibrary
* @run driver TestUseNUMAInterleaving
*/
import com.oracle.java.testlibrary.ProcessTools;
import com.oracle.java.testlibrary.OutputAnalyzer;
public class TestUseNUMAInterleaving {
public static void main(String[] args) throws Exception {
String[] vmargs = new String[]{
"-XX:+UseNUMA",
"-XX:+PrintFlagsFinal",
"-version"
};
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(true, vmargs);
OutputAnalyzer output = new OutputAnalyzer(pb.start());
boolean isNUMAEnabled
= Boolean.parseBoolean(output.firstMatch(NUMA_FLAG_PATTERN, 1));
if (isNUMAEnabled) {
output.shouldMatch("\\bUseNUMAInterleaving\\b.*?=.*?true");
System.out.println(output.getStdout());
} else {
System.out.println(output.firstMatch(NUMA_FLAG_PATTERN));
System.out.println(output.firstMatch(NUMA_FLAG_PATTERN, 1));
}
}
private static final String NUMA_FLAG_PATTERN = "\\bUseNUMA\\b.*?=.*?([a-z]+)";
}
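The regex above is what turns -XX:+PrintFlagsFinal output into a boolean: group 1 captures the lowercase word after the '='. A small sketch of that extraction, using a representative flag line (the exact spacing of real output may differ):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class NumaFlagPatternSketch {
    public static void main(String[] args) {
        // Representative -XX:+PrintFlagsFinal line (spacing is illustrative).
        String line = "     bool UseNUMA                        = true            {product}";
        Matcher m = Pattern.compile("\\bUseNUMA\\b.*?=.*?([a-z]+)").matcher(line);
        if (m.find()) {
            System.out.println(m.group(1)); // prints "true"
        }
    }
}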

@ -135,7 +135,6 @@ public class TestHumongousCodeCacheRoots {
"-XX:+UnlockDiagnosticVMOptions",
"-XX:InitiatingHeapOccupancyPercent=1", // strong code root marking
"-XX:+G1VerifyHeapRegionCodeRoots", "-XX:+VerifyAfterGC", // make sure that verification is run
"-XX:NmethodSweepFraction=1", "-XX:NmethodSweepCheckInterval=1", // make the code cache sweep more predictable
};
runTest("-client", baseArguments);
runTest("-server", baseArguments);

@ -27,6 +27,7 @@
* @library ..
* @build DcmdUtil CompilerQueueTest
* @run main CompilerQueueTest
* @run main/othervm -XX:-TieredCompilation CompilerQueueTest
* @run main/othervm -Xint CompilerQueueTest
* @summary Test of diagnostic command Compiler.queue
*/
@ -87,7 +88,9 @@ public class CompilerQueueTest {
}
private static void validateMethodLine(String str) throws Exception {
String name = str.substring(19);
// Skip until package/class name begins. Trim to remove whitespace that
// may differ.
String name = str.substring(14).trim();
int sep = name.indexOf("::");
if (sep == -1) {
throw new Exception("Failed dcmd queue, didn't find separator :: in: " + name);

@ -179,6 +179,8 @@ public class WhiteBox {
public native void printRegionInfo(int context);
// VM flags
public native boolean isConstantVMFlag(String name);
public native boolean isLockedVMFlag(String name);
public native void setBooleanVMFlag(String name, boolean value);
public native void setIntxVMFlag(String name, long value);
public native void setUintxVMFlag(String name, long value);

@ -43,6 +43,7 @@ public class BooleanTest {
private static final Boolean[] TESTS = {true, false, true, true, false};
private static final String TEST_NAME = "BooleanTest";
private static final String FLAG_NAME = "PrintCompilation";
private static final String FLAG_DEBUG_NAME = "SafepointALot";
private static final String METHOD = TEST_NAME + "::method";
private static final String METHOD1 = METHOD + "1";
private static final String METHOD2 = METHOD + "2";
@ -54,6 +55,7 @@ public class BooleanTest {
VmFlagTest.WHITE_BOX::getBooleanVMFlag);
testFunctional(false);
testFunctional(true);
VmFlagTest.runTest(FLAG_DEBUG_NAME, VmFlagTest.WHITE_BOX::getBooleanVMFlag);
} else {
boolean value = Boolean.valueOf(args[0]);
method1();

@ -35,6 +35,7 @@
public class IntxTest {
private static final String FLAG_NAME = "OnStackReplacePercentage";
private static final String FLAG_DEBUG_NAME = "InlineFrequencyCount";
private static final Long[] TESTS = {0L, 100L, -1L,
(long) Integer.MAX_VALUE, (long) Integer.MIN_VALUE};
@ -42,6 +43,7 @@ public class IntxTest {
VmFlagTest.runTest(FLAG_NAME, TESTS,
VmFlagTest.WHITE_BOX::setIntxVMFlag,
VmFlagTest.WHITE_BOX::getIntxVMFlag);
VmFlagTest.runTest(FLAG_DEBUG_NAME, VmFlagTest.WHITE_BOX::getIntxVMFlag);
}
}

@ -35,12 +35,14 @@
public class StringTest {
private static final String FLAG_NAME = "CompileOnly";
private static final String FLAG_DEBUG_NAME = "SuppressErrorAt";
private static final String[] TESTS = {"StringTest::*", ""};
public static void main(String[] args) throws Exception {
VmFlagTest.runTest(FLAG_NAME, TESTS,
VmFlagTest.WHITE_BOX::setStringVMFlag,
VmFlagTest.WHITE_BOX::getStringVMFlag);
VmFlagTest.runTest(FLAG_DEBUG_NAME, VmFlagTest.WHITE_BOX::getStringVMFlag);
}
}

@ -36,6 +36,7 @@ import com.oracle.java.testlibrary.Platform;
public class UintxTest {
private static final String FLAG_NAME = "VerifyGCStartAt";
private static final String FLAG_DEBUG_NAME = "CodeCacheMinimumUseSpace";
private static final Long[] TESTS = {0L, 100L, (long) Integer.MAX_VALUE,
(1L << 32L) - 1L, 1L << 32L};
private static final Long[] EXPECTED_64 = TESTS;
@ -47,6 +48,7 @@ public class UintxTest {
Platform.is64bit() ? EXPECTED_64 : EXPECTED_32,
VmFlagTest.WHITE_BOX::setUintxVMFlag,
VmFlagTest.WHITE_BOX::getUintxVMFlag);
VmFlagTest.runTest(FLAG_DEBUG_NAME, VmFlagTest.WHITE_BOX::getUintxVMFlag);
}
}

@ -37,16 +37,18 @@ public final class VmFlagTest<T> {
private final BiConsumer<T, T> test;
private final BiConsumer<String, T> set;
private final Function<String, T> get;
private final boolean isPositive;
protected VmFlagTest(String flagName, BiConsumer<String, T> set,
Function<String, T> get, boolean isPositive) {
this.flagName = flagName;
this.set = set;
this.get = get;
this.isPositive = isPositive;
if (isPositive) {
test = this::testPositive;
test = this::testWritePositive;
} else {
test = this::testNegative;
test = this::testWriteNegative;
}
}
@ -63,6 +65,10 @@ public final class VmFlagTest<T> {
runTest(existentFlag, tests, tests, set, get);
}
protected static <T> void runTest(String existentFlag, Function<String, T> get) {
runTest(existentFlag, null, null, null, get);
}
protected static <T> void runTest(String existentFlag, T[] tests,
T[] results, BiConsumer<String, T> set, Function<String, T> get) {
if (existentFlag != null) {
@ -72,13 +78,23 @@ public final class VmFlagTest<T> {
}
public final void test(T[] tests, T[] results) {
Asserts.assertEQ(tests.length, results.length, "[TESTBUG] tests.length != results.length");
for (int i = 0, n = tests.length ; i < n; ++i) {
test.accept(tests[i], results[i]);
if (isPositive) {
testRead();
}
if (tests != null) {
Asserts.assertEQ(tests.length, results.length, "[TESTBUG] tests.length != results.length");
for (int i = 0, n = tests.length ; i < n; ++i) {
test.accept(tests[i], results[i]);
}
}
}
protected String getVMOptionAsString() {
if (WHITE_BOX.isConstantVMFlag(flagName) || WHITE_BOX.isLockedVMFlag(flagName)) {
// JMM cannot access debug flags in product builds or locked flags;
// use WhiteBox methods to get the values of such flags.
return asString(getValue());
}
HotSpotDiagnosticMXBean diagnostic
= ManagementFactoryHelper.getDiagnosticMXBean();
VMOption tmp;
@ -90,18 +106,24 @@ public final class VmFlagTest<T> {
return tmp == null ? null : tmp.getValue();
}
private void testPositive(T value, T expected) {
String oldValue = getVMOptionAsString();
Asserts.assertEQ(oldValue, asString(getValue()));
Asserts.assertEQ(oldValue, asString(WHITE_BOX.getVMFlag(flagName)));
setNewValue(value);
String newValue = getVMOptionAsString();
Asserts.assertEQ(newValue, asString(expected));
Asserts.assertEQ(newValue, asString(getValue()));
Asserts.assertEQ(newValue, asString(WHITE_BOX.getVMFlag(flagName)));
private String testRead() {
String value = getVMOptionAsString();
Asserts.assertNotNull(value);
Asserts.assertEQ(value, asString(getValue()));
Asserts.assertEQ(value, asString(WHITE_BOX.getVMFlag(flagName)));
return value;
}
private void testNegative(T value, T expected) {
private void testWritePositive(T value, T expected) {
setNewValue(value);
String newValue = testRead();
Asserts.assertEQ(newValue, asString(expected));
}
private void testWriteNegative(T value, T expected) {
// Should always return false for non-existing flags
Asserts.assertFalse(WHITE_BOX.isConstantVMFlag(flagName));
Asserts.assertFalse(WHITE_BOX.isLockedVMFlag(flagName));
String oldValue = getVMOptionAsString();
Asserts.assertEQ(oldValue, asString(getValue()));
Asserts.assertEQ(oldValue, asString(WHITE_BOX.getVMFlag(flagName)));
@ -114,4 +136,3 @@ public final class VmFlagTest<T> {
return value == null ? null : "" + value;
}
}

@ -279,3 +279,4 @@ b940ca3d2c7e8a279ca850706b89c2ad3a841e82 jdk9-b32
6b343b9b7a7008f5f699a2d99881163cab7a2986 jdk9-b34
b9370464572fc663a38956047aa612d6e7854c3d jdk9-b35
61b4c9acaa58e482db6601ec5dc4fc3d2d8dbb55 jdk9-b36
48e4ec70cc1c8651e4a0324d91f193c4edd83af9 jdk9-b37

@ -529,6 +529,16 @@ public class UTF8Reader
invalidByte(4, 4, b2);
}
// check if output buffer is large enough to hold 2 surrogate chars
if (out + 1 >= ch.length) {
fBuffer[0] = (byte)b0;
fBuffer[1] = (byte)b1;
fBuffer[2] = (byte)b2;
fBuffer[3] = (byte)b3;
fOffset = 4;
return out - offset;
}
// decode bytes into surrogate characters
int uuuuu = ((b0 << 2) & 0x001C) | ((b1 >> 4) & 0x0003);
if (uuuuu > 0x10) {
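The bytes stashed in fBuffer are replayed on the next read once the caller has drained its output array; the decode that follows then expands the four-byte UTF-8 sequence into a surrogate pair, which is why two free output chars are required. A minimal sketch of the supplementary-plane arithmetic, using a sample code point rather than anything from the Xerces source:

public class SurrogatePairSketch {
    public static void main(String[] args) {
        int cp = 0x1F600;                          // supplementary code point, 4 UTF-8 bytes
        int v = cp - 0x10000;                      // 20 bits, split 10/10 across the pair
        char high = (char) (0xD800 | (v >> 10));   // 0xD83D
        char low  = (char) (0xDC00 | (v & 0x3FF)); // 0xDE00
        // Matches the library's own encoding of the same code point.
        System.out.println(new String(Character.toChars(cp)).equals("" + high + low)); // true
    }
}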

@ -282,3 +282,4 @@ e58d3ea638c3824f01547596b2a98aa5f77c4a5c jdk9-b30
28ea43d925f1e5250976097a2977dd3e66e11f0b jdk9-b34
afe0c89e2edbdfb1a7ceff3d9b3ff46c4186202f jdk9-b35
84803c3be7f79d29c7dc40749d7743675f64107a jdk9-b36
90de6ecbff46386a3f9d6f7ca876e7aa6381f50a jdk9-b37

@ -279,3 +279,4 @@ f0870554049807d3392bd7976ab114f7f2b7bafa jdk9-b27
21568031434d7a9dbb0cc6516cc3183d349c2253 jdk9-b34
e549291a0227031310fa91c574891f892d27f959 jdk9-b35
cdcf2e599e42935c2d1d19a24bb19e808aeb43b5 jdk9-b36
27c3345d6dce39a22c262f30bb1f0e0b00c3709e jdk9-b37

@ -48,22 +48,6 @@ $(INCLUDE_DST_OS_DIR)/%.h: \
################################################################################
CALENDARS_SRC := $(JDK_TOPDIR)/src/java.base/share/conf
$(LIB_DST_DIR)/calendars.properties: $(CALENDARS_SRC)/calendars.properties
$(call install-file)
BASE_CONF_FILES += $(LIB_DST_DIR)/calendars.properties
$(LIB_DST_DIR)/hijrah-config-umalqura.properties: $(CALENDARS_SRC)/hijrah-config-umalqura.properties
$(MKDIR) -p $(@D)
$(RM) $@
$(CP) $< $@
BASE_CONF_FILES += $(LIB_DST_DIR)/hijrah-config-umalqura.properties
################################################################################
ifneq ($(findstring $(OPENJDK_TARGET_OS), windows aix),)
TZMAPPINGS_SRC := $(JDK_TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS)/conf

@ -67,8 +67,11 @@ ifneq ($(FREETYPE_BUNDLE_LIB_PATH), )
FREETYPE_TARGET_LIB := $(JDK_OUTPUTDIR)/lib$(OPENJDK_TARGET_CPU_LIBDIR)/$(call SHARED_LIBRARY,freetype).6
endif
# We can't use $(install-file) in this rule because it preserves symbolic links and
# libfreetype.so is usually a symbolic link to something like libfreetype.so.6 on Unix.
$(FREETYPE_TARGET_LIB): $(FREETYPE_BUNDLE_LIB_PATH)/$(call SHARED_LIBRARY,freetype)
$(install-file)
$(MKDIR) -p $(@D)
$(CP) $< $@
ifeq ($(OPENJDK_BUILD_OS), windows)
$(CHMOD) +rx $@
endif

@ -21,4 +21,4 @@
# or visit www.oracle.com if you need additional information or have any
# questions.
#
tzdata2014g
tzdata2014i

@ -133,23 +133,13 @@ Zone Africa/Algiers 0:12:12 - LMT 1891 Mar 15 0:01
# See Africa/Lagos.
# Botswana
# From Paul Eggert (2013-02-21):
# Milne says they were regulated by the Cape Town Signal in 1899;
# assume they switched to 2:00 when Cape Town did.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Gaborone 1:43:40 - LMT 1885
1:30 - SAST 1903 Mar
2:00 - CAT 1943 Sep 19 2:00
2:00 1:00 CAST 1944 Mar 19 2:00
2:00 - CAT
# See Africa/Maputo.
# Burkina Faso
# See Africa/Abidjan.
# Burundi
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Bujumbura 1:57:28 - LMT 1890
2:00 - CAT
# See Africa/Maputo.
# Cameroon
# See Africa/Lagos.
@ -184,10 +174,7 @@ Zone Indian/Comoro 2:53:04 - LMT 1911 Jul # Moroni, Gran Comoro
3:00 - EAT
# Democratic Republic of the Congo
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Lubumbashi 1:49:52 - LMT 1897 Nov 9
2:00 - CAT
# The above is for the eastern part; see Africa/Lagos for the western part.
# See Africa/Lagos for the western part and Africa/Maputo for the eastern.
# Republic of the Congo
# See Africa/Lagos.
@ -339,7 +326,7 @@ Rule Egypt 2007 only - Sep Thu>=1 24:00 0 -
# Egypt is to change back to Daylight system on May 15
# http://english.ahram.org.eg/NewsContent/1/64/100735/Egypt/Politics-/Egypts-government-to-reapply-daylight-saving-time-.aspx
# From Gunther Vermier (2015-05-13):
# From Gunther Vermier (2014-05-13):
# our Egypt office confirms that the change will be at 15 May "midnight" (24:00)
# From Imed Chihi (2014-06-04):
@ -489,11 +476,7 @@ Zone Africa/Nairobi 2:27:16 - LMT 1928 Jul
3:00 - EAT
# Lesotho
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Maseru 1:50:00 - LMT 1903 Mar
2:00 - SAST 1943 Sep 19 2:00
2:00 1:00 SAST 1944 Mar 19 2:00
2:00 - SAST
# See Africa/Johannesburg.
# Liberia
# From Paul Eggert (2006-03-22):
@ -575,9 +558,7 @@ Zone Indian/Antananarivo 3:10:04 - LMT 1911 Jul
3:00 - EAT
# Malawi
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Blantyre 2:20:00 - LMT 1903 Mar
2:00 - CAT
# See Africa/Maputo.
# Mali
# Mauritania
@ -987,6 +968,13 @@ Zone Africa/El_Aaiun -0:52:48 - LMT 1934 Jan # El Aaiún
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Maputo 2:10:20 - LMT 1903 Mar
2:00 - CAT
Link Africa/Maputo Africa/Blantyre # Malawi
Link Africa/Maputo Africa/Bujumbura # Burundi
Link Africa/Maputo Africa/Gaborone # Botswana
Link Africa/Maputo Africa/Harare # Zimbabwe
Link Africa/Maputo Africa/Kigali # Rwanda
Link Africa/Maputo Africa/Lubumbashi # E Dem. Rep. of Congo
Link Africa/Maputo Africa/Lusaka # Zambia
# Namibia
# The 1994-04-03 transition is from Shanks & Pottenger.
@ -1054,9 +1042,7 @@ Zone Indian/Reunion 3:41:52 - LMT 1911 Jun # Saint-Denis
# Tromelin - inhabited until at least 1958
# Rwanda
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Kigali 2:00:16 - LMT 1935 Jun
2:00 - CAT
# See Africa/Maputo.
# St Helena
# See Africa/Abidjan.
@ -1100,6 +1086,9 @@ Rule SA 1943 1944 - Mar Sun>=15 2:00 0 -
Zone Africa/Johannesburg 1:52:00 - LMT 1892 Feb 8
1:30 - SAST 1903 Mar
2:00 SA SAST
Link Africa/Johannesburg Africa/Maseru # Lesotho
Link Africa/Johannesburg Africa/Mbabane # Swaziland
#
# Marion and Prince Edward Is
# scientific station since 1947
# no information
@ -1127,9 +1116,7 @@ Zone Africa/Khartoum 2:10:08 - LMT 1931
Link Africa/Khartoum Africa/Juba
# Swaziland
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Mbabane 2:04:24 - LMT 1903 Mar
2:00 - SAST
# See Africa/Johannesburg.
# Tanzania
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
@ -1250,11 +1237,5 @@ Zone Africa/Kampala 2:09:40 - LMT 1928 Jul
3:00 - EAT
# Zambia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Lusaka 1:53:08 - LMT 1903 Mar
2:00 - CAT
# Zimbabwe
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Harare 2:04:12 - LMT 1903 Mar
2:00 - CAT
# See Africa/Maputo.

@ -70,10 +70,11 @@
# 3:30 IRST IRDT Iran
# 4:00 GST Gulf*
# 5:30 IST India
# 7:00 ICT Indochina*
# 7:00 ICT Indochina, most times and locations*
# 7:00 WIB west Indonesia (Waktu Indonesia Barat)
# 8:00 WITA central Indonesia (Waktu Indonesia Tengah)
# 8:00 CST China
# 8:00 IDT Indochina, 1943-45, 1947-55, 1960-75 (some locations)*
# 8:00 JWST Western Standard Time (Japan, 1896/1937)*
# 9:00 JCST Central Standard Time (Japan, 1896/1937)
# 9:00 WIT east Indonesia (Waktu Indonesia Timur)
@ -294,12 +295,8 @@ Zone Asia/Rangoon 6:24:40 - LMT 1880 # or Yangon
6:30 - MMT # Myanmar Time
# Cambodia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Phnom_Penh 6:59:40 - LMT 1906 Jun 9
7:06:20 - SMT 1911 Mar 11 0:01 # Saigon MT?
7:00 - ICT 1912 May
8:00 - ICT 1931 May
7:00 - ICT
# See Asia/Bangkok.
# China
@ -916,6 +913,10 @@ Zone Asia/Kolkata 5:53:28 - LMT 1880 # Kolkata
# Indonesia
#
# From Paul Eggert (2014-09-06):
# The 1876 Report of the Secretary of the [US] Navy, p 306 says that Batavia
# civil time was 7:07:12.5; round to even for Jakarta.
#
# From Gwillim Law (2001-05-28), overriding Shanks & Pottenger:
# http://www.sumatera-inc.com/go_to_invest/about_indonesia.asp#standtime
# says that Indonesia's time zones changed on 1988-01-01. Looking at some
@ -1733,12 +1734,8 @@ Zone Asia/Kuwait 3:11:56 - LMT 1950
3:00 - AST
# Laos
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Vientiane 6:50:24 - LMT 1906 Jun 9 # or Viangchan
7:06:20 - SMT 1911 Mar 11 0:01 # Saigon MT?
7:00 - ICT 1912 May
8:00 - ICT 1931 May
7:00 - ICT
# See Asia/Bangkok.
# Lebanon
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
@ -2751,6 +2748,8 @@ Zone Asia/Dushanbe 4:35:12 - LMT 1924 May 2
Zone Asia/Bangkok 6:42:04 - LMT 1880
6:42:04 - BMT 1920 Apr # Bangkok Mean Time
7:00 - ICT
Link Asia/Bangkok Asia/Phnom_Penh # Cambodia
Link Asia/Bangkok Asia/Vientiane # Laos
# Turkmenistan
# From Shanks & Pottenger.
@ -2788,22 +2787,65 @@ Zone Asia/Tashkent 4:37:11 - LMT 1924 May 2
# Vietnam
# From Paul Eggert (2013-02-21):
# From Paul Eggert (2014-10-04):
# Milne gives 7:16:56 for the meridian of Saigon in 1899, as being
# used in Lower Laos, Cambodia, and Annam. But this is quite a ways
# from Saigon's location. For now, ignore this and stick with Shanks
# and Pottenger.
# and Pottenger for LMT before 1906.
# From Arthur David Olson (2008-03-18):
# The English-language name of Vietnam's most populous city is "Ho Chi Minh
# City"; use Ho_Chi_Minh below to avoid a name of more than 14 characters.
# From Shanks & Pottenger:
# From Paul Eggert (2014-10-21) after a heads-up from Trần Ngọc Quân:
# Trần Tiến Bình's authoritative book "Lịch Việt Nam: thế kỷ XX-XXI (1901-2100)"
# (Nhà xuất bản Văn Hoá - Thông Tin, Hanoi, 2005), pp 49-50,
# is quoted verbatim in:
# http://www.thoigian.com.vn/?mPage=P80D01
# is translated by Brian Inglis in:
# http://mm.icann.org/pipermail/tz/2014-October/021654.html
# and is the basis for the information below.
#
# The 1906 transition was effective July 1 and standardized Indochina to
# Phù Liễn Observatory, legally 104 deg. 17'17" east of Paris.
# It's unclear whether this meant legal Paris Mean Time (00:09:21) or
# the Paris Meridian (2 deg. 20'14.03" E); the former yields 07:06:30.1333...
# and the latter 07:06:29.333... so either way it rounds to 07:06:30,
# which is used below even though the modern-day Phù Liễn Observatory
# is closer to 07:06:31. Abbreviate Phù Liễn Mean Time as PLMT.
#
# The following transitions occurred in Indochina in general (before 1954)
# and in South Vietnam in particular (after 1954):
# To 07:00 on 1911-05-01.
# To 08:00 on 1942-12-31 at 23:00.
# To 09:00 in 1945-03-14 at 23:00.
# To 07:00 on 1945-09-02 in Vietnam.
# To 08:00 on 1947-04-01 in French-controlled Indochina.
# To 07:00 on 1955-07-01 in South Vietnam.
# To 08:00 on 1959-12-31 at 23:00 in South Vietnam.
# To 07:00 on 1975-06-13 in South Vietnam.
#
# Trần cites the following sources; it's unclear which supplied the info above.
#
# Hoàng Xuân Hãn: "Lịch và lịch Việt Nam". Tập san Khoa học Xã hội,
# No. 9, Paris, February 1982.
#
# Lê Thành Lân: "Lịch và niên biểu lịch sử hai mươi thế kỷ (0001-2010)",
# NXB Thống kê, Hanoi, 2000.
#
# Lê Thành Lân: "Lịch hai thế kỷ (1802-2010) và các lịch vĩnh cửu",
# NXB Thuận Hoá, Huế, 1995.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Ho_Chi_Minh 7:06:40 - LMT 1906 Jun 9
7:06:20 - SMT 1911 Mar 11 0:01 # Saigon MT?
7:00 - ICT 1912 May
8:00 - ICT 1931 May
Zone Asia/Ho_Chi_Minh 7:06:40 - LMT 1906 Jul 1
7:06:30 - PLMT 1911 May 1
7:00 - ICT 1942 Dec 31 23:00
8:00 - IDT 1945 Mar 14 23:00
9:00 - JST 1945 Sep 2
7:00 - ICT 1947 Apr 1
8:00 - IDT 1955 Jul 1
7:00 - ICT 1959 Dec 31 23:00
8:00 - IDT 1975 Jun 13
7:00 - ICT
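As a cross-check of the quoted arithmetic: 104 deg 17'17" is 375437 arcseconds, 15 arcseconds of longitude equal one second of time, so Phù Liễn sits 6:57:09.133 east of Paris, and adding legal Paris Mean Time (0:09:21) gives the 7:06:30.133 cited above. A small sketch of that computation (our own code, not part of tzdata):

public class MeridianSketch {
    public static void main(String[] args) {
        long arcSeconds = 104L * 3600 + 17 * 60 + 17; // 104 deg 17'17" east of Paris
        double eastOfParis = arcSeconds / 15.0;       // 25029.133... s = 6:57:09.133
        double parisMeanTime = 9 * 60 + 21;           // legal PMT, 00:09:21
        double total = eastOfParis + parisMeanTime;   // 25590.133... s
        System.out.printf("%d:%02d:%06.3f%n",
                (long) total / 3600, ((long) total % 3600) / 60, total % 60);
        // prints 7:06:30.133
    }
}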
# Yemen

@ -354,20 +354,27 @@ Zone Indian/Cocos 6:27:40 - LMT 1900
# Fiji will end DST on 2014-01-19 02:00:
# http://www.fiji.gov.fj/Media-Center/Press-Releases/DAYLIGHT-SAVINGS-TO-END-THIS-MONTH-%281%29.aspx
# From Paul Eggert (2014-01-10):
# For now, guess that Fiji springs forward the Sunday before the fourth
# Monday in October, and springs back the penultimate Sunday in January.
# This is ad hoc, but matches recent practice.
# From Ken Rylander (2014-10-20):
# DST will start Nov. 2 this year.
# http://www.fiji.gov.fj/Media-Center/Press-Releases/DAYLIGHT-SAVING-STARTS-ON-SUNDAY,-NOVEMBER-2ND.aspx
# From Paul Eggert (2014-10-20):
# For now, guess DST from 02:00 the first Sunday in November to
# 03:00 the first Sunday on or after January 18. Although ad hoc, it
# matches this year's plan and seems more likely to match future
# practice than guessing no DST.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Fiji 1998 1999 - Nov Sun>=1 2:00 1:00 S
Rule Fiji 1999 2000 - Feb lastSun 3:00 0 -
Rule Fiji 2009 only - Nov 29 2:00 1:00 S
Rule Fiji 2010 only - Mar lastSun 3:00 0 -
Rule Fiji 2010 max - Oct Sun>=21 2:00 1:00 S
Rule Fiji 2010 2013 - Oct Sun>=21 2:00 1:00 S
Rule Fiji 2011 only - Mar Sun>=1 3:00 0 -
Rule Fiji 2012 2013 - Jan Sun>=18 3:00 0 -
Rule Fiji 2014 max - Jan Sun>=18 2:00 0 -
Rule Fiji 2014 only - Jan Sun>=18 2:00 0 -
Rule Fiji 2014 max - Nov Sun>=1 2:00 1:00 S
Rule Fiji 2015 max - Jan Sun>=18 3:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Fiji 11:55:44 - LMT 1915 Oct 26 # Suva
12:00 Fiji FJ%sT # Fiji Time
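In tzdata rule syntax, "Jan Sun>=18" means the first Sunday falling on or after January 18, so the guessed end of DST floats from year to year. A quick way to compute such a date (a java.time sketch, not anything tzdata itself runs):

import java.time.DayOfWeek;
import java.time.LocalDate;
import java.time.temporal.TemporalAdjusters;

public class FijiRuleSketch {
    public static void main(String[] args) {
        // "Jan Sun>=18": first Sunday on or after January 18.
        LocalDate end2015 = LocalDate.of(2015, 1, 18)
                .with(TemporalAdjusters.nextOrSame(DayOfWeek.SUNDAY));
        System.out.println(end2015); // 2015-01-18, which is a Sunday
    }
}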
@ -542,6 +549,30 @@ Zone Pacific/Palau 8:57:56 - LMT 1901 # Koror
Zone Pacific/Port_Moresby 9:48:40 - LMT 1880
9:48:32 - PMMT 1895 # Port Moresby Mean Time
10:00 - PGT # Papua New Guinea Time
#
# From Paul Eggert (2014-10-13):
# Base the Bougainville entry on the Arawa-Kieta region, which appears to have
# the most people even though it was devastated in the Bougainville Civil War.
#
# Although Shanks gives 1942-03-15 / 1943-11-01 for JST, these dates
# are apparently rough guesswork from the starts of military campaigns.
# The World War II entries below are instead based on Arawa-Kieta.
# The Japanese occupied Kieta in July 1942,
# according to the Pacific War Online Encyclopedia
# http://pwencycl.kgbudge.com/B/o/Bougainville.htm
# and seem to have controlled it until their 1945-08-21 surrender.
#
# The Autonomous Region of Bougainville plans to switch from UTC+10 to UTC+11
# on 2014-12-28 at 02:00. They call UTC+11 "Bougainville Standard Time";
# abbreviate this as BST. See:
# http://www.bougainville24.com/bougainville-issues/bougainville-gets-own-timezone/
#
Zone Pacific/Bougainville 10:22:16 - LMT 1880
9:48:32 - PMMT 1895
10:00 - PGT 1942 Jul
9:00 - JST 1945 Aug 21
10:00 - PGT 2014 Dec 28 2:00
11:00 - BST
# Pitcairn
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
@ -826,6 +857,7 @@ Zone Pacific/Wallis 12:15:20 - LMT 1901
# 10:00 AEST AEDT Eastern Australia
# 10:00 ChST Chamorro
# 10:30 LHST LHDT Lord Howe*
# 11:00 BST Bougainville*
# 11:30 NZMT NZST New Zealand through 1945
# 12:00 NZST NZDT New Zealand 1946-present
# 12:15 CHAST Chatham through 1945*

@ -91,10 +91,11 @@
# 0:00 WET WEST WEMT Western Europe
# 0:19:32.13 AMT NST Amsterdam, Netherlands Summer (1835-1937)*
# 0:20 NET NEST Netherlands (1937-1940)*
# 1:00 BST British Standard (1968-1971)
# 1:00 CET CEST CEMT Central Europe
# 1:00:14 SET Swedish (1879-1899)*
# 2:00 EET EEST Eastern Europe
# 3:00 FET Further-eastern Europe*
# 3:00 FET Further-eastern Europe (2011-2014)*
# 3:00 MSK MSD MSM* Moscow
# From Peter Ilieve (1994-12-04),
@ -746,6 +747,13 @@ Zone Europe/Vienna 1:05:21 - LMT 1893 Apr
# http://www.belta.by/ru/all_news/society/V-Belarusi-otmenjaetsja-perexod-na-sezonnoe-vremja_i_572952.html
# http://naviny.by/rubrics/society/2011/09/16/ic_articles_116_175144/
# http://news.tut.by/society/250578.html
#
# From Alexander Bokovoy (2014-10-09):
# Belarussian government decided against changing to winter time....
# http://eng.belta.by/all_news/society/Belarus-decides-against-adjusting-time-in-Russias-wake_i_76335.html
# From Paul Eggert (2014-10-08):
# Hence Belarus can share time zone abbreviations with Moscow again.
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Minsk 1:50:16 - LMT 1880
1:50 - MMT 1924 May 2 # Minsk Mean Time
@ -758,7 +766,8 @@ Zone Europe/Minsk 1:50:16 - LMT 1880
2:00 - EET 1992 Mar 29 0:00s
2:00 1:00 EEST 1992 Sep 27 0:00s
2:00 Russia EE%sT 2011 Mar 27 2:00s
3:00 - FET
3:00 - FET 2014 Oct 26 1:00s
3:00 - MSK
# Belgium
#
@ -2524,7 +2533,7 @@ Zone Asia/Novosibirsk 5:31:40 - LMT 1919 Dec 14 6:00
# The Kemerovo region will remain at UTC+7 through the 2014-10-26 change, thus
# realigning itself with KRAT.
Zone Asia/Novokuznetsk 5:48:48 - NMT 1920 Jan 6
Zone Asia/Novokuznetsk 5:48:48 - LMT 1924 May 1
6:00 - KRAT 1930 Jun 21 # Krasnoyarsk Time
7:00 Russia KRA%sT 1991 Mar 31 2:00s
6:00 Russia KRA%sT 1992 Jan 19 2:00s

@ -300,6 +300,12 @@ Zone PST8PDT -8:00 US P%sT
# time zone, but we do go by the Eastern time zone because so many people work
# in Columbus."
# From Paul Eggert (2014-09-06):
# Monthly Notices of the Royal Astronomical Society 44, 4 (1884-02-08), 208
# says that New York City Hall time was 3 minutes 58.4 seconds fast of
# Eastern time (i.e., -4:56:01.6) just before the 1883 switch. Round to the
# nearest second.
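Spelled out: Eastern time is -5:00:00, and 3 minutes 58.4 seconds fast of that is -5:00:00 + 0:03:58.4 = -4:56:01.6, which rounds to a whole-second offset of -4:56:02. A one-off check of that rounding (our own sketch, not part of tzdata):

public class NycOffsetSketch {
    public static void main(String[] args) {
        double eastern = -5 * 3600;                   // -5:00:00 in seconds
        double cityHall = eastern + (3 * 60 + 58.4);  // -17761.6 s = -4:56:01.6
        long rounded = Math.round(cityHall);          // -17762 s = -4:56:02
        System.out.println(cityHall + " -> " + rounded);
    }
}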
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER
Rule NYC 1920 only - Mar lastSun 2:00 1:00 D
Rule NYC 1920 only - Oct lastSun 2:00 0 S
@ -1118,17 +1124,16 @@ Zone America/Menominee -5:50:27 - LMT 1885 Sep 18 12:00
# An amendment to the Interpretation Act was registered on February 19/2007....
# http://action.attavik.ca/home/justice-gn/attach/2007/gaz02part2.pdf
# From Paul Eggert (2006-04-25):
# From Paul Eggert (2014-10-18):
# H. David Matthews and Mary Vincent's map
# "It's about TIME", _Canadian Geographic_ (September-October 1998)
# http://www.canadiangeographic.ca/Magazine/SO98/geomap.asp
# http://www.canadiangeographic.ca/Magazine/SO98/alacarte.asp
# contains detailed boundaries for regions observing nonstandard
# time and daylight saving time arrangements in Canada circa 1998.
#
# INMS, the Institute for National Measurement Standards in Ottawa, has
# information about standard and daylight saving time zones in Canada.
# http://inms-ienm.nrc-cnrc.gc.ca/en/time_services/daylight_saving_e.php
# (updated periodically).
# National Research Council Canada maintains info about time zones and DST.
# http://www.nrc-cnrc.gc.ca/eng/services/time/time_zones.html
# http://www.nrc-cnrc.gc.ca/eng/services/time/faq/index.html#Q5
# Its unofficial information is often taken from Matthews and Vincent.
# From Paul Eggert (2006-06-27):
@ -1993,10 +1998,7 @@ Zone America/Creston -7:46:04 - LMT 1884
# [Also see <http://www.nunatsiaq.com/nunavut/nvt10309_06.html> (2001-03-09).]
# From Gwillim Law (2005-05-21):
# According to maps at
# http://inms-ienm.nrc-cnrc.gc.ca/images/time_services/TZ01SWE.jpg
# http://inms-ienm.nrc-cnrc.gc.ca/images/time_services/TZ01SSE.jpg
# (both dated 2003), and
# According to ...
# http://www.canadiangeographic.ca/Magazine/SO98/geomap.asp
# (from a 1998 Canadian Geographic article), the de facto and de jure time
# for Southampton Island (at the north end of Hudson Bay) is UTC-5 all year
@ -2005,9 +2007,11 @@ Zone America/Creston -7:46:04 - LMT 1884
# predates the creation of Nunavut, it probably goes back many years....
# The Inuktitut name of Coral Harbour is Sallit, but it's rarely used.
#
# From Paul Eggert (2005-07-26):
# From Paul Eggert (2014-10-17):
# For lack of better information, assume that Southampton Island observed
# daylight saving only during wartime.
# daylight saving only during wartime. Gwillim Law's email also
# mentioned maps now maintained by National Research Council Canada;
# see above for an up-to-date link.
# From Chris Walton (2007-03-01):
# ... the community of Resolute (located on Cornwallis Island in
@ -3008,10 +3012,21 @@ Zone America/Tegucigalpa -5:48:52 - LMT 1921 Apr
# Shanks & Pottenger give -5:07:12, but Milne records -5:07:10.41 from an
# unspecified official document, and says "This time is used throughout the
# island". Go with Milne. Round to the nearest second as required by zic.
#
# Shanks & Pottenger give April 28 for the 1974 spring-forward transition, but
# Lance Neita writes that Prime Minister Michael Manley decreed it January 5.
# Assume Neita meant Jan 6 02:00, the same as the US. Neita also writes that
# Manley's supporters associated this act with Manley's nickname "Joshua"
# (recall that in the Bible the sun stood still at Joshua's request),
# and with the Rod of Correction which Manley said he had received from
# Haile Selassie, Emperor of Ethiopia. See:
# Neita L. The politician in all of us. Jamaica Observer 2014-09-20
# http://www.jamaicaobserver.com/columns/The-politician-in-all-of-us_17573647
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Jamaica -5:07:11 - LMT 1890 # Kingston
-5:07:11 - KMT 1912 Feb # Kingston Mean Time
-5:00 - EST 1974 Apr 28 2:00
-5:00 - EST 1974
-5:00 US E%sT 1984
-5:00 - EST

@ -330,7 +330,8 @@ PE -1203-07703 America/Lima
PF -1732-14934 Pacific/Tahiti Society Islands
PF -0900-13930 Pacific/Marquesas Marquesas Islands
PF -2308-13457 Pacific/Gambier Gambier Islands
PG -0930+14710 Pacific/Port_Moresby
PG -0930+14710 Pacific/Port_Moresby most locations
PG -0613+15534 Pacific/Bougainville Bougainville
PH +1435+12100 Asia/Manila
PK +2452+06703 Asia/Karachi
PL +5215+02100 Europe/Warsaw

@ -75,6 +75,7 @@ SUNWprivate_1.1 {
Java_java_io_FileDescriptor_initIDs;
Java_java_io_FileDescriptor_sync;
Java_java_io_FileDescriptor_getAppend;
Java_java_io_FileInputStream_available;
Java_java_io_FileInputStream_close0;
Java_java_io_FileInputStream_initIDs;

@ -54,14 +54,12 @@ PROFILE_1_JRE_LIB_FILES := \
$(OPENJDK_TARGET_CPU_LEGACY_LIB)/server/$(LIBRARY_PREFIX)jvm$(SHARED_LIBRARY_SUFFIX) \
$(OPENJDK_TARGET_CPU_LEGACY_LIB)/server/$(LIBRARY_PREFIX)jvm.diz \
$(OPENJDK_TARGET_CPU_LEGACY_LIB)/server/Xusage.txt \
calendars.properties \
classlist \
ext/localedata.jar \
ext/meta-index \
ext/sunec.jar \
ext/sunjce_provider.jar \
ext/sunpkcs11.jar \
hijrah-config-umalqura.properties \
jce.jar \
jsse.jar \
logging.properties \

@ -1588,7 +1588,7 @@ public class File
/**
* A convenience method to set the owner's read permission for this abstract
* pathname. On some platforms it may be possible to start the Java virtual
* machine with special privileges that allow it to read files that that are
* machine with special privileges that allow it to read files that are
* marked as unreadable.
*
* <p>An invocation of this method of the form <tt>file.setReadable(arg)</tt>

@ -26,6 +26,8 @@
package java.io;
import java.nio.channels.FileChannel;
import sun.misc.SharedSecrets;
import sun.misc.JavaIOFileDescriptorAccess;
import sun.nio.ch.FileChannelImpl;
@ -52,16 +54,17 @@ import sun.nio.ch.FileChannelImpl;
public
class FileOutputStream extends OutputStream
{
/**
* Access to FileDescriptor internals.
*/
private static final JavaIOFileDescriptorAccess fdAccess =
SharedSecrets.getJavaIOFileDescriptorAccess();
/**
* The system dependent file descriptor.
*/
private final FileDescriptor fd;
/**
* True if the file is opened for append.
*/
private final boolean append;
/**
* The associated channel, initialized lazily.
*/
@ -207,7 +210,6 @@ class FileOutputStream extends OutputStream
}
this.fd = new FileDescriptor();
fd.attach(this);
this.append = append;
this.path = name;
open(name, append);
@ -245,7 +247,6 @@ class FileOutputStream extends OutputStream
security.checkWrite(fdObj);
}
this.fd = fdObj;
this.append = false;
this.path = null;
fd.attach(this);
@ -287,7 +288,7 @@ class FileOutputStream extends OutputStream
* @exception IOException if an I/O error occurs.
*/
public void write(int b) throws IOException {
write(b, append);
write(b, fdAccess.getAppend(fd));
}
/**
@ -310,7 +311,7 @@ class FileOutputStream extends OutputStream
* @exception IOException if an I/O error occurs.
*/
public void write(byte b[]) throws IOException {
writeBytes(b, 0, b.length, append);
writeBytes(b, 0, b.length, fdAccess.getAppend(fd));
}
/**
@ -323,7 +324,7 @@ class FileOutputStream extends OutputStream
* @exception IOException if an I/O error occurs.
*/
public void write(byte b[], int off, int len) throws IOException {
writeBytes(b, off, len, append);
writeBytes(b, off, len, fdAccess.getAppend(fd));
}
/**
@ -395,7 +396,7 @@ class FileOutputStream extends OutputStream
public FileChannel getChannel() {
synchronized (this) {
if (channel == null) {
channel = FileChannelImpl.open(fd, path, false, true, append, this);
channel = FileChannelImpl.open(fd, path, false, true, this);
}
return channel;
}
