Erik Trimble 2011-05-06 14:41:00 -07:00
commit cee74b9a63
103 changed files with 5378 additions and 702 deletions

View File

@@ -257,7 +257,7 @@ RMIC = $(JDK_HOME)/bin/rmic
all: filelist
@mkdir -p $(OUTPUT_DIR)
@echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
$(JAVAC) -source 1.4 -classpath $(CLASSPATH) -deprecation -sourcepath $(SRC_DIR) -g -d $(OUTPUT_DIR) @filelist
$(JAVAC) -classpath $(CLASSPATH) -deprecation -sourcepath $(SRC_DIR) -g -d $(OUTPUT_DIR) @filelist
$(RMIC) -classpath $(OUTPUT_DIR) -d $(OUTPUT_DIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
rm -f $(OUTPUT_DIR)/sun/jvm/hotspot/utilities/soql/sa.js
cp $(SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(OUTPUT_DIR)/sun/jvm/hotspot/utilities/soql
@@ -269,7 +269,7 @@ all: filelist
allprof: filelist
@mkdir -p $(OUTPUT_DIR)
@echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
$(JAVAC) -source 1.4 -J-Xprof -classpath $(CLASSPATH) -deprecation -sourcepath $(SRC_DIR) -g -d $(OUTPUT_DIR) @filelist
$(JAVAC) -J-Xprof -classpath $(CLASSPATH) -deprecation -sourcepath $(SRC_DIR) -g -d $(OUTPUT_DIR) @filelist
$(RMIC) -classpath $(OUTPUT_DIR) -d $(OUTPUT_DIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
rm -f $(OUTPUT_DIR)/sun/jvm/hotspot/utilities/soql/sa.js
cp $(SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(OUTPUT_DIR)/sun/jvm/hotspot/utilities/soql

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -420,7 +420,22 @@ extern uintptr_t Ppltdest(struct ps_prochandle *, uintptr_t, int *);
/*
* Stack frame iteration interface.
*/
#ifdef SOLARIS_11_B159_OR_LATER
/* building on Nevada-B159 or later so define the new callback */
typedef int proc_stack_f(
void *, /* the cookie given to Pstack_iter() */
const prgregset_t, /* the frame's registers */
uint_t, /* argc for the frame's function */
const long *, /* argv for the frame's function */
int, /* bitwise flags describing the frame (see below) */
int); /* a signal number */
#define PR_SIGNAL_FRAME 1 /* called by a signal handler */
#define PR_FOUND_SIGNAL 2 /* we found the corresponding signal number */
#else
/* building on Nevada-B158 or earlier so define the old callback */
typedef int proc_stack_f(void *, const prgregset_t, uint_t, const long *);
#endif
extern int Pstack_iter(struct ps_prochandle *,
const prgregset_t, proc_stack_f *, void *);
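For readers who have not used libproc's stack walker, here is a minimal sketch of a callback written against the newer six-argument shape and registered with Pstack_iter(). It is illustrative only and not part of the changeset; the handle, cookie and function names are invented.

```cpp
// Hypothetical walker using the Nevada-B159-or-later callback signature.
static int walk_frame(void *cookie, const prgregset_t regs, uint_t argc,
                      const long *argv, int flags, int sig) {
  if (flags & PR_SIGNAL_FRAME) {
    // this frame was pushed by a signal handler; 'sig' names the signal
    // when PR_FOUND_SIGNAL is also set
  }
  return 0;  // returning non-zero stops the iteration
}

// Pstack_iter(ph, regs, walk_frame, my_cookie);
```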

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -101,7 +101,23 @@ extern int Pstop(struct ps_prochandle *, uint_t);
/*
* Stack frame iteration interface.
*/
#ifdef SOLARIS_11_B159_OR_LATER
/* building on Nevada-B159 or later so define the new callback */
typedef int proc_stack_f(
void *, /* the cookie given to Pstack_iter() */
const prgregset_t, /* the frame's registers */
uint_t, /* argc for the frame's function */
const long *, /* argv for the frame's function */
int, /* bitwise flags describing the frame (see below) */
int); /* a signal number */
#define PR_SIGNAL_FRAME 1 /* called by a signal handler */
#define PR_FOUND_SIGNAL 2 /* we found the corresponding signal number */
#else
/* building on Nevada-B158 or earlier so define the old callback */
typedef int proc_stack_f(void *, const prgregset_t, uint_t, const long *);
#endif
extern int Pstack_iter(struct ps_prochandle *,
const prgregset_t, proc_stack_f *, void *);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,9 @@
#include "salibproc.h"
#include "sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal.h"
#ifndef SOLARIS_11_B159_OR_LATER
#include <sys/utsname.h>
#endif
#include <thread_db.h>
#include <strings.h>
#include <limits.h>
@@ -40,8 +43,22 @@
#define SYMBOL_BUF_SIZE 256
#define ERR_MSG_SIZE (PATH_MAX + 256)
// debug mode
// debug modes
static int _libsaproc_debug = 0;
#ifndef SOLARIS_11_B159_OR_LATER
static bool _Pstack_iter_debug = false;
static void dprintf_2(const char* format,...) {
if (_Pstack_iter_debug) {
va_list alist;
va_start(alist, format);
fputs("Pstack_iter DEBUG: ", stderr);
vfprintf(stderr, format, alist);
va_end(alist);
}
}
#endif // !SOLARIS_11_B159_OR_LATER
static void print_debug(const char* format,...) {
if (_libsaproc_debug) {
@@ -450,6 +467,7 @@ fill_load_object_list(void *cd, const prmap_t* pmp, const char* obj_name) {
return 0;
}
// Pstack_iter() proc_stack_f callback prior to Nevada-B159
static int
fill_cframe_list(void *cd, const prgregset_t regs, uint_t argc, const long *argv) {
DebuggerWith2Objects* dbgo2 = (DebuggerWith2Objects*) cd;
@@ -472,6 +490,14 @@ fill_cframe_list(void *cd, const prgregset_t regs, uint_t argc, const long *argv
return 0;
}
// Pstack_iter() proc_stack_f callback in Nevada-B159 or later
/*ARGSUSED*/
static int
wrapper_fill_cframe_list(void *cd, const prgregset_t regs, uint_t argc,
const long *argv, int frame_flags, int sig) {
return(fill_cframe_list(cd, regs, argc, argv));
}
// part of the class sharing workaround
// FIXME: !!HACK ALERT!!
@@ -970,6 +996,11 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_fill
TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
}
#ifndef SOLARIS_11_B159_OR_LATER
// building on Nevada-B158 or earlier so more hoops to jump through
static bool has_newer_Pstack_iter = false; // older version by default
#endif
/*
* Class: sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal
* Method: fillCFrameList0
@@ -997,7 +1028,24 @@ JNIEXPORT jobject JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_f
env->ReleaseLongArrayElements(regsArray, ptr, JNI_ABORT);
CHECK_EXCEPTION_(0);
Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs, fill_cframe_list, &dbgo2);
#ifdef SOLARIS_11_B159_OR_LATER
// building on Nevada-B159 or later so use the new callback
Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
wrapper_fill_cframe_list, &dbgo2);
#else
// building on Nevada-B158 or earlier so figure out which callback to use
if (has_newer_Pstack_iter) {
// Since we're building on Nevada-B158 or earlier, we have to
// cast wrapper_fill_cframe_list to make the compiler happy.
Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
(proc_stack_f *)wrapper_fill_cframe_list, &dbgo2);
} else {
Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
fill_cframe_list, &dbgo2);
}
#endif // SOLARIS_11_B159_OR_LATER
return dbgo2.obj;
}
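As a reading aid: the cast only silences the compile-time prototype mismatch. When the run-time libproc is detected as "newer" it really does pass all six arguments, so the wrapper's extra parameters are always filled in; when the older libproc is detected the plain four-argument callback is used instead. A stand-alone sketch of the pattern, with invented names:

```cpp
// Sketch only: one callback body, two possible call ABIs chosen at run time.
typedef int old_stack_cb(void *, const prgregset_t, uint_t, const long *);

static int new_style_cb(void *cd, const prgregset_t regs, uint_t argc,
                        const long *argv, int frame_flags, int sig) {
  // extra arguments are ignored; forward to the common logic
  return common_fill(cd, regs, argc, argv);   // 'common_fill' is hypothetical
}

// if (has_newer_Pstack_iter)
//   Pstack_iter(ph, regs, (old_stack_cb *) new_style_cb, cd);
// else
//   Pstack_iter(ph, regs, common_fill, cd);
```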
@@ -1218,6 +1266,102 @@ JNIEXPORT jstring JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_d
return res;
}
#ifndef SOLARIS_11_B159_OR_LATER
// Determine if the OS we're running on has the newer version
// of libproc's Pstack_iter.
//
// Set env var PSTACK_ITER_DEBUG=true to debug this logic.
// Set env var PSTACK_ITER_DEBUG_RELEASE to simulate a 'release' value.
// Set env var PSTACK_ITER_DEBUG_VERSION to simulate a 'version' value.
//
// frankenputer 'uname -r -v': 5.10 Generic_141445-09
// jurassic 'uname -r -v': 5.11 snv_164
// lonepeak 'uname -r -v': 5.11 snv_127
//
static void set_has_newer_Pstack_iter(JNIEnv *env) {
static bool done_set = false;
if (done_set) {
// already set has_newer_Pstack_iter
return;
}
struct utsname name;
if (uname(&name) == -1) {
THROW_NEW_DEBUGGER_EXCEPTION("uname() failed!");
}
dprintf_2("release='%s' version='%s'\n", name.release, name.version);
if (_Pstack_iter_debug) {
char *override = getenv("PSTACK_ITER_DEBUG_RELEASE");
if (override != NULL) {
strncpy(name.release, override, SYS_NMLN - 1);
name.release[SYS_NMLN - 2] = '\0';
dprintf_2("overriding with release='%s'\n", name.release);
}
override = getenv("PSTACK_ITER_DEBUG_VERSION");
if (override != NULL) {
strncpy(name.version, override, SYS_NMLN - 1);
name.version[SYS_NMLN - 2] = '\0';
dprintf_2("overriding with version='%s'\n", name.version);
}
}
// the major number corresponds to the old SunOS major number
int major = atoi(name.release);
if (major >= 6) {
dprintf_2("release is SunOS 6 or later\n");
has_newer_Pstack_iter = true;
done_set = true;
return;
}
if (major < 5) {
dprintf_2("release is SunOS 4 or earlier\n");
done_set = true;
return;
}
// some SunOS 5.* build so now check for Solaris versions
char *dot = strchr(name.release, '.');
int minor = 0;
if (dot != NULL) {
// release is major.minor format
*dot = NULL;
minor = atoi(dot + 1);
}
if (minor <= 10) {
dprintf_2("release is Solaris 10 or earlier\n");
done_set = true;
return;
} else if (minor >= 12) {
dprintf_2("release is Solaris 12 or later\n");
has_newer_Pstack_iter = true;
done_set = true;
return;
}
// some Solaris 11 build so now check for internal build numbers
if (strncmp(name.version, "snv_", 4) != 0) {
dprintf_2("release is Solaris 11 post-GA or later\n");
has_newer_Pstack_iter = true;
done_set = true;
return;
}
// version begins with "snv_" so a pre-GA build of Solaris 11
int build = atoi(&name.version[4]);
if (build >= 159) {
dprintf_2("release is Nevada-B159 or later\n");
has_newer_Pstack_iter = true;
} else {
dprintf_2("release is Nevada-B158 or earlier\n");
}
done_set = true;
}
#endif // !SOLARIS_11_B159_OR_LATER
/*
* Class: sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal
* Method: initIDs
@@ -1237,6 +1381,14 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_init
if (libproc_handle == 0)
THROW_NEW_DEBUGGER_EXCEPTION("can't load libproc.so, if you are using Solaris 5.7 or below, copy libproc.so from 5.8!");
#ifndef SOLARIS_11_B159_OR_LATER
_Pstack_iter_debug = getenv("PSTACK_ITER_DEBUG") != NULL;
set_has_newer_Pstack_iter(env);
CHECK_EXCEPTION;
dprintf_2("has_newer_Pstack_iter=%d\n", has_newer_Pstack_iter);
#endif
p_ps_prochandle_ID = env->GetFieldID(clazz, "p_ps_prochandle", "J");
CHECK_EXCEPTION;
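To make the classification concrete, the three sample machines quoted in the comment land as follows when walked through the checks above: '5.10 Generic_141445-09' parses as major 5, minor 10, so it is treated as Solaris 10 or earlier and keeps the old Pstack_iter; '5.11 snv_127' is a pre-GA Solaris 11 build below 159, so it also keeps the old callback; '5.11 snv_164' is Nevada-B159 or later, so has_newer_Pstack_iter becomes true.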

View File

@@ -24,7 +24,8 @@
# This file defines variables and macros which are used in the makefiles to
# allow distributions to augment or replace common hotspot code with
# distribution-specific source files.
# distribution-specific source files. This capability is disabled when
# an OPENJDK build is requested, unless HS_ALT_SRC_REL has been set externally.
# Requires: GAMMADIR
# Provides:
@@ -33,14 +34,17 @@
HS_COMMON_SRC_REL=src
# This needs to be changed to a more generic location, but we keep it as this
# for now for compatibility
HS_ALT_SRC_REL=src/closed
ifneq ($(OPENJDK),true)
# This needs to be changed to a more generic location, but we keep it
# as this for now for compatibility
HS_ALT_SRC_REL=src/closed
else
HS_ALT_SRC_REL=NO_SUCH_PATH
endif
HS_COMMON_SRC=$(GAMMADIR)/$(HS_COMMON_SRC_REL)
HS_ALT_SRC=$(GAMMADIR)/$(HS_ALT_SRC_REL)
## altsrc-equiv
#
# Convert a common source path to an alternative source path

View File

@@ -0,0 +1,32 @@
#
# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# This file format must remain compatible with both
# GNU Makefile and Microsoft nmake formats.
#
# Don't put quotes (fail windows build).
HOTSPOT_VM_DISTRO=Java HotSpot(TM)
COMPANY_NAME=Sun Microsystems, Inc.
PRODUCT_NAME=Java(TM) Platform SE

View File

@@ -205,7 +205,7 @@ SONAMEFLAG = -Xlinker -soname=SONAME
SHARED_FLAG = -shared
# Keep symbols even if they are not used
AOUT_FLAGS += -export-dynamic
AOUT_FLAGS += -Xlinker -export-dynamic
#------------------------------------------------------------------------
# Debug flags

View File

@@ -102,6 +102,10 @@ CFLAGS += $(CFLAGS/NOEX)
CFLAGS += $(EXTRA_CFLAGS)
LFLAGS += $(EXTRA_CFLAGS)
# Don't set executable bit on stack segment
# the same could be done by separate execstack command
LFLAGS += -Xlinker -z -Xlinker noexecstack
LIBS += -lm -ldl -lpthread
# By default, link the *.o into the library, not the executable.
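A brief note on the spelling: each -Xlinker forwards exactly one token to the linker, so '-Xlinker -z -Xlinker noexecstack' is the g++ driver form of the linker option '-z noexecstack' (the shorthand '-Wl,-z,noexecstack' is equivalent), which marks the stack segment of the produced objects non-executable.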

View File

@@ -1,5 +1,5 @@
#
# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -56,6 +56,30 @@ else
SA_LFLAGS += -mt -xnolib -norunpath
endif
# The libproc Pstack_iter() interface changed in Nevada-B159.
# This logic needs to match
# agent/src/os/solaris/proc/saproc.cpp: set_has_newer_Pstack_iter():
# - skip SunOS 4 or older
# - skip Solaris 10 or older
# - skip two digit Nevada builds
# - skip three digit Nevada builds thru 149
# - skip Nevada builds 150-158
SOLARIS_11_B159_OR_LATER := \
$(shell uname -r -v \
| sed -n ' \
/^[0-3]\. /b \
/^5\.[0-9] /b \
/^5\.10 /b \
/ snv_[0-9][0-9]$/b \
/ snv_[01][0-4][0-9]$/b \
/ snv_15[0-8]$/b \
s/.*/-DSOLARIS_11_B159_OR_LATER/p \
')
# Uncomment the following to simulate building on Nevada-B159 or later
# when actually building on Nevada-B158 or earlier:
#SOLARIS_11_B159_OR_LATER=-DSOLARIS_11_B159_OR_LATER
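Worked through on the sample 'uname -r -v' strings quoted earlier in this changeset: '5.10 Generic_141445-09' is caught by the /^5\.10 /b branch, so nothing is printed and the macro stays undefined; '5.11 snv_127' matches the three-digit / snv_[01][0-4][0-9]$/b branch and is likewise skipped; '5.11 snv_164' falls through every branch, so the final substitution prints -DSOLARIS_11_B159_OR_LATER and the define is passed to the saproc.cpp compile below.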
$(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
@@ -68,6 +92,7 @@ $(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
-I$(GENERATED) \
-I$(BOOT_JAVA_HOME)/include \
-I$(BOOT_JAVA_HOME)/include/$(Platform_os_family) \
$(SOLARIS_11_B159_OR_LATER) \
$(SASRCFILES) \
$(SA_LFLAGS) \
-o $@ \

View File

@@ -100,11 +100,6 @@ JVM_CHECK_SYMBOLS = $(NM) -u -p $(LIBJVM.o) | \
LINK_LIB.CC/PRE_HOOK += $(JVM_CHECK_SYMBOLS) || exit 1;
# Some interfaces (_lwp_create) changed with LP64 and Solaris 7
SOLARIS_7_OR_LATER := \
$(shell uname -r | awk -F. '{ if ($$2 >= 7) print "-DSOLARIS_7_OR_LATER"; }')
CFLAGS += ${SOLARIS_7_OR_LATER}
# New architecture options started in SS12 (5.9), we need both styles to build.
# The older arch options for SS11 (5.8) or older and also for /usr/ccs/bin/as.
# Note: default for 32bit sparc is now the same as v8plus, so the

View File

@@ -1,5 +1,5 @@
#
# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,25 @@ VARIANT_TEXT=Kernel
# or make/hotspot_distro.
!ifndef HOTSPOT_VM_DISTRO
!if exists($(WorkSpace)\src\closed)
# if the build is for JDK6 or earlier version, it should include jdk6_hotspot_distro,
# instead of hotspot_distro.
JDK6_OR_EARLIER=0
!if "$(JDK_MAJOR_VERSION)" != "" && "$(JDK_MINOR_VERSION)" != "" && "$(JDK_MICRO_VERSION)" != ""
!if $(JDK_MAJOR_VERSION) == 1 && $(JDK_MINOR_VERSION) < 7
JDK6_OR_EARLIER=1
!endif
!else
!if $(JDK_MAJOR_VER) == 1 && $(JDK_MINOR_VER) < 7
JDK6_OR_EARLIER=1
!endif
!endif
!if $(JDK6_OR_EARLIER) == 1
!include $(WorkSpace)\make\jdk6_hotspot_distro
!else
!include $(WorkSpace)\make\hotspot_distro
!endif
!else
!include $(WorkSpace)\make\openjdk_distro
!endif
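For example (hypothetical invocations): an nmake run that passes JDK_MAJOR_VERSION=1 and JDK_MINOR_VERSION=6 sets JDK6_OR_EARLIER=1 and pulls in make\jdk6_hotspot_distro, while a 1.7 build keeps JDK6_OR_EARLIER=0 and continues to include make\hotspot_distro; when the new-style version variables are absent, the same test is applied to JDK_MAJOR_VER and JDK_MINOR_VER.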

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -806,3 +806,34 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize) - 1;
return &interpreter_frame_tos_address()[index];
}
#ifdef ASSERT
#define DESCRIBE_FP_OFFSET(name) \
values.describe(-1, fp() + frame::name##_offset, #name)
void frame::describe_pd(FrameValues& values, int frame_no) {
for (int w = 0; w < frame::register_save_words; w++) {
values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1);
}
if (is_interpreted_frame()) {
DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
DESCRIBE_FP_OFFSET(interpreter_frame_padding);
DESCRIBE_FP_OFFSET(interpreter_frame_oop_temp);
}
if (!is_compiled_frame()) {
if (frame::callee_aggregate_return_pointer_words != 0) {
values.describe(frame_no, sp() + frame::callee_aggregate_return_pointer_sp_offset, "callee_aggregate_return_pointer_word");
}
for (int w = 0; w < frame::callee_register_argument_save_area_words; w++) {
values.describe(frame_no, sp() + frame::callee_register_argument_save_area_sp_offset + w,
err_msg("callee_register_argument_save_area_words %d", w));
}
}
}
#endif

View File

@@ -350,8 +350,9 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
oopDesc* mh) {
printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
oopDesc* mh,
intptr_t* saved_sp) {
tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp);
print_method_handle(mh);
}
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
@@ -361,6 +362,7 @@ void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adapt
__ save_frame(16);
__ set((intptr_t) adaptername, O0);
__ mov(G3_method_handle, O1);
__ mov(I5_savedSP, O2);
__ mov(G3_method_handle, L3);
__ mov(Gargs, L4);
__ mov(G5_method_type, L5);
@@ -643,9 +645,10 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// Live at this point:
// - G5_klass : klass required by the target method
// - O0_argslot : argslot index in vmarg; may be required in the failing path
// - O1_scratch : argument klass to test
// - G3_method_handle: adapter method handle
__ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);
__ check_klass_subtype(O1_scratch, G5_klass, O2_scratch, O3_scratch, done);
// If we get here, the type check failed!
__ load_heap_oop(G3_amh_argument, O2_required); // required class

View File

@@ -1698,35 +1698,21 @@ int AbstractInterpreter::layout_activation(methodOop method,
popframe_extra_args;
int local_words = method->max_locals() * Interpreter::stackElementWords;
int parm_words = method->size_of_parameters() * Interpreter::stackElementWords;
NEEDS_CLEANUP;
intptr_t* locals;
if (caller->is_interpreted_frame()) {
// Can force the locals area to end up properly overlapping the top of the expression stack.
intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
// Note that this computation means we replace size_of_parameters() values from the caller
// interpreter frame's expression stack with our argument locals
locals = Lesp_ptr + parm_words;
int delta = local_words - parm_words;
int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
*interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
} else {
assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
// Don't have Lesp available; lay out locals block in the caller
// adjacent to the register window save area.
//
// Compiled frames do not allocate a varargs area which is why this if
// statement is needed.
//
if (caller->is_compiled_frame()) {
// Compiled frames do not allocate a varargs area so place them
// next to the register save area.
locals = fp + frame::register_save_words + local_words - 1;
} else {
locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
}
if (!caller->is_entry_frame()) {
// Caller wants his own SP back
int caller_frame_size = caller->cb()->frame_size();
*interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
} else {
assert(caller->is_interpreted_frame() || caller->is_entry_frame(), "only possible cases");
// The entry and interpreter frames are laid out like normal C
// frames so place the locals adjacent to the varargs area.
locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
if (caller->is_interpreted_frame()) {
*interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + rounded_cls) - STACK_BIAS;
}
}
if (TraceDeoptimization) {

View File

@@ -6039,6 +6039,43 @@ void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register
call_VM_leaf(entry_point, 3);
}
void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
pass_arg0(this, arg_0);
MacroAssembler::call_VM_leaf_base(entry_point, 1);
}
void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
pass_arg1(this, arg_1);
pass_arg0(this, arg_0);
MacroAssembler::call_VM_leaf_base(entry_point, 2);
}
void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
pass_arg2(this, arg_2);
LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
pass_arg1(this, arg_1);
pass_arg0(this, arg_0);
MacroAssembler::call_VM_leaf_base(entry_point, 3);
}
void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
pass_arg3(this, arg_3);
LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
pass_arg2(this, arg_2);
LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
pass_arg1(this, arg_1);
pass_arg0(this, arg_0);
MacroAssembler::call_VM_leaf_base(entry_point, 4);
}
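The LP64_ONLY asserts guard the shuffling order: arguments are placed from last to first, so moving a later argument into its c_rarg register must not clobber a register that still holds an earlier, not-yet-placed argument. A use of the four-register form appears later in this same changeset, in the x86 method-handle tracing stub:

```cpp
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub),
                      c_rarg0, c_rarg1, c_rarg2, c_rarg3);
```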
void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
}

View File

@@ -1655,6 +1655,14 @@ class MacroAssembler: public Assembler {
void call_VM_leaf(address entry_point,
Register arg_1, Register arg_2, Register arg_3);
// These always tightly bind to MacroAssembler::call_VM_leaf_base
// bypassing the virtual implementation
void super_call_VM_leaf(address entry_point);
void super_call_VM_leaf(address entry_point, Register arg_1);
void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
// last Java Frame (fills frame anchor)
void set_last_Java_frame(Register thread,
Register last_java_sp,

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -669,3 +669,23 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
return &interpreter_frame_tos_address()[index];
}
#ifdef ASSERT
#define DESCRIBE_FP_OFFSET(name) \
values.describe(-1, fp() + frame::name##_offset, #name)
void frame::describe_pd(FrameValues& values, int frame_no) {
if (is_interpreted_frame()) {
DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
DESCRIBE_FP_OFFSET(interpreter_frame_method);
DESCRIBE_FP_OFFSET(interpreter_frame_mdx);
DESCRIBE_FP_OFFSET(interpreter_frame_cache);
DESCRIBE_FP_OFFSET(interpreter_frame_locals);
DESCRIBE_FP_OFFSET(interpreter_frame_bcx);
DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
}
}
#endif

View File

@@ -383,32 +383,6 @@ void InterpreterMacroAssembler::store_ptr(int n, Register val) {
movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
}
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
MacroAssembler::call_VM_leaf_base(entry_point, 0);
}
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1) {
push(arg_1);
MacroAssembler::call_VM_leaf_base(entry_point, 1);
}
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
push(arg_2);
push(arg_1);
MacroAssembler::call_VM_leaf_base(entry_point, 2);
}
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
push(arg_3);
push(arg_2);
push(arg_1);
MacroAssembler::call_VM_leaf_base(entry_point, 3);
}
void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
// set sender sp
lea(rsi, Address(rsp, wordSize));

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -124,12 +124,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
void load_ptr(int n, Register val);
void store_ptr(int n, Register val);
// Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
void super_call_VM_leaf(address entry_point);
void super_call_VM_leaf(address entry_point, Register arg_1);
void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. EAX holds the super_klass. Blows ECX
// and EDI. Register sub_klass cannot be any of the above.

View File

@@ -381,56 +381,6 @@ void InterpreterMacroAssembler::store_ptr(int n, Register val) {
}
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
MacroAssembler::call_VM_leaf_base(entry_point, 0);
}
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
Register arg_1) {
if (c_rarg0 != arg_1) {
mov(c_rarg0, arg_1);
}
MacroAssembler::call_VM_leaf_base(entry_point, 1);
}
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
Register arg_1,
Register arg_2) {
assert(c_rarg0 != arg_2, "smashed argument");
assert(c_rarg1 != arg_1, "smashed argument");
if (c_rarg0 != arg_1) {
mov(c_rarg0, arg_1);
}
if (c_rarg1 != arg_2) {
mov(c_rarg1, arg_2);
}
MacroAssembler::call_VM_leaf_base(entry_point, 2);
}
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
Register arg_1,
Register arg_2,
Register arg_3) {
assert(c_rarg0 != arg_2, "smashed argument");
assert(c_rarg0 != arg_3, "smashed argument");
assert(c_rarg1 != arg_1, "smashed argument");
assert(c_rarg1 != arg_3, "smashed argument");
assert(c_rarg2 != arg_1, "smashed argument");
assert(c_rarg2 != arg_2, "smashed argument");
if (c_rarg0 != arg_1) {
mov(c_rarg0, arg_1);
}
if (c_rarg1 != arg_2) {
mov(c_rarg1, arg_2);
}
if (c_rarg2 != arg_3) {
mov(c_rarg2, arg_3);
}
MacroAssembler::call_VM_leaf_base(entry_point, 3);
}
void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
// set sender sp
lea(r13, Address(rsp, wordSize));

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -136,13 +136,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
void load_ptr(int n, Register val);
void store_ptr(int n, Register val);
// Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
void super_call_VM_leaf(address entry_point);
void super_call_VM_leaf(address entry_point, Register arg_1);
void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
void super_call_VM_leaf(address entry_point,
Register arg_1, Register arg_2, Register arg_3);
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass.
void gen_subtype_check( Register sub_klass, Label &ok_is_subtype );

View File

@@ -315,56 +315,38 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
oop mh,
intptr_t* saved_regs,
intptr_t* entry_sp,
intptr_t* saved_sp,
intptr_t* saved_bp) {
intptr_t* saved_sp,
oop mh,
intptr_t* sp) {
// called as a leaf from native code: do not block the JVM!
intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
if (last_sp != saved_sp && last_sp != NULL)
printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
intptr_t* entry_sp = sp + LP64_ONLY(16) NOT_LP64(8);
tty->print_cr("MH %s mh="INTPTR_FORMAT" sp="INTPTR_FORMAT" saved_sp="INTPTR_FORMAT")",
adaptername, (intptr_t)mh, (intptr_t)entry_sp, saved_sp);
if (Verbose) {
printf(" reg dump: ");
int saved_regs_count = (entry_sp-1) - saved_regs;
// 32 bit: rdi rsi rbp rsp; rbx rdx rcx (*) rax
int i;
for (i = 0; i <= saved_regs_count; i++) {
if (i > 0 && i % 4 == 0 && i != saved_regs_count)
printf("\n + dump: ");
printf(" %d: "INTPTR_FORMAT, i, saved_regs[i]);
}
printf("\n");
int stack_dump_count = 16;
if (stack_dump_count < (int)(saved_bp + 2 - saved_sp))
stack_dump_count = (int)(saved_bp + 2 - saved_sp);
if (stack_dump_count > 64) stack_dump_count = 48;
for (i = 0; i < stack_dump_count; i += 4) {
printf(" dump at SP[%d] "INTPTR_FORMAT": "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT"\n",
i, (intptr_t) &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]);
}
print_method_handle(mh);
}
}
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
if (!TraceMethodHandles) return;
BLOCK_COMMENT("trace_method_handle {");
__ push(rax);
__ lea(rax, Address(rsp, wordSize*6)); // entry_sp
__ pusha();
#ifdef _LP64
// Pass arguments carefully since the registers overlap with the calling convention.
// rcx: method handle
// r13: saved sp
__ mov(c_rarg2, rcx); // mh
__ mov(c_rarg1, r13); // saved sp
__ mov(c_rarg3, rsp); // sp
__ movptr(c_rarg0, (intptr_t) adaptername);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), c_rarg0, c_rarg1, c_rarg2, c_rarg3);
#else
// arguments:
__ push(rbp); // interpreter frame pointer
__ push(rsi); // saved_sp
__ push(rax); // entry_sp
__ push(rcx); // mh
__ push(rcx);
__ movptr(Address(rsp, 0), (intptr_t) adaptername);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5);
// rcx: method handle
// rsi: saved sp
__ movptr(rbx, (intptr_t) adaptername);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), rbx, rsi, rcx, rsp);
#endif
__ popa();
__ pop(rax);
BLOCK_COMMENT("} trace_method_handle");
}
#endif //PRODUCT

View File

@@ -12988,6 +12988,53 @@ instruct maxI_eReg(eRegI dst, eRegI src, eFlagsReg flags) %{
ins_pipe( pipe_slow );
%}
// ============================================================================
// Counted Loop limit node which represents exact final iterator value.
// Note: the resulting value should fit into integer range since
// counted loops have limit check on overflow.
instruct loopLimit_eReg(eAXRegI limit, nadxRegI init, immI stride, eDXRegI limit_hi, nadxRegI tmp, eFlagsReg flags) %{
match(Set limit (LoopLimit (Binary init limit) stride));
effect(TEMP limit_hi, TEMP tmp, KILL flags);
ins_cost(300);
format %{ "loopLimit $init,$limit,$stride # $limit = $init + $stride *( $limit - $init + $stride -1)/ $stride, kills $limit_hi" %}
ins_encode %{
int strd = (int)$stride$$constant;
assert(strd != 1 && strd != -1, "sanity");
int m1 = (strd > 0) ? 1 : -1;
// Convert limit to long (EAX:EDX)
__ cdql();
// Convert init to long (init:tmp)
__ movl($tmp$$Register, $init$$Register);
__ sarl($tmp$$Register, 31);
// $limit - $init
__ subl($limit$$Register, $init$$Register);
__ sbbl($limit_hi$$Register, $tmp$$Register);
// + ($stride - 1)
if (strd > 0) {
__ addl($limit$$Register, (strd - 1));
__ adcl($limit_hi$$Register, 0);
__ movl($tmp$$Register, strd);
} else {
__ addl($limit$$Register, (strd + 1));
__ adcl($limit_hi$$Register, -1);
__ lneg($limit_hi$$Register, $limit$$Register);
__ movl($tmp$$Register, -strd);
}
// signed division: (EAX:EDX) / pos_stride
__ idivl($tmp$$Register);
if (strd < 0) {
// restore sign
__ negl($tmp$$Register);
}
// (EAX) * stride
__ mull($tmp$$Register);
// + init (ignore upper bits)
__ addl($limit$$Register, $init$$Register);
%}
ins_pipe( pipe_slow );
%}
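A quick numeric check of the formula in the format string, for an assumed positive stride: with init = 0, limit = 10, stride = 3 the node computes 0 + 3 * ((10 - 0 + 3 - 1) / 3) = 12, the first iterator value at or past the limit in whole strides; the EAX:EDX long arithmetic is what keeps the intermediate 'limit - init + stride - 1' from overflowing 32 bits.

```cpp
// Illustrative check of the exact-limit formula (plain C++, not VM code);
// positive stride only - the negative case mirrors it using stride + 1.
long init = 0, limit = 10, stride = 3;
long exact = init + stride * ((limit - init + stride - 1) / stride);  // 12
```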
// ============================================================================
// Branch Instructions
// Jump Table

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,7 @@
// Defines Linux-specific default values. The flags are available on all
// platforms, but they may have different default values on other platforms.
//
define_pd_global(bool, UseLargePages, false);
define_pd_global(bool, UseLargePages, true);
define_pd_global(bool, UseLargePagesIndividualAllocation, false);
define_pd_global(bool, UseOSErrorReporting, false);
define_pd_global(bool, UseThreadPriorities, true) ;

View File

@@ -2914,17 +2914,22 @@ static void set_coredump_filter(void) {
static size_t _large_page_size = 0;
bool os::large_page_init() {
void os::large_page_init() {
if (!UseLargePages) {
UseHugeTLBFS = false;
UseSHM = false;
return false;
return;
}
if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
// Our user has not expressed a preference, so we'll try both.
// If UseLargePages is specified on the command line try both methods,
// if it's default, then try only HugeTLBFS.
if (FLAG_IS_DEFAULT(UseLargePages)) {
UseHugeTLBFS = true;
} else {
UseHugeTLBFS = UseSHM = true;
}
}
if (LargePageSizeInBytes) {
_large_page_size = LargePageSizeInBytes;
@@ -2978,7 +2983,6 @@ bool os::large_page_init() {
_page_sizes[1] = default_page_size;
_page_sizes[2] = 0;
}
UseHugeTLBFS = UseHugeTLBFS &&
Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
@@ -2988,12 +2992,6 @@ bool os::large_page_init() {
UseLargePages = UseHugeTLBFS || UseSHM;
set_coredump_filter();
// Large page support is available on 2.6 or newer kernel, some vendors
// (e.g. Redhat) have backported it to their 2.4 based distributions.
// We optimistically assume the support is available. If later it turns out
// not true, VM will automatically switch to use regular page size.
return true;
}
#ifndef SHM_HUGETLB
@@ -4118,7 +4116,7 @@ jint os::init_2(void)
#endif
}
FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
os::large_page_init();
// initialize suspend/resume support - must do this before signal_sets_init()
if (SR_initialize() != 0) {
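Read together with the globals_linux.hpp change above: large_page_init() no longer returns a value for FLAG_SET_DEFAULT to consume but adjusts UseLargePages itself, and when large pages are merely on by default only HugeTLBFS is probed, while an explicit -XX:+UseLargePages on the command line still tries both HugeTLBFS and SHM.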

View File

@@ -3336,11 +3336,11 @@ bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) {
return true;
}
bool os::large_page_init() {
void os::large_page_init() {
if (!UseLargePages) {
UseISM = false;
UseMPSS = false;
return false;
return;
}
// print a warning if any large page related flag is specified on command line
@@ -3361,7 +3361,6 @@ bool os::large_page_init() {
Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
UseLargePages = UseISM || UseMPSS;
return UseLargePages;
}
bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
@@ -4992,7 +4991,7 @@ jint os::init_2(void) {
#endif
}
FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
os::large_page_init();
// Check minimum allowable stack size for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page

View File

@@ -2762,8 +2762,8 @@ static void cleanup_after_large_page_init() {
_hToken = NULL;
}
bool os::large_page_init() {
void os::large_page_init() {
if (!UseLargePages) return false;
if (!UseLargePages) return;
// print a warning if any large page related flag is specified on command line
bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
@@ -2808,7 +2808,7 @@ bool os::large_page_init() {
}
cleanup_after_large_page_init();
return success;
UseLargePages = success;
}
// On win32, one cannot release just a part of reserved memory, it's an
@@ -3561,7 +3561,7 @@ jint os::init_2(void) {
#endif
}
FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
os::large_page_init();
// Setup Windows Exceptions

View File

@@ -93,7 +93,7 @@ inline void OrderAccess::release_store_ptr(volatile void* p, void* v)
inline void OrderAccess::store_fence(jbyte* p, jbyte v) {
__asm__ volatile ( "xchgb (%2),%0"
: "=r" (v)
: "=q" (v)
: "0" (v), "r" (p)
: "memory");
}
@@ -155,7 +155,7 @@ inline void OrderAccess::store_ptr_fence(void** p, void* v) {
// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) {
__asm__ volatile ( "xchgb (%2),%0"
: "=r" (v)
: "=q" (v)
: "0" (v), "r" (p)
: "memory");
}
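The constraint change is the whole fix: xchgb needs a byte-addressable register, and on 32-bit x86 only EAX, EBX, ECX and EDX have byte sub-registers, which is exactly what GCC's "q" constraint requests; plain "r" may pick ESI or EDI and fail to assemble. A stand-alone sketch of the corrected pattern, assuming GCC-style inline assembly outside the VM:

```cpp
// Minimal illustration, not the VM's OrderAccess code itself.
static inline void byte_xchg_fence(volatile signed char* p, signed char v) {
  __asm__ volatile ("xchgb (%2),%0"
                    : "=q" (v)            // byte-capable register required
                    : "0" (v), "r" (p)
                    : "memory");
}
```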

View File

@@ -826,6 +826,14 @@ public:
void ConcurrentMark::checkpointRootsInitialPost() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
// If we force an overflow during remark, the remark operation will
// actually abort and we'll restart concurrent marking. If we always
// force an overflow during remark we'll never actually complete the
// marking phase. So, we initialize this here, at the start of the
// cycle, so that the remaining overflow number will decrease at
// every remark and we'll eventually not need to cause one.
force_overflow_stw()->init();
// For each region note start of marking.
NoteStartOfMarkHRClosure startcl;
g1h->heap_region_iterate(&startcl);
@@ -893,27 +901,37 @@ void ConcurrentMark::checkpointRootsInitial() {
}
/*
Notice that in the next two methods, we actually leave the STS
during the barrier sync and join it immediately afterwards. If we
do not do this, this then the following deadlock can occur: one
thread could be in the barrier sync code, waiting for the other
thread to also sync up, whereas another one could be trying to
yield, while also waiting for the other threads to sync up too.

Because the thread that does the sync barrier has left the STS, it
is possible to be suspended for a Full GC or an evacuation pause
could occur. This is actually safe, since the entering the sync
barrier is one of the last things do_marking_step() does, and it
doesn't manipulate any data structures afterwards.
*/
/*
* Notice that in the next two methods, we actually leave the STS
* during the barrier sync and join it immediately afterwards. If we
* do not do this, the following deadlock can occur: one thread could
* be in the barrier sync code, waiting for the other thread to also
* sync up, whereas another one could be trying to yield, while also
* waiting for the other threads to sync up too.
*
* Note, however, that this code is also used during remark and in
* this case we should not attempt to leave / enter the STS, otherwise
* we'll either hit an assert (debug / fastdebug) or deadlock
* (product). So we should only leave / enter the STS if we are
* operating concurrently.
*
* Because the thread that does the sync barrier has left the STS, it
* is possible to be suspended for a Full GC or an evacuation pause
* could occur. This is actually safe, since the entering the sync
* barrier is one of the last things do_marking_step() does, and it
* doesn't manipulate any data structures afterwards.
*/
void ConcurrentMark::enter_first_sync_barrier(int task_num) {
if (verbose_low())
gclog_or_tty->print_cr("[%d] entering first barrier", task_num);
if (concurrent()) {
ConcurrentGCThread::stsLeave();
}
_first_overflow_barrier_sync.enter();
if (concurrent()) {
ConcurrentGCThread::stsJoin();
}
// at this point everyone should have synced up and not be doing any
// more work
@@ -923,7 +941,12 @@ void ConcurrentMark::enter_first_sync_barrier(int task_num) {
// let task 0 do this
if (task_num == 0) {
// task 0 is responsible for clearing the global data structures
clear_marking_state();
// We should be here because of an overflow. During STW we should
// not clear the overflow flag since we rely on it being true when
// we exit this method to abort the pause and restart concurrent
// marking.
clear_marking_state(concurrent() /* clear_overflow */);
force_overflow()->update();
if (PrintGC) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
@@ -940,15 +963,45 @@ void ConcurrentMark::enter_second_sync_barrier(int task_num) {
if (verbose_low())
gclog_or_tty->print_cr("[%d] entering second barrier", task_num);
if (concurrent()) {
ConcurrentGCThread::stsLeave();
}
_second_overflow_barrier_sync.enter();
if (concurrent()) {
ConcurrentGCThread::stsJoin();
}
// at this point everything should be re-initialised and ready to go
if (verbose_low())
gclog_or_tty->print_cr("[%d] leaving second barrier", task_num);
}
#ifndef PRODUCT
void ForceOverflowSettings::init() {
_num_remaining = G1ConcMarkForceOverflow;
_force = false;
update();
}
void ForceOverflowSettings::update() {
if (_num_remaining > 0) {
_num_remaining -= 1;
_force = true;
} else {
_force = false;
}
}
bool ForceOverflowSettings::should_force() {
if (_force) {
_force = false;
return true;
} else {
return false;
}
}
#endif // !PRODUCT
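Taken together with the class added to concurrentMark.hpp further down, the intended use reads roughly as follows: init() arms the counter from G1ConcMarkForceOverflow, each update() spends one unit and latches _force, and should_force() consumes the latch, so approximately the first N marking rounds deliberately raise the overflow flag before marking is allowed to complete; in product builds the three methods reduce to PRODUCT_RETURN stubs and the whole mechanism disappears.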
void ConcurrentMark::grayRoot(oop p) {
HeapWord* addr = (HeapWord*) p;
// We can't really check against _heap_start and _heap_end, since it
@@ -1117,6 +1170,7 @@ void ConcurrentMark::markFromRoots() {
_restart_for_overflow = false;
size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
force_overflow_conc()->init();
set_phase(active_workers, true /* concurrent */);
CMConcurrentMarkingTask markingTask(this, cmThread());
@@ -1845,7 +1899,7 @@ void ConcurrentMark::completeCleanup() {
while (!_cleanup_list.is_empty()) {
HeapRegion* hr = _cleanup_list.remove_head();
assert(hr != NULL, "the list was not empty");
hr->rem_set()->clear();
hr->par_clear();
tmp_free_list.add_as_tail(hr);
// Instead of adding one region at a time to the secondary_free_list,
@@ -2703,12 +2757,16 @@ void ConcurrentMark::oops_do(OopClosure* cl) {
}
void ConcurrentMark::clear_marking_state() {
void ConcurrentMark::clear_marking_state(bool clear_overflow) {
_markStack.setEmpty();
_markStack.clear_overflow();
_regionStack.setEmpty();
_regionStack.clear_overflow();
if (clear_overflow) {
clear_has_overflown();
} else {
assert(has_overflown(), "pre-condition");
}
_finger = _heap_start;
for (int i = 0; i < (int)_max_task_num; ++i) {
@@ -4279,6 +4337,15 @@ void CMTask::do_marking_step(double time_target_ms,
}
}
// If we are about to wrap up and go into termination, check if we
// should raise the overflow flag.
if (do_termination && !has_aborted()) {
if (_cm->force_overflow()->should_force()) {
_cm->set_has_overflown();
regular_clock_call();
}
}
// We still haven't aborted. Now, let's try to get into the
// termination protocol.
if (do_termination && !has_aborted()) {

View File

@@ -316,6 +316,19 @@ public:
void setEmpty() { _index = 0; clear_overflow(); } void setEmpty() { _index = 0; clear_overflow(); }
}; };
class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
private:
#ifndef PRODUCT
uintx _num_remaining;
bool _force;
#endif // !defined(PRODUCT)
public:
void init() PRODUCT_RETURN;
void update() PRODUCT_RETURN;
bool should_force() PRODUCT_RETURN_( return false; );
};
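The PRODUCT_RETURN / PRODUCT_RETURN_() suffixes above let one declaration serve both build flavors: in product builds the macro supplies a trivial inline body, while in debug builds it expands to nothing and the non-product definitions shown earlier in this changeset are compiled in. The sketch below shows the general shape of that idiom; it is an approximation, not the exact definitions from globalDefinitions.hpp:

  // Sketch of the PRODUCT_RETURN idiom (approximate, for orientation only).
  #ifdef PRODUCT
    #define PRODUCT_RETURN          {}              // empty inline body in product builds
    #define PRODUCT_RETURN_(code)   { code }        // canned body, e.g. "return false;"
  #else
    #define PRODUCT_RETURN          /* ; expected */  // plain declaration; defined in the .cpp
    #define PRODUCT_RETURN_(code)   /* ; expected */
  #endif

  class Example {                                   // hypothetical class, mirrors the usage above
  public:
    void update() PRODUCT_RETURN;                   // no-op unless this is a debug build
    bool should_force() PRODUCT_RETURN_( return false; );
  };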
// this will enable a variety of different statistics per GC task // this will enable a variety of different statistics per GC task
#define _MARKING_STATS_ 0 #define _MARKING_STATS_ 0
// this will enable the higher verbose levels // this will enable the higher verbose levels
@@ -462,6 +475,9 @@ protected:
WorkGang* _parallel_workers; WorkGang* _parallel_workers;
ForceOverflowSettings _force_overflow_conc;
ForceOverflowSettings _force_overflow_stw;
void weakRefsWork(bool clear_all_soft_refs); void weakRefsWork(bool clear_all_soft_refs);
void swapMarkBitMaps(); void swapMarkBitMaps();
@@ -470,7 +486,7 @@ protected:
// task local ones; should be called during initial mark. // task local ones; should be called during initial mark.
void reset(); void reset();
// It resets all the marking data structures. // It resets all the marking data structures.
void clear_marking_state(); void clear_marking_state(bool clear_overflow = true);
// It should be called to indicate which phase we're in (concurrent // It should be called to indicate which phase we're in (concurrent
// mark or remark) and how many threads are currently active. // mark or remark) and how many threads are currently active.
@@ -547,6 +563,22 @@ protected:
void enter_first_sync_barrier(int task_num); void enter_first_sync_barrier(int task_num);
void enter_second_sync_barrier(int task_num); void enter_second_sync_barrier(int task_num);
ForceOverflowSettings* force_overflow_conc() {
return &_force_overflow_conc;
}
ForceOverflowSettings* force_overflow_stw() {
return &_force_overflow_stw;
}
ForceOverflowSettings* force_overflow() {
if (concurrent()) {
return force_overflow_conc();
} else {
return force_overflow_stw();
}
}
public: public:
// Manipulation of the global mark stack. // Manipulation of the global mark stack.
// Notice that the first mark_stack_push is CAS-based, whereas the // Notice that the first mark_stack_push is CAS-based, whereas the

View File

@@ -3975,6 +3975,9 @@ void G1CollectedHeap::drain_evac_failure_scan_stack() {
oop oop
G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
oop old) { oop old) {
assert(obj_in_cs(old),
err_msg("obj: "PTR_FORMAT" should still be in the CSet",
(HeapWord*) old));
markOop m = old->mark(); markOop m = old->mark();
oop forward_ptr = old->forward_to_atomic(old); oop forward_ptr = old->forward_to_atomic(old);
if (forward_ptr == NULL) { if (forward_ptr == NULL) {
@@ -3997,7 +4000,13 @@ G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
} }
return old; return old;
} else { } else {
// Someone else had a place to copy it. // Forward-to-self failed. Either someone else managed to allocate
// space for this object (old != forward_ptr) or they beat us in
// self-forwarding it (old == forward_ptr).
assert(old == forward_ptr || !obj_in_cs(forward_ptr),
err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
"should not be in the CSet",
(HeapWord*) old, (HeapWord*) forward_ptr));
return forward_ptr; return forward_ptr;
} }
} }
@@ -4308,12 +4317,11 @@ template <class T> void G1ParCopyHelper::mark_forwardee(T* p) {
T heap_oop = oopDesc::load_heap_oop(p); T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) { if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop(heap_oop); oop obj = oopDesc::decode_heap_oop(heap_oop);
assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)),
"shouldn't still be in the CSet if evacuation didn't fail.");
HeapWord* addr = (HeapWord*)obj; HeapWord* addr = (HeapWord*)obj;
if (_g1->is_in_g1_reserved(addr)) if (_g1->is_in_g1_reserved(addr)) {
_cm->grayRoot(oop(addr)); _cm->grayRoot(oop(addr));
} }
}
} }
oop G1ParCopyHelper::copy_to_survivor_space(oop old) { oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
@@ -4961,36 +4969,45 @@ public:
#ifndef PRODUCT #ifndef PRODUCT
class G1VerifyCardTableCleanup: public HeapRegionClosure { class G1VerifyCardTableCleanup: public HeapRegionClosure {
G1CollectedHeap* _g1h;
CardTableModRefBS* _ct_bs; CardTableModRefBS* _ct_bs;
public: public:
G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
: _ct_bs(ct_bs) { } : _g1h(g1h), _ct_bs(ct_bs) { }
virtual bool doHeapRegion(HeapRegion* r) { virtual bool doHeapRegion(HeapRegion* r) {
MemRegion mr(r->bottom(), r->end());
if (r->is_survivor()) { if (r->is_survivor()) {
_ct_bs->verify_dirty_region(mr); _g1h->verify_dirty_region(r);
} else { } else {
_ct_bs->verify_clean_region(mr); _g1h->verify_not_dirty_region(r);
} }
return false; return false;
} }
}; };
void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) { void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); // All of the region should be clean.
for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) { CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
MemRegion mr(hr->bottom(), hr->end());
ct_bs->verify_not_dirty_region(mr);
}
void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
// We cannot guarantee that [bottom(),end()] is dirty. Threads // We cannot guarantee that [bottom(),end()] is dirty. Threads
// dirty allocated blocks as they allocate them. The thread that // dirty allocated blocks as they allocate them. The thread that
// retires each region and replaces it with a new one will do a // retires each region and replaces it with a new one will do a
// maximal allocation to fill in [pre_dummy_top(),end()] but will // maximal allocation to fill in [pre_dummy_top(),end()] but will
// not dirty that area (one less thing to have to do while holding // not dirty that area (one less thing to have to do while holding
// a lock). So we can only verify that [bottom(),pre_dummy_top()] // a lock). So we can only verify that [bottom(),pre_dummy_top()]
// is dirty. Also note that verify_dirty_region() requires // is dirty.
// mr.start() and mr.end() to be card aligned and pre_dummy_top() CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
// is not guaranteed to be. MemRegion mr(hr->bottom(), hr->pre_dummy_top());
MemRegion mr(hr->bottom(),
ct_bs->align_to_card_boundary(hr->pre_dummy_top()));
ct_bs->verify_dirty_region(mr); ct_bs->verify_dirty_region(mr);
}
void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
verify_dirty_region(hr);
} }
} }
@@ -5033,7 +5050,7 @@ void G1CollectedHeap::cleanUpCardTable() {
g1_policy()->record_clear_ct_time( elapsed * 1000.0); g1_policy()->record_clear_ct_time( elapsed * 1000.0);
#ifndef PRODUCT #ifndef PRODUCT
if (G1VerifyCTCleanup || VerifyAfterGC) { if (G1VerifyCTCleanup || VerifyAfterGC) {
G1VerifyCardTableCleanup cleanup_verifier(ct_bs); G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
heap_region_iterate(&cleanup_verifier); heap_region_iterate(&cleanup_verifier);
} }
#endif #endif

View File

@@ -970,6 +970,8 @@ public:
// The number of regions available for "regular" expansion. // The number of regions available for "regular" expansion.
size_t expansion_regions() { return _expansion_regions; } size_t expansion_regions() { return _expansion_regions; }
void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN; void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
void verify_dirty_young_regions() PRODUCT_RETURN; void verify_dirty_young_regions() PRODUCT_RETURN;

View File

@@ -157,7 +157,6 @@ public:
void set_try_claimed() { _try_claimed = true; } void set_try_claimed() { _try_claimed = true; }
void scanCard(size_t index, HeapRegion *r) { void scanCard(size_t index, HeapRegion *r) {
_cards_done++;
DirtyCardToOopClosure* cl = DirtyCardToOopClosure* cl =
r->new_dcto_closure(_oc, r->new_dcto_closure(_oc,
CardTableModRefBS::Precise, CardTableModRefBS::Precise,
@@ -168,17 +167,14 @@ public:
HeapWord* card_start = _bot_shared->address_for_index(index); HeapWord* card_start = _bot_shared->address_for_index(index);
HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words; HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
Space *sp = SharedHeap::heap()->space_containing(card_start); Space *sp = SharedHeap::heap()->space_containing(card_start);
MemRegion sm_region; MemRegion sm_region = sp->used_region_at_save_marks();
if (ParallelGCThreads > 0) {
// first find the used area
sm_region = sp->used_region_at_save_marks();
} else {
// The closure is not idempotent. We shouldn't look at objects
// allocated during the GC.
sm_region = sp->used_region_at_save_marks();
}
MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end)); MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
if (!mr.is_empty()) { if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
// We mark the card as "claimed" lazily (so races are possible
// but they're benign), which reduces the number of duplicate
// scans (the rsets of the regions in the cset can intersect).
_ct_bs->set_card_claimed(index);
_cards_done++;
cl->do_MemRegion(mr); cl->do_MemRegion(mr);
} }
} }
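The reworked scanCard() above claims a card lazily before scanning it, so two workers whose remembered sets both point at the same card usually do not scan it twice; the unsynchronized claim is a benign race whose worst case is one duplicate scan. A minimal standalone illustration of that claim-then-scan idea follows, using std::atomic flags instead of HotSpot's card table and with invented names:

  #include <atomic>
  #include <cstddef>

  // Illustrative only: one claim flag per card. HotSpot keeps the claim bit
  // in the card table entry itself; std::atomic is used here for brevity.
  enum { kNumCards = 1024 };
  static std::atomic<bool> card_claimed[kNumCards];

  template <typename ScanFn>
  void scan_card_once(size_t index, ScanFn scan) {
    // Lazy claim: check first, then set. Two threads may both pass the check
    // (a benign race), but most duplicate scans are filtered out.
    if (!card_claimed[index].load(std::memory_order_relaxed)) {
      card_claimed[index].store(true, std::memory_order_relaxed);
      scan(index);
    }
  }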
@@ -199,6 +195,9 @@ public:
HeapRegionRemSet* hrrs = r->rem_set(); HeapRegionRemSet* hrrs = r->rem_set();
if (hrrs->iter_is_complete()) return false; // All done. if (hrrs->iter_is_complete()) return false; // All done.
if (!_try_claimed && !hrrs->claim_iter()) return false; if (!_try_claimed && !hrrs->claim_iter()) return false;
// If we ever free the collection set concurrently, we should also
// clear the card table concurrently therefore we won't need to
// add regions of the collection set to the dirty cards region.
_g1h->push_dirty_cards_region(r); _g1h->push_dirty_cards_region(r);
// If we didn't return above, then // If we didn't return above, then
// _try_claimed || r->claim_iter() // _try_claimed || r->claim_iter()
@@ -231,23 +230,16 @@ public:
} }
// If the card is dirty, then we will scan it during updateRS. // If the card is dirty, then we will scan it during updateRS.
if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) { if (!card_region->in_collection_set() &&
// We make the card as "claimed" lazily (so races are possible but they're benign), !_ct_bs->is_card_dirty(card_index)) {
// which reduces the number of duplicate scans (the rsets of the regions in the cset
// can intersect).
if (!_ct_bs->is_card_claimed(card_index)) {
_ct_bs->set_card_claimed(card_index);
scanCard(card_index, card_region); scanCard(card_index, card_region);
} }
} }
}
if (!_try_claimed) { if (!_try_claimed) {
hrrs->set_iter_complete(); hrrs->set_iter_complete();
} }
return false; return false;
} }
// Set all cards back to clean.
void cleanup() {_g1h->cleanUpCardTable();}
size_t cards_done() { return _cards_done;} size_t cards_done() { return _cards_done;}
size_t cards_looked_up() { return _cards;} size_t cards_looked_up() { return _cards;}
}; };
@@ -567,7 +559,8 @@ public:
HeapWord* stop_point = HeapWord* stop_point =
r->oops_on_card_seq_iterate_careful(scanRegion, r->oops_on_card_seq_iterate_careful(scanRegion,
&filter_then_update_rs_cset_oop_cl, &filter_then_update_rs_cset_oop_cl,
false /* filter_young */); false /* filter_young */,
NULL /* card_ptr */);
// Since this is performed in the event of an evacuation failure, we // Since this is performed in the event of an evacuation failure, we
// we shouldn't see a non-null stop point // we shouldn't see a non-null stop point
@@ -735,12 +728,6 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
(OopClosure*)&mux : (OopClosure*)&mux :
(OopClosure*)&update_rs_oop_cl)); (OopClosure*)&update_rs_oop_cl));
// Undirty the card.
*card_ptr = CardTableModRefBS::clean_card_val();
// We must complete this write before we do any of the reads below.
OrderAccess::storeload();
// And process it, being careful of unallocated portions of TLAB's.
// The region for the current card may be a young region. The // The region for the current card may be a young region. The
// current card may have been a card that was evicted from the // current card may have been a card that was evicted from the
// card cache. When the card was inserted into the cache, we had // card cache. When the card was inserted into the cache, we had
@@ -749,7 +736,7 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
// and tagged as young. // and tagged as young.
// //
// We wish to filter out cards for such a region but the current // We wish to filter out cards for such a region but the current
// thread, if we're running conucrrently, may "see" the young type // thread, if we're running concurrently, may "see" the young type
// change at any time (so an earlier "is_young" check may pass or // change at any time (so an earlier "is_young" check may pass or
// fail arbitrarily). We tell the iteration code to perform this // fail arbitrarily). We tell the iteration code to perform this
// filtering when it has been determined that there has been an actual // filtering when it has been determined that there has been an actual
@@ -759,7 +746,8 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
HeapWord* stop_point = HeapWord* stop_point =
r->oops_on_card_seq_iterate_careful(dirtyRegion, r->oops_on_card_seq_iterate_careful(dirtyRegion,
&filter_then_update_rs_oop_cl, &filter_then_update_rs_oop_cl,
filter_young); filter_young,
card_ptr);
// If stop_point is non-null, then we encountered an unallocated region // If stop_point is non-null, then we encountered an unallocated region
// (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the

View File

@@ -311,7 +311,11 @@
\ \
develop(bool, G1ExitOnExpansionFailure, false, \ develop(bool, G1ExitOnExpansionFailure, false, \
"Raise a fatal VM exit out of memory failure in the event " \ "Raise a fatal VM exit out of memory failure in the event " \
" that heap expansion fails due to running out of swap.") " that heap expansion fails due to running out of swap.") \
\
develop(uintx, G1ConcMarkForceOverflow, 0, \
"The number of times we'll force an overflow during " \
"concurrent marking")
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG) G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)

View File

@@ -376,6 +376,17 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
if (clear_space) clear(SpaceDecorator::Mangle); if (clear_space) clear(SpaceDecorator::Mangle);
} }
void HeapRegion::par_clear() {
assert(used() == 0, "the region should have been already cleared");
assert(capacity() == (size_t) HeapRegion::GrainBytes,
"should be back to normal");
HeapRegionRemSet* hrrs = rem_set();
hrrs->clear();
CardTableModRefBS* ct_bs =
(CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
ct_bs->clear(MemRegion(bottom(), end()));
}
// <PREDICTION> // <PREDICTION>
void HeapRegion::calc_gc_efficiency() { void HeapRegion::calc_gc_efficiency() {
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -600,7 +611,15 @@ HeapWord*
HeapRegion:: HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr, oops_on_card_seq_iterate_careful(MemRegion mr,
FilterOutOfRegionClosure* cl, FilterOutOfRegionClosure* cl,
bool filter_young) { bool filter_young,
jbyte* card_ptr) {
// Currently, we should only have to clean the card if filter_young
// is true and vice versa.
if (filter_young) {
assert(card_ptr != NULL, "pre-condition");
} else {
assert(card_ptr == NULL, "pre-condition");
}
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
// If we're within a stop-world GC, then we might look at a card in a // If we're within a stop-world GC, then we might look at a card in a
@@ -626,6 +645,15 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
assert(!is_young(), "check value of filter_young"); assert(!is_young(), "check value of filter_young");
// We can only clean the card here, after we make the decision that
// the card is not young. And we only clean the card if we have been
// asked to (i.e., card_ptr != NULL).
if (card_ptr != NULL) {
*card_ptr = CardTableModRefBS::clean_card_val();
// We must complete this write before we do any of the reads below.
OrderAccess::storeload();
}
// We used to use "block_start_careful" here. But we're actually happy // We used to use "block_start_careful" here. But we're actually happy
// to update the BOT while we do this... // to update the BOT while we do this...
HeapWord* cur = block_start(mr.start()); HeapWord* cur = block_start(mr.start());
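The card-cleaning code added above follows the usual refinement ordering: undirty the card first, issue a StoreLoad barrier, and only then read the objects it covers, so any mutation racing with the scan re-dirties the card and gets refined again on a later pass. A rough portable sketch of that ordering follows; std::atomic_thread_fence is only an approximation of OrderAccess::storeload(), and the types here are stand-ins:

  #include <atomic>

  typedef signed char jbyte;                       // stand-in typedef for the sketch
  const jbyte clean_card_val = -1;                 // illustrative value only

  // Undirty the card, fence, then scan. The real code scans the covered
  // MemRegion through a closure; that part is elided here.
  void clean_then_scan(volatile jbyte* card_ptr) {
    *card_ptr = clean_card_val;                              // undirty first
    std::atomic_thread_fence(std::memory_order_seq_cst);     // store/load barrier
    // ... read the objects the card covers; a write that races with the scan
    // re-dirties the card, so anything missed here is picked up later ...
  }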

View File

@@ -584,6 +584,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
// Reset HR stuff to default values. // Reset HR stuff to default values.
void hr_clear(bool par, bool clear_space); void hr_clear(bool par, bool clear_space);
void par_clear();
void initialize(MemRegion mr, bool clear_space, bool mangle_space); void initialize(MemRegion mr, bool clear_space, bool mangle_space);
@@ -802,12 +803,16 @@ class HeapRegion: public G1OffsetTableContigSpace {
HeapWord* HeapWord*
object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl); object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
// In this version - if filter_young is true and the region // filter_young: if true and the region is a young region then we
// is a young region then we skip the iteration. // skip the iteration.
// card_ptr: if not NULL, and we decide that the card is not young
// and we iterate over it, we'll clean the card before we start the
// iteration.
HeapWord* HeapWord*
oops_on_card_seq_iterate_careful(MemRegion mr, oops_on_card_seq_iterate_careful(MemRegion mr,
FilterOutOfRegionClosure* cl, FilterOutOfRegionClosure* cl,
bool filter_young); bool filter_young,
jbyte* card_ptr);
// A version of block start that is guaranteed to find *some* block // A version of block start that is guaranteed to find *some* block
// boundary at or before "p", but does not object iteration, and may // boundary at or before "p", but does not object iteration, and may

View File

@@ -224,6 +224,12 @@ void PSOldGen::expand(size_t bytes) {
const size_t alignment = virtual_space()->alignment(); const size_t alignment = virtual_space()->alignment();
size_t aligned_bytes = align_size_up(bytes, alignment); size_t aligned_bytes = align_size_up(bytes, alignment);
size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment); size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);
if (UseNUMA) {
// With NUMA we use round-robin page allocation for the old gen. Expand by at least
// providing a page per lgroup. Alignment is larger or equal to the page size.
aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
}
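The UseNUMA clause above raises the minimum expansion so that round-robin page placement can hand at least one aligned chunk to every locality group. A small worked example with hypothetical numbers (the alignment, lgroup count and starting delta below are made up for illustration):

  #include <algorithm>
  #include <cstddef>
  #include <cstdio>

  int main() {
    const size_t alignment   = 2 * 1024 * 1024;    // assume 2 MiB virtual-space alignment
    const size_t num_lgroups = 4;                  // assume 4 NUMA locality groups
    size_t aligned_expand_bytes = 2 * 1024 * 1024; // MinHeapDeltaBytes already rounded up

    // Mirrors the new UseNUMA clause: at least one aligned chunk per lgroup,
    // so round-robin placement can give every group something to map.
    aligned_expand_bytes = std::max(aligned_expand_bytes, alignment * num_lgroups);
    printf("expand by at least %zu bytes\n", aligned_expand_bytes);   // 8 MiB here
    return 0;
  }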
if (aligned_bytes == 0){ if (aligned_bytes == 0){
// The alignment caused the number of bytes to wrap. An expand_by(0) will // The alignment caused the number of bytes to wrap. An expand_by(0) will
// return true with the implication that an expansion was done when it // return true with the implication that an expansion was done when it

View File

@@ -327,6 +327,7 @@ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle res
// 1. check if klass is not interface // 1. check if klass is not interface
if (resolved_klass->is_interface()) { if (resolved_klass->is_interface()) {
ResourceMark rm(THREAD);
char buf[200]; char buf[200];
jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected", Klass::cast(resolved_klass())->external_name()); jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected", Klass::cast(resolved_klass())->external_name());
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
@@ -413,6 +414,7 @@ void LinkResolver::resolve_interface_method(methodHandle& resolved_method,
// check if klass is interface // check if klass is interface
if (!resolved_klass->is_interface()) { if (!resolved_klass->is_interface()) {
ResourceMark rm(THREAD);
char buf[200]; char buf[200];
jio_snprintf(buf, sizeof(buf), "Found class %s, but interface was expected", Klass::cast(resolved_klass())->external_name()); jio_snprintf(buf, sizeof(buf), "Found class %s, but interface was expected", Klass::cast(resolved_klass())->external_name());
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
@@ -534,6 +536,7 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo
// check for errors // check for errors
if (is_static != fd.is_static()) { if (is_static != fd.is_static()) {
ResourceMark rm(THREAD);
char msg[200]; char msg[200];
jio_snprintf(msg, sizeof(msg), "Expected %s field %s.%s", is_static ? "static" : "non-static", Klass::cast(resolved_klass())->external_name(), fd.name()->as_C_string()); jio_snprintf(msg, sizeof(msg), "Expected %s field %s.%s", is_static ? "static" : "non-static", Klass::cast(resolved_klass())->external_name(), fd.name()->as_C_string());
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), msg); THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), msg);
@@ -631,6 +634,7 @@ void LinkResolver::linktime_resolve_static_method(methodHandle& resolved_method,
// check if static // check if static
if (!resolved_method->is_static()) { if (!resolved_method->is_static()) {
ResourceMark rm(THREAD);
char buf[200]; char buf[200];
jio_snprintf(buf, sizeof(buf), "Expected static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), jio_snprintf(buf, sizeof(buf), "Expected static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
resolved_method->name(), resolved_method->name(),
@@ -671,6 +675,7 @@ void LinkResolver::linktime_resolve_special_method(methodHandle& resolved_method
// check if not static // check if not static
if (resolved_method->is_static()) { if (resolved_method->is_static()) {
ResourceMark rm(THREAD);
char buf[200]; char buf[200];
jio_snprintf(buf, sizeof(buf), jio_snprintf(buf, sizeof(buf),
"Expecting non-static method %s", "Expecting non-static method %s",
@@ -717,6 +722,7 @@ void LinkResolver::runtime_resolve_special_method(CallInfo& result, methodHandle
// check if not static // check if not static
if (sel_method->is_static()) { if (sel_method->is_static()) {
ResourceMark rm(THREAD);
char buf[200]; char buf[200];
jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
resolved_method->name(), resolved_method->name(),
@@ -757,6 +763,7 @@ void LinkResolver::linktime_resolve_virtual_method(methodHandle &resolved_method
// check if not static // check if not static
if (resolved_method->is_static()) { if (resolved_method->is_static()) {
ResourceMark rm(THREAD);
char buf[200]; char buf[200];
jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
resolved_method->name(), resolved_method->name(),
@@ -873,6 +880,7 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
// check if receiver klass implements the resolved interface // check if receiver klass implements the resolved interface
if (!recv_klass->is_subtype_of(resolved_klass())) { if (!recv_klass->is_subtype_of(resolved_klass())) {
ResourceMark rm(THREAD);
char buf[200]; char buf[200];
jio_snprintf(buf, sizeof(buf), "Class %s does not implement the requested interface %s", jio_snprintf(buf, sizeof(buf), "Class %s does not implement the requested interface %s",
(Klass::cast(recv_klass()))->external_name(), (Klass::cast(recv_klass()))->external_name(),

View File

@@ -44,6 +44,14 @@ void* CHeapObj::operator new(size_t size){
return (void *) AllocateHeap(size, "CHeapObj-new"); return (void *) AllocateHeap(size, "CHeapObj-new");
} }
void* CHeapObj::operator new (size_t size, const std::nothrow_t& nothrow_constant) {
char* p = (char*) os::malloc(size);
#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
return p;
}
void CHeapObj::operator delete(void* p){ void CHeapObj::operator delete(void* p){
FreeHeap(p); FreeHeap(p);
} }
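With the nothrow operator new added above, a failed CHeapObj allocation can surface as NULL rather than going through the usual AllocateHeap failure path, so callers that treat the allocation as optional can check and continue. A generic usage sketch follows; the Payload type is hypothetical, not a HotSpot class:

  #include <new>       // std::nothrow
  #include <cstdio>

  struct Payload { int data[1024]; };    // hypothetical CHeapObj-like payload

  void allocate_optional() {
    // With a nothrow operator new, failure shows up as NULL instead of an
    // out-of-memory abort, so the caller can degrade gracefully.
    Payload* p = new (std::nothrow) Payload();
    if (p == NULL) {
      printf("allocation failed, continuing without the optional buffer\n");
      return;
    }
    // ... use p ...
    delete p;
  }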

View File

@@ -34,6 +34,8 @@
#include "opto/c2_globals.hpp" #include "opto/c2_globals.hpp"
#endif #endif
#include <new>
#define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1) #define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1)) #define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK) #define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
@@ -99,6 +101,7 @@ class AllocatedObj {
class CHeapObj ALLOCATION_SUPER_CLASS_SPEC { class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
public: public:
void* operator new(size_t size); void* operator new(size_t size);
void* operator new (size_t size, const std::nothrow_t& nothrow_constant);
void operator delete(void* p); void operator delete(void* p);
void* new_array(size_t size); void* new_array(size_t size);
}; };

View File

@@ -652,43 +652,37 @@ void CardTableModRefBS::verify() {
} }
#ifndef PRODUCT #ifndef PRODUCT
class GuaranteeNotModClosure: public MemRegionClosure { void CardTableModRefBS::verify_region(MemRegion mr,
CardTableModRefBS* _ct; jbyte val, bool val_equals) {
public: jbyte* start = byte_for(mr.start());
GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {} jbyte* end = byte_for(mr.last());
void do_MemRegion(MemRegion mr) { bool failures = false;
jbyte* entry = _ct->byte_for(mr.start()); for (jbyte* curr = start; curr <= end; ++curr) {
guarantee(*entry != CardTableModRefBS::clean_card, jbyte curr_val = *curr;
"Dirty card in region that should be clean"); bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
if (failed) {
if (!failures) {
tty->cr();
tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]");
tty->print_cr("== %sexpecting value: %d",
(val_equals) ? "" : "not ", val);
failures = true;
} }
}; tty->print_cr("== card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
"val: %d", curr, addr_for(curr),
void CardTableModRefBS::verify_clean_region(MemRegion mr) { (HeapWord*) (((size_t) addr_for(curr)) + card_size),
GuaranteeNotModClosure blk(this); (int) curr_val);
non_clean_card_iterate_serial(mr, &blk); }
}
guarantee(!failures, "there should not have been any failures");
} }
// To verify a MemRegion is entirely dirty this closure is passed to void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
// dirty_card_iterate. If the region is dirty do_MemRegion will be verify_region(mr, dirty_card, false /* val_equals */);
// invoked only once with a MemRegion equal to the one being }
// verified.
class GuaranteeDirtyClosure: public MemRegionClosure {
CardTableModRefBS* _ct;
MemRegion _mr;
bool _result;
public:
GuaranteeDirtyClosure(CardTableModRefBS* ct, MemRegion mr)
: _ct(ct), _mr(mr), _result(false) {}
void do_MemRegion(MemRegion mr) {
_result = _mr.equals(mr);
}
bool result() const { return _result; }
};
void CardTableModRefBS::verify_dirty_region(MemRegion mr) { void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
GuaranteeDirtyClosure blk(this, mr); verify_region(mr, dirty_card, true /* val_equals */);
dirty_card_iterate(mr, &blk);
guarantee(blk.result(), "Non-dirty cards in region that should be dirty");
} }
#endif #endif

View File

@@ -475,7 +475,10 @@ public:
void verify(); void verify();
void verify_guard(); void verify_guard();
void verify_clean_region(MemRegion mr) PRODUCT_RETURN; // val_equals -> it will check that all cards covered by mr equal val
// !val_equals -> it will check that all cards covered by mr do not equal val
void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
void verify_dirty_region(MemRegion mr) PRODUCT_RETURN; void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
static size_t par_chunk_heapword_alignment() { static size_t par_chunk_heapword_alignment() {

View File

@@ -265,8 +265,6 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
always_do_update_barrier = UseConcMarkSweepGC; always_do_update_barrier = UseConcMarkSweepGC;
BlockOffsetArrayUseUnallocatedBlock =
BlockOffsetArrayUseUnallocatedBlock || ParallelGCThreads > 0;
// Check validity of heap flags // Check validity of heap flags
assert(OldSize % min_alignment() == 0, "old space alignment"); assert(OldSize % min_alignment() == 0, "old space alignment");

View File

@@ -175,7 +175,7 @@ class VerifyOopClosure: public OopClosure {
protected: protected:
template <class T> inline void do_oop_work(T* p) { template <class T> inline void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p); oop obj = oopDesc::load_decode_heap_oop(p);
guarantee(obj->is_oop_or_null(), err_msg("invalid oop: " INTPTR_FORMAT, obj)); guarantee(obj->is_oop_or_null(), err_msg("invalid oop: " INTPTR_FORMAT, (oopDesc*) obj));
} }
public: public:
virtual void do_oop(oop* p); virtual void do_oop(oop* p);

View File

@@ -100,12 +100,6 @@ public:
// Pass along the argument to the superclass. // Pass along the argument to the superclass.
ModRefBarrierSet(int max_covered_regions) : ModRefBarrierSet(int max_covered_regions) :
BarrierSet(max_covered_regions) {} BarrierSet(max_covered_regions) {}
#ifndef PRODUCT
// Verifies that the given region contains no modified references.
virtual void verify_clean_region(MemRegion mr) = 0;
#endif
}; };
#endif // SHARE_VM_MEMORY_MODREFBARRIERSET_HPP #endif // SHARE_VM_MEMORY_MODREFBARRIERSET_HPP

View File

@@ -1194,7 +1194,7 @@ private:
// Whole-method sticky bits and flags // Whole-method sticky bits and flags
public: public:
enum { enum {
_trap_hist_limit = 16, // decoupled from Deoptimization::Reason_LIMIT _trap_hist_limit = 17, // decoupled from Deoptimization::Reason_LIMIT
_trap_hist_mask = max_jubyte, _trap_hist_mask = max_jubyte,
_extra_data_count = 4 // extra DataLayout headers, for trap history _extra_data_count = 4 // extra DataLayout headers, for trap history
}; // Public flag values }; // Public flag values

View File

@@ -310,13 +310,14 @@ const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_
return "inlining too deep"; return "inlining too deep";
} }
// We need to detect recursive inlining of method handle targets: if // detect direct and indirect recursive inlining
// the current method is a method handle adapter and one of the {
// callers is the same method as the callee, we bail out if // count the current method and the callee
// MaxRecursiveInlineLevel is hit. int inline_level = (method() == callee_method) ? 1 : 0;
if (method()->is_method_handle_adapter()) { if (inline_level > MaxRecursiveInlineLevel)
return "recursively inlining too deep";
// count callers of current method and callee
JVMState* jvms = caller_jvms(); JVMState* jvms = caller_jvms();
int inline_level = 0;
while (jvms != NULL && jvms->has_method()) { while (jvms != NULL && jvms->has_method()) {
if (jvms->method() == callee_method) { if (jvms->method() == callee_method) {
inline_level++; inline_level++;
@@ -327,10 +328,6 @@ const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_
} }
} }
if (method() == callee_method && inline_depth() > MaxRecursiveInlineLevel) {
return "recursively inlining too deep";
}
int size = callee_method->code_size(); int size = callee_method->code_size();
if (UseOldInlining && ClipInlining if (UseOldInlining && ClipInlining
@@ -376,7 +373,6 @@ bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* call
return true; return true;
} }
#ifndef PRODUCT
//------------------------------print_inlining--------------------------------- //------------------------------print_inlining---------------------------------
// Really, the failure_msg can be a success message also. // Really, the failure_msg can be a success message also.
void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const { void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const {
@@ -388,7 +384,6 @@ void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const c
tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count()); tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count());
} }
} }
#endif
//------------------------------ok_to_inline----------------------------------- //------------------------------ok_to_inline-----------------------------------
WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallProfile& profile, WarmCallInfo* initial_wci) { WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallProfile& profile, WarmCallInfo* initial_wci) {

View File

@@ -183,6 +183,21 @@
develop(bool, TraceLoopOpts, false, \ develop(bool, TraceLoopOpts, false, \
"Trace executed loop optimizations") \ "Trace executed loop optimizations") \
\ \
diagnostic(bool, LoopLimitCheck, true, \
"Generate a loop limits check for overflow") \
\
develop(bool, TraceLoopLimitCheck, false, \
"Trace generation of loop limits checks") \
\
diagnostic(bool, RangeLimitCheck, true, \
"Additional overflow checks during range check elimination") \
\
develop(bool, TraceRangeLimitCheck, false, \
"Trace additional overflow checks in RCE") \
\
diagnostic(bool, UnrollLimitCheck, true, \
"Additional overflow checks during loop unroll") \
\
product(bool, OptimizeFill, false, \ product(bool, OptimizeFill, false, \
"convert fill/copy loops into intrinsic") \ "convert fill/copy loops into intrinsic") \
\ \

View File

@@ -1373,7 +1373,7 @@ static void split_once(PhaseIterGVN *igvn, Node *phi, Node *val, Node *n, Node *
// Clone loop predicates // Clone loop predicates
if (predicate_proj != NULL) { if (predicate_proj != NULL) {
newn = igvn->clone_loop_predicates(predicate_proj, newn); newn = igvn->clone_loop_predicates(predicate_proj, newn, !n->is_CountedLoop());
} }
// Now I can point to the new node. // Now I can point to the new node.

View File

@@ -156,6 +156,7 @@ macro(Lock)
macro(LogD) macro(LogD)
macro(Log10D) macro(Log10D)
macro(Loop) macro(Loop)
macro(LoopLimit)
macro(Mach) macro(Mach)
macro(MachProj) macro(MachProj)
macro(MaxI) macro(MaxI)

View File

@@ -3378,6 +3378,10 @@ void GraphKit::add_predicate(int nargs) {
if (UseLoopPredicate) { if (UseLoopPredicate) {
add_predicate_impl(Deoptimization::Reason_predicate, nargs); add_predicate_impl(Deoptimization::Reason_predicate, nargs);
} }
// loop's limit check predicate should be near the loop.
if (LoopLimitCheck) {
add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs);
}
} }
//----------------------------- store barriers ---------------------------- //----------------------------- store barriers ----------------------------

View File

@@ -236,6 +236,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
} }
Node* predicate_c = NULL; Node* predicate_c = NULL;
Node* predicate_x = NULL; Node* predicate_x = NULL;
bool counted_loop = r->is_CountedLoop();
Node *region_c = new (igvn->C, req_c + 1) RegionNode(req_c + 1); Node *region_c = new (igvn->C, req_c + 1) RegionNode(req_c + 1);
Node *phi_c = con1; Node *phi_c = con1;
@@ -294,16 +295,16 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
if (predicate_c != NULL) { if (predicate_c != NULL) {
assert(predicate_x == NULL, "only one predicate entry expected"); assert(predicate_x == NULL, "only one predicate entry expected");
// Clone loop predicates to each path // Clone loop predicates to each path
iff_c_t = igvn->clone_loop_predicates(predicate_c, iff_c_t); iff_c_t = igvn->clone_loop_predicates(predicate_c, iff_c_t, !counted_loop);
iff_c_f = igvn->clone_loop_predicates(predicate_c, iff_c_f); iff_c_f = igvn->clone_loop_predicates(predicate_c, iff_c_f, !counted_loop);
} }
Node *iff_x_t = phase->transform(new (igvn->C, 1) IfTrueNode (iff_x)); Node *iff_x_t = phase->transform(new (igvn->C, 1) IfTrueNode (iff_x));
Node *iff_x_f = phase->transform(new (igvn->C, 1) IfFalseNode(iff_x)); Node *iff_x_f = phase->transform(new (igvn->C, 1) IfFalseNode(iff_x));
if (predicate_x != NULL) { if (predicate_x != NULL) {
assert(predicate_c == NULL, "only one predicate entry expected"); assert(predicate_c == NULL, "only one predicate entry expected");
// Clone loop predicates to each path // Clone loop predicates to each path
iff_x_t = igvn->clone_loop_predicates(predicate_x, iff_x_t); iff_x_t = igvn->clone_loop_predicates(predicate_x, iff_x_t, !counted_loop);
iff_x_f = igvn->clone_loop_predicates(predicate_x, iff_x_f); iff_x_f = igvn->clone_loop_predicates(predicate_x, iff_x_f, !counted_loop);
} }
// Merge the TRUE paths // Merge the TRUE paths
@@ -545,6 +546,7 @@ static void adjust_check(Node* proj, Node* range, Node* index,
Node *new_bol = gvn->transform( new (gvn->C, 2) BoolNode( new_cmp, bol->as_Bool()->_test._test ) ); Node *new_bol = gvn->transform( new (gvn->C, 2) BoolNode( new_cmp, bol->as_Bool()->_test._test ) );
igvn->hash_delete( iff ); igvn->hash_delete( iff );
iff->set_req_X( 1, new_bol, igvn ); iff->set_req_X( 1, new_bol, igvn );
igvn->_worklist.push( iff );
} }
//------------------------------up_one_dom------------------------------------- //------------------------------up_one_dom-------------------------------------

View File

@@ -867,12 +867,10 @@ Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1, Node* cnt1
Node* str1_offset = make_load(no_ctrl, str1_offseta, TypeInt::INT, T_INT, string_type->add_offset(offset_offset)); Node* str1_offset = make_load(no_ctrl, str1_offseta, TypeInt::INT, T_INT, string_type->add_offset(offset_offset));
Node* str1_start = array_element_address(str1_value, str1_offset, T_CHAR); Node* str1_start = array_element_address(str1_value, str1_offset, T_CHAR);
// Pin loads from String::equals() argument since it could be NULL.
Node* str2_ctrl = (opcode == Op_StrEquals) ? control() : no_ctrl;
Node* str2_valuea = basic_plus_adr(str2, str2, value_offset); Node* str2_valuea = basic_plus_adr(str2, str2, value_offset);
Node* str2_value = make_load(str2_ctrl, str2_valuea, value_type, T_OBJECT, string_type->add_offset(value_offset)); Node* str2_value = make_load(no_ctrl, str2_valuea, value_type, T_OBJECT, string_type->add_offset(value_offset));
Node* str2_offseta = basic_plus_adr(str2, str2, offset_offset); Node* str2_offseta = basic_plus_adr(str2, str2, offset_offset);
Node* str2_offset = make_load(str2_ctrl, str2_offseta, TypeInt::INT, T_INT, string_type->add_offset(offset_offset)); Node* str2_offset = make_load(no_ctrl, str2_offseta, TypeInt::INT, T_INT, string_type->add_offset(offset_offset));
Node* str2_start = array_element_address(str2_value, str2_offset, T_CHAR); Node* str2_start = array_element_address(str2_value, str2_offset, T_CHAR);
Node* result = NULL; Node* result = NULL;
@@ -1012,14 +1010,15 @@ bool LibraryCallKit::inline_string_equals() {
if (!stopped()) { if (!stopped()) {
// Properly cast the argument to String // Properly cast the argument to String
argument = _gvn.transform(new (C, 2) CheckCastPPNode(control(), argument, string_type)); argument = _gvn.transform(new (C, 2) CheckCastPPNode(control(), argument, string_type));
// This path is taken only when argument's type is String:NotNull.
argument = cast_not_null(argument, false);
// Get counts for string and argument // Get counts for string and argument
Node* receiver_cnta = basic_plus_adr(receiver, receiver, count_offset); Node* receiver_cnta = basic_plus_adr(receiver, receiver, count_offset);
receiver_cnt = make_load(no_ctrl, receiver_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset)); receiver_cnt = make_load(no_ctrl, receiver_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
// Pin load from argument string since it could be NULL.
Node* argument_cnta = basic_plus_adr(argument, argument, count_offset); Node* argument_cnta = basic_plus_adr(argument, argument, count_offset);
argument_cnt = make_load(control(), argument_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset)); argument_cnt = make_load(no_ctrl, argument_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
// Check for receiver count != argument count // Check for receiver count != argument count
Node* cmp = _gvn.transform( new(C, 3) CmpINode(receiver_cnt, argument_cnt) ); Node* cmp = _gvn.transform( new(C, 3) CmpINode(receiver_cnt, argument_cnt) );

View File

@@ -341,7 +341,7 @@ ProjNode* PhaseIdealLoop::move_predicate(ProjNode* predicate_proj, Node* new_ent
// Cut predicate from old place. // Cut predicate from old place.
Node* old = predicate_proj; Node* old = predicate_proj;
igvn->_worklist.push(old); igvn->_worklist.push(old);
for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) { for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin;) {
Node* use = old->last_out(i); // for each use... Node* use = old->last_out(i); // for each use...
igvn->hash_delete(use); igvn->hash_delete(use);
igvn->_worklist.push(use); igvn->_worklist.push(use);
@@ -384,24 +384,25 @@ ProjNode* PhaseIdealLoop::move_predicate(ProjNode* predicate_proj, Node* new_ent
//--------------------------clone_loop_predicates----------------------- //--------------------------clone_loop_predicates-----------------------
// Interface from IGVN // Interface from IGVN
Node* PhaseIterGVN::clone_loop_predicates(Node* old_entry, Node* new_entry) { Node* PhaseIterGVN::clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) {
return PhaseIdealLoop::clone_loop_predicates(old_entry, new_entry, false, NULL, this); return PhaseIdealLoop::clone_loop_predicates(old_entry, new_entry, false, clone_limit_check, NULL, this);
} }
Node* PhaseIterGVN::move_loop_predicates(Node* old_entry, Node* new_entry) { Node* PhaseIterGVN::move_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) {
return PhaseIdealLoop::clone_loop_predicates(old_entry, new_entry, true, NULL, this); return PhaseIdealLoop::clone_loop_predicates(old_entry, new_entry, true, clone_limit_check, NULL, this);
} }
// Interface from PhaseIdealLoop // Interface from PhaseIdealLoop
Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry) { Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) {
return clone_loop_predicates(old_entry, new_entry, false, this, &this->_igvn); return clone_loop_predicates(old_entry, new_entry, false, clone_limit_check, this, &this->_igvn);
} }
Node* PhaseIdealLoop::move_loop_predicates(Node* old_entry, Node* new_entry) { Node* PhaseIdealLoop::move_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) {
return clone_loop_predicates(old_entry, new_entry, true, this, &this->_igvn); return clone_loop_predicates(old_entry, new_entry, true, clone_limit_check, this, &this->_igvn);
} }
// Clone loop predicates to cloned loops (peeled, unswitched, split_if). // Clone loop predicates to cloned loops (peeled, unswitched, split_if).
Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry, Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry,
bool move_predicates, bool move_predicates,
bool clone_limit_check,
PhaseIdealLoop* loop_phase, PhaseIdealLoop* loop_phase,
PhaseIterGVN* igvn) { PhaseIterGVN* igvn) {
#ifdef ASSERT #ifdef ASSERT
@@ -413,10 +414,16 @@ Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry,
#endif #endif
// Search original predicates // Search original predicates
Node* entry = old_entry; Node* entry = old_entry;
ProjNode* limit_check_proj = NULL;
if (LoopLimitCheck) {
limit_check_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
if (limit_check_proj != NULL) {
entry = entry->in(0)->in(0);
}
}
if (UseLoopPredicate) { if (UseLoopPredicate) {
ProjNode* predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); ProjNode* predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
if (predicate_proj != NULL) { // right pattern that can be used by loop predication if (predicate_proj != NULL) { // right pattern that can be used by loop predication
assert(entry->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be");
if (move_predicates) { if (move_predicates) {
new_entry = move_predicate(predicate_proj, new_entry, new_entry = move_predicate(predicate_proj, new_entry,
Deoptimization::Reason_predicate, Deoptimization::Reason_predicate,
@@ -435,11 +442,37 @@ Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry,
} }
} }
} }
if (limit_check_proj != NULL && clone_limit_check) {
// Clone loop limit check last to insert it before loop.
// Don't clone a limit check which was already finalized
// for this counted loop (only one limit check is needed).
if (move_predicates) {
new_entry = move_predicate(limit_check_proj, new_entry,
Deoptimization::Reason_loop_limit_check,
loop_phase, igvn);
assert(new_entry == limit_check_proj, "old limit check fall through projection");
} else {
new_entry = clone_predicate(limit_check_proj, new_entry,
Deoptimization::Reason_loop_limit_check,
loop_phase, igvn);
assert(new_entry != NULL && new_entry->is_Proj(), "IfTrue or IfFalse after clone limit check");
}
if (TraceLoopLimitCheck) {
tty->print_cr("Loop Limit Check %s: ", move_predicates ? "moved" : "cloned");
debug_only( new_entry->in(0)->dump(); )
}
}
return new_entry; return new_entry;
} }
//--------------------------eliminate_loop_predicates----------------------- //--------------------------eliminate_loop_predicates-----------------------
void PhaseIdealLoop::eliminate_loop_predicates(Node* entry) { void PhaseIdealLoop::eliminate_loop_predicates(Node* entry) {
if (LoopLimitCheck) {
Node* predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
if (predicate != NULL) {
entry = entry->in(0)->in(0);
}
}
if (UseLoopPredicate) { if (UseLoopPredicate) {
ProjNode* predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); ProjNode* predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
if (predicate_proj != NULL) { // right pattern that can be used by loop predication if (predicate_proj != NULL) { // right pattern that can be used by loop predication
@@ -456,10 +489,15 @@ void PhaseIdealLoop::eliminate_loop_predicates(Node* entry) {
// Skip related predicates. // Skip related predicates.
Node* PhaseIdealLoop::skip_loop_predicates(Node* entry) { Node* PhaseIdealLoop::skip_loop_predicates(Node* entry) {
Node* predicate = NULL; Node* predicate = NULL;
if (LoopLimitCheck) {
predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
if (predicate != NULL) {
entry = entry->in(0)->in(0);
}
}
if (UseLoopPredicate) { if (UseLoopPredicate) {
predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
if (predicate != NULL) { // right pattern that can be used by loop predication if (predicate != NULL) { // right pattern that can be used by loop predication
assert(entry->is_Proj() && entry->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be");
IfNode* iff = entry->in(0)->as_If(); IfNode* iff = entry->in(0)->as_If();
ProjNode* uncommon_proj = iff->proj_out(1 - entry->as_Proj()->_con); ProjNode* uncommon_proj = iff->proj_out(1 - entry->as_Proj()->_con);
Node* rgn = uncommon_proj->unique_ctrl_out(); Node* rgn = uncommon_proj->unique_ctrl_out();
@@ -491,10 +529,15 @@ ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimi
// Find a predicate // Find a predicate
Node* PhaseIdealLoop::find_predicate(Node* entry) { Node* PhaseIdealLoop::find_predicate(Node* entry) {
Node* predicate = NULL; Node* predicate = NULL;
if (LoopLimitCheck) {
predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
if (predicate != NULL) { // right pattern that can be used by loop predication
return entry;
}
}
if (UseLoopPredicate) { if (UseLoopPredicate) {
predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
if (predicate != NULL) { // right pattern that can be used by loop predication if (predicate != NULL) { // right pattern that can be used by loop predication
assert(entry->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be");
return entry; return entry;
} }
} }
@@ -658,7 +701,7 @@ bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invari
Node* range = cmp->in(2); Node* range = cmp->in(2);
if (range->Opcode() != Op_LoadRange) { if (range->Opcode() != Op_LoadRange) {
const TypeInt* tint = phase->_igvn.type(range)->isa_int(); const TypeInt* tint = phase->_igvn.type(range)->isa_int();
if (!OptimizeFill || tint == NULL || tint->empty() || tint->_lo < 0) { if (tint == NULL || tint->empty() || tint->_lo < 0) {
// Allow predication on positive values that aren't LoadRanges. // Allow predication on positive values that aren't LoadRanges.
// This allows optimization of loops where the length of the // This allows optimization of loops where the length of the
// array is a known value and doesn't need to be loaded back // array is a known value and doesn't need to be loaded back
@@ -696,36 +739,49 @@ bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invari
// max(scale*i + offset) = scale*(limit-stride) + offset // max(scale*i + offset) = scale*(limit-stride) + offset
// (2) stride*scale < 0 // (2) stride*scale < 0
// max(scale*i + offset) = scale*init + offset // max(scale*i + offset) = scale*init + offset
BoolNode* PhaseIdealLoop::rc_predicate(Node* ctrl, BoolNode* PhaseIdealLoop::rc_predicate(IdealLoopTree *loop, Node* ctrl,
int scale, Node* offset, int scale, Node* offset,
Node* init, Node* limit, Node* stride, Node* init, Node* limit, Node* stride,
Node* range, bool upper) { Node* range, bool upper) {
DEBUG_ONLY(ttyLocker ttyl); stringStream* predString = NULL;
if (TraceLoopPredicate) tty->print("rc_predicate "); if (TraceLoopPredicate) {
predString = new stringStream();
predString->print("rc_predicate ");
}
Node* max_idx_expr = init; Node* max_idx_expr = init;
int stride_con = stride->get_int(); int stride_con = stride->get_int();
if ((stride_con > 0) == (scale > 0) == upper) { if ((stride_con > 0) == (scale > 0) == upper) {
if (LoopLimitCheck) {
// With LoopLimitCheck limit is not exact.
// Calculate exact limit here.
// Note, counted loop's test is '<' or '>'.
limit = exact_limit(loop);
max_idx_expr = new (C, 3) SubINode(limit, stride); max_idx_expr = new (C, 3) SubINode(limit, stride);
register_new_node(max_idx_expr, ctrl); register_new_node(max_idx_expr, ctrl);
if (TraceLoopPredicate) tty->print("(limit - stride) "); if (TraceLoopPredicate) predString->print("(limit - stride) ");
} else { } else {
if (TraceLoopPredicate) tty->print("init "); max_idx_expr = new (C, 3) SubINode(limit, stride);
register_new_node(max_idx_expr, ctrl);
if (TraceLoopPredicate) predString->print("(limit - stride) ");
}
} else {
if (TraceLoopPredicate) predString->print("init ");
} }
if (scale != 1) { if (scale != 1) {
ConNode* con_scale = _igvn.intcon(scale); ConNode* con_scale = _igvn.intcon(scale);
max_idx_expr = new (C, 3) MulINode(max_idx_expr, con_scale); max_idx_expr = new (C, 3) MulINode(max_idx_expr, con_scale);
register_new_node(max_idx_expr, ctrl); register_new_node(max_idx_expr, ctrl);
if (TraceLoopPredicate) tty->print("* %d ", scale); if (TraceLoopPredicate) predString->print("* %d ", scale);
} }
if (offset && (!offset->is_Con() || offset->get_int() != 0)){ if (offset && (!offset->is_Con() || offset->get_int() != 0)){
max_idx_expr = new (C, 3) AddINode(max_idx_expr, offset); max_idx_expr = new (C, 3) AddINode(max_idx_expr, offset);
register_new_node(max_idx_expr, ctrl); register_new_node(max_idx_expr, ctrl);
if (TraceLoopPredicate) if (TraceLoopPredicate)
if (offset->is_Con()) tty->print("+ %d ", offset->get_int()); if (offset->is_Con()) predString->print("+ %d ", offset->get_int());
else tty->print("+ offset "); else predString->print("+ offset ");
} }
CmpUNode* cmp = new (C, 3) CmpUNode(max_idx_expr, range); CmpUNode* cmp = new (C, 3) CmpUNode(max_idx_expr, range);
@@ -733,7 +789,10 @@ BoolNode* PhaseIdealLoop::rc_predicate(Node* ctrl,
BoolNode* bol = new (C, 2) BoolNode(cmp, BoolTest::lt); BoolNode* bol = new (C, 2) BoolNode(cmp, BoolTest::lt);
register_new_node(bol, ctrl); register_new_node(bol, ctrl);
if (TraceLoopPredicate) tty->print_cr("<u range"); if (TraceLoopPredicate) {
predString->print_cr("<u range");
tty->print(predString->as_string());
}
return bol; return bol;
} }
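rc_predicate() now accumulates its TraceLoopPredicate output in a stringStream and emits it with a single print at the end, instead of interleaving tty writes with node creation under a tty lock. The same buffer-then-print pattern in portable C++ is sketched below, with std::ostringstream standing in for HotSpot's stringStream; the function and its parameters are invented for illustration:

  #include <sstream>
  #include <cstdio>

  // Assemble a multi-part trace message off to the side, then emit it with a
  // single call so concurrent printers cannot interleave the fragments.
  void trace_predicate(int scale, int offset, bool upper) {
    std::ostringstream msg;                 // stand-in for HotSpot's stringStream
    msg << "rc_predicate ";
    if (upper)       msg << "(limit - stride) ";
    if (scale != 1)  msg << "* " << scale << ' ';
    if (offset != 0) msg << "+ " << offset << ' ';
    msg << "<u range";
    printf("%s\n", msg.str().c_str());      // one print at the very end
  }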
@@ -746,29 +805,36 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
// Could be a simple region when irreducible loops are present. // Could be a simple region when irreducible loops are present.
return false; return false;
} }
LoopNode* head = loop->_head->as_Loop();
if (loop->_head->unique_ctrl_out()->Opcode() == Op_NeverBranch) { if (head->unique_ctrl_out()->Opcode() == Op_NeverBranch) {
// do nothing for infinite loops // do nothing for infinite loops
return false; return false;
} }
CountedLoopNode *cl = NULL; CountedLoopNode *cl = NULL;
if (loop->_head->is_CountedLoop()) { if (head->is_CountedLoop()) {
cl = loop->_head->as_CountedLoop(); cl = head->as_CountedLoop();
// do nothing for iteration-splitted loops // do nothing for iteration-splitted loops
if (!cl->is_normal_loop()) return false; if (!cl->is_normal_loop()) return false;
} }
LoopNode *lpn = loop->_head->as_Loop(); Node* entry = head->in(LoopNode::EntryControl);
Node* entry = lpn->in(LoopNode::EntryControl); ProjNode *predicate_proj = NULL;
// Loop limit check predicate should be near the loop.
if (LoopLimitCheck) {
predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
if (predicate_proj != NULL)
entry = predicate_proj->in(0)->in(0);
}
ProjNode *predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
if (!predicate_proj) { if (!predicate_proj) {
#ifndef PRODUCT #ifndef PRODUCT
if (TraceLoopPredicate) { if (TraceLoopPredicate) {
tty->print("missing predicate:"); tty->print("missing predicate:");
loop->dump_head(); loop->dump_head();
lpn->dump(1); head->dump(1);
} }
#endif #endif
return false; return false;
@@ -782,7 +848,6 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
// Create list of if-projs such that a newer proj dominates all older // Create list of if-projs such that a newer proj dominates all older
// projs in the list, and they all dominate loop->tail() // projs in the list, and they all dominate loop->tail()
Node_List if_proj_list(area); Node_List if_proj_list(area);
LoopNode *head = loop->_head->as_Loop();
Node *current_proj = loop->tail(); //start from tail Node *current_proj = loop->tail(); //start from tail
while (current_proj != head) { while (current_proj != head) {
if (loop == get_loop(current_proj) && // still in the loop ? if (loop == get_loop(current_proj) && // still in the loop ?
@@ -856,8 +921,8 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
const Node* cmp = bol->in(1)->as_Cmp(); const Node* cmp = bol->in(1)->as_Cmp();
Node* idx = cmp->in(1); Node* idx = cmp->in(1);
assert(!invar.is_invariant(idx), "index is variant"); assert(!invar.is_invariant(idx), "index is variant");
assert(cmp->in(2)->Opcode() == Op_LoadRange || OptimizeFill, "must be");
Node* rng = cmp->in(2); Node* rng = cmp->in(2);
assert(rng->Opcode() == Op_LoadRange || _igvn.type(rng)->is_int() >= 0, "must be");
assert(invar.is_invariant(rng), "range must be invariant"); assert(invar.is_invariant(rng), "range must be invariant");
int scale = 1; int scale = 1;
Node* offset = zero; Node* offset = zero;
@@ -886,14 +951,14 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
} }
// Test the lower bound // Test the lower bound
Node* lower_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, false); Node* lower_bound_bol = rc_predicate(loop, ctrl, scale, offset, init, limit, stride, rng, false);
IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If(); IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If();
_igvn.hash_delete(lower_bound_iff); _igvn.hash_delete(lower_bound_iff);
lower_bound_iff->set_req(1, lower_bound_bol); lower_bound_iff->set_req(1, lower_bound_bol);
if (TraceLoopPredicate) tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx); if (TraceLoopPredicate) tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx);
// Test the upper bound // Test the upper bound
Node* upper_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, true); Node* upper_bound_bol = rc_predicate(loop, ctrl, scale, offset, init, limit, stride, rng, true);
IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If(); IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If();
_igvn.hash_delete(upper_bound_iff); _igvn.hash_delete(upper_bound_iff);
upper_bound_iff->set_req(1, upper_bound_bol); upper_bound_iff->set_req(1, upper_bound_bol);
@@ -957,4 +1022,3 @@ bool IdealLoopTree::loop_predication( PhaseIdealLoop *phase) {
return hoisted; return hoisted;
} }
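Informally, the lower- and upper-bound predicates built by the two rc_predicate calls above hoist a loop-body range check out of the loop: the index of the first and of the last iteration is tested once, up front, and the body then runs without per-iteration checks. A rough standalone sketch of that effect (hypothetical bounds; deoptimize() is only a stand-in for the uncommon-trap path):

#include <cassert>
#include <cstdint>
#include <vector>

static bool deopt_requested = false;
static void deoptimize() { deopt_requested = true; }   // stand-in for the trap path

int main() {
  std::vector<int> a(100, 1);
  int init = 0, limit = 90, stride = 3, sum = 0;

  // Hoisted checks: test the first and the last index once, as unsigned
  // compares against the array length (mirrors the two predicates above).
  int last = init + ((limit - init - 1) / stride) * stride;
  if ((uint32_t)init >= a.size() || (uint32_t)last >= a.size())
    deoptimize();

  if (!deopt_requested) {
    for (int i = init; i < limit; i += stride)
      sum += a[i];                        // body is now free of range checks
  }
  assert(sum == 30 && !deopt_requested);
  return 0;
}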
@@ -83,7 +83,7 @@ void IdealLoopTree::compute_exact_trip_count( PhaseIdealLoop *phase ) {
#ifdef ASSERT #ifdef ASSERT
BoolTest::mask bt = cl->loopexit()->test_trip(); BoolTest::mask bt = cl->loopexit()->test_trip();
assert(bt == BoolTest::lt || bt == BoolTest::gt || assert(bt == BoolTest::lt || bt == BoolTest::gt ||
bt == BoolTest::ne, "canonical test is expected"); (bt == BoolTest::ne && !LoopLimitCheck), "canonical test is expected");
#endif #endif
Node* init_n = cl->init_trip(); Node* init_n = cl->init_trip();
@@ -510,7 +510,7 @@ void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {
// the pre-loop with only 1 user (the new peeled iteration), but the // the pre-loop with only 1 user (the new peeled iteration), but the
// peeled-loop backedge has 2 users. // peeled-loop backedge has 2 users.
Node* new_exit_value = old_new[head->in(LoopNode::LoopBackControl)->_idx]; Node* new_exit_value = old_new[head->in(LoopNode::LoopBackControl)->_idx];
new_exit_value = move_loop_predicates(entry, new_exit_value); new_exit_value = move_loop_predicates(entry, new_exit_value, !counted_loop);
_igvn.hash_delete(head); _igvn.hash_delete(head);
head->set_req(LoopNode::EntryControl, new_exit_value); head->set_req(LoopNode::EntryControl, new_exit_value);
for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) { for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
@@ -593,6 +593,12 @@ bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
return false; return false;
} }
// Fully unroll a loop with few iterations regardless next
// conditions since following loop optimizations will split
// such loop anyway (pre-main-post).
if (trip_count <= 3)
return true;
// Take into account that after unroll conjoined heads and tails will fold, // Take into account that after unroll conjoined heads and tails will fold,
// otherwise policy_unroll() may allow more unrolling than max unrolling. // otherwise policy_unroll() may allow more unrolling than max unrolling.
uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count; uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
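The estimate above credits the folding of conjoined heads and tails: only one loop's worth of overhead survives, and just the payload is paid once per trip. A worked sketch with made-up numbers (the EMPTY_LOOP_SIZE and node counts here are purely illustrative, not taken from the source):

#include <cassert>

int main() {
  const unsigned EMPTY_LOOP_SIZE = 7;    // hypothetical overhead node count
  unsigned body_size  = 17;              // hypothetical loop body size in nodes
  unsigned trip_count = 3;
  // A naive estimate would charge body_size * trip_count = 51 nodes; after the
  // conjoined heads and tails fold, only the payload is multiplied.
  unsigned new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
  assert(new_body_size == 37);
  return 0;
}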
@@ -605,15 +611,6 @@ bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
return false; return false;
} }
// Currently we don't have policy to optimize one iteration loops.
// Maximally unrolling transformation is used for that:
// it is peeled and the original loop become non reachable (dead).
// Also fully unroll a loop with few iterations regardless next
// conditions since following loop optimizations will split
// such loop anyway (pre-main-post).
if (trip_count <= 3)
return true;
// Do not unroll a loop with String intrinsics code. // Do not unroll a loop with String intrinsics code.
// String intrinsics are large and have loops. // String intrinsics are large and have loops.
for (uint k = 0; k < _body.size(); k++) { for (uint k = 0; k < _body.size(); k++) {
@@ -632,6 +629,8 @@ bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
} }
#define MAX_UNROLL 16 // maximum number of unrolls for main loop
//------------------------------policy_unroll---------------------------------- //------------------------------policy_unroll----------------------------------
// Return TRUE or FALSE if the loop should be unrolled or not. Unroll if // Return TRUE or FALSE if the loop should be unrolled or not. Unroll if
// the loop is a CountedLoop and the body is small enough. // the loop is a CountedLoop and the body is small enough.
@@ -643,13 +642,15 @@ bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {
if (!cl->is_valid_counted_loop()) if (!cl->is_valid_counted_loop())
return false; // Malformed counted loop return false; // Malformed counted loop
// protect against over-unrolling // Protect against over-unrolling.
if (cl->trip_count() <= 1) return false; // After split at least one iteration will be executed in pre-loop.
if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;
// Check for stride being a small enough constant
if (abs(cl->stride_con()) > (1<<3)) return false;
int future_unroll_ct = cl->unrolled_count() * 2; int future_unroll_ct = cl->unrolled_count() * 2;
if (future_unroll_ct > MAX_UNROLL) return false;
// Check for initial stride being a small enough constant
if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;
// Don't unroll if the next round of unrolling would push us // Don't unroll if the next round of unrolling would push us
// over the expected trip count of the loop. One is subtracted // over the expected trip count of the loop. One is subtracted
@@ -675,6 +676,7 @@ bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {
Node *init_n = cl->init_trip(); Node *init_n = cl->init_trip();
Node *limit_n = cl->limit(); Node *limit_n = cl->limit();
int stride_con = cl->stride_con();
// Non-constant bounds. // Non-constant bounds.
// Protect against over-unrolling when init or/and limit are not constant // Protect against over-unrolling when init or/and limit are not constant
// (so that trip_count's init value is maxint) but iv range is known. // (so that trip_count's init value is maxint) but iv range is known.
@@ -684,7 +686,7 @@ bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {
if (phi != NULL) { if (phi != NULL) {
assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi."); assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi.");
const TypeInt* iv_type = phase->_igvn.type(phi)->is_int(); const TypeInt* iv_type = phase->_igvn.type(phi)->is_int();
int next_stride = cl->stride_con() * 2; // stride after this unroll int next_stride = stride_con * 2; // stride after this unroll
if (next_stride > 0) { if (next_stride > 0) {
if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow
iv_type->_lo + next_stride > iv_type->_hi) { iv_type->_lo + next_stride > iv_type->_hi) {
@@ -699,15 +701,19 @@ bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {
} }
} }
// After unroll limit will be adjusted: new_limit = limit-stride.
// Bailout if adjustment overflow.
const TypeInt* limit_type = phase->_igvn.type(limit_n)->is_int();
if (stride_con > 0 && ((limit_type->_hi - stride_con) >= limit_type->_hi) ||
stride_con < 0 && ((limit_type->_lo - stride_con) <= limit_type->_lo))
return false; // overflow
// Adjust body_size to determine if we unroll or not // Adjust body_size to determine if we unroll or not
uint body_size = _body.size(); uint body_size = _body.size();
// Key test to unroll CaffeineMark's Logic test
int xors_in_loop = 0;
// Also count ModL, DivL and MulL which expand mightly // Also count ModL, DivL and MulL which expand mightly
for (uint k = 0; k < _body.size(); k++) { for (uint k = 0; k < _body.size(); k++) {
Node* n = _body.at(k); Node* n = _body.at(k);
switch (n->Opcode()) { switch (n->Opcode()) {
case Op_XorI: xors_in_loop++; break; // CaffeineMark's Logic test
case Op_ModL: body_size += 30; break; case Op_ModL: body_size += 30; break;
case Op_DivL: body_size += 30; break; case Op_DivL: body_size += 30; break;
case Op_MulL: body_size += 10; break; case Op_MulL: body_size += 10; break;
@@ -724,7 +730,6 @@ bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {
// Check for being too big // Check for being too big
if (body_size > (uint)LoopUnrollLimit) { if (body_size > (uint)LoopUnrollLimit) {
if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
// Normal case: loop too big // Normal case: loop too big
return false; return false;
} }
@@ -747,28 +752,31 @@ bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
// Return TRUE or FALSE if the loop should be range-check-eliminated. // Return TRUE or FALSE if the loop should be range-check-eliminated.
// Actually we do iteration-splitting, a more powerful form of RCE. // Actually we do iteration-splitting, a more powerful form of RCE.
bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const { bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
if( !RangeCheckElimination ) return false; if (!RangeCheckElimination) return false;
CountedLoopNode *cl = _head->as_CountedLoop(); CountedLoopNode *cl = _head->as_CountedLoop();
// If we unrolled with no intention of doing RCE and we later // If we unrolled with no intention of doing RCE and we later
// changed our minds, we got no pre-loop. Either we need to // changed our minds, we got no pre-loop. Either we need to
// make a new pre-loop, or we gotta disallow RCE. // make a new pre-loop, or we gotta disallow RCE.
if( cl->is_main_no_pre_loop() ) return false; // Disallowed for now. if (cl->is_main_no_pre_loop()) return false; // Disallowed for now.
Node *trip_counter = cl->phi(); Node *trip_counter = cl->phi();
// Check loop body for tests of trip-counter plus loop-invariant vs // Check loop body for tests of trip-counter plus loop-invariant vs
// loop-invariant. // loop-invariant.
for( uint i = 0; i < _body.size(); i++ ) { for (uint i = 0; i < _body.size(); i++) {
Node *iff = _body[i]; Node *iff = _body[i];
if( iff->Opcode() == Op_If ) { // Test? if (iff->Opcode() == Op_If) { // Test?
// Comparing trip+off vs limit // Comparing trip+off vs limit
Node *bol = iff->in(1); Node *bol = iff->in(1);
if( bol->req() != 2 ) continue; // dead constant test if (bol->req() != 2) continue; // dead constant test
if (!bol->is_Bool()) { if (!bol->is_Bool()) {
assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only"); assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only");
continue; continue;
} }
if (bol->as_Bool()->_test._test == BoolTest::ne)
continue; // not RC
Node *cmp = bol->in(1); Node *cmp = bol->in(1);
Node *rc_exp = cmp->in(1); Node *rc_exp = cmp->in(1);
@@ -1064,6 +1072,7 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
// negative stride use > // negative stride use >
if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) { if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) {
assert(!LoopLimitCheck, "only canonical tests (lt or gt) are expected");
BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt; BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt;
// Modify pre loop end condition // Modify pre loop end condition
@@ -1090,6 +1099,9 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
main_head->set_main_loop(); main_head->set_main_loop();
if( peel_only ) main_head->set_main_no_pre_loop(); if( peel_only ) main_head->set_main_no_pre_loop();
// Subtract a trip count for the pre-loop.
main_head->set_trip_count(main_head->trip_count() - 1);
// It's difficult to be precise about the trip-counts // It's difficult to be precise about the trip-counts
// for the pre/post loops. They are usually very short, // for the pre/post loops. They are usually very short,
// so guess that 4 trips is a reasonable value. // so guess that 4 trips is a reasonable value.
@@ -1141,7 +1153,8 @@ void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool ad
Node *stride = loop_head->stride(); Node *stride = loop_head->stride();
Node *opaq = NULL; Node *opaq = NULL;
if( adjust_min_trip ) { // If not maximally unrolling, need adjustment if (adjust_min_trip) { // If not maximally unrolling, need adjustment
// Search for zero-trip guard.
assert( loop_head->is_main_loop(), "" ); assert( loop_head->is_main_loop(), "" );
assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" ); assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
Node *iff = ctrl->in(0); Node *iff = ctrl->in(0);
@@ -1151,15 +1164,160 @@ void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool ad
Node *cmp = bol->in(1); Node *cmp = bol->in(1);
assert( cmp->Opcode() == Op_CmpI, "" ); assert( cmp->Opcode() == Op_CmpI, "" );
opaq = cmp->in(2); opaq = cmp->in(2);
// Occasionally it's possible for a pre-loop Opaque1 node to be // Occasionally it's possible for a zero-trip guard Opaque1 node to be
// optimized away and then another round of loop opts attempted. // optimized away and then another round of loop opts attempted.
// We can not optimize this particular loop in that case. // We can not optimize this particular loop in that case.
if( opaq->Opcode() != Op_Opaque1 ) if (opaq->Opcode() != Op_Opaque1)
return; // Cannot find pre-loop! Bail out! return; // Cannot find zero-trip guard! Bail out!
// Zero-trip test uses an 'opaque' node which is not shared.
assert(opaq->outcnt() == 1 && opaq->in(1) == limit, "");
} }
C->set_major_progress(); C->set_major_progress();
Node* new_limit = NULL;
if (UnrollLimitCheck) {
int stride_con = stride->get_int();
int stride_p = (stride_con > 0) ? stride_con : -stride_con;
uint old_trip_count = loop_head->trip_count();
// Verify that unroll policy result is still valid.
assert(old_trip_count > 1 &&
(!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity");
// Adjust loop limit to keep valid iterations number after unroll.
// Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride
// which may overflow.
if (!adjust_min_trip) {
assert(old_trip_count > 1 && (old_trip_count & 1) == 0,
"odd trip count for maximally unroll");
// Don't need to adjust limit for maximally unroll since trip count is even.
} else if (loop_head->has_exact_trip_count() && init->is_Con()) {
// Loop's limit is constant. Loop's init could be constant when pre-loop
// become peeled iteration.
long init_con = init->get_int();
// We can keep old loop limit if iterations count stays the same:
// old_trip_count == new_trip_count * 2
// Note: since old_trip_count >= 2 then new_trip_count >= 1
// so we also don't need to adjust zero trip test.
long limit_con = limit->get_int();
// (stride_con*2) not overflow since stride_con <= 8.
int new_stride_con = stride_con * 2;
int stride_m = new_stride_con - (stride_con > 0 ? 1 : -1);
long trip_count = (limit_con - init_con + stride_m)/new_stride_con;
// New trip count should satisfy next conditions.
assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity");
uint new_trip_count = (uint)trip_count;
adjust_min_trip = (old_trip_count != new_trip_count*2);
}
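// Worked example (illustrative only, values hypothetical): with init_con = 0,
// limit_con = 10, stride_con = 1 and old_trip_count = 10, the doubled stride
// gives new_stride_con = 2, stride_m = 1 and
//   trip_count = (10 - 0 + 1) / 2 = 5.
// Since old_trip_count == 2 * 5, the limit can stay as is and adjust_min_trip
// becomes false. With limit_con = 9 instead, old_trip_count = 9 != 2 * 5, so
// the limit and zero-trip guard still have to be adjusted below.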
if (adjust_min_trip) {
// Step 2: Adjust the trip limit if it is called for.
// The adjustment amount is -stride. Need to make sure if the
// adjustment underflows or overflows, then the main loop is skipped.
Node* cmp = loop_end->cmp_node();
assert(cmp->in(2) == limit, "sanity");
assert(opaq != NULL && opaq->in(1) == limit, "sanity");
// Verify that policy_unroll result is still valid.
const TypeInt* limit_type = _igvn.type(limit)->is_int();
assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) ||
stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity");
if (limit->is_Con()) {
// The check in policy_unroll and the assert above guarantee
// no underflow if limit is constant.
new_limit = _igvn.intcon(limit->get_int() - stride_con);
set_ctrl(new_limit, C->root());
} else {
// Limit is not constant.
{
// Separate limit by Opaque node in case it is an incremented
// variable from previous loop to avoid using pre-incremented
// value which could increase register pressure.
// Otherwise reorg_offsets() optimization will create a separate
// Opaque node for each use of trip-counter and as result
// zero trip guard limit will be different from loop limit.
assert(has_ctrl(opaq), "should have it");
Node* opaq_ctrl = get_ctrl(opaq);
limit = new (C, 2) Opaque2Node( C, limit );
register_new_node( limit, opaq_ctrl );
}
if (stride_con > 0 && ((limit_type->_lo - stride_con) < limit_type->_lo) ||
stride_con < 0 && ((limit_type->_hi - stride_con) > limit_type->_hi)) {
// No underflow.
new_limit = new (C, 3) SubINode(limit, stride);
} else {
// (limit - stride) may underflow.
// Clamp the adjustment value with MININT or MAXINT:
//
// new_limit = limit-stride
// if (stride > 0)
// new_limit = (limit < new_limit) ? MININT : new_limit;
// else
// new_limit = (limit > new_limit) ? MAXINT : new_limit;
//
BoolTest::mask bt = loop_end->test_trip();
assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
Node* adj_max = _igvn.intcon((stride_con > 0) ? min_jint : max_jint);
set_ctrl(adj_max, C->root());
Node* old_limit = NULL;
Node* adj_limit = NULL;
Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL;
if (loop_head->unrolled_count() > 1 &&
limit->is_CMove() && limit->Opcode() == Op_CMoveI &&
limit->in(CMoveNode::IfTrue) == adj_max &&
bol->as_Bool()->_test._test == bt &&
bol->in(1)->Opcode() == Op_CmpI &&
bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) {
// Loop was unrolled before.
// Optimize the limit to avoid nested CMove:
// use original limit as old limit.
old_limit = bol->in(1)->in(1);
// Adjust previous adjusted limit.
adj_limit = limit->in(CMoveNode::IfFalse);
adj_limit = new (C, 3) SubINode(adj_limit, stride);
} else {
old_limit = limit;
adj_limit = new (C, 3) SubINode(limit, stride);
}
assert(old_limit != NULL && adj_limit != NULL, "");
register_new_node( adj_limit, ctrl ); // adjust amount
Node* adj_cmp = new (C, 3) CmpINode(old_limit, adj_limit);
register_new_node( adj_cmp, ctrl );
Node* adj_bool = new (C, 2) BoolNode(adj_cmp, bt);
register_new_node( adj_bool, ctrl );
new_limit = new (C, 4) CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT);
}
register_new_node(new_limit, ctrl);
}
assert(new_limit != NULL, "");
// Replace in loop test.
_igvn.hash_delete(cmp);
cmp->set_req(2, new_limit);
// Step 3: Find the min-trip test guaranteed before a 'main' loop.
// Make it a 1-trip test (means at least 2 trips).
// Guard test uses an 'opaque' node which is not shared. Hence I
// can edit it's inputs directly. Hammer in the new limit for the
// minimum-trip guard.
assert(opaq->outcnt() == 1, "");
_igvn.hash_delete(opaq);
opaq->set_req(1, new_limit);
}
// Adjust max trip count. The trip count is intentionally rounded
// down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
// the main, unrolled, part of the loop will never execute as it is protected
// by the min-trip test. See bug 4834191 for a case where we over-unrolled
// and later determined that part of the unrolled loop was dead.
loop_head->set_trip_count(old_trip_count / 2);
// Double the count of original iterations in the unrolled loop body.
loop_head->double_unrolled_count();
} else { // LoopLimitCheck
// Adjust max trip count. The trip count is intentionally rounded // Adjust max trip count. The trip count is intentionally rounded
// down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll, // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
// the main, unrolled, part of the loop will never execute as it is protected // the main, unrolled, part of the loop will never execute as it is protected
@@ -1186,12 +1344,12 @@ void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool ad
register_new_node( rond, ctrl ); register_new_node( rond, ctrl );
Node *spn2 = new (C, 3) MulINode( rond, stride ); Node *spn2 = new (C, 3) MulINode( rond, stride );
register_new_node( spn2, ctrl ); register_new_node( spn2, ctrl );
Node *lim2 = new (C, 3) AddINode( spn2, init ); new_limit = new (C, 3) AddINode( spn2, init );
register_new_node( lim2, ctrl ); register_new_node( new_limit, ctrl );
// Hammer in the new limit // Hammer in the new limit
Node *ctrl2 = loop_end->in(0); Node *ctrl2 = loop_end->in(0);
Node *cmp2 = new (C, 3) CmpINode( loop_head->incr(), lim2 ); Node *cmp2 = new (C, 3) CmpINode( loop_head->incr(), new_limit );
register_new_node( cmp2, ctrl2 ); register_new_node( cmp2, ctrl2 );
Node *bol2 = new (C, 2) BoolNode( cmp2, loop_end->test_trip() ); Node *bol2 = new (C, 2) BoolNode( cmp2, loop_end->test_trip() );
register_new_node( bol2, ctrl2 ); register_new_node( bol2, ctrl2 );
@@ -1201,13 +1359,15 @@ void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool ad
// Step 3: Find the min-trip test guaranteed before a 'main' loop. // Step 3: Find the min-trip test guaranteed before a 'main' loop.
// Make it a 1-trip test (means at least 2 trips). // Make it a 1-trip test (means at least 2 trips).
if( adjust_min_trip ) { if( adjust_min_trip ) {
assert( new_limit != NULL, "" );
// Guard test uses an 'opaque' node which is not shared. Hence I // Guard test uses an 'opaque' node which is not shared. Hence I
// can edit it's inputs directly. Hammer in the new limit for the // can edit it's inputs directly. Hammer in the new limit for the
// minimum-trip guard. // minimum-trip guard.
assert( opaq->outcnt() == 1, "" ); assert( opaq->outcnt() == 1, "" );
_igvn.hash_delete(opaq); _igvn.hash_delete(opaq);
opaq->set_req(1, lim2); opaq->set_req(1, new_limit);
} }
} // LoopLimitCheck
// --------- // ---------
// Step 4: Clone the loop body. Move it inside the loop. This loop body // Step 4: Clone the loop body. Move it inside the loop. This loop body
@@ -1263,6 +1423,7 @@ void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool ad
void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) { void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) {
CountedLoopNode *cl = loop->_head->as_CountedLoop(); CountedLoopNode *cl = loop->_head->as_CountedLoop();
assert(cl->has_exact_trip_count(), "trip count is not exact");
assert(cl->trip_count() > 0, ""); assert(cl->trip_count() > 0, "");
#ifndef PRODUCT #ifndef PRODUCT
if (TraceLoopOpts) { if (TraceLoopOpts) {
@@ -1279,6 +1440,7 @@ void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_ne
// Now its tripping an even number of times remaining. Double loop body. // Now its tripping an even number of times remaining. Double loop body.
// Do not adjust pre-guards; they are not needed and do not exist. // Do not adjust pre-guards; they are not needed and do not exist.
if (cl->trip_count() > 0) { if (cl->trip_count() > 0) {
assert((cl->trip_count() & 1) == 0, "missed peeling");
do_unroll(loop, old_new, false); do_unroll(loop, old_new, false);
} }
} }
@@ -1292,22 +1454,13 @@ bool IdealLoopTree::dominates_backedge(Node* ctrl) {
} }
//------------------------------add_constraint--------------------------------- //------------------------------add_constraint---------------------------------
// Constrain the main loop iterations so the condition: // Constrain the main loop iterations so the conditions:
// scale_con * I + offset < limit // low_limit <= scale_con * I + offset < upper_limit
// always holds true. That is, either increase the number of iterations in // always holds true. That is, either increase the number of iterations in
// the pre-loop or the post-loop until the condition holds true in the main // the pre-loop or the post-loop until the condition holds true in the main
// loop. Stride, scale, offset and limit are all loop invariant. Further, // loop. Stride, scale, offset and limit are all loop invariant. Further,
// stride and scale are constants (offset and limit often are). // stride and scale are constants (offset and limit often are).
void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) { void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) {
// Compute "I :: (limit-offset)/scale_con"
Node *con = new (C, 3) SubINode( limit, offset );
register_new_node( con, pre_ctrl );
Node *scale = _igvn.intcon(scale_con);
set_ctrl(scale, C->root());
Node *X = new (C, 3) DivINode( 0, con, scale );
register_new_node( X, pre_ctrl );
// For positive stride, the pre-loop limit always uses a MAX function // For positive stride, the pre-loop limit always uses a MAX function
// and the main loop a MIN function. For negative stride these are // and the main loop a MIN function. For negative stride these are
// reversed. // reversed.
@@ -1316,48 +1469,143 @@ void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset
// pre-loop must check for underflow and the post-loop for overflow. // pre-loop must check for underflow and the post-loop for overflow.
// Negative stride*scale reverses this; pre-loop checks for overflow and // Negative stride*scale reverses this; pre-loop checks for overflow and
// post-loop for underflow. // post-loop for underflow.
if( stride_con*scale_con > 0 ) { if (stride_con*scale_con > 0) {
// Compute I < (limit-offset)/scale_con // The overflow limit: scale*I+offset < upper_limit
// Adjust main-loop last iteration to be MIN/MAX(main_loop,X) // For main-loop compute
*main_limit = (stride_con > 0) // ( if (scale > 0) /* and stride > 0 */
? (Node*)(new (C, 3) MinINode( *main_limit, X )) // I < (upper_limit-offset)/scale
: (Node*)(new (C, 3) MaxINode( *main_limit, X )); // else /* scale < 0 and stride < 0 */
register_new_node( *main_limit, pre_ctrl ); // I > (upper_limit-offset)/scale
// )
//
// (upper_limit-offset) may overflow when offset < 0.
// But it is fine since main loop will either have
// less iterations or will be skipped in such case.
Node *con = new (C, 3) SubINode(upper_limit, offset);
register_new_node(con, pre_ctrl);
Node *scale = _igvn.intcon(scale_con);
set_ctrl(scale, C->root());
Node *X = new (C, 3) DivINode(0, con, scale);
register_new_node(X, pre_ctrl);
// Adjust main-loop last iteration
Node *loop_limit = *main_limit;
loop_limit = (stride_con > 0) // scale > 0
? (Node*)(new (C, 3) MinINode(loop_limit, X))
: (Node*)(new (C, 3) MaxINode(loop_limit, X));
register_new_node(loop_limit, pre_ctrl);
*main_limit = loop_limit;
// The underflow limit: low_limit <= scale*I+offset.
// For pre-loop compute
// NOT(scale*I+offset >= low_limit)
// scale*I+offset < low_limit
// ( if (scale > 0) /* and stride > 0 */
// I < (low_limit-offset)/scale
// else /* scale < 0 and stride < 0 */
// I > (low_limit-offset)/scale
// )
if (low_limit->get_int() == -max_jint) {
if (!RangeLimitCheck) return;
// We need this guard when scale*pre_limit+offset >= limit
// due to underflow so we need execute pre-loop until
// scale*I+offset >= min_int. But (low_limit-offset) will
// underflow when offset > 0 and X will be > original_limit.
// To avoid it we replace offset = offset > 0 ? 0 : offset
// and add min(pre_limit, original_limit).
Node* shift = _igvn.intcon(31);
set_ctrl(shift, C->root());
Node *neg_off = new (C, 3) RShiftINode(offset, shift);
register_new_node(neg_off, pre_ctrl);
offset = new (C, 3) AndINode(offset, neg_off);
register_new_node(offset, pre_ctrl);
} else { } else {
// Compute (limit-offset)/scale_con + SGN(-scale_con) <= I assert(low_limit->get_int() == 0, "wrong low limit for range check");
// Add the negation of the main-loop constraint to the pre-loop. // The only problem we have here when offset == min_int
// See footnote [++] below for a derivation of the limit expression. // since (0-min_int) == min_int. It may be fine for scale > 0
Node *incr = _igvn.intcon(scale_con > 0 ? -1 : 1); // but for scale < 0 X will be < original_limit.
set_ctrl(incr, C->root()); }
Node *adj = new (C, 3) AddINode( X, incr ); con = new (C, 3) SubINode(low_limit, offset);
register_new_node( adj, pre_ctrl ); register_new_node(con, pre_ctrl);
*pre_limit = (scale_con > 0) scale = _igvn.intcon(scale_con);
? (Node*)new (C, 3) MinINode( *pre_limit, adj ) set_ctrl(scale, C->root());
: (Node*)new (C, 3) MaxINode( *pre_limit, adj ); X = new (C, 3) DivINode(0, con, scale);
register_new_node( *pre_limit, pre_ctrl ); register_new_node(X, pre_ctrl);
// Adjust pre-loop last iteration
loop_limit = *pre_limit;
loop_limit = (stride_con > 0) // scale > 0
? (Node*)(new (C, 3) MaxINode(loop_limit, X))
: (Node*)(new (C, 3) MinINode(loop_limit, X));
register_new_node( loop_limit, pre_ctrl );
*pre_limit = loop_limit;
} else { // stride_con*scale_con < 0
// For negative stride*scale pre-loop checks for overflow and
// post-loop for underflow.
//
// The underflow limit: low_limit <= scale*I+offset.
// For main-loop compute
// scale*I+offset+1 > low_limit
// ( if (scale < 0) /* and stride > 0 */
// I < (low_limit-(offset+1))/scale
// else /* scale < 0 and stride < 0 */
// I > (low_limit-(offset+1))/scale
// )
if (low_limit->get_int() == -max_jint) {
if (!RangeLimitCheck) return;
} else {
assert(low_limit->get_int() == 0, "wrong low limit for range check");
}
Node *one = _igvn.intcon(1);
set_ctrl(one, C->root());
Node *plus_one = new (C, 3) AddINode(offset, one);
register_new_node( plus_one, pre_ctrl );
Node *con = new (C, 3) SubINode(low_limit, plus_one);
register_new_node(con, pre_ctrl);
Node *scale = _igvn.intcon(scale_con);
set_ctrl(scale, C->root());
Node *X = new (C, 3) DivINode(0, con, scale);
register_new_node(X, pre_ctrl);
// Adjust main-loop last iteration
Node *loop_limit = *main_limit;
loop_limit = (stride_con > 0) // scale < 0
? (Node*)(new (C, 3) MinINode(loop_limit, X))
: (Node*)(new (C, 3) MaxINode(loop_limit, X));
register_new_node(loop_limit, pre_ctrl);
*main_limit = loop_limit;
// The overflow limit: scale*I+offset < upper_limit
// For pre-loop compute
// NOT(scale*I+offset < upper_limit)
// scale*I+offset >= upper_limit
// scale*I+offset+1 > upper_limit
// ( if (scale < 0) /* and stride > 0 */
// I < (upper_limit-(offset+1))/scale
// else /* scale < 0 and stride < 0 */
// I > (upper_limit-(offset+1))/scale
// )
plus_one = new (C, 3) AddINode(offset, one);
register_new_node( plus_one, pre_ctrl );
con = new (C, 3) SubINode(upper_limit, plus_one);
register_new_node(con, pre_ctrl);
scale = _igvn.intcon(scale_con);
set_ctrl(scale, C->root());
X = new (C, 3) DivINode(0, con, scale);
register_new_node(X, pre_ctrl);
// Adjust pre-loop last iteration
loop_limit = *pre_limit;
loop_limit = (stride_con > 0) // scale < 0
? (Node*)(new (C, 3) MaxINode(loop_limit, X))
: (Node*)(new (C, 3) MinINode(loop_limit, X));
register_new_node( loop_limit, pre_ctrl );
*pre_limit = loop_limit;
// [++] Here's the algebra that justifies the pre-loop limit expression:
//
// NOT( scale_con * I + offset < limit )
// ==
// scale_con * I + offset >= limit
// ==
// SGN(scale_con) * I >= (limit-offset)/|scale_con|
// ==
// (limit-offset)/|scale_con| <= I * SGN(scale_con)
// ==
// (limit-offset)/|scale_con|-1 < I * SGN(scale_con)
// ==
// ( if (scale_con > 0) /*common case*/
// (limit-offset)/scale_con - 1 < I
// else
// (limit-offset)/scale_con + 1 > I
// )
// ( if (scale_con > 0) /*common case*/
// (limit-offset)/scale_con + SGN(-scale_con) < I
// else
// (limit-offset)/scale_con + SGN(-scale_con) > I
} }
} }
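As a sanity check of the main-loop clamp computed above for the positive stride*scale case, here is a small brute-force sketch, restricted to the non-overflowing situation the comments describe (scale >= 1 and upper_limit - offset >= 0; all values arbitrary):

#include <cassert>

int main() {
  // Every I below X = (upper_limit - offset) / scale (truncating division)
  // keeps scale*I + offset strictly below upper_limit.
  for (int scale = 1; scale <= 4; scale++) {
    for (int offset = 0; offset <= 6; offset++) {
      for (int upper_limit = offset; upper_limit <= offset + 20; upper_limit++) {
        int X = (upper_limit - offset) / scale;
        for (int I = 0; I < X; I++) {
          assert(scale * I + offset < upper_limit);
        }
      }
    }
  }
  return 0;
}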
@@ -1488,7 +1736,7 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
Node *cmpzm = bolzm->in(1); Node *cmpzm = bolzm->in(1);
assert(cmpzm->is_Cmp(), ""); assert(cmpzm->is_Cmp(), "");
Node *opqzm = cmpzm->in(2); Node *opqzm = cmpzm->in(2);
// Can not optimize a loop if pre-loop Opaque1 node is optimized // Can not optimize a loop if zero-trip Opaque1 node is optimized
// away and then another round of loop opts attempted. // away and then another round of loop opts attempted.
if (opqzm->Opcode() != Op_Opaque1) if (opqzm->Opcode() != Op_Opaque1)
return; return;
@@ -1523,8 +1771,11 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
int stride_con = cl->stride_con(); int stride_con = cl->stride_con();
Node *zero = _igvn.intcon(0); Node *zero = _igvn.intcon(0);
Node *one = _igvn.intcon(1); Node *one = _igvn.intcon(1);
// Use symmetrical int range [-max_jint,max_jint]
Node *mini = _igvn.intcon(-max_jint);
set_ctrl(zero, C->root()); set_ctrl(zero, C->root());
set_ctrl(one, C->root()); set_ctrl(one, C->root());
set_ctrl(mini, C->root());
// Range checks that do not dominate the loop backedge (ie. // Range checks that do not dominate the loop backedge (ie.
// conditionally executed) can lengthen the pre loop limit beyond // conditionally executed) can lengthen the pre loop limit beyond
@@ -1599,7 +1850,12 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
if( offset_c == ctrl ) { if( offset_c == ctrl ) {
continue; // Don't rce this check but continue looking for other candidates. continue; // Don't rce this check but continue looking for other candidates.
} }
#ifdef ASSERT
if (TraceRangeLimitCheck) {
tty->print_cr("RC bool node%s", flip ? " flipped:" : ":");
bol->dump(2);
}
#endif
// At this point we have the expression as: // At this point we have the expression as:
// scale_con * trip_counter + offset :: limit // scale_con * trip_counter + offset :: limit
// where scale_con, offset and limit are loop invariant. Trip_counter // where scale_con, offset and limit are loop invariant. Trip_counter
@@ -1610,17 +1866,16 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
// Adjust pre and main loop limits to guard the correct iteration set // Adjust pre and main loop limits to guard the correct iteration set
if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests
if( b_test._test == BoolTest::lt ) { // Range checks always use lt if( b_test._test == BoolTest::lt ) { // Range checks always use lt
// The overflow limit: scale*I+offset < limit // The underflow and overflow limits: 0 <= scale*I+offset < limit
add_constraint( stride_con, scale_con, offset, limit, pre_ctrl, &pre_limit, &main_limit ); add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit );
// The underflow limit: 0 <= scale*I+offset.
// Some math yields: -scale*I-(offset+1) < 0
Node *plus_one = new (C, 3) AddINode( offset, one );
register_new_node( plus_one, pre_ctrl );
Node *neg_offset = new (C, 3) SubINode( zero, plus_one );
register_new_node( neg_offset, pre_ctrl );
add_constraint( stride_con, -scale_con, neg_offset, zero, pre_ctrl, &pre_limit, &main_limit );
if (!conditional_rc) { if (!conditional_rc) {
conditional_rc = !loop->dominates_backedge(iff); conditional_rc = !loop->dominates_backedge(iff);
// It is also needed if offset->_lo == min_int since
// (0-min_int) == min_int. It may be fine for stride > 0
// but for stride < 0 pre_limit will be < original_limit.
const TypeInt* offset_t = _igvn.type(offset)->is_int();
conditional_rc |= RangeLimitCheck && (offset_t->_lo == min_jint) &&
(scale_con<0) && (stride_con<0);
} }
} else { } else {
#ifndef PRODUCT #ifndef PRODUCT
@@ -1631,21 +1886,35 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
} }
} else { // Otherwise work on normal compares } else { // Otherwise work on normal compares
switch( b_test._test ) { switch( b_test._test ) {
case BoolTest::ge: // Convert X >= Y to -X <= -Y case BoolTest::gt:
// Fall into GE case
case BoolTest::ge:
// Convert (I*scale+offset) >= Limit to (I*(-scale)+(-offset)) <= -Limit
scale_con = -scale_con; scale_con = -scale_con;
offset = new (C, 3) SubINode( zero, offset ); offset = new (C, 3) SubINode( zero, offset );
register_new_node( offset, pre_ctrl ); register_new_node( offset, pre_ctrl );
limit = new (C, 3) SubINode( zero, limit ); limit = new (C, 3) SubINode( zero, limit );
register_new_node( limit, pre_ctrl ); register_new_node( limit, pre_ctrl );
// Fall into LE case // Fall into LE case
case BoolTest::le: // Convert X <= Y to X < Y+1 case BoolTest::le:
if (b_test._test != BoolTest::gt) {
// Convert X <= Y to X < Y+1
limit = new (C, 3) AddINode( limit, one ); limit = new (C, 3) AddINode( limit, one );
register_new_node( limit, pre_ctrl ); register_new_node( limit, pre_ctrl );
}
// Fall into LT case // Fall into LT case
case BoolTest::lt: case BoolTest::lt:
add_constraint( stride_con, scale_con, offset, limit, pre_ctrl, &pre_limit, &main_limit ); // The underflow and overflow limits: MIN_INT <= scale*I+offset < limit
add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit );
if (!conditional_rc) { if (!conditional_rc) {
conditional_rc = !loop->dominates_backedge(iff); conditional_rc = !loop->dominates_backedge(iff);
// It is also needed if scale*pre_limit+offset >= limit
// due to underflow so we need execute pre-loop until
// scale*I+offset >= min_int. But (low_limit-offset) will
// underflow when offset > 0 and X will be > original_limit.
const TypeInt* offset_t = _igvn.type(offset)->is_int();
conditional_rc |= RangeLimitCheck && (offset_t->_hi > 0) &&
(scale_con>0) && (stride_con>0);
} }
break; break;
default: default:
@@ -1696,7 +1965,8 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
// Note:: we are making the main loop limit no longer precise; // Note:: we are making the main loop limit no longer precise;
// need to round up based on stride. // need to round up based on stride.
if( stride_con != 1 && stride_con != -1 ) { // Cutout for common case cl->set_nonexact_trip_count();
if (!LoopLimitCheck && stride_con != 1 && stride_con != -1) { // Cutout for common case
// "Standard" round-up logic: ([main_limit-init+(y-1)]/y)*y+init // "Standard" round-up logic: ([main_limit-init+(y-1)]/y)*y+init
// Hopefully, compiler will optimize for powers of 2. // Hopefully, compiler will optimize for powers of 2.
Node *ctrl = get_ctrl(main_limit); Node *ctrl = get_ctrl(main_limit);
@@ -1876,7 +2146,19 @@ bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
// iteration. Then the CountedLoopEnd will collapse (backedge never // iteration. Then the CountedLoopEnd will collapse (backedge never
// taken) and all loop-invariant uses of the exit values will be correct. // taken) and all loop-invariant uses of the exit values will be correct.
Node *phi = cl->phi(); Node *phi = cl->phi();
Node *final = new (phase->C, 3) SubINode( cl->limit(), cl->stride() ); Node *exact_limit = phase->exact_limit(this);
if (exact_limit != cl->limit()) {
// We also need to replace the original limit to collapse loop exit.
Node* cmp = cl->loopexit()->cmp_node();
assert(cl->limit() == cmp->in(2), "sanity");
phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist
phase->_igvn.hash_delete(cmp);
cmp->set_req(2, exact_limit);
phase->_igvn._worklist.push(cmp); // put cmp on worklist
}
// Note: the final value after increment should not overflow since
// counted loop has limit check predicate.
Node *final = new (phase->C, 3) SubINode( exact_limit, cl->stride() );
phase->register_new_node(final,cl->in(LoopNode::EntryControl)); phase->register_new_node(final,cl->in(LoopNode::EntryControl));
phase->_igvn.replace_node(phi,final); phase->_igvn.replace_node(phi,final);
phase->C->set_major_progress(); phase->C->set_major_progress();
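Concretely, for an empty loop equivalent to for (int i = 0; i < 10; i++) {} with stride 1 (a made-up example), exact_limit() simply returns the limit 10, so final is 10 - 1 = 9: the trip-counter phi is pinned to its last-iteration value, the increment folds to the constant 10 seen by code after the loop, the exit test becomes constant, and the backedge is never taken.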
@@ -130,6 +130,11 @@ void PhaseIdealLoop::do_unswitching (IdealLoopTree *loop, Node_List &old_new) {
Node* uniqc = proj_true->unique_ctrl_out(); Node* uniqc = proj_true->unique_ctrl_out();
Node* entry = head->in(LoopNode::EntryControl); Node* entry = head->in(LoopNode::EntryControl);
Node* predicate = find_predicate(entry); Node* predicate = find_predicate(entry);
if (predicate != NULL && LoopLimitCheck && UseLoopPredicate) {
// We may have two predicates, find first.
entry = find_predicate(entry->in(0)->in(0));
if (entry != NULL) predicate = entry;
}
if (predicate != NULL) predicate = predicate->in(0); if (predicate != NULL) predicate = predicate->in(0);
assert(proj_true->is_IfTrue() && assert(proj_true->is_IfTrue() &&
(predicate == NULL && uniqc == head || (predicate == NULL && uniqc == head ||
@@ -217,6 +222,7 @@ void PhaseIdealLoop::do_unswitching (IdealLoopTree *loop, Node_List &old_new) {
ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop, ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop,
Node_List &old_new) { Node_List &old_new) {
LoopNode* head = loop->_head->as_Loop(); LoopNode* head = loop->_head->as_Loop();
bool counted_loop = head->is_CountedLoop();
Node* entry = head->in(LoopNode::EntryControl); Node* entry = head->in(LoopNode::EntryControl);
_igvn.hash_delete(entry); _igvn.hash_delete(entry);
_igvn._worklist.push(entry); _igvn._worklist.push(entry);
@@ -242,14 +248,14 @@ ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop,
assert(old_new[head->_idx]->is_Loop(), "" ); assert(old_new[head->_idx]->is_Loop(), "" );
// Fast (true) control // Fast (true) control
Node* iffast_pred = clone_loop_predicates(entry, iffast); Node* iffast_pred = clone_loop_predicates(entry, iffast, !counted_loop);
_igvn.hash_delete(head); _igvn.hash_delete(head);
head->set_req(LoopNode::EntryControl, iffast_pred); head->set_req(LoopNode::EntryControl, iffast_pred);
set_idom(head, iffast_pred, dom_depth(head)); set_idom(head, iffast_pred, dom_depth(head));
_igvn._worklist.push(head); _igvn._worklist.push(head);
// Slow (false) control // Slow (false) control
Node* ifslow_pred = move_loop_predicates(entry, ifslow); Node* ifslow_pred = move_loop_predicates(entry, ifslow, !counted_loop);
LoopNode* slow_head = old_new[head->_idx]->as_Loop(); LoopNode* slow_head = old_new[head->_idx]->as_Loop();
_igvn.hash_delete(slow_head); _igvn.hash_delete(slow_head);
slow_head->set_req(LoopNode::EntryControl, ifslow_pred); slow_head->set_req(LoopNode::EntryControl, ifslow_pred);
@@ -206,7 +206,7 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
// Get backedge compare // Get backedge compare
Node *cmp = test->in(1); Node *cmp = test->in(1);
int cmp_op = cmp->Opcode(); int cmp_op = cmp->Opcode();
if( cmp_op != Op_CmpI ) if (cmp_op != Op_CmpI)
return false; // Avoid pointer & float compares return false; // Avoid pointer & float compares
// Find the trip-counter increment & limit. Limit must be loop invariant. // Find the trip-counter increment & limit. Limit must be loop invariant.
@@ -259,7 +259,8 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
} }
// Stride must be constant // Stride must be constant
int stride_con = stride->get_int(); int stride_con = stride->get_int();
assert(stride_con != 0, "missed some peephole opt"); if (stride_con == 0)
return false; // missed some peephole opt
if (!xphi->is_Phi()) if (!xphi->is_Phi())
return false; // Too much math on the trip counter return false; // Too much math on the trip counter
@@ -319,7 +320,7 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
// Count down loop rolls through MAXINT // Count down loop rolls through MAXINT
(bt == BoolTest::le || bt == BoolTest::lt) && stride_con < 0 || (bt == BoolTest::le || bt == BoolTest::lt) && stride_con < 0 ||
// Count up loop rolls through MININT // Count up loop rolls through MININT
(bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0 ) { (bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0) {
return false; // Bail out return false; // Bail out
} }
@@ -341,12 +342,137 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
// //
assert(x->Opcode() == Op_Loop, "regular loops only"); assert(x->Opcode() == Op_Loop, "regular loops only");
C->print_method("Before CountedLoop", 3); C->print_method("Before CountedLoop", 3);
#ifndef PRODUCT
if (TraceLoopOpts) { Node *hook = new (C, 6) Node(6);
tty->print("Counted ");
if (LoopLimitCheck) {
// ===================================================
// Generate loop limit check to avoid integer overflow
// in cases like next (cyclic loops):
//
// for (i=0; i <= max_jint; i++) {}
// for (i=0; i < max_jint; i+=2) {}
//
//
// Limit check predicate depends on the loop test:
//
// for(;i != limit; i++) --> limit <= (max_jint)
// for(;i < limit; i+=stride) --> limit <= (max_jint - stride + 1)
// for(;i <= limit; i+=stride) --> limit <= (max_jint - stride )
//
// Check if limit is excluded to do more precise int overflow check.
bool incl_limit = (bt == BoolTest::le || bt == BoolTest::ge);
int stride_m = stride_con - (incl_limit ? 0 : (stride_con > 0 ? 1 : -1));
// If compare points directly to the phi we need to adjust
// the compare so that it points to the incr. Limit have
// to be adjusted to keep trip count the same and the
// adjusted limit should be checked for int overflow.
if (phi_incr != NULL) {
stride_m += stride_con;
}
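// Worked example (illustrative, hypothetical values): for a Java loop
// "for (i = 0; i <= limit; i++)" the test is BoolTest::le and stride_con == 1,
// so incl_limit is true and stride_m == 1 (assuming no phi_incr adjustment).
// If limit is the constant max_jint, the check just below bails out of the
// counted-loop conversion, because i++ would wrap around and the loop would
// cycle. For a non-constant limit whose type does not rule this out, the code
// further down emits a predicate that deoptimizes unless limit <= max_jint - 1.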
if (limit->is_Con()) {
int limit_con = limit->get_int();
if ((stride_con > 0 && limit_con > (max_jint - stride_m)) ||
(stride_con < 0 && limit_con < (min_jint - stride_m))) {
// Bailout: it could be integer overflow.
return false;
}
} else if ((stride_con > 0 && limit_t->_hi <= (max_jint - stride_m)) ||
(stride_con < 0 && limit_t->_lo >= (min_jint - stride_m))) {
// Limit's type may satisfy the condition, for example,
// when it is an array length.
} else {
// Generate loop's limit check.
// Loop limit check predicate should be near the loop.
ProjNode *limit_check_proj = find_predicate_insertion_point(init_control, Deoptimization::Reason_loop_limit_check);
if (!limit_check_proj) {
// The limit check predicate is not generated if this method trapped here before.
#ifdef ASSERT
if (TraceLoopLimitCheck) {
tty->print("missing loop limit check:");
loop->dump_head(); loop->dump_head();
x->dump(1);
} }
#endif #endif
return false;
}
IfNode* check_iff = limit_check_proj->in(0)->as_If();
Node* cmp_limit;
Node* bol;
if (stride_con > 0) {
cmp_limit = new (C, 3) CmpINode(limit, _igvn.intcon(max_jint - stride_m));
bol = new (C, 2) BoolNode(cmp_limit, BoolTest::le);
} else {
cmp_limit = new (C, 3) CmpINode(limit, _igvn.intcon(min_jint - stride_m));
bol = new (C, 2) BoolNode(cmp_limit, BoolTest::ge);
}
cmp_limit = _igvn.register_new_node_with_optimizer(cmp_limit);
bol = _igvn.register_new_node_with_optimizer(bol);
set_subtree_ctrl(bol);
// Replace condition in original predicate but preserve Opaque node
// so that previous predicates could be found.
assert(check_iff->in(1)->Opcode() == Op_Conv2B &&
check_iff->in(1)->in(1)->Opcode() == Op_Opaque1, "");
Node* opq = check_iff->in(1)->in(1);
_igvn.hash_delete(opq);
opq->set_req(1, bol);
// Update ctrl.
set_ctrl(opq, check_iff->in(0));
set_ctrl(check_iff->in(1), check_iff->in(0));
#ifndef PRODUCT
// report that the loop predication has been actually performed
// for this loop
if (TraceLoopLimitCheck) {
tty->print_cr("Counted Loop Limit Check generated:");
debug_only( bol->dump(2); )
}
#endif
}
if (phi_incr != NULL) {
// If compare points directly to the phi we need to adjust
// the compare so that it points to the incr. Limit have
// to be adjusted to keep trip count the same and we
// should avoid int overflow.
//
// i = init; do {} while(i++ < limit);
// is converted to
// i = init; do {} while(++i < limit+1);
//
limit = gvn->transform(new (C, 3) AddINode(limit, stride));
}
// Now we need to canonicalize loop condition.
if (bt == BoolTest::ne) {
assert(stride_con == 1 || stride_con == -1, "simple increment only");
bt = (stride_con > 0) ? BoolTest::lt : BoolTest::gt;
}
if (incl_limit) {
// The limit check guaranties that 'limit <= (max_jint - stride)' so
// we can convert 'i <= limit' to 'i < limit+1' since stride != 0.
//
Node* one = (stride_con > 0) ? gvn->intcon( 1) : gvn->intcon(-1);
limit = gvn->transform(new (C, 3) AddINode(limit, one));
if (bt == BoolTest::le)
bt = BoolTest::lt;
else if (bt == BoolTest::ge)
bt = BoolTest::gt;
else
ShouldNotReachHere();
}
set_subtree_ctrl( limit );
} else { // LoopLimitCheck
// If compare points to incr, we are ok. Otherwise the compare // If compare points to incr, we are ok. Otherwise the compare
// can directly point to the phi; in this case adjust the compare so that // can directly point to the phi; in this case adjust the compare so that
// it points to the incr by adjusting the limit. // it points to the incr by adjusting the limit.
@@ -359,7 +485,6 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
Node *one_m = gvn->intcon(-1); Node *one_m = gvn->intcon(-1);
Node *trip_count = NULL; Node *trip_count = NULL;
Node *hook = new (C, 6) Node(6);
switch( bt ) { switch( bt ) {
case BoolTest::eq: case BoolTest::eq:
ShouldNotReachHere(); ShouldNotReachHere();
@@ -441,6 +566,8 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
limit = gvn->transform(new (C, 3) AddINode(span,init_trip)); limit = gvn->transform(new (C, 3) AddINode(span,init_trip));
set_subtree_ctrl( limit ); set_subtree_ctrl( limit );
} // LoopLimitCheck
// Check for SafePoint on backedge and remove // Check for SafePoint on backedge and remove
Node *sfpt = x->in(LoopNode::LoopBackControl); Node *sfpt = x->in(LoopNode::LoopBackControl);
if (sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) { if (sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) {
@@ -531,7 +658,7 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
// Check for immediately preceding SafePoint and remove // Check for immediately preceding SafePoint and remove
Node *sfpt2 = le->in(0); Node *sfpt2 = le->in(0);
if( sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2)) if (sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2))
lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control)); lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control));
// Free up intermediate goo // Free up intermediate goo
@@ -541,12 +668,56 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
assert(l->is_valid_counted_loop(), "counted loop shape is messed up"); assert(l->is_valid_counted_loop(), "counted loop shape is messed up");
assert(l == loop->_head && l->phi() == phi && l->loopexit() == lex, "" ); assert(l == loop->_head && l->phi() == phi && l->loopexit() == lex, "" );
#endif #endif
#ifndef PRODUCT
if (TraceLoopOpts) {
tty->print("Counted ");
loop->dump_head();
}
#endif
C->print_method("After CountedLoop", 3); C->print_method("After CountedLoop", 3);
return true; return true;
} }
//----------------------exact_limit-------------------------------------------
Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) {
assert(loop->_head->is_CountedLoop(), "");
CountedLoopNode *cl = loop->_head->as_CountedLoop();
if (!LoopLimitCheck || ABS(cl->stride_con()) == 1 ||
cl->limit()->Opcode() == Op_LoopLimit) {
// Old code has exact limit (it could be incorrect in case of int overflow).
// Loop limit is exact with stride == 1. And loop may already have exact limit.
return cl->limit();
}
Node *limit = NULL;
#ifdef ASSERT
BoolTest::mask bt = cl->loopexit()->test_trip();
assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
#endif
if (cl->has_exact_trip_count()) {
// Simple case: loop has constant boundaries.
// Use longs to avoid integer overflow.
int stride_con = cl->stride_con();
long init_con = cl->init_trip()->get_int();
long limit_con = cl->limit()->get_int();
julong trip_cnt = cl->trip_count();
long final_con = init_con + trip_cnt*stride_con;
final_con -= stride_con;
int final_int = (int)final_con;
// The final value should be in integer range since the loop
// is counted and the limit was checked for overflow.
assert(final_con == (long)final_int, "final value should be integer");
limit = _igvn.intcon(final_int);
} else {
// Create new LoopLimit node to get exact limit (final iv value).
limit = new (C, 4) LoopLimitNode(C, cl->init_trip(), cl->limit(), cl->stride());
register_new_node(limit, cl->in(LoopNode::EntryControl));
}
assert(limit != NULL, "sanity");
return limit;
}
//------------------------------Ideal------------------------------------------ //------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node. // Return a node which is more "ideal" than the current node.
@@ -572,14 +743,12 @@ Node *CountedLoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
#ifndef PRODUCT #ifndef PRODUCT
void CountedLoopNode::dump_spec(outputStream *st) const { void CountedLoopNode::dump_spec(outputStream *st) const {
LoopNode::dump_spec(st); LoopNode::dump_spec(st);
if( stride_is_con() ) { if (stride_is_con()) {
st->print("stride: %d ",stride_con()); st->print("stride: %d ",stride_con());
} else {
st->print("stride: not constant ");
} }
if( is_pre_loop () ) st->print("pre of N%d" , _main_idx ); if (is_pre_loop ()) st->print("pre of N%d" , _main_idx);
if( is_main_loop() ) st->print("main of N%d", _idx ); if (is_main_loop()) st->print("main of N%d", _idx);
if( is_post_loop() ) st->print("post of N%d", _main_idx ); if (is_post_loop()) st->print("post of N%d", _main_idx);
} }
#endif #endif
@@ -588,7 +757,130 @@ int CountedLoopEndNode::stride_con() const {
return stride()->bottom_type()->is_int()->get_con(); return stride()->bottom_type()->is_int()->get_con();
} }
//=============================================================================
//------------------------------Value-----------------------------------------
const Type *LoopLimitNode::Value( PhaseTransform *phase ) const {
const Type* init_t = phase->type(in(Init));
const Type* limit_t = phase->type(in(Limit));
const Type* stride_t = phase->type(in(Stride));
// Either input is TOP ==> the result is TOP
if (init_t == Type::TOP) return Type::TOP;
if (limit_t == Type::TOP) return Type::TOP;
if (stride_t == Type::TOP) return Type::TOP;
int stride_con = stride_t->is_int()->get_con();
if (stride_con == 1)
return NULL; // Identity
if (init_t->is_int()->is_con() && limit_t->is_int()->is_con()) {
// Use longs to avoid integer overflow.
long init_con = init_t->is_int()->get_con();
long limit_con = limit_t->is_int()->get_con();
int stride_m = stride_con - (stride_con > 0 ? 1 : -1);
long trip_count = (limit_con - init_con + stride_m)/stride_con;
long final_con = init_con + stride_con*trip_count;
int final_int = (int)final_con;
// The final value should be in integer range since the loop
// is counted and the limit was checked for overflow.
assert(final_con == (long)final_int, "final value should be integer");
return TypeInt::make(final_int);
}
return bottom_type(); // TypeInt::INT
}
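// Worked example (illustrative, hypothetical values): init = 0, limit = 10,
// stride = 3 gives stride_m = 2 and trip_count = (10 - 0 + 2) / 3 = 4, so
// final_con = 0 + 3 * 4 = 12.  The loop visits i = 0, 3, 6, 9 and exits with
// i == 12, which is exactly the constant this node folds to.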
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
Node *LoopLimitNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (phase->type(in(Init)) == Type::TOP ||
phase->type(in(Limit)) == Type::TOP ||
phase->type(in(Stride)) == Type::TOP)
return NULL; // Dead
int stride_con = phase->type(in(Stride))->is_int()->get_con();
if (stride_con == 1)
return NULL; // Identity
if (in(Init)->is_Con() && in(Limit)->is_Con())
return NULL; // Value
  // Delay the following optimizations until all loop optimizations are
  // done, to keep the Ideal graph simple.
if (!can_reshape || phase->C->major_progress())
return NULL;
const TypeInt* init_t = phase->type(in(Init) )->is_int();
const TypeInt* limit_t = phase->type(in(Limit))->is_int();
int stride_p;
long lim, ini;
julong max;
if (stride_con > 0) {
stride_p = stride_con;
lim = limit_t->_hi;
ini = init_t->_lo;
max = (julong)max_jint;
} else {
stride_p = -stride_con;
lim = init_t->_hi;
ini = limit_t->_lo;
max = (julong)min_jint;
}
julong range = lim - ini + stride_p;
if (range <= max) {
    // Convert to an integer expression if it does not overflow.
Node* stride_m = phase->intcon(stride_con - (stride_con > 0 ? 1 : -1));
Node *range = phase->transform(new (phase->C, 3) SubINode(in(Limit), in(Init)));
Node *bias = phase->transform(new (phase->C, 3) AddINode(range, stride_m));
Node *trip = phase->transform(new (phase->C, 3) DivINode(0, bias, in(Stride)));
Node *span = phase->transform(new (phase->C, 3) MulINode(trip, in(Stride)));
return new (phase->C, 3) AddINode(span, in(Init)); // exact limit
}
if (is_power_of_2(stride_p) || // divisor is 2^n
!Matcher::has_match_rule(Op_LoopLimit)) { // or no specialized Mach node?
// Convert to long expression to avoid integer overflow
// and let igvn optimizer convert this division.
//
Node* init = phase->transform( new (phase->C, 2) ConvI2LNode(in(Init)));
Node* limit = phase->transform( new (phase->C, 2) ConvI2LNode(in(Limit)));
Node* stride = phase->longcon(stride_con);
Node* stride_m = phase->longcon(stride_con - (stride_con > 0 ? 1 : -1));
Node *range = phase->transform(new (phase->C, 3) SubLNode(limit, init));
Node *bias = phase->transform(new (phase->C, 3) AddLNode(range, stride_m));
Node *span;
if (stride_con > 0 && is_power_of_2(stride_p)) {
      // bias >= 0 if stride > 0, so if stride is 2^n we can use &(-stride)
      // and avoid generating rounding for the division. The zero trip guard
      // should guarantee that init < limit, but sometimes the guard is missing
      // and we can get a situation where init > limit. Note that for the empty
      // loop optimization the zero trip guard is generated explicitly, which
      // leaves only the RCE predicate where the exact limit is used, and that
      // predicate will simply fail, forcing recompilation.
Node* neg_stride = phase->longcon(-stride_con);
span = phase->transform(new (phase->C, 3) AndLNode(bias, neg_stride));
} else {
Node *trip = phase->transform(new (phase->C, 3) DivLNode(0, bias, stride));
span = phase->transform(new (phase->C, 3) MulLNode(trip, stride));
}
// Convert back to int
Node *span_int = phase->transform(new (phase->C, 2) ConvL2INode(span));
return new (phase->C, 3) AddINode(span_int, in(Init)); // exact limit
}
return NULL; // No progress
}
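
The power-of-two branch above relies on the identity that, for a non-negative bias and a positive power-of-two stride, bias & -stride equals (bias / stride) * stride, so the masking replaces the division. A throwaway standalone check of that identity (not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  // For stride = 2^n > 0, -stride has the low n bits clear, so the AND rounds
  // a non-negative bias down to a multiple of stride.
  for (int64_t stride = 1; stride <= 1024; stride <<= 1) {
    for (int64_t bias = 0; bias < 10000; bias++) {
      assert((bias & -stride) == (bias / stride) * stride);
    }
  }
  return 0;
}
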
//------------------------------Identity---------------------------------------
// If stride == 1 return limit node.
Node *LoopLimitNode::Identity( PhaseTransform *phase ) {
int stride_con = phase->type(in(Stride))->is_int()->get_con();
if (stride_con == 1 || stride_con == -1)
return in(Limit);
return this;
}
//=============================================================================
//----------------------match_incr_with_optional_truncation-------------------- //----------------------match_incr_with_optional_truncation--------------------
// Match increment with optional truncation: // Match increment with optional truncation:
// CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<8)>>8, or SHORT: ((i+1)<<16)>>16 // CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<8)>>8, or SHORT: ((i+1)<<16)>>16
@ -870,7 +1162,7 @@ void IdealLoopTree::split_outer_loop( PhaseIdealLoop *phase ) {
outer = igvn.register_new_node_with_optimizer(outer, _head); outer = igvn.register_new_node_with_optimizer(outer, _head);
phase->set_created_loop_node(); phase->set_created_loop_node();
Node* pred = phase->clone_loop_predicates(ctl, outer); Node* pred = phase->clone_loop_predicates(ctl, outer, true);
// Outermost loop falls into '_head' loop // Outermost loop falls into '_head' loop
_head->set_req(LoopNode::EntryControl, pred); _head->set_req(LoopNode::EntryControl, pred);
_head->del_req(outer_idx); _head->del_req(outer_idx);
@ -1440,9 +1732,16 @@ void IdealLoopTree::dump_head( ) const {
tty->print(" "); tty->print(" ");
tty->print("Loop: N%d/N%d ",_head->_idx,_tail->_idx); tty->print("Loop: N%d/N%d ",_head->_idx,_tail->_idx);
if (_irreducible) tty->print(" IRREDUCIBLE"); if (_irreducible) tty->print(" IRREDUCIBLE");
Node* entry = _head->in(LoopNode::EntryControl);
if (LoopLimitCheck) {
Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
if (predicate != NULL ) {
tty->print(" limit_check");
entry = entry->in(0)->in(0);
}
}
if (UseLoopPredicate) { if (UseLoopPredicate) {
Node* entry = PhaseIdealLoop::find_predicate_insertion_point(_head->in(LoopNode::EntryControl), entry = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
Deoptimization::Reason_predicate);
if (entry != NULL) { if (entry != NULL) {
tty->print(" predicated"); tty->print(" predicated");
} }
@ -1528,10 +1827,15 @@ void PhaseIdealLoop::collect_potentially_useful_predicates(
!loop->tail()->is_top()) { !loop->tail()->is_top()) {
LoopNode* lpn = loop->_head->as_Loop(); LoopNode* lpn = loop->_head->as_Loop();
Node* entry = lpn->in(LoopNode::EntryControl); Node* entry = lpn->in(LoopNode::EntryControl);
Node* predicate_proj = find_predicate(entry); Node* predicate_proj = find_predicate(entry); // loop_limit_check first
if (predicate_proj != NULL ) { // right pattern that can be used by loop predication if (predicate_proj != NULL ) { // right pattern that can be used by loop predication
assert(entry->in(0)->in(1)->in(1)->Opcode() == Op_Opaque1, "must be"); assert(entry->in(0)->in(1)->in(1)->Opcode() == Op_Opaque1, "must be");
useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one
entry = entry->in(0)->in(0);
}
predicate_proj = find_predicate(entry); // Predicate
if (predicate_proj != NULL ) {
useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one
} }
} }
@ -1542,6 +1846,8 @@ void PhaseIdealLoop::collect_potentially_useful_predicates(
//------------------------eliminate_useless_predicates----------------------------- //------------------------eliminate_useless_predicates-----------------------------
// Eliminate all inserted predicates if they could not be used by loop predication. // Eliminate all inserted predicates if they could not be used by loop predication.
// Note: it will also eliminate the loop limit check predicate, since that
// predicate also uses an Opaque1 node (see Parse::add_predicate()).
void PhaseIdealLoop::eliminate_useless_predicates() { void PhaseIdealLoop::eliminate_useless_predicates() {
if (C->predicate_count() == 0) if (C->predicate_count() == 0)
return; // no predicate left return; // no predicate left
@ -1731,7 +2037,7 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs) {
// Some parser-inserted loop predicates could never be used by loop // Some parser-inserted loop predicates could never be used by loop
// predication or they were moved away from loop during some optimizations. // predication or they were moved away from loop during some optimizations.
// For example, peeling. Eliminate them before next loop optimizations. // For example, peeling. Eliminate them before next loop optimizations.
if (UseLoopPredicate) { if (UseLoopPredicate || LoopLimitCheck) {
eliminate_useless_predicates(); eliminate_useless_predicates();
} }

View File

@ -289,6 +289,28 @@ inline Node *CountedLoopNode::limit() const { return loopexit() ? loopexit()->li
inline Node *CountedLoopNode::incr() const { return loopexit() ? loopexit()->incr() : NULL; } inline Node *CountedLoopNode::incr() const { return loopexit() ? loopexit()->incr() : NULL; }
inline Node *CountedLoopNode::phi() const { return loopexit() ? loopexit()->phi() : NULL; } inline Node *CountedLoopNode::phi() const { return loopexit() ? loopexit()->phi() : NULL; }
//------------------------------LoopLimitNode-----------------------------
// Counted loop limit node which represents the exact final iterator value:
//   trip_count  = (limit - init_trip + stride - 1) / stride
//   final_value = trip_count * stride + init_trip
// Use HW instructions to calculate it when it can overflow in an int.
// Note: final_value should fit into an int since a counted loop has the
// limit check: limit <= max_int - stride.
class LoopLimitNode : public Node {
enum { Init=1, Limit=2, Stride=3 };
public:
LoopLimitNode( Compile* C, Node *init, Node *limit, Node *stride ) : Node(0,init,limit,stride) {
    // Put it on the macro nodes list so it is optimized during macro node expansion.
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
virtual uint ideal_reg() const { return Op_RegI; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual Node *Identity( PhaseTransform *phase );
};
// -----------------------------IdealLoopTree---------------------------------- // -----------------------------IdealLoopTree----------------------------------
class IdealLoopTree : public ResourceObj { class IdealLoopTree : public ResourceObj {
@ -775,6 +797,8 @@ public:
bool is_counted_loop( Node *x, IdealLoopTree *loop ); bool is_counted_loop( Node *x, IdealLoopTree *loop );
Node* exact_limit( IdealLoopTree *loop );
// Return a post-walked LoopNode // Return a post-walked LoopNode
IdealLoopTree *get_loop( Node *n ) const { IdealLoopTree *get_loop( Node *n ) const {
// Dead nodes have no loop, so return the top level loop instead // Dead nodes have no loop, so return the top level loop instead
@ -836,7 +860,6 @@ public:
// Return true if exp is a scaled induction var plus (or minus) constant // Return true if exp is a scaled induction var plus (or minus) constant
bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0); bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);
// Return true if proj is for "proj->[region->..]call_uct"
// Return true if proj is for "proj->[region->..]call_uct" // Return true if proj is for "proj->[region->..]call_uct"
static bool is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason); static bool is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason);
// Return true for "if(test)-> proj -> ... // Return true for "if(test)-> proj -> ...
@ -860,10 +883,11 @@ public:
PhaseIterGVN* igvn); PhaseIterGVN* igvn);
static Node* clone_loop_predicates(Node* old_entry, Node* new_entry, static Node* clone_loop_predicates(Node* old_entry, Node* new_entry,
bool move_predicates, bool move_predicates,
bool clone_limit_check,
PhaseIdealLoop* loop_phase, PhaseIdealLoop* loop_phase,
PhaseIterGVN* igvn); PhaseIterGVN* igvn);
Node* clone_loop_predicates(Node* old_entry, Node* new_entry); Node* clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check);
Node* move_loop_predicates(Node* old_entry, Node* new_entry); Node* move_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check);
void eliminate_loop_predicates(Node* entry); void eliminate_loop_predicates(Node* entry);
static Node* skip_loop_predicates(Node* entry); static Node* skip_loop_predicates(Node* entry);
@ -873,7 +897,7 @@ public:
// Find a predicate // Find a predicate
static Node* find_predicate(Node* entry); static Node* find_predicate(Node* entry);
// Construct a range check for a predicate if // Construct a range check for a predicate if
BoolNode* rc_predicate(Node* ctrl, BoolNode* rc_predicate(IdealLoopTree *loop, Node* ctrl,
int scale, Node* offset, int scale, Node* offset,
Node* init, Node* limit, Node* stride, Node* init, Node* limit, Node* stride,
Node* range, bool upper); Node* range, bool upper);
@ -903,11 +927,11 @@ public:
// Range Check Elimination uses this function! // Range Check Elimination uses this function!
// Constrain the main loop iterations so the affine function: // Constrain the main loop iterations so the affine function:
// scale_con * I + offset < limit // low_limit <= scale_con * I + offset < upper_limit
// always holds true. That is, either increase the number of iterations in // always holds true. That is, either increase the number of iterations in
// the pre-loop or the post-loop until the condition holds true in the main // the pre-loop or the post-loop until the condition holds true in the main
// loop. Scale_con, offset and limit are all loop invariant. // loop. Scale_con, offset and limit are all loop invariant.
void add_constraint( int stride_con, int scale_con, Node *offset, Node *limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ); void add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );
// Partially peel loop up through last_peel node. // Partially peel loop up through last_peel node.
bool partial_peel( IdealLoopTree *loop, Node_List &old_new ); bool partial_peel( IdealLoopTree *loop, Node_List &old_new );

View File

@ -2154,6 +2154,11 @@ bool PhaseMacroExpand::expand_macro_nodes() {
debug_only(int old_macro_count = C->macro_count();); debug_only(int old_macro_count = C->macro_count(););
if (n->is_AbstractLock()) { if (n->is_AbstractLock()) {
success = eliminate_locking_node(n->as_AbstractLock()); success = eliminate_locking_node(n->as_AbstractLock());
} else if (n->Opcode() == Op_LoopLimit) {
// Remove it from macro list and put on IGVN worklist to optimize.
C->remove_macro_node(n);
_igvn._worklist.push(n);
success = true;
} else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) { } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
_igvn.replace_node(n, n->in(1)); _igvn.replace_node(n, n->in(1));
success = true; success = true;

View File

@ -2086,6 +2086,13 @@ void Matcher::find_shared( Node *n ) {
n->del_req(3); n->del_req(3);
break; break;
} }
case Op_LoopLimit: {
Node *pair1 = new (C, 3) BinaryNode(n->in(1),n->in(2));
n->set_req(1,pair1);
n->set_req(2,n->in(3));
n->del_req(3);
break;
}
case Op_StrEquals: { case Op_StrEquals: {
Node *pair1 = new (C, 3) BinaryNode(n->in(2),n->in(3)); Node *pair1 = new (C, 3) BinaryNode(n->in(2),n->in(3));
n->set_req(2,pair1); n->set_req(2,pair1);

View File

@ -70,7 +70,7 @@ protected:
const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result); const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result);
const char* shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const; const char* shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
const char* shouldNotInline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const; const char* shouldNotInline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const;
void print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const PRODUCT_RETURN; void print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const;
InlineTree *caller_tree() const { return _caller_tree; } InlineTree *caller_tree() const { return _caller_tree; }
InlineTree* callee_at(int bci, ciMethod* m) const; InlineTree* callee_at(int bci, ciMethod* m) const;

View File

@ -638,7 +638,7 @@ void Parse::do_all_blocks() {
ensure_phis_everywhere(); ensure_phis_everywhere();
if (block->is_SEL_head() && if (block->is_SEL_head() &&
UseLoopPredicate) { (UseLoopPredicate || LoopLimitCheck)) {
// Add predicate to single entry (not irreducible) loop head. // Add predicate to single entry (not irreducible) loop head.
assert(!block->has_merged_backedge(), "only entry paths should be merged for now"); assert(!block->has_merged_backedge(), "only entry paths should be merged for now");
// Need correct bci for predicate. // Need correct bci for predicate.

View File

@ -472,8 +472,8 @@ public:
} }
// Clone loop predicates. Defined in loopTransform.cpp. // Clone loop predicates. Defined in loopTransform.cpp.
Node* clone_loop_predicates(Node* old_entry, Node* new_entry); Node* clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check);
Node* move_loop_predicates(Node* old_entry, Node* new_entry); Node* move_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check);
// Create a new if below new_entry for the predicate to be cloned // Create a new if below new_entry for the predicate to be cloned
ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
Deoptimization::DeoptReason reason); Deoptimization::DeoptReason reason);

View File

@ -1222,21 +1222,6 @@ bool BoolNode::is_counted_loop_exit_test() {
return false; return false;
} }
//=============================================================================
//------------------------------NegNode----------------------------------------
Node *NegFNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( in(1)->Opcode() == Op_SubF )
return new (phase->C, 3) SubFNode( in(1)->in(2), in(1)->in(1) );
return NULL;
}
Node *NegDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( in(1)->Opcode() == Op_SubD )
return new (phase->C, 3) SubDNode( in(1)->in(2), in(1)->in(1) );
return NULL;
}
//============================================================================= //=============================================================================
//------------------------------Value------------------------------------------ //------------------------------Value------------------------------------------
// Compute sqrt // Compute sqrt

View File

@ -377,7 +377,6 @@ class NegFNode : public NegNode {
public: public:
NegFNode( Node *in1 ) : NegNode(in1) {} NegFNode( Node *in1 ) : NegNode(in1) {}
virtual int Opcode() const; virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
const Type *bottom_type() const { return Type::FLOAT; } const Type *bottom_type() const { return Type::FLOAT; }
virtual uint ideal_reg() const { return Op_RegF; } virtual uint ideal_reg() const { return Op_RegF; }
}; };
@ -391,7 +390,6 @@ class NegDNode : public NegNode {
public: public:
NegDNode( Node *in1 ) : NegNode(in1) {} NegDNode( Node *in1 ) : NegNode(in1) {}
virtual int Opcode() const; virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
const Type *bottom_type() const { return Type::DOUBLE; } const Type *bottom_type() const { return Type::DOUBLE; }
virtual uint ideal_reg() const { return Op_RegD; } virtual uint ideal_reg() const { return Op_RegD; }
}; };

View File

@ -280,10 +280,8 @@
<!ELEMENT externallink (#PCDATA|jvmti|code|i|b|tm)*> <!ELEMENT externallink (#PCDATA|jvmti|code|i|b|tm)*>
<!ATTLIST externallink id CDATA #REQUIRED> <!ATTLIST externallink id CDATA #REQUIRED>
<!ELEMENT vmspeclink EMPTY> <!ELEMENT vmspec EMPTY>
<!ATTLIST vmspeclink id CDATA #IMPLIED> <!ATTLIST vmspec chapter CDATA #IMPLIED>
<!ATTLIST vmspeclink name CDATA #IMPLIED>
<!ATTLIST vmspeclink preposition CDATA #IMPLIED>
<!ELEMENT internallink (#PCDATA|jvmti|code|i|b)*> <!ELEMENT internallink (#PCDATA|jvmti|code|i|b)*>
<!ATTLIST internallink id CDATA #REQUIRED> <!ATTLIST internallink id CDATA #REQUIRED>
@ -2285,9 +2283,8 @@ jvmtiEnv *jvmti;
Stack frames are referenced by depth. Stack frames are referenced by depth.
The frame at depth zero is the current frame. The frame at depth zero is the current frame.
<p/> <p/>
Stack frames are as described in the Stack frames are as described in
<vmspeclink id="Overview.doc.html#17257" <vmspec chapter="3.6"/>,
name="Frames section"/>.
That is, they correspond to method That is, they correspond to method
invocations (including native methods) but do not correspond to platform native or invocations (including native methods) but do not correspond to platform native or
VM internal frames. VM internal frames.
@ -2627,7 +2624,7 @@ err = (*jvmti)-&gt;Deallocate(jvmti, stack_info);
<param id="use_java_stack"> <param id="use_java_stack">
<jboolean/> <jboolean/>
<description> <description>
Return the stack showing the <vmspeclink/> Return the stack showing <vmspec/>
model of the stack; model of the stack;
otherwise, show the internal representation of the stack with otherwise, show the internal representation of the stack with
inlined and optimized methods missing. If the virtual machine inlined and optimized methods missing. If the virtual machine
@ -2707,7 +2704,7 @@ err = (*jvmti)-&gt;Deallocate(jvmti, stack_info);
When the thread is resumed, the execution When the thread is resumed, the execution
state of the thread is reset to the state state of the thread is reset to the state
immediately before the called method was invoked. immediately before the called method was invoked.
That is (using the <vmspeclink/> terminology): That is (using <vmspec/> terminology):
<ul> <ul>
<li>the current frame is discarded as the previous frame becomes the current one</li> <li>the current frame is discarded as the previous frame becomes the current one</li>
<li>the operand stack is restored--the argument values are added back <li>the operand stack is restored--the argument values are added back
@ -2868,9 +2865,8 @@ err = (*jvmti)-&gt;Deallocate(jvmti, stack_info);
to return at any point during its execution. to return at any point during its execution.
The method which will return early is referred to as the <i>called method</i>. The method which will return early is referred to as the <i>called method</i>.
The called method is the current method The called method is the current method
(as defined by the (as defined by
<vmspeclink id="Overview.doc.html#17257" <vmspec chapter="3.6"/>)
name="Frames section"/>)
for the specified thread at for the specified thread at
the time the function is called. the time the function is called.
<p/> <p/>
@ -3576,10 +3572,8 @@ class C2 extends C1 implements I2 {
<field id="index"> <field id="index">
<jint/> <jint/>
<description> <description>
The index into the constant pool of the class. See the The index into the constant pool of the class. See the description in
<vmspeclink id="ClassFile.doc.html#20080" <vmspec chapter="4.4"/>.
name="Constant Pool section"/>
description.
</description> </description>
</field> </field>
</typedef> </typedef>
@ -5006,9 +5000,8 @@ class C2 extends C1 implements I2 {
For references of this kind the <code>referrer_index</code> For references of this kind the <code>referrer_index</code>
parameter to the <internallink id="jvmtiObjectReferenceCallback"> parameter to the <internallink id="jvmtiObjectReferenceCallback">
jvmtiObjectReferenceCallback</internallink> is the index into jvmtiObjectReferenceCallback</internallink> is the index into
constant pool table of the class, starting at 1. See the constant pool table of the class, starting at 1. See
<vmspeclink id="ClassFile.doc.html#20080" <vmspec chapter="4.4"/>.
name="Constant Pool section"/>
</constant> </constant>
</constants> </constants>
@ -6441,9 +6434,7 @@ class C2 extends C1 implements I2 {
been recorded as an initiating loader. Each been recorded as an initiating loader. Each
class in the returned array was created by this class loader, class in the returned array was created by this class loader,
either by defining it directly or by delegation to another class loader. either by defining it directly or by delegation to another class loader.
See the See <vmspec chapter="5.3"/>.
<vmspeclink id="ConstantPool.doc.html#72007"
name="Creation and Loading section"/>.
<p/> <p/>
For JDK version 1.1 implementations that don't For JDK version 1.1 implementations that don't
recognize the distinction between initiating and defining class loaders, recognize the distinction between initiating and defining class loaders,
@ -6626,9 +6617,7 @@ class C2 extends C1 implements I2 {
For the class indicated by <code>klass</code>, return the access For the class indicated by <code>klass</code>, return the access
flags flags
via <code>modifiers_ptr</code>. via <code>modifiers_ptr</code>.
Access flags are defined in the Access flags are defined in <vmspec chapter="4"/>.
<vmspeclink id="ClassFile.doc.html"
name="Class File Format chapter"/>.
<p/> <p/>
If the class is an array class, then its public, private, and protected If the class is an array class, then its public, private, and protected
modifiers are the same as those of its component type. For arrays of modifiers are the same as those of its component type. For arrays of
@ -6794,9 +6783,8 @@ class C2 extends C1 implements I2 {
<description> <description>
For the class indicated by <code>klass</code>, For the class indicated by <code>klass</code>,
return the minor and major version numbers, return the minor and major version numbers,
as defined in the as defined in
<vmspeclink id="ClassFile.doc.html" <vmspec chapter="4"/>.
name="Class File Format chapter"/>.
</description> </description>
<origin>new</origin> <origin>new</origin>
<capabilities> <capabilities>
@ -6839,10 +6827,8 @@ class C2 extends C1 implements I2 {
<description> <description>
For the class indicated by <code>klass</code>, For the class indicated by <code>klass</code>,
return the raw bytes of the constant pool in the format of the return the raw bytes of the constant pool in the format of the
<code>constant_pool</code> item of the <code>constant_pool</code> item of
<vmspeclink id="ClassFile.doc.html" <vmspec chapter="4"/>.
name="Class File Format"
preposition="in"/>.
The format of the constant pool may differ between versions The format of the constant pool may differ between versions
of the Class File Format, so, the of the Class File Format, so, the
<functionlink id="GetClassVersionNumbers">minor and major <functionlink id="GetClassVersionNumbers">minor and major
@ -7286,9 +7272,7 @@ class C2 extends C1 implements I2 {
<field id="class_bytes"> <field id="class_bytes">
<inbuf incount="class_byte_count"><uchar/></inbuf> <inbuf incount="class_byte_count"><uchar/></inbuf>
<description> <description>
Bytes defining class (in the Bytes defining class (in <vmspec chapter="4"/>)
<vmspeclink id="ClassFile.doc.html"
name="Class File Format"/>)
</description> </description>
</field> </field>
</typedef> </typedef>
@ -7611,10 +7595,8 @@ class C2 extends C1 implements I2 {
<paramlink id="signature_ptr"/>. <paramlink id="signature_ptr"/>.
<p/> <p/>
Field signatures are defined in the JNI Specification and Field signatures are defined in the JNI Specification and
are referred to as are referred to as <code>field descriptors</code> in
<vmspeclink id="ClassFile.doc.html#14152" <vmspec chapter="4.3.2"/>.
name="field descriptors"
preposition="in"/>.
</description> </description>
<origin>jvmdiClone</origin> <origin>jvmdiClone</origin>
<capabilities> <capabilities>
@ -7709,9 +7691,7 @@ class C2 extends C1 implements I2 {
<description> <description>
For the field indicated by <code>klass</code> and <code>field</code> For the field indicated by <code>klass</code> and <code>field</code>
return the access flags via <code>modifiers_ptr</code>. return the access flags via <code>modifiers_ptr</code>.
Access flags are defined in the Access flags are defined in <vmspec chapter="4"/>.
<vmspeclink id="ClassFile.doc.html"
name="Class File Format chapter"/>.
</description> </description>
<origin>jvmdi</origin> <origin>jvmdi</origin>
<capabilities> <capabilities>
@ -7810,10 +7790,9 @@ class C2 extends C1 implements I2 {
return the method name via <code>name_ptr</code> and method signature via return the method name via <code>name_ptr</code> and method signature via
<code>signature_ptr</code>. <code>signature_ptr</code>.
<p/> <p/>
Method signatures are defined in the JNI Specification and are referred to as Method signatures are defined in the JNI Specification and are
<vmspeclink id="ClassFile.doc.html#7035" referred to as <code>method descriptors</code> in
name="method descriptors" <vmspec chapter="4.3.3"/>.
preposition="in"/>.
Note this is different Note this is different
than method signatures as defined in the <i>Java Language Specification</i>. than method signatures as defined in the <i>Java Language Specification</i>.
</description> </description>
@ -7902,9 +7881,7 @@ class C2 extends C1 implements I2 {
<description> <description>
For the method indicated by <code>method</code>, For the method indicated by <code>method</code>,
return the access flags via <code>modifiers_ptr</code>. return the access flags via <code>modifiers_ptr</code>.
Access flags are defined in the Access flags are defined in <vmspec chapter="4"/>.
<vmspeclink id="ClassFile.doc.html"
name="Class File Format chapter"/>.
</description> </description>
<origin>jvmdi</origin> <origin>jvmdi</origin>
<capabilities> <capabilities>
@ -7941,9 +7918,7 @@ class C2 extends C1 implements I2 {
including the local variables used to pass parameters to the including the local variables used to pass parameters to the
method on its invocation. method on its invocation.
<p/> <p/>
See <code>max_locals</code> in the See <code>max_locals</code> in <vmspec chapter="4.7.3"/>.
<vmspeclink id="ClassFile.doc.html#1546"
name="Code Attribute section"/>.
</description> </description>
<origin>jvmdi</origin> <origin>jvmdi</origin>
<capabilities> <capabilities>
@ -8150,8 +8125,7 @@ class C2 extends C1 implements I2 {
The local variable's type signature, encoded as a The local variable's type signature, encoded as a
<internallink id="mUTF">modified UTF-8</internallink> string. <internallink id="mUTF">modified UTF-8</internallink> string.
The signature format is the same as that defined in The signature format is the same as that defined in
<vmspeclink id="ClassFile.doc.html#14152" <vmspec chapter="4.3.2"/>.
name="Field Descriptors section"/>
</description> </description>
</field> </field>
<field id="generic_signature"> <field id="generic_signature">
@ -10460,10 +10434,7 @@ myInit() {
<synopsis>Add To Bootstrap Class Loader Search</synopsis> <synopsis>Add To Bootstrap Class Loader Search</synopsis>
<description> <description>
This function can be used to cause instrumentation classes to be defined by the This function can be used to cause instrumentation classes to be defined by the
bootstrap class loader. See bootstrap class loader. See <vmspec chapter="5.3.1"/>.
<vmspeclink id="ConstantPool.doc.html#79383"
name="Loading Using the Bootstrap Class Loader"
preposition="in"/>.
After the bootstrap After the bootstrap
class loader unsuccessfully searches for a class, the specified platform-dependent class loader unsuccessfully searches for a class, the specified platform-dependent
search path <paramlink id="segment"/> will be searched as well. Only one segment may be specified in search path <paramlink id="segment"/> will be searched as well. Only one segment may be specified in
@ -10480,7 +10451,7 @@ myInit() {
contain any classes or resources other than those to be defined by the bootstrap contain any classes or resources other than those to be defined by the bootstrap
class loader for the purposes of instrumentation. class loader for the purposes of instrumentation.
<p/> <p/>
The <vmspeclink/> specifies that a subsequent attempt to resolve a symbolic <vmspec/> specifies that a subsequent attempt to resolve a symbolic
reference that the Java virtual machine has previously unsuccessfully attempted reference that the Java virtual machine has previously unsuccessfully attempted
to resolve always fails with the same error that was thrown as a result of the to resolve always fails with the same error that was thrown as a result of the
initial resolution attempt. Consequently, if the JAR file contains an entry initial resolution attempt. Consequently, if the JAR file contains an entry
@ -10512,10 +10483,7 @@ myInit() {
<synopsis>Add To System Class Loader Search</synopsis> <synopsis>Add To System Class Loader Search</synopsis>
<description> <description>
This function can be used to cause instrumentation classes to be This function can be used to cause instrumentation classes to be
defined by the system class loader. See defined by the system class loader. See <vmspec chapter="5.3.2"/>.
<vmspeclink id="ConstantPool.doc.html#79441"
name="Loading Using a User-defined Class Loader"
preposition="in"/>.
After the class loader unsuccessfully searches for a class, the specified platform-dependent search After the class loader unsuccessfully searches for a class, the specified platform-dependent search
path <paramlink id="segment"/> will be searched as well. Only one segment may be specified in the path <paramlink id="segment"/> will be searched as well. Only one segment may be specified in the
<paramlink id="segment"/>. This function may be called multiple times to add multiple segments, the <paramlink id="segment"/>. This function may be called multiple times to add multiple segments, the
@ -10536,7 +10504,7 @@ myInit() {
which takes a single parameter of type <code>java.lang.String</code>. The method is not required which takes a single parameter of type <code>java.lang.String</code>. The method is not required
to have <code>public</code> access. to have <code>public</code> access.
<p/> <p/>
The <vmspeclink/> specifies that a subsequent attempt to resolve a symbolic <vmspec/> specifies that a subsequent attempt to resolve a symbolic
reference that the Java virtual machine has previously unsuccessfully attempted reference that the Java virtual machine has previously unsuccessfully attempted
to resolve always fails with the same error that was thrown as a result of the to resolve always fails with the same error that was thrown as a result of the
initial resolution attempt. Consequently, if the JAR file contains an entry initial resolution attempt. Consequently, if the JAR file contains an entry
@ -11438,7 +11406,7 @@ myInit() {
at the finest granularity allowed by the VM. A single step event is at the finest granularity allowed by the VM. A single step event is
generated whenever a thread reaches a new location. generated whenever a thread reaches a new location.
Typically, single step events represent the completion of one VM Typically, single step events represent the completion of one VM
instruction as defined in the <vmspeclink/>. However, some implementations instruction as defined in <vmspec/>. However, some implementations
may define locations differently. In any case the may define locations differently. In any case the
<code>method</code> and <code>location</code> <code>method</code> and <code>location</code>
parameters uniquely identify the current location and allow parameters uniquely identify the current location and allow
@ -13841,7 +13809,7 @@ typedef void (JNICALL *jvmtiEventVMInit)
and can_get_source_debug_extension. and can_get_source_debug_extension.
PopFrame cannot have a native calling method. PopFrame cannot have a native calling method.
Removed incorrect statement in GetClassloaderClasses Removed incorrect statement in GetClassloaderClasses
(see http://java.sun.com/docs/books/vmspec/2nd-edition/html/ConstantPool.doc.html#79383). (see <vmspec chapter="4.4"/>).
</change> </change>
<change date="24 July 2003" version="v79"> <change date="24 July 2003" version="v79">
XML and text fixes. XML and text fixes.

View File

@ -1039,34 +1039,14 @@ typedef struct {
</a> </a>
</xsl:template> </xsl:template>
<xsl:template match="vmspeclink"> <xsl:template match="vmspec">
<xsl:if test="count(@id)=1"> <cite>
<a> <xsl:text>The Java&#8482; Virtual Machine Specification</xsl:text>
<xsl:attribute name="href"> <xsl:if test="count(@chapter)=1">
<xsl:text>http://java.sun.com/docs/books/vmspec/2nd-edition/html/</xsl:text> <xsl:text>, Chapter </xsl:text>
<xsl:value-of select="@id"/> <xsl:value-of select="@chapter"/>
</xsl:attribute>
<xsl:value-of select="@name"/>
</a>
<xsl:text> </xsl:text>
<xsl:choose>
<xsl:when test="count(@preposition)=1">
<xsl:value-of select="@preposition"/>
</xsl:when>
<xsl:otherwise>
<xsl:text>of</xsl:text>
</xsl:otherwise>
</xsl:choose>
<xsl:text> the </xsl:text>
</xsl:if> </xsl:if>
<a> </cite>
<xsl:attribute name="href">
<xsl:text>http://java.sun.com/docs/books/vmspec/</xsl:text>
</xsl:attribute>
<i>
<xsl:text>Java Virtual Machine Specification</xsl:text>
</i>
</a>
</xsl:template> </xsl:template>
<xsl:template match="internallink"> <xsl:template match="internallink">

View File

@ -1804,6 +1804,8 @@ void JvmtiExport::post_compiled_method_load(JvmtiEnv* env, const jmethodID metho
} }
void JvmtiExport::post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) { void JvmtiExport::post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) {
assert(name != NULL && name[0] != '\0', "sanity check");
JavaThread* thread = JavaThread::current(); JavaThread* thread = JavaThread::current();
// In theory everyone coming thru here is in_vm but we need to be certain // In theory everyone coming thru here is in_vm but we need to be certain
// because a callee will do a vm->native transition // because a callee will do a vm->native transition

View File

@ -38,6 +38,7 @@
#include "runtime/handles.inline.hpp" #include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp" #include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp" #include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
#include "runtime/serviceThread.hpp" #include "runtime/serviceThread.hpp"
#include "runtime/signature.hpp" #include "runtime/signature.hpp"
#include "runtime/vframe.hpp" #include "runtime/vframe.hpp"
@ -939,10 +940,15 @@ JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
nmethodLocker::lock_nmethod(nm, true /* zombie_ok */); nmethodLocker::lock_nmethod(nm, true /* zombie_ok */);
return event; return event;
} }
JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event( JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
const char* name, const void* code_begin, const void* code_end) { const char* name, const void* code_begin, const void* code_end) {
JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED); JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED);
event._event_data.dynamic_code_generated.name = name; // Need to make a copy of the name since we don't know how long
// the event poster will keep it around after we enqueue the
// deferred event and return. strdup() failure is handled in
// the post() routine below.
event._event_data.dynamic_code_generated.name = os::strdup(name);
event._event_data.dynamic_code_generated.code_begin = code_begin; event._event_data.dynamic_code_generated.code_begin = code_begin;
event._event_data.dynamic_code_generated.code_end = code_end; event._event_data.dynamic_code_generated.code_end = code_end;
return event; return event;
@ -968,12 +974,19 @@ void JvmtiDeferredEvent::post() {
nmethodLocker::unlock_nmethod(nm); nmethodLocker::unlock_nmethod(nm);
break; break;
} }
case TYPE_DYNAMIC_CODE_GENERATED: case TYPE_DYNAMIC_CODE_GENERATED: {
JvmtiExport::post_dynamic_code_generated_internal( JvmtiExport::post_dynamic_code_generated_internal(
_event_data.dynamic_code_generated.name, // if strdup failed give the event a default name
(_event_data.dynamic_code_generated.name == NULL)
? "unknown_code" : _event_data.dynamic_code_generated.name,
_event_data.dynamic_code_generated.code_begin, _event_data.dynamic_code_generated.code_begin,
_event_data.dynamic_code_generated.code_end); _event_data.dynamic_code_generated.code_end);
if (_event_data.dynamic_code_generated.name != NULL) {
// release our copy
os::free((void *)_event_data.dynamic_code_generated.name);
}
break; break;
}
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
} }
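
The ownership rule in this hunk (copy the name when the deferred event is created, fall back to a default name if the copy failed, and free the copy once the event has been posted) can be sketched in isolation. The struct and function names below are made up for illustration, and a plain malloc-based copy stands in for os::strdup/os::free:

#include <cstdio>
#include <cstdlib>
#include <cstring>

struct DeferredEvent {
  const char* name;   // heap copy owned by the event; NULL if the copy failed
};

// Mirror of os::strdup: returns NULL if the allocation fails.
static char* dup_name(const char* s) {
  size_t len = strlen(s) + 1;
  char* copy = (char*)malloc(len);
  if (copy != NULL) memcpy(copy, s, len);
  return copy;
}

// The enqueuing side copies the name because the caller may release its
// buffer before the service thread gets around to posting the event.
static DeferredEvent make_event(const char* name) {
  DeferredEvent event;
  event.name = dup_name(name);
  return event;
}

// The posting side substitutes a default name if the copy failed and then
// releases the copy (free(NULL) is a no-op).
static void post_event(DeferredEvent* event) {
  const char* name = (event->name == NULL) ? "unknown_code" : event->name;
  printf("dynamic code generated: %s\n", name);
  free((void*)event->name);
  event->name = NULL;
}

int main() {
  DeferredEvent e = make_event("I2C/C2I adapters");
  post_event(&e);
  return 0;
}
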

View File

@ -960,7 +960,7 @@ void Arguments::set_mode_flags(Mode mode) {
// Ensure Agent_OnLoad has the correct initial values. // Ensure Agent_OnLoad has the correct initial values.
// This may not be the final mode; mode may change later in onload phase. // This may not be the final mode; mode may change later in onload phase.
PropertyList_unique_add(&_system_properties, "java.vm.info", PropertyList_unique_add(&_system_properties, "java.vm.info",
(char*)Abstract_VM_Version::vm_info_string(), false); (char*)VM_Version::vm_info_string(), false);
UseInterpreter = true; UseInterpreter = true;
UseCompiler = true; UseCompiler = true;
@ -969,10 +969,10 @@ void Arguments::set_mode_flags(Mode mode) {
#ifndef ZERO #ifndef ZERO
// Turn these off for mixed and comp. Leave them on for Zero. // Turn these off for mixed and comp. Leave them on for Zero.
if (FLAG_IS_DEFAULT(UseFastAccessorMethods)) { if (FLAG_IS_DEFAULT(UseFastAccessorMethods)) {
UseFastAccessorMethods = mode == _int; UseFastAccessorMethods = (mode == _int);
} }
if (FLAG_IS_DEFAULT(UseFastEmptyMethods)) { if (FLAG_IS_DEFAULT(UseFastEmptyMethods)) {
UseFastEmptyMethods = mode == _int; UseFastEmptyMethods = (mode == _int);
} }
#endif #endif
@ -1423,6 +1423,11 @@ void Arguments::set_parallel_gc_flags() {
} }
} }
} }
if (UseNUMA) {
if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
}
}
} }
void Arguments::set_g1_gc_flags() { void Arguments::set_g1_gc_flags() {
@ -1987,6 +1992,9 @@ jint Arguments::parse_vm_init_args(const JavaVMInitArgs* args) {
Arguments::_ClipInlining = ClipInlining; Arguments::_ClipInlining = ClipInlining;
Arguments::_BackgroundCompilation = BackgroundCompilation; Arguments::_BackgroundCompilation = BackgroundCompilation;
  // Set up flags for mixed mode, which is the default
set_mode_flags(_mixed);
// Parse JAVA_TOOL_OPTIONS environment variable (if present) // Parse JAVA_TOOL_OPTIONS environment variable (if present)
jint result = parse_java_tool_options_environment_variable(&scp, &scp_assembly_required); jint result = parse_java_tool_options_environment_variable(&scp, &scp_assembly_required);
if (result != JNI_OK) { if (result != JNI_OK) {
@ -2376,7 +2384,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
_gc_log_filename = strdup(tail); _gc_log_filename = strdup(tail);
FLAG_SET_CMDLINE(bool, PrintGC, true); FLAG_SET_CMDLINE(bool, PrintGC, true);
FLAG_SET_CMDLINE(bool, PrintGCTimeStamps, true); FLAG_SET_CMDLINE(bool, PrintGCTimeStamps, true);
FLAG_SET_CMDLINE(bool, TraceClassUnloading, true);
// JNI hooks // JNI hooks
} else if (match_option(option, "-Xcheck", &tail)) { } else if (match_option(option, "-Xcheck", &tail)) {

View File

@ -189,6 +189,10 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
assert(thread->deopt_nmethod() == NULL, "Pending deopt!"); assert(thread->deopt_nmethod() == NULL, "Pending deopt!");
thread->set_deopt_nmethod(deoptee.cb()->as_nmethod_or_null()); thread->set_deopt_nmethod(deoptee.cb()->as_nmethod_or_null());
if (VerifyStack) {
thread->validate_frame_layout();
}
// Create a growable array of VFrames where each VFrame represents an inlined // Create a growable array of VFrames where each VFrame represents an inlined
// Java frame. This storage is allocated with the usual system arena. // Java frame. This storage is allocated with the usual system arena.
assert(deoptee.is_compiled_frame(), "Wrong frame type"); assert(deoptee.is_compiled_frame(), "Wrong frame type");
@ -421,6 +425,21 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
deopt_sender = deopt_sender.sender(&dummy_map); // Now deoptee caller deopt_sender = deopt_sender.sender(&dummy_map); // Now deoptee caller
  // It's possible that the number of parameters at the call site is
  // different from the number of arguments in the callee when method
  // handles are used. If the caller is interpreted, get the real
  // value so that the proper amount of space can be added to its
  // frame.
int sender_callee_parameters = callee_parameters;
if (deopt_sender.is_interpreted_frame()) {
methodHandle method = deopt_sender.interpreter_frame_method();
Bytecode_invoke cur = Bytecode_invoke_check(method,
deopt_sender.interpreter_frame_bci());
Symbol* signature = method->constants()->signature_ref_at(cur.index());
ArgumentSizeComputer asc(signature);
sender_callee_parameters = asc.size() + (cur.has_receiver() ? 1 : 0);
}
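
The slot count ArgumentSizeComputer derives from the call-site signature follows the standard JVM descriptor rules: long and double occupy two slots, every other parameter occupies one, plus one slot for the receiver if the invoke has one. A simplified standalone sketch of that counting (argument_slots is an illustrative name, not the VM's implementation):

#include <cassert>
#include <cstddef>
#include <string>

// Count argument slots in a JVM method descriptor, e.g.
// "(IJLjava/lang/String;[D)V" -> 1 (int) + 2 (long) + 1 (ref) + 1 (array) = 5.
static int argument_slots(const std::string& descriptor, bool has_receiver) {
  int slots = has_receiver ? 1 : 0;          // 'this' takes one slot
  size_t i = descriptor.find('(') + 1;
  while (i < descriptor.size() && descriptor[i] != ')') {
    char c = descriptor[i];
    if (c == 'J' || c == 'D') {              // long or double: two slots
      slots += 2;
      i++;
    } else if (c == 'L') {                   // object type L<classname>;
      slots += 1;
      i = descriptor.find(';', i) + 1;
    } else if (c == '[') {                   // array: skip dims, then element type
      while (descriptor[i] == '[') i++;
      if (descriptor[i] == 'L') {
        i = descriptor.find(';', i) + 1;
      } else {
        i++;
      }
      slots += 1;
    } else {                                 // B, C, F, I, S or Z: one slot
      slots += 1;
      i++;
    }
  }
  return slots;
}

int main() {
  assert(argument_slots("(IJLjava/lang/String;[D)V", /*has_receiver=*/true)  == 6);
  assert(argument_slots("()V",                       /*has_receiver=*/false) == 0);
  return 0;
}
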
// Compute the amount the oldest interpreter frame will have to adjust // Compute the amount the oldest interpreter frame will have to adjust
// its caller's stack by. If the caller is a compiled frame then // its caller's stack by. If the caller is a compiled frame then
// we pretend that the callee has no parameters so that the // we pretend that the callee has no parameters so that the
@ -435,14 +454,13 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
if (deopt_sender.is_compiled_frame()) { if (deopt_sender.is_compiled_frame()) {
caller_adjustment = last_frame_adjust(0, callee_locals); caller_adjustment = last_frame_adjust(0, callee_locals);
} else if (callee_locals > callee_parameters) { } else if (callee_locals > sender_callee_parameters) {
// The caller frame may need extending to accommodate // The caller frame may need extending to accommodate
// non-parameter locals of the first unpacked interpreted frame. // non-parameter locals of the first unpacked interpreted frame.
// Compute that adjustment. // Compute that adjustment.
caller_adjustment = last_frame_adjust(callee_parameters, callee_locals); caller_adjustment = last_frame_adjust(sender_callee_parameters, callee_locals);
} }
// If the sender is deoptimized the we must retrieve the address of the handler // If the sender is deoptimized the we must retrieve the address of the handler
// since the frame will "magically" show the original pc before the deopt // since the frame will "magically" show the original pc before the deopt
// and we'd undo the deopt. // and we'd undo the deopt.
@ -569,6 +587,8 @@ JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_m
if (VerifyStack) { if (VerifyStack) {
ResourceMark res_mark; ResourceMark res_mark;
thread->validate_frame_layout();
// Verify that the just-unpacked frames match the interpreter's // Verify that the just-unpacked frames match the interpreter's
// notions of expression stack and locals // notions of expression stack and locals
vframeArray* cur_array = thread->vframe_array_last(); vframeArray* cur_array = thread->vframe_array_last();
@ -1753,7 +1773,8 @@ const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = {
"constraint", "constraint",
"div0_check", "div0_check",
"age", "age",
"predicate" "predicate",
"loop_limit_check"
}; };
const char* Deoptimization::_trap_action_name[Action_LIMIT] = { const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
// Note: Keep this in sync. with enum DeoptAction. // Note: Keep this in sync. with enum DeoptAction.

View File

@ -56,6 +56,7 @@ class Deoptimization : AllStatic {
Reason_div0_check, // a null_check due to division by zero Reason_div0_check, // a null_check due to division by zero
Reason_age, // nmethod too old; tier threshold reached Reason_age, // nmethod too old; tier threshold reached
Reason_predicate, // compiler generated predicate failed Reason_predicate, // compiler generated predicate failed
Reason_loop_limit_check, // compiler generated loop limits check failed
Reason_LIMIT, Reason_LIMIT,
// Note: Keep this enum in sync. with _trap_reason_name. // Note: Keep this enum in sync. with _trap_reason_name.
Reason_RECORDED_LIMIT = Reason_bimorphic // some are not recorded per bc Reason_RECORDED_LIMIT = Reason_bimorphic // some are not recorded per bc
@ -78,7 +79,7 @@ class Deoptimization : AllStatic {
enum { enum {
_action_bits = 3, _action_bits = 3,
_reason_bits = 4, _reason_bits = 5,
_action_shift = 0, _action_shift = 0,
_reason_shift = _action_shift+_action_bits, _reason_shift = _action_shift+_action_bits,
BC_CASE_LIMIT = PRODUCT_ONLY(1) NOT_PRODUCT(4) // for _deoptimization_hist BC_CASE_LIMIT = PRODUCT_ONLY(1) NOT_PRODUCT(4) // for _deoptimization_hist
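
Widening _reason_bits from 4 to 5 makes room for the new Reason_loop_limit_check in the packed trap state. Reading the layout off the constants in this hunk (3 action bits at shift 0, 5 reason bits at shift 3), the packing behaves roughly as in the sketch below; the function names are made up for illustration and this is not the VM's actual accessor code:

#include <cassert>

// Illustrative packing per the constants above: _action_bits = 3,
// _action_shift = 0, _reason_bits = 5, _reason_shift = 3.
static int make_trap_state(int reason, int action) {
  assert(action < (1 << 3) && reason < (1 << 5));
  return (reason << 3) | action;
}
static int trap_state_action(int trap_state) { return trap_state & ((1 << 3) - 1); }
static int trap_state_reason(int trap_state) { return (trap_state >> 3) & ((1 << 5) - 1); }

int main() {
  int ts = make_trap_state(/*reason=*/16, /*action=*/2);   // a reason value that no longer fits in 4 bits
  assert(trap_state_reason(ts) == 16 && trap_state_action(ts) == 2);
  return 0;
}
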

View File

@ -1308,6 +1308,72 @@ void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const {
guarantee((current - low_mark) % monitor_size == 0 , "Misaligned bottom of BasicObjectLock*"); guarantee((current - low_mark) % monitor_size == 0 , "Misaligned bottom of BasicObjectLock*");
guarantee( current >= low_mark , "Current BasicObjectLock* below than low_mark"); guarantee( current >= low_mark , "Current BasicObjectLock* below than low_mark");
} }
void frame::describe(FrameValues& values, int frame_no) {
if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) {
// Label values common to most frames
values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no));
values.describe(-1, sp(), err_msg("sp for #%d", frame_no));
values.describe(-1, fp(), err_msg("fp for #%d", frame_no));
}
if (is_interpreted_frame()) {
methodOop m = interpreter_frame_method();
int bci = interpreter_frame_bci();
// Label the method and current bci
values.describe(-1, MAX2(sp(), fp()),
FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 2);
values.describe(-1, MAX2(sp(), fp()),
err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 1);
if (m->max_locals() > 0) {
intptr_t* l0 = interpreter_frame_local_at(0);
intptr_t* ln = interpreter_frame_local_at(m->max_locals() - 1);
values.describe(-1, MAX2(l0, ln), err_msg("locals for #%d", frame_no), 1);
// Report each local and mark as owned by this frame
for (int l = 0; l < m->max_locals(); l++) {
intptr_t* l0 = interpreter_frame_local_at(l);
values.describe(frame_no, l0, err_msg("local %d", l));
}
}
// Compute the actual expression stack size
InterpreterOopMap mask;
OopMapCache::compute_one_oop_map(m, bci, &mask);
intptr_t* tos = NULL;
// Report each stack element and mark as owned by this frame
for (int e = 0; e < mask.expression_stack_size(); e++) {
tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
values.describe(frame_no, interpreter_frame_expression_stack_at(e),
err_msg("stack %d", e));
}
if (tos != NULL) {
values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 1);
}
if (interpreter_frame_monitor_begin() != interpreter_frame_monitor_end()) {
values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_begin(), "monitors begin");
values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_end(), "monitors end");
}
} else if (is_entry_frame()) {
// For now just label the frame
values.describe(-1, MAX2(sp(), fp()), err_msg("#%d entry frame", frame_no), 2);
} else if (is_compiled_frame()) {
// For now just label the frame
nmethod* nm = cb()->as_nmethod_or_null();
values.describe(-1, MAX2(sp(), fp()),
FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s", frame_no,
nm, nm->method()->name_and_sig_as_C_string(),
                      is_deoptimized_frame() ? " (deoptimized)" : ""), 2);
} else if (is_native_frame()) {
// For now just label the frame
nmethod* nm = cb()->as_nmethod_or_null();
values.describe(-1, MAX2(sp(), fp()),
FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no,
nm, nm->method()->name_and_sig_as_C_string()), 2);
}
describe_pd(values, frame_no);
}
#endif #endif
@ -1319,3 +1385,71 @@ StackFrameStream::StackFrameStream(JavaThread *thread, bool update) : _reg_map(t
_fr = thread->last_frame(); _fr = thread->last_frame();
_is_done = false; _is_done = false;
} }
#ifdef ASSERT
void FrameValues::describe(int owner, intptr_t* location, const char* description, int priority) {
FrameValue fv;
fv.location = location;
fv.owner = owner;
fv.priority = priority;
fv.description = NEW_RESOURCE_ARRAY(char, strlen(description) + 1);
strcpy(fv.description, description);
_values.append(fv);
}
bool FrameValues::validate() {
_values.sort(compare);
bool error = false;
FrameValue prev;
prev.owner = -1;
for (int i = _values.length() - 1; i >= 0; i--) {
FrameValue fv = _values.at(i);
if (fv.owner == -1) continue;
if (prev.owner == -1) {
prev = fv;
continue;
}
if (prev.location == fv.location) {
if (fv.owner != prev.owner) {
tty->print_cr("overlapping storage");
tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", prev.location, *prev.location, prev.description);
tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description);
error = true;
}
} else {
prev = fv;
}
}
return error;
}
void FrameValues::print() {
_values.sort(compare);
intptr_t* v0 = _values.at(0).location;
intptr_t* v1 = _values.at(_values.length() - 1).location;
intptr_t* min = MIN2(v0, v1);
intptr_t* max = MAX2(v0, v1);
intptr_t* cur = max;
intptr_t* last = NULL;
for (int i = _values.length() - 1; i >= 0; i--) {
FrameValue fv = _values.at(i);
while (cur > fv.location) {
tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT, cur, *cur);
cur--;
}
if (last == fv.location) {
const char* spacer = " " LP64_ONLY(" ");
tty->print_cr(" %s %s %s", spacer, spacer, fv.description);
} else {
tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description);
last = fv.location;
cur--;
}
}
}
#endif

View File

@ -60,6 +60,7 @@
typedef class BytecodeInterpreter* interpreterState; typedef class BytecodeInterpreter* interpreterState;
class CodeBlob; class CodeBlob;
class FrameValues;
class vframeArray; class vframeArray;
@ -381,6 +382,8 @@ class frame VALUE_OBJ_CLASS_SPEC {
private: private:
const char* print_name() const; const char* print_name() const;
void describe_pd(FrameValues& values, int frame_no);
public: public:
void print_value() const { print_value_on(tty,NULL); } void print_value() const { print_value_on(tty,NULL); }
void print_value_on(outputStream* st, JavaThread *thread) const; void print_value_on(outputStream* st, JavaThread *thread) const;
@ -388,6 +391,9 @@ class frame VALUE_OBJ_CLASS_SPEC {
void interpreter_frame_print_on(outputStream* st) const; void interpreter_frame_print_on(outputStream* st) const;
void print_on_error(outputStream* st, char* buf, int buflen, bool verbose = false) const; void print_on_error(outputStream* st, char* buf, int buflen, bool verbose = false) const;
// Add annotated descriptions of memory locations belonging to this frame to values
void describe(FrameValues& values, int frame_no);
// Conversion from an VMReg to physical stack location // Conversion from an VMReg to physical stack location
oop* oopmapreg_to_location(VMReg reg, const RegisterMap* regmap) const; oop* oopmapreg_to_location(VMReg reg, const RegisterMap* regmap) const;
@ -472,6 +478,41 @@ class frame VALUE_OBJ_CLASS_SPEC {
}; };
#ifdef ASSERT
// A simple class to describe a location on the stack
class FrameValue VALUE_OBJ_CLASS_SPEC {
public:
intptr_t* location;
char* description;
int owner;
int priority;
};
// A collection of described stack values that can print a symbolic
// description of the stack memory. Interpreter frame values can be
// in the caller frames so all the values are collected first and then
// sorted before being printed.
class FrameValues {
private:
GrowableArray<FrameValue> _values;
static int compare(FrameValue* a, FrameValue* b) {
if (a->location == b->location) {
return a->priority - b->priority;
}
return a->location - b->location;
}
public:
// Used by frame functions to describe locations.
void describe(int owner, intptr_t* location, const char* description, int priority = 0);
bool validate();
void print();
};
#endif
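
For readers outside the VM, the shape of this helper is easier to see in a simplified standalone analogue: std::vector and printf stand in for GrowableArray and tty, the type and function names are made up, and only the sort-by-location-then-priority ordering, the overlap check, and the top-down print are carried over.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct SlotValue {
  intptr_t*   location;
  std::string description;
  int         owner;       // owning frame number, or -1 for generic labels
  int         priority;
};

struct StackDescription {
  std::vector<SlotValue> values;

  void describe(int owner, intptr_t* location, const std::string& desc, int priority = 0) {
    values.push_back(SlotValue{location, desc, owner, priority});
  }

  void sort_values() {
    std::sort(values.begin(), values.end(), [](const SlotValue& a, const SlotValue& b) {
      if (a.location != b.location) return (uintptr_t)a.location < (uintptr_t)b.location;
      return a.priority < b.priority;
    });
  }

  // Returns true if two different frames claim the same stack slot.
  bool validate() {
    sort_values();
    bool error = false;
    const SlotValue* prev = NULL;            // last owned entry seen
    for (const SlotValue& cur : values) {
      if (cur.owner == -1) continue;
      if (prev != NULL && prev->location == cur.location && prev->owner != cur.owner) {
        printf("overlapping storage at %p: '%s' vs '%s'\n",
               (void*)cur.location, prev->description.c_str(), cur.description.c_str());
        error = true;
      }
      prev = &cur;
    }
    return error;
  }

  // Print highest address first, like a stack dump.
  void print() {
    sort_values();
    for (size_t i = values.size(); i-- > 0; ) {
      printf("%p: %s\n", (void*)values[i].location, values[i].description.c_str());
    }
  }
};

int main() {
  intptr_t frame_slots[4] = {0, 0, 0, 0};
  StackDescription sd;
  sd.describe(-1, &frame_slots[3], "sp for #1");
  sd.describe( 1, &frame_slots[2], "local 0");
  sd.describe( 1, &frame_slots[1], "stack 0");
  sd.describe( 2, &frame_slots[1], "local 3");   // deliberate overlap with frame 1
  sd.print();
  printf("overlap detected: %s\n", sd.validate() ? "yes" : "no");
  return 0;
}
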
// //
// StackFrameStream iterates through the frames of a thread starting from // StackFrameStream iterates through the frames of a thread starting from

View File

@ -1827,7 +1827,7 @@ class CommandLineFlags {
develop(bool, VerifyBlockOffsetArray, false, \ develop(bool, VerifyBlockOffsetArray, false, \
"Do (expensive!) block offset array verification") \ "Do (expensive!) block offset array verification") \
\ \
product(bool, BlockOffsetArrayUseUnallocatedBlock, false, \ diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \
"Maintain _unallocated_block in BlockOffsetArray" \ "Maintain _unallocated_block in BlockOffsetArray" \
" (currently applicable only to CMS collector)") \ " (currently applicable only to CMS collector)") \
\ \
@ -2882,7 +2882,7 @@ class CommandLineFlags {
"Max. no. of lines in the stack trace for Java exceptions " \ "Max. no. of lines in the stack trace for Java exceptions " \
"(0 means all)") \ "(0 means all)") \
\ \
NOT_EMBEDDED(develop(intx, GuaranteedSafepointInterval, 1000, \ NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000, \
"Guarantee a safepoint (at least) every so many milliseconds " \ "Guarantee a safepoint (at least) every so many milliseconds " \
"(0 means none)")) \ "(0 means none)")) \
\ \

View File

@ -274,7 +274,7 @@ class os: AllStatic {
static char* reserve_memory_special(size_t size, char* addr = NULL, static char* reserve_memory_special(size_t size, char* addr = NULL,
bool executable = false); bool executable = false);
static bool release_memory_special(char* addr, size_t bytes); static bool release_memory_special(char* addr, size_t bytes);
static bool large_page_init(); static void large_page_init();
static size_t large_page_size(); static size_t large_page_size();
static bool can_commit_large_page_memory(); static bool can_commit_large_page_memory();
static bool can_execute_large_page_memory(); static bool can_execute_large_page_memory();

View File

@ -31,6 +31,7 @@
#include "compiler/compileBroker.hpp" #include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp" #include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp" #include "interpreter/linkResolver.hpp"
#include "interpreter/oopMapCache.hpp"
#include "jvmtifiles/jvmtiEnv.hpp" #include "jvmtifiles/jvmtiEnv.hpp"
#include "memory/oopFactory.hpp" #include "memory/oopFactory.hpp"
#include "memory/universe.inline.hpp" #include "memory/universe.inline.hpp"
@ -2860,6 +2861,25 @@ void JavaThread::trace_frames() {
} }
// Print or validate the layout of stack frames
void JavaThread::print_frame_layout(int depth, bool validate_only) {
ResourceMark rm;
PRESERVE_EXCEPTION_MARK;
FrameValues values;
int frame_no = 0;
for(StackFrameStream fst(this, false); !fst.is_done(); fst.next()) {
fst.current()->describe(values, ++frame_no);
if (depth == frame_no) break;
}
if (validate_only) {
values.validate();
} else {
tty->print_cr("[Describe stack layout]");
values.print();
}
}
void JavaThread::trace_stack_from(vframe* start_vf) { void JavaThread::trace_stack_from(vframe* start_vf) {
ResourceMark rm; ResourceMark rm;
int vframe_no = 1; int vframe_no = 1;

View File

@ -1380,6 +1380,12 @@ public:
void trace_stack_from(vframe* start_vf) PRODUCT_RETURN; void trace_stack_from(vframe* start_vf) PRODUCT_RETURN;
void trace_frames() PRODUCT_RETURN; void trace_frames() PRODUCT_RETURN;
// Print an annotated view of the stack frames
void print_frame_layout(int depth = 0, bool validate_only = false) PRODUCT_RETURN;
void validate_frame_layout() {
print_frame_layout(0, true);
}
// Returns the number of stack frames on the stack // Returns the number of stack frames on the stack
int depth() const; int depth() const;

View File

@ -469,6 +469,7 @@ extern "C" void verify() {
extern "C" void pp(void* p) { extern "C" void pp(void* p) {
Command c("pp"); Command c("pp");
FlagSetting fl(PrintVMMessages, true); FlagSetting fl(PrintVMMessages, true);
FlagSetting f2(DisplayVMOutput, true);
if (Universe::heap()->is_in(p)) { if (Universe::heap()->is_in(p)) {
oop obj = oop(p); oop obj = oop(p);
obj->print(); obj->print();
@ -507,6 +508,17 @@ extern "C" void ps() { // print stack
} }
extern "C" void pfl() {
// print frame layout
Command c("pfl");
JavaThread* p = JavaThread::active();
tty->print(" for thread: ");
p->print();
tty->cr();
if (p->has_last_Java_frame()) {
p->print_frame_layout();
}
}
extern "C" void psf() { // print stack frames extern "C" void psf() { // print stack frames
{ {

View File

@ -29,6 +29,7 @@
#include <string.h> #include <string.h>
#include <stdio.h> #include <stdio.h>
#include <limits.h> #include <limits.h>
#include <new>
#include "memory/allocation.inline.hpp" #include "memory/allocation.inline.hpp"
#include "utilities/decoder.hpp" #include "utilities/decoder.hpp"
@ -46,7 +47,7 @@ ElfFile::ElfFile(const char* filepath) {
m_status = Decoder::no_error; m_status = Decoder::no_error;
int len = strlen(filepath) + 1; int len = strlen(filepath) + 1;
m_filepath = NEW_C_HEAP_ARRAY(char, len); m_filepath = (const char*)os::malloc(len * sizeof(char));
if (m_filepath != NULL) { if (m_filepath != NULL) {
strcpy((char*)m_filepath, filepath); strcpy((char*)m_filepath, filepath);
m_file = fopen(filepath, "r"); m_file = fopen(filepath, "r");
@ -74,7 +75,7 @@ ElfFile::~ElfFile() {
} }
if (m_filepath != NULL) { if (m_filepath != NULL) {
FREE_C_HEAP_ARRAY(char, m_filepath); os::free((void*)m_filepath);
} }
if (m_next != NULL) { if (m_next != NULL) {
@ -120,14 +121,14 @@ bool ElfFile::load_tables() {
} }
// string table // string table
if (shdr.sh_type == SHT_STRTAB) { if (shdr.sh_type == SHT_STRTAB) {
ElfStringTable* table = new ElfStringTable(m_file, shdr, index); ElfStringTable* table = new (std::nothrow) ElfStringTable(m_file, shdr, index);
if (table == NULL) { if (table == NULL) {
m_status = Decoder::out_of_memory; m_status = Decoder::out_of_memory;
return false; return false;
} }
add_string_table(table); add_string_table(table);
} else if (shdr.sh_type == SHT_SYMTAB || shdr.sh_type == SHT_DYNSYM) { } else if (shdr.sh_type == SHT_SYMTAB || shdr.sh_type == SHT_DYNSYM) {
ElfSymbolTable* table = new ElfSymbolTable(m_file, shdr); ElfSymbolTable* table = new (std::nothrow) ElfSymbolTable(m_file, shdr);
if (table == NULL) { if (table == NULL) {
m_status = Decoder::out_of_memory; m_status = Decoder::out_of_memory;
return false; return false;

View File

@ -27,6 +27,7 @@
#ifndef _WINDOWS #ifndef _WINDOWS
#include "memory/allocation.inline.hpp" #include "memory/allocation.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/elfStringTable.hpp" #include "utilities/elfStringTable.hpp"
// We will try to load whole string table into memory if we can. // We will try to load whole string table into memory if we can.
@ -41,14 +42,14 @@ ElfStringTable::ElfStringTable(FILE* file, Elf_Shdr shdr, int index) {
// try to load the string table // try to load the string table
long cur_offset = ftell(file); long cur_offset = ftell(file);
m_table = (char*)NEW_C_HEAP_ARRAY(char, shdr.sh_size); m_table = (char*)os::malloc(sizeof(char) * shdr.sh_size);
if (m_table != NULL) { if (m_table != NULL) {
// if there is an error, mark the error // if there is an error, mark the error
if (fseek(file, shdr.sh_offset, SEEK_SET) || if (fseek(file, shdr.sh_offset, SEEK_SET) ||
fread((void*)m_table, shdr.sh_size, 1, file) != 1 || fread((void*)m_table, shdr.sh_size, 1, file) != 1 ||
fseek(file, cur_offset, SEEK_SET)) { fseek(file, cur_offset, SEEK_SET)) {
m_status = Decoder::file_invalid; m_status = Decoder::file_invalid;
FREE_C_HEAP_ARRAY(char, m_table); os::free((void*)m_table);
m_table = NULL; m_table = NULL;
} }
} else { } else {
@ -58,7 +59,7 @@ ElfStringTable::ElfStringTable(FILE* file, Elf_Shdr shdr, int index) {
ElfStringTable::~ElfStringTable() { ElfStringTable::~ElfStringTable() {
if (m_table != NULL) { if (m_table != NULL) {
FREE_C_HEAP_ARRAY(char, m_table); os::free((void*)m_table);
} }
if (m_next != NULL) { if (m_next != NULL) {

View File

@ -0,0 +1,309 @@
/*
* Copyright (c) 2011 Hewlett-Packard Company. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 5091921
* @summary Sign flip issues in loop optimizer
*
* @run main/othervm -Xcomp -XX:CompileOnly=Test5091921 -XX:MaxInlineSize=1 Test5091921
*/
public class Test5091921 {
private static int result = 0;
/* Test for the bug of transforming indx >= MININT to indx > MININT-1 */
public static int test_ge1(int limit) {
int indx;
int sum = 0;
for (indx = 500; indx >= limit; indx -= 2) {
sum += 2000 / indx;
result = sum;
}
return sum;
}
/* Test for the bug of transforming indx <= MAXINT to indx < MAXINT+1 */
public static int test_le1(int limit) {
int indx;
int sum = 0;
for (indx = -500; indx <= limit; indx += 2)
{
sum += 3000 / indx;
result = sum;
}
return sum;
}
/* Run with -Xcomp -XX:CompileOnly=wrap1.test1 -XX:MaxInlineSize=1 */
/* limit reset to ((limit-init+stride-1)/stride)*stride+init */
/* Calculation may overflow */
public static volatile int c = 1;
public static int test_wrap1(int limit)
{
int indx;
int sum = 0;
for (indx = 0xffffffff; indx < limit; indx += 0x20000000)
{
sum += c;
}
return sum;
}
/* Test for range check elimination with bit flip issue for
scale*i+offset<limit where offset is not 0 */
static int[] box5 = {1,2,3,4,5,6,7,8,9};
public static int test_rce5(int[] b, int limit)
{
int indx;
int sum = b[1];
result = sum;
for (indx = 0x80000000; indx < limit; ++indx)
{
if (indx > 0x80000000)
{
// this test is not issued in pre-loop but issued in main loop
// trick rce into thinking expression is false when indx >= 0
// in fact it is false when indx==0x80000001
if (indx - 9 < -9)
{
sum += indx;
result = sum;
sum ^= b[indx & 7];
result = sum;
}
else
break;
}
else
{
sum += b[indx & 3];
result = sum;
}
}
return sum;
}
/* Test for range check elimination with bit flip issue for
scale*i<limit where scale > 1 */
static int[] box6 = {1,2,3,4,5,6,7,8,9};
public static int test_rce6(int[] b, int limit)
{
int indx;
int sum = b[1];
result = sum;
for (indx = 0x80000000; indx < limit; ++indx)
{
if (indx > 0x80000000)
{
// harmless rce target
if (indx < 0)
{
sum += result;
result = sum;
}
else
break;
// this test is not issued in pre-loop but issued in main loop
// trick rce into thinking expression is false when indx >= 0
// in fact it is false when indx==0x80000001
// Compilers that transform mulI to shiftI may mask this issue.
if (indx * 28 + 1 < 0)
{
sum += indx;
result = sum;
sum ^= b[indx & 7];
result = sum;
}
else
break;
}
else
{
sum += b[indx & 3];
result = sum;
}
}
return sum;
}
/* Test for range check elimination with i <= limit */
static int[] box7 = {1,2,3,4,5,6,7,8,9,0x7fffffff};
public static int test_rce7(int[] b)
{
int indx;
int max = b[9];
int sum = b[7];
result = sum;
for (indx = 0; indx < b.length; ++indx)
{
if (indx <= max)
{
sum += (indx ^ 15) + ((result != 0) ? 0 : sum);
result = sum;
}
else
throw new RuntimeException();
}
for (indx = -7; indx < b.length; ++indx)
{
if (indx <= 9)
{
sum += (sum ^ 15) + ((result != 0) ? 0 : sum);
result = sum;
}
else
throw new RuntimeException();
}
return sum;
}
/* Test for range check elimination with i >= limit */
static int[] box8 = {-1,0,1,2,3,4,5,6,7,8,0x80000000};
public static int test_rce8(int[] b)
{
int indx;
int sum = b[5];
int min = b[10];
result = sum;
for (indx = b.length-1; indx >= 0; --indx)
{
if (indx >= min)
{
sum += (sum ^ 9) + ((result != 0) ? 0 :sum);
result = sum;
}
else
throw new RuntimeException();
}
return sum;
}
public static void main(String[] args)
{
result=1;
int r = 0;
try {
r = test_ge1(0x80000000);
System.out.println(result);
System.out.println("test_ge1 FAILED");
System.exit(1);
}
catch (ArithmeticException e1) {
System.out.println("test_ge1: Expected exception caught");
if (result != 5986) {
System.out.println(result);
System.out.println("test_ge1 FAILED");
System.exit(97);
}
}
System.out.println("test_ge1 WORKED");
result=0;
try
{
r = test_le1(0x7fffffff);
System.out.println(result);
System.out.println("test_le1 FAILED");
System.exit(1);
}
catch (ArithmeticException e1)
{
System.out.println("test_le1: Expected exception caught");
if (result != -9039)
{
System.out.println(result);
System.out.println("test_le1 FAILED");
System.exit(97);
}
}
System.out.println("test_le1 WORKED");
result=0;
r = test_wrap1(0x7fffffff);
if (r != 4)
{
System.out.println(result);
System.out.println("test_wrap1 FAILED");
System.exit(97);
}
else
{
System.out.println("test_wrap1 WORKED");
}
result=0;
r = test_rce5(box5,0x80000100);
if (result != 3)
{
System.out.println(result);
System.out.println("test_rce5 FAILED");
System.exit(97);
}
else
{
System.out.println("test_rce5 WORKED");
}
result=0;
r = test_rce6(box6,0x80000100);
if (result != 6)
{
System.out.println(result);
System.out.println("test_rce6 FAILED");
System.exit(97);
}
else
{
System.out.println("test_rce6 WORKED");
}
result=0;
r = test_rce7(box7);
if (result != 14680079)
{
System.out.println(result);
System.out.println("test_rce7 FAILED");
System.exit(97);
}
else
{
System.out.println("test_rce7 WORKED");
}
result=0;
r = test_rce8(box8);
if (result != 16393)
{
System.out.println(result);
System.out.println("test_rce8 FAILED");
System.exit(97);
}
else
{
System.out.println("test_rce8 WORKED");
}
}
}
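
The comments in Test5091921 above all rest on the same two's-complement wraparound: adjusting a loop bound by one at the int extremes silently flips its sign. A minimal standalone sketch of that wraparound (not part of the commit; the class name WrapDemo is invented for illustration):

public class WrapDemo {
    public static void main(String[] args) {
        // Rewriting "indx >= Integer.MIN_VALUE" as "indx > Integer.MIN_VALUE - 1"
        // is unsafe: the constant wraps to Integer.MAX_VALUE, so the rewritten
        // test is false for every int and the loop body would never run.
        System.out.println(Integer.MIN_VALUE - 1 == Integer.MAX_VALUE); // true
        // Rewriting "indx <= Integer.MAX_VALUE" as "indx < Integer.MAX_VALUE + 1"
        // wraps the other way, to Integer.MIN_VALUE, and is likewise always false.
        System.out.println(Integer.MAX_VALUE + 1 == Integer.MIN_VALUE); // true
    }
}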

View File

@ -0,0 +1,71 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 6186134
* @summary Server virtual machine produces/executes incorrect code.
*
* @run main Test6186134 100000
*/
import java.util.ArrayList;
public class Test6186134 {
int num = 0;
public Test6186134(int n) {
num = n;
}
public boolean more() {
return num-- > 0;
}
public ArrayList test1() {
ArrayList res = new ArrayList();
int maxResults = Integer.MAX_VALUE;
int n = 0;
boolean more = more();
while ((n++ < maxResults) && more) {
res.add(new Object());
more = more();
}
return res;
}
public static void main(String[] pars) {
int n = Integer.parseInt(pars[0]);
for (int i=0; i<n; i++) {
Test6186134 t = new Test6186134(10);
int size = t.test1().size();
if (size != 10) {
System.out.println("wrong size: " + size +", should be 10");
System.exit(97);
}
}
System.out.println("Passed");
}
}

View File

@ -0,0 +1,47 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 6196102
* @summary Integer seems to be greater than Integer.MAX_VALUE
*
* @run main Test6196102
*/
public class Test6196102 {
static public void main(String[] args) {
int i1 = 0;
int i2 = Integer.MAX_VALUE;
while (i1 >= 0) {
i1++;
if (i1 > i2) {
System.out.println("E R R O R: " + i1);
System.exit(97);
}
}
}
}

View File

@ -0,0 +1,179 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 6357214
* @summary Hotspot server compiler gets integer comparison wrong
*
* @run main/othervm/timeout=60 -DshowAll=ffo -DeventID=444 Test6357214
*/
// Before the fix the test hangs after a few iterations, so a timeout means failure.
class MyResult {
public boolean next() {
return true;
}
public String getString(String in) {
if (in.equals("id"))
return "idFoo";
if (in.equals("contentKey"))
return "ckFoo";
return "Foo";
}
public int getInt(String in) {
if (in.equals("processingComplete"))
return 0;
return 1;
}
public byte[] getBytes(String in) {
byte[] arr = null;
if (in.equals("content")) {
arr = new byte[65536];
byte j = 32;
for (int i=0; i<65536; i++) {
arr[i] = j;
if (++j == 127)
j=32;
}
}
return arr;
}
}
public class Test6357214 {
public static volatile boolean bollocks = true;
public String create(String context) throws Exception {
//
// Extract HTTP parameters
//
boolean showAll = System.getProperty("showAll") != null;
String eventID = System.getProperty("eventID");
String eventContentKey = System.getProperty("cKey");
//
// Build ContentStaging query based on eventID or eventContentKey
//
String sql = "select id, processingComplete, contentKey, content "
+ "from ContentStaging cs, ContentStagingKey csk "
+ "where cs.eventContentKey = csk.eventContentKey ";
if (eventID != null) {
sql += "and id = " + eventID;
}
else if (eventContentKey != null) {
sql += "and cs.eventContentKey = '"
+ eventContentKey
+ "' having id = max(id)";
}
else {
throw new Exception("Need eventID or eventContentKey");
}
//
// This factory builds a static panel, there is no JSP
//
StringBuffer html = new StringBuffer();
try {
MyResult result = new MyResult();
if (result.next()) {
eventID = result.getString("id");
int processingComplete = result.getInt("processingComplete");
String contentKey = result.getString("contentKey");
byte[] bytes = result.getBytes("content");
//
// Print content status and associated controls
//
html.append("<br/><font class=\"small\">");
html.append("Status: ");
switch (processingComplete) {
case 0 :
case 1 : html.append("PENDING"); break;
case 2 : html.append(contentKey); break;
case 3 : html.append(eventID); break;
default : html.append("UNKNONW");
}
html.append("</font><br/>");
//
// Print at most 20Kb of content unless "showAll" is set
//
int limit = showAll ? Integer.MAX_VALUE : 1024 * 20;
System.out.println(limit);
html.append("<pre>");
for (int i = 0; bytes != null && i < bytes.length; i++) {
char c = (char) bytes[i];
switch (c) {
case '<' : html.append("&lt;"); break;
case '>' : html.append("&gt;"); break;
case '&' : html.append("&amp;"); break;
default : html.append(c);
}
if (i > limit) {
while (bollocks);
// System.out.println("i is " + i);
// System.out.println("limit is " + limit);
html.append("...\n</pre>");
html.append(eventID);
html.append("<pre>");
break;
}
}
html.append("</pre>");
}
}
catch (Exception exception) {
throw exception;
}
finally {
html.append("Oof!!");
}
String ret = html.toString();
System.out.println("Returning string length = "+ ret.length());
return ret;
}
public static void main(String[] args) throws Exception {
int length=0;
for (int i = 0; i < 100; i++) {
length = new Test6357214().create("boo").length();
System.out.println(length);
}
}
}

View File

@ -0,0 +1,81 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 6559156
* @summary Server compiler generates bad code for "<= Integer.MAX_VALUE" expression
*
* @run main Test6559156
*/
public class Test6559156 {
static final int N_TESTS = 1000000;
public static void main(String[] args) throws Exception {
/*
* If MAX_VALUE is changed to MAX_VALUE - 1 below, the test passes
* because (apparently) bad code is only generated when comparing
* <= MAX_VALUE in the doTest method.
*/
Test6559156 test = new Test6559156();
for (int i = 0; i < N_TESTS; i += 1) {
test.doTest1(10, Integer.MAX_VALUE, i);
test.doTest2(10, Integer.MAX_VALUE, i);
}
System.out.println("No failure");
}
void doTest1(int expected, int max, int i) {
int counted;
for (counted = 0;
(counted <= max) && (counted < expected);
counted += 1) {
}
if (counted != expected) {
throw new RuntimeException("Failed test1 iteration=" + i +
" max=" + max +
" counted=" + counted +
" expected=" + expected);
}
}
void doTest2(int expected, int max, int i) {
int counted;
for (counted = 0;
// change test sequence.
(counted < expected) && (counted <= max);
counted += 1) {
}
if (counted != expected) {
throw new RuntimeException("Failed test1 iteration=" + i +
" max=" + max +
" counted=" + counted +
" expected=" + expected);
}
}
}

View File

@ -0,0 +1,50 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 6753639
* @summary Strange optimisation in for loop with cyclic integer condition
*
* @run main/othervm -Xbatch Test6753639
*/
public class Test6753639 {
public static void main(String[] args) throws InterruptedException {
int END = Integer.MAX_VALUE;
int count = 0;
for(int i = Integer.MAX_VALUE - 5; i <= END; i++) {
count++;
if (count > 100000) {
System.out.println("Passed");
System.exit(95);
}
}
System.out.println("broken " + count);
System.out.println("FAILED");
System.exit(97);
}
}

View File

@ -0,0 +1,53 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 6850611
* @summary int / long arithmetic seems to be broken in 1.6.0_14 HotSpot Server VM (Win XP)
*
* @run main Test6850611
*/
public class Test6850611 {
public static void main(String[] args) {
test();
}
private static void test() {
for (int j = 0; j < 5; ++j) {
long x = 0;
for (int i = Integer.MIN_VALUE; i < Integer.MAX_VALUE; ++i) {
x += i;
}
System.out.println("sum: " + x);
if (x != -4294967295l) {
System.out.println("FAILED");
System.exit(97);
}
}
}
}
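
The expected value -4294967295 in Test6850611 follows from pairing terms: every positive i from 1 to Integer.MAX_VALUE - 1 cancels the matching negative term, leaving only Integer.MIN_VALUE and -(Integer.MAX_VALUE). A small sketch checking that arithmetic (not part of the commit; SumCheck is an invented name):

public class SumCheck {
    public static void main(String[] args) {
        // Only MIN_VALUE and -(MAX_VALUE) survive the cancellation, so the
        // loop's sum must equal MIN_VALUE - MAX_VALUE.
        long expected = (long) Integer.MIN_VALUE - Integer.MAX_VALUE;
        System.out.println(expected); // -4294967295
    }
}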

View File

@ -0,0 +1,189 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 6890943
* @summary JVM mysteriously gives wrong result on 64-bit 1.6 VMs in hotspot mode.
*
* @run shell Test6890943.sh
*/
import java.util.*;
import java.io.*;
import java.util.regex.*;
public class Test6890943 {
public static final boolean AIR = true, ROCK = false;
public static void main(String[] args) {
new Test6890943().go();
}
int r, c, f, t;
boolean[][] grid;
public void go() {
Scanner s = new Scanner(System.in);
s.useDelimiter("\\s+");
int T = s.nextInt();
for (t = 0 ; t < T ; t++) {
r = s.nextInt(); c = s.nextInt(); f = s.nextInt();
grid = new boolean[r][c];
for (int x = 0 ; x < r ; x++) {
String line = s.next();
for (int y = 0 ; y < c ; y++) grid[x][y] = line.charAt(y) == '.';
}
int digs = solve();
String res = digs == -1 ? "No" : "Yes " + digs;
System.out.printf("Case #%d: %s\n", t+1, res);
}
}
Map<Integer, Integer> M = new HashMap<Integer, Integer>();
private int solve() {
M = new HashMap<Integer, Integer>();
M.put(calcWalkingRange(0, 0), 0);
for (int digDown = 0 ; digDown < r ; digDown++) {
Map<Integer, Integer> tries = new HashMap<Integer, Integer>();
for (Map.Entry<Integer, Integer> m : M.entrySet()) {
int q = m.getKey();
if (depth(q) != (digDown)) continue;
if (stuck(q)) continue;
tries.put(q, m.getValue());
}
for (Map.Entry<Integer, Integer> m : tries.entrySet()) {
int q = m.getKey();
int fallLeftDelta = 0, fallRightDelta = 0;
//fall left
int fallLeft = fall(digDown, start(q));
if (fallLeft > 0) {
fallLeftDelta = 1;
if (fallLeft <= f) addToM(calcWalkingRange(digDown+fallLeft, start(q)), m.getValue());
}
//fall right
int fallRight = fall(digDown, end(q));
if (fallRight > 0) {
fallRightDelta = 1;
if (fallRight <= f) addToM(calcWalkingRange(digDown+fallRight, end(q)), m.getValue());
}
for (int p = start(q) + fallLeftDelta ; p <= end(q) - fallRightDelta ; p++) {
//goLeft
for (int digSpot = p ; digSpot > start(q) +fallLeftDelta ; digSpot--) {
int fallDown = 1+fall(digDown+1, digSpot);
if (fallDown <= f) {
if (fallDown == 1) {
addToM(calcWalkingRange(digDown + 1, digSpot, digSpot, p), m.getValue() + Math.abs(digSpot-p)+1);
} else {
addToM(calcWalkingRange(digDown + fallDown, digSpot), m.getValue() + Math.abs(digSpot-p)+1);
}
}
}
//goRight
for (int digSpot = p ; digSpot < end(q)-fallRightDelta ;digSpot++) {
int fallDown = 1+fall(digDown+1, digSpot);
if (fallDown <= f) {
if (fallDown == 1) {
addToM(calcWalkingRange(digDown + 1, digSpot, p, digSpot), m.getValue() + Math.abs(digSpot-p)+1);
} else {
addToM(calcWalkingRange(digDown + fallDown, digSpot), m.getValue() + Math.abs(digSpot-p)+1);
}
}
}
}
}
}
int result = Integer.MAX_VALUE;
for (Map.Entry<Integer, Integer> m : M.entrySet()) {
if (depth(m.getKey()) == r-1) result = Math.min(m.getValue(), result);
}
if (result == Integer.MAX_VALUE) return -1;
return result;
}
private void addToM(int q, int i) {
Integer original = M.get(q);
if ( original == null ) M.put(q, i);
else M.put(q, Math.min(original, i));
}
private int fall(int row, int column) {
int res = 0;
for ( int p = row+1 ; p < r ; p++) {
if (grid[p][column] == AIR) res++;
else break;
}
return res;
}
private boolean stuck(int q) {
return start(q) == end(q);
}
private int depth(int q) {
return q % 50;
}
private int start(int q) {
return q / (50*50);
}
private int end(int q) {
return (q / 50) % 50;
}
private int calcWalkingRange(int depth, int pos) {
return calcWalkingRange(depth, pos, Integer.MAX_VALUE, Integer.MIN_VALUE);
}
private int calcWalkingRange(int depth, int pos, int airOverrideStart, int airOverrideEnd) {
int left = pos, right = pos;
if (depth >= r) return (c-1)*50 + depth;
while (left > 0) {
if (grid[depth][left-1] == ROCK && (left-1 < airOverrideStart || left-1 > airOverrideEnd)) break;
if (depth < r-1 && grid[depth+1][left-1] == AIR) {
left--;
break;
}
left--;
}
while (right < c-1) {
if (grid[depth][right+1] == ROCK && (right+1 < airOverrideStart || right+1 > airOverrideEnd)) break;
if (depth < r-1 && grid[depth+1][right+1] == AIR) {
right++;
break;
}
right++;
}
return left *50*50 + right*50 + depth;
}
}

View File

@ -0,0 +1,67 @@
#!/bin/sh
#
# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
if [ "${TESTSRC}" = "" ]
then
echo "TESTSRC not set. Test cannot execute. Failed."
exit 1
fi
echo "TESTSRC=${TESTSRC}"
if [ "${TESTJAVA}" = "" ]
then
echo "TESTJAVA not set. Test cannot execute. Failed."
exit 1
fi
echo "TESTJAVA=${TESTJAVA}"
if [ "${TESTCLASSES}" = "" ]
then
echo "TESTCLASSES not set. Test cannot execute. Failed."
exit 1
fi
echo "TESTCLASSES=${TESTCLASSES}"
echo "CLASSPATH=${CLASSPATH}"
set -x
cp ${TESTSRC}/Test6890943.java .
cp ${TESTSRC}/input6890943.txt .
cp ${TESTSRC}/output6890943.txt .
cp ${TESTSRC}/Test6890943.sh .
${TESTJAVA}/bin/javac -d . Test6890943.java
${TESTJAVA}/bin/java ${TESTVMOPTS} Test6890943 < input6890943.txt > test.out 2>&1
diff output6890943.txt test.out
result=$?
if [ $result -eq 0 ]
then
echo "Passed"
exit 0
else
echo "Failed"
exit 1
fi

View File

@ -0,0 +1,58 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 6897150
* @summary Hotspot optimises away a valid loop
*
* @run main Test6897150
*/
// Should be compiled with javac from JDK1.3 to get bytecode which shows the problem.
public class Test6897150 {
public static void main(String[] args) {
// This works
loopAndPrint(Integer.MAX_VALUE -1);
// This doesn't
loopAndPrint(Integer.MAX_VALUE);
}
static void verify(int max, int a) {
if ( a != (max - 1)) {
System.out.println("Expected: " + (max - 1));
System.out.println("Actual : " + a);
System.exit(97);
}
}
static void loopAndPrint(int max) {
int a = -1;
int i = 1;
for (; i < max; i++) {
a = i;
}
verify(max, a);
}
}

View File

@ -0,0 +1,76 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 6905845
* @summary Server VM improperly optimizing away loop.
*
* @run main Test6905845
*/
public class Test6905845 {
public static void main(String[] args){
for (int asdf = 0; asdf < 5; asdf++){
//test block
{
StringBuilder strBuf1 = new StringBuilder(65);
long start = System.currentTimeMillis();
int count = 0;
for (int i = Integer.MIN_VALUE; i < (Integer.MAX_VALUE - 80); i += 79){
strBuf1.append(i);
count++;
strBuf1.delete(0, 65);
}
System.out.println(count);
if (count != 54366674) {
System.out.println("wrong count: " + count +", should be 54366674");
System.exit(97);
}
}
//test block
{
StringBuilder strBuf1 = new StringBuilder(65);
long start = System.currentTimeMillis();
int count = 0;
for (int i = Integer.MIN_VALUE; i < (Integer.MAX_VALUE - 80); i += 79){
strBuf1.append(i);
count++;
strBuf1.delete(0, 65);
}
System.out.println(count);
if (count != 54366674) {
System.out.println("wrong count: " + count +", should be 54366674");
System.exit(97);
}
}
}
}
}
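
The count 54366674 checked by Test6905845 is simply the number of strides of 79 that fit between Integer.MIN_VALUE and Integer.MAX_VALUE - 80, i.e. the ceiling of the span divided by 79. A small sketch of that arithmetic (not part of the commit; CountCheck is an invented name):

public class CountCheck {
    public static void main(String[] args) {
        long span = (Integer.MAX_VALUE - 80L) - Integer.MIN_VALUE; // 4294967215
        long iterations = (span + 78) / 79;                        // ceiling division
        System.out.println(iterations); // 54366674, the count both blocks expect
    }
}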

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 6931567
* @summary JIT Error (on class file compiled with eclipse) on JVM x64 (but not on x32!).
*
* @run main Test6931567
*/
// Should be compiled with javac from JDK1.3 to get bytecode which shows the problem.
public class Test6931567 {
public static void main(final String[] args) {
booleanInvert(Integer.MAX_VALUE);
booleanInvert(Integer.MAX_VALUE - 1);
}
private static void booleanInvert(final int max) {
boolean test1 = false;
boolean test2 = false;
for (int i = 0; i < max; i++) {
test1 = !test1;
}
for (int i = 0; i < max; i++) {
test2 ^= true;
}
if (test1 != test2) {
System.out.println("ERROR: Boolean invert\n\ttest1=" + test1
+ "\n\ttest2=" + test2);
System.exit(97);
} else {
System.out.println("Passed!");
}
}
}

View File

@ -0,0 +1,74 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 6935022
* @summary Server VM incorrectly breaks out of while loop
*
* @run main Test6935022
*/
public class Test6935022 {
public static final void main(String[] args) throws Exception {
Test6935022 test = new Test6935022();
int cnt = 0;
while (cnt < 10000) {
try {
++cnt;
if ((cnt&1023) == 0)
System.out.println("Thread="+Thread.currentThread().getName() + " iteration: " + cnt);
test.loop(2147483647, (cnt&1023));
}
catch (Exception e) {
System.out.println("Caught on iteration " + cnt);
e.printStackTrace();
System.exit(97);
}
}
}
private void loop(int endingRow, int mask) throws Exception {
int rows = 1;
boolean next = true;
while(rows <= endingRow && next) {
rows++;
if (rows == mask)
System.out.println("Rows="+rows+", end="+endingRow+", next="+next);
next = next(rows);
}
if (next)
throw new Exception("Ended on rows(no rs): " + rows);
}
private boolean next(int rows) {
return rows < 12;
}
}

View File

@ -0,0 +1,90 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 6959129
* @summary COMPARISON WITH INTEGER.MAX_INT DOES NOT WORK CORRECTLY IN THE CLIENT VM.
*
* @run main/othervm -ea Test6959129
*/
public class Test6959129 {
public static void main(String[] args) {
long start = System.currentTimeMillis();
int min = Integer.MAX_VALUE-30000;
int max = Integer.MAX_VALUE;
long maxmoves = 0;
try {
maxmoves = maxMoves(min, max);
} catch (AssertionError e) {
System.out.println("Passed");
System.exit(95);
}
System.out.println("maxMove:" + maxmoves);
System.out.println("FAILED");
System.exit(97);
}
/**
* Imperative implementation that returns the number of hailstone moves
* for a given number.
*/
public static long hailstoneLengthImp(long n) {
long moves = 0;
while (n != 1) {
assert n > 1;
if (isEven(n)) {
n = n / 2;
} else {
n = 3 * n + 1;
}
++moves;
}
return moves;
}
private static boolean isEven(long n) {
return n % 2 == 0;
}
/**
* Returns the maximum length of the hailstone sequence for numbers
* between min and max.
*
* For rec1 - Assume that min is bigger than max.
*/
public static long maxMoves(int min, int max) {
long maxmoves = 0;
for (int n = min; n <= max; n++) {
if ((n & 1023) == 0) System.out.println(n);
long moves = hailstoneLengthImp(n);
if (moves > maxmoves) {
maxmoves = moves;
}
}
return maxmoves;
}
}

View File

@ -0,0 +1,53 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 6985295
* @summary JVM fails to evaluate condition randomly
*
* @run main/othervm -Xbatch Test6985295
*/
public class Test6985295 {
public static void main(String[] args) {
int min = Integer.MAX_VALUE-50000;
int max = Integer.MAX_VALUE;
System.out.println("max = " + max);
long counter = 0;
int i;
for(i = min; i <= max; i++) {
counter++;
if (counter > 1000000) {
System.out.println("Passed");
System.exit(95);
}
}
System.out.println("iteration went " + counter + " times (" + i + ")");
System.out.println("FAILED");
System.exit(97);
}
}

View File

@ -0,0 +1,66 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 6992759
* @summary Bad code generated for integer <= comparison, fails for Integer.MAX_VALUE
*
* @run main Test6992759
*/
public class Test6992759 {
static final int N_TESTS = 1000000000;
public static void main(String[] args) throws Exception {
/*
* If MAX_VALUE is changed to MAX_VALUE - 1 below, the test passes
* because (apparently) bad code is only generated when comparing
* <= MAX_VALUE in the doTest method.
*/
Test6992759 test = new Test6992759();
for (int i = 0; i < N_TESTS; i += 1) {
test.doTest(10, Integer.MAX_VALUE, i);
//test.doTest(10, Integer.MAX_VALUE - 1, i);
}
System.out.println("No failure");
}
void doTest(int expected, int max, int i) {
int counted;
for (counted = 0;
(counted <= max) && (counted < expected);
counted += 1) {
}
if (counted != expected) {
throw new RuntimeException("Failed test iteration=" + i +
" max=" + max +
" counted=" + counted +
" expected=" + expected);
}
}
}

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 7005594
* @summary Array overflow not handled correctly with loop optimizations
*
* @run main/othervm -Xms2048m -Xcomp -XX:CompileOnly=Test7005594.test Test7005594
*/
public class Test7005594 {
static int test(byte a[]){
int result=0;
for( int i=0; i<a.length; i+=((0x7fffffff>>1)+1) ){
result += a[i];
}
return result;
}
public static void main(String [] args){
byte a[]=new byte[(0x7fffffff>>1)+2];
int result = 0;
try {
result = test(a);
} catch (ArrayIndexOutOfBoundsException e) {
e.printStackTrace(System.out);
System.out.println("Passed");
System.exit(95);
}
System.out.println(result);
System.out.println("FAILED");
System.exit(97);
}
}
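
Test7005594 relies on the stride itself overflowing: two steps of 2^30 wrap the index to Integer.MIN_VALUE, and the resulting negative access must still raise ArrayIndexOutOfBoundsException rather than be optimized away. A minimal sketch of that wraparound (not part of the commit; StrideWrap is an invented name):

public class StrideWrap {
    public static void main(String[] args) {
        int stride = (0x7fffffff >> 1) + 1;  // 2^30, the stride used in test()
        int i = 0;
        i += stride;                         // 1073741824, still a valid index
        i += stride;                         // wraps to Integer.MIN_VALUE
        System.out.println(i);               // -2147483648
        // a[i] with this negative index must throw ArrayIndexOutOfBoundsException
    }
}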

View File

@ -0,0 +1,58 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 7020614
* @summary "-server" mode optimizer makes code hang
*
* @run main/othervm/timeout=30 -Xbatch Test7020614
*/
public class Test7020614 {
private static final int ITERATIONS = 1000;
private static int doNotOptimizeOut = 0;
public static long bitCountShort() {
long t0 = System.currentTimeMillis();
int sum = 0;
for (int it = 0; it < ITERATIONS; ++it) {
short value = 0;
do {
sum += Integer.bitCount(value);
} while (++value != 0);
}
doNotOptimizeOut += sum;
return System.currentTimeMillis() - t0;
}
public static void main(String[] args) {
for (int i = 0; i < 4; ++i) {
System.out.println((i + 1) + ": " + bitCountShort());
}
System.out.println("doNotOptimizeOut value: " + doNotOptimizeOut);
}
}

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff