commit be08183427
Phil Race, 2015-04-06 10:35:43 -07:00
210 changed files with 7069 additions and 5109 deletions


@ -298,3 +298,5 @@ d6224d6021459ac8b3832e822f5acc849fa944af jdk9-b52
874d76e4699dfcd61ae1826c9fe0ddc1610ad598 jdk9-b53
82cd31c5d6ca8d4c1653f4eb1c09eb2d9a3b2813 jdk9-b54
c97e2d1bad9708d379793ba2a4c848eda14c741e jdk9-b55
47544495db2d3d2edf0f85862d8715592fdb919f jdk9-b56
ddb95d8f169b09544cc17e72a6baaff2400092f5 jdk9-b57


@ -298,3 +298,5 @@ d1f37d39ff2421f956a6ddf316cf763807bc3363 jdk9-b50
d6ed47125a76cd1cf8a100568507bfb5e9669d9f jdk9-b53
cb7367141e910e265b8344a8facee740bd1e5467 jdk9-b54
0c37a832458f0e0b7d2a3f1a6f69aeae311aeb18 jdk9-b55
eb7febe45865ba6b81f2ea68082262d0708a0b22 jdk9-b56
f25ee9f62427a9ba27418e5531a89754791a305b jdk9-b57


@ -441,7 +441,7 @@
</li>
<li>
Install
<a href="#vs2010">Visual Studio 2010</a>
<a href="#vs2013">Visual Studio 2013</a>
</li>
</ul>
</td>
@ -750,23 +750,23 @@
</blockquote>
<h5><a name="vs2010">Visual Studio 2010 Compilers</a></h5>
<h5><a name="vs2013">Visual Studio 2013 Compilers</a></h5>
<blockquote>
<p>
The 32-bit and 64-bit OpenJDK Windows build requires
Microsoft Visual Studio C++ 2010 (VS2010) Professional
Microsoft Visual Studio C++ 2013 (VS2013) Professional
Edition or Express compiler.
The compiler and other tools are expected to reside
in the location defined by the variable
<code>VS100COMNTOOLS</code> which
<code>VS120COMNTOOLS</code> which
is set by the Microsoft Visual Studio installer.
</p>
<p>
Only the C++ part of VS2010 is needed.
Only the C++ part of VS2013 is needed.
Try to let the installation go to the default
install directory.
Always reboot your system after installing VS2010.
The system environment variable VS100COMNTOOLS
Always reboot your system after installing VS2013.
The system environment variable VS120COMNTOOLS
should be
set in your environment.
</p>
@ -1040,7 +1040,7 @@
<br>
This is usually picked up automatically
from the redist
directories of Visual Studio 2010.
directories of Visual Studio 2013.
</td>
</tr>
<tr>
@ -1854,54 +1854,54 @@
<tbody>
<tr>
<td>Linux X86 (32-bit) and X64 (64-bit)</td>
<td>Fedora 9</td>
<td>gcc 4.3 </td>
<td>JDK 7u7</td>
<td>Oracle Enterprise Linux 6.4</td>
<td>gcc 4.8.2 </td>
<td>JDK 8</td>
<td>2 or more</td>
<td>1 GB</td>
<td>6 GB</td>
</tr>
<tr>
<td>Solaris SPARC (32-bit) and SPARCV9 (64-bit)</td>
<td>Solaris 10 Update 6</td>
<td>Studio 12 Update 1 + patches</td>
<td>JDK 7u7</td>
<td>Solaris SPARCV9 (64-bit)</td>
<td>Solaris 10 Update 10</td>
<td>Studio 12 Update 3 + patches</td>
<td>JDK 8</td>
<td>4 or more</td>
<td>4 GB</td>
<td>8 GB</td>
</tr>
<tr>
<td>Solaris X86 (32-bit) and X64 (64-bit)</td>
<td>Solaris 10 Update 6</td>
<td>Studio 12 Update 1 + patches</td>
<td>JDK 7u7</td>
<td>Solaris X64 (64-bit)</td>
<td>Solaris 10 Update 10</td>
<td>Studio 12 Update 3 + patches</td>
<td>JDK 8</td>
<td>4 or more</td>
<td>4 GB</td>
<td>8 GB</td>
</tr>
<tr>
<td>Windows X86 (32-bit)</td>
<td>Windows XP</td>
<td>Microsoft Visual Studio C++ 2010 Professional Edition</td>
<td>JDK 7u7</td>
<td>Windows Server 2012 R2 x64</td>
<td>Microsoft Visual Studio C++ 2013 Professional Edition</td>
<td>JDK 8</td>
<td>2 or more</td>
<td>2 GB</td>
<td>6 GB</td>
</tr>
<tr>
<td>Windows X64 (64-bit)</td>
<td>Windows Server 2003 - Enterprise x64 Edition</td>
<td>Microsoft Visual Studio C++ 2010 Professional Edition</td>
<td>JDK 7u7</td>
<td>Windows Server 2012 R2 x64</td>
<td>Microsoft Visual Studio C++ 2013 Professional Edition</td>
<td>JDK 8</td>
<td>2 or more</td>
<td>2 GB</td>
<td>6 GB</td>
</tr>
<tr>
<td>Mac OS X X64 (64-bit)</td>
<td>Mac OS X 10.7 "Lion"</td>
<td>XCode 4.5.2 or newer</td>
<td>JDK 7u7</td>
<td>Mac OS X 10.9 "Mavericks"</td>
<td>XCode 5.1.1 or newer</td>
<td>JDK 8</td>
<td>2 or more</td>
<td>4 GB</td>
<td>6 GB</td>
@ -2009,7 +2009,7 @@
</blockquote>
<h4><a name="ubuntu">Ubuntu 12.04</a></h4>
<blockquote>
<blockquote>
After installing <a href="http://ubuntu.org">Ubuntu</a> 12.04
you need to install several build dependencies. The simplest
way to do it is to execute the following commands:


@ -4290,8 +4290,8 @@ TOOLCHAIN_DESCRIPTION_xlc="IBM XL C/C++"
#
################################################################################
VALID_VS_VERSIONS="2010 2012 2013"
# The order of these defines the priority by which we try to find them.
VALID_VS_VERSIONS="2013 2012 2010"
VS_DESCRIPTION_2010="Microsoft Visual Studio 2010"
VS_VERSION_INTERNAL_2010=100
@ -4365,7 +4365,7 @@ VS_SDK_PLATFORM_NAME_2013=
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1427843803
DATE_WHEN_GENERATED=1428017006
###############################################################################
#


@ -24,8 +24,8 @@
#
################################################################################
VALID_VS_VERSIONS="2010 2012 2013"
# The order of these defines the priority by which we try to find them.
VALID_VS_VERSIONS="2013 2012 2010"
VS_DESCRIPTION_2010="Microsoft Visual Studio 2010"
VS_VERSION_INTERNAL_2010=100
@ -72,7 +72,7 @@ AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT],
else
VCVARSFILE="vc/bin/amd64/vcvars64.bat"
fi
BASIC_WINDOWS_REWRITE_AS_UNIX_PATH(VS_BASE)
if test -d "$VS_BASE"; then
if test -f "$VS_BASE/$VCVARSFILE"; then
@ -404,7 +404,7 @@ AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_MSVC_DLL],
METHOD="$3"
if test -n "$POSSIBLE_MSVC_DLL" -a -e "$POSSIBLE_MSVC_DLL"; then
AC_MSG_NOTICE([Found $1 at $POSSIBLE_MSVC_DLL using $METHOD])
# Need to check if the found msvcr is correct architecture
AC_MSG_CHECKING([found $1 architecture])
MSVC_DLL_FILETYPE=`$FILE -b "$POSSIBLE_MSVC_DLL"`
@ -463,9 +463,9 @@ AC_DEFUN([TOOLCHAIN_SETUP_MSVC_DLL],
TOOLCHAIN_CHECK_POSSIBLE_MSVC_DLL([$DLL_NAME], [$POSSIBLE_MSVC_DLL],
[well-known location in Boot JDK])
fi
if test "x$MSVC_DLL" = x; then
# Probe: Look in the Windows system32 directory
# Probe: Look in the Windows system32 directory
CYGWIN_SYSTEMROOT="$SYSTEMROOT"
BASIC_WINDOWS_REWRITE_AS_UNIX_PATH(CYGWIN_SYSTEMROOT)
POSSIBLE_MSVC_DLL="$CYGWIN_SYSTEMROOT/system32/$DLL_NAME"
@ -489,7 +489,7 @@ AC_DEFUN([TOOLCHAIN_SETUP_MSVC_DLL],
[search of VS100COMNTOOLS])
fi
fi
if test "x$MSVC_DLL" = x; then
# Probe: Search wildly in the VCINSTALLDIR. We've probably lost by now.
# (This was the original behaviour; kept since it might turn something up)
@ -506,12 +506,12 @@ AC_DEFUN([TOOLCHAIN_SETUP_MSVC_DLL],
| $HEAD --lines 1`
fi
fi
TOOLCHAIN_CHECK_POSSIBLE_MSVC_DLL([$DLL_NAME], [$POSSIBLE_MSVC_DLL],
[search of VCINSTALLDIR])
fi
fi
if test "x$MSVC_DLL" = x; then
AC_MSG_CHECKING([for $DLL_NAME])
AC_MSG_RESULT([no])


@ -298,3 +298,5 @@ b8538bbb6f224ab1dabba579137099c166ad4724 jdk9-b52
aadc16ca5ab7d56f92ef9dbfa443595a939241b4 jdk9-b53
d469c5ad0c763e325a78e0af3016878a57dfc5cc jdk9-b54
734ca5311a225711b79618f3e92f47f07c82154a jdk9-b55
ef4afd6832b00b8687832c2a36c90e43750ebe40 jdk9-b56
d8ebf1a5b18ccbc849f5bf0f80aa3d78583eee68 jdk9-b57


@ -458,3 +458,5 @@ e0947f58c9c1426aa0d98b98ebb78357b27a7b99 jdk9-b50
effd5ef0c3eb4bb85aa975c489d6761dbf13ad6a jdk9-b53
c3b117fa5bdedfafd9ed236403e6d406911195b1 jdk9-b54
be49ab55e5c498c5077bbf58c2737100d1992339 jdk9-b55
fd2d5ec7e7b16c7bf4043a7fe7cfd8af96b819e2 jdk9-b56
56a85ffe743d3f9d70ba25d6ce82ddd2ad1bf33c jdk9-b57


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,10 +63,6 @@ public class PPC64Frame extends Frame {
// Entry frames
private static int ENTRY_FRAME_CALL_WRAPPER_OFFSET;
// Native frames
private static int NATIVE_FRAME_INITIAL_PARAM_OFFSET;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
@ -76,10 +72,8 @@ public class PPC64Frame extends Frame {
}
private static synchronized void initialize(TypeDataBase db) {
int abi_minframe_size = db.lookupIntConstant("frame::abi_minframe_size").intValue();
int entry_frame_locals_size = db.lookupIntConstant("frame::entry_frame_locals_size").intValue();
int wordLength = (int) VM.getVM().getAddressSize();
NATIVE_FRAME_INITIAL_PARAM_OFFSET = -abi_minframe_size/wordLength;
ENTRY_FRAME_CALL_WRAPPER_OFFSET = -entry_frame_locals_size/wordLength;
}
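An aside on what initialize() computes here: the VM exports entry_frame_locals_size as a byte count, and dividing by the word length and negating turns it into a slot offset below the frame pointer. With illustrative numbers (not taken from this source), an entry_frame_locals_size of 80 bytes on a 64-bit VM gives ENTRY_FRAME_CALL_WRAPPER_OFFSET = -80/8 = -10 stack slots. The NATIVE_FRAME_INITIAL_PARAM_OFFSET computation disappears along with the unused getNativeParamAddr() below.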
@ -389,13 +383,6 @@ public class PPC64Frame extends Frame {
// Return address:
public Address getSenderPC() { return getSenderSP().getAddressAt(2 * VM.getVM().getAddressSize()); }
// return address of param, zero origin index.
// MPJ note: Appears to be unused.
public Address getNativeParamAddr(int idx) {
return null;
// return addressOfStackSlot(NATIVE_FRAME_INITIAL_PARAM_OFFSET + idx);
}
public Address getSenderSP() { return getFP(); }
public Address addressOfInterpreterFrameLocals() {
return addressOfStackSlot(INTERPRETER_FRAME_LOCALS_OFFSET);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,9 +63,6 @@ public class X86Frame extends Frame {
// Entry frames
private static int ENTRY_FRAME_CALL_WRAPPER_OFFSET;
// Native frames
private static final int NATIVE_FRAME_INITIAL_PARAM_OFFSET = 2;
private static VMReg rbp;
static {
@ -423,20 +420,12 @@ public class X86Frame extends Frame {
return addressOfStackSlot(LINK_OFFSET).getAddressAt(0);
}
// FIXME: not implementable yet
//inline void frame::set_link(intptr_t* addr) { *(intptr_t **)addr_at(link_offset) = addr; }
public Address getUnextendedSP() { return raw_unextendedSP; }
// Return address:
public Address getSenderPCAddr() { return addressOfStackSlot(RETURN_ADDR_OFFSET); }
public Address getSenderPC() { return getSenderPCAddr().getAddressAt(0); }
// return address of param, zero origin index.
public Address getNativeParamAddr(int idx) {
return addressOfStackSlot(NATIVE_FRAME_INITIAL_PARAM_OFFSET + idx);
}
public Address getSenderSP() { return addressOfStackSlot(SENDER_SP_OFFSET); }
public Address addressOfInterpreterFrameLocals() {


@ -1,5 +1,5 @@
#
# Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -233,7 +233,7 @@ checks: check_os_version check_j2se_version
# Solaris 2.5.1, 2.6).
# Disable this check by setting DISABLE_HOTSPOT_OS_VERSION_CHECK=ok.
SUPPORTED_OS_VERSION = 2.4% 2.5% 2.6% 3%
SUPPORTED_OS_VERSION = 2.4% 2.5% 2.6% 3% 4%
OS_VERSION := $(shell uname -r)
EMPTY_IF_NOT_SUPPORTED = $(filter $(SUPPORTED_OS_VERSION),$(OS_VERSION))


@ -214,6 +214,11 @@ ifeq ($(USE_CLANG),)
# conversions which might affect the values. Only enable it in earlier versions.
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
WARNING_FLAGS += -Wconversion
endif
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
# This flag has only been known since GCC 4.3. GCC 4.8 contains a fix so that
# no spurious warnings are issued for templates: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=11856
WARNING_FLAGS += -Wtype-limits
endif
endif
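For context, -Wtype-limits flags comparisons whose result is fixed by the type's value range; before the GCC 4.8 fix referenced above it also fired spuriously inside templates. A minimal sketch of the kind of code it catches (my example, not from the repo):

#include <cstdint>

// Compiled with g++ -Wtype-limits, this draws
// "comparison is always true due to limited range of data type".
bool never_negative(uint32_t u) {
    return u >= 0;   // unsigned values are always >= 0
}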


@ -69,6 +69,7 @@ ProjectCreatorIncludesPRIVATE=\
-ignorePath ppc \
-ignorePath zero \
-ignorePath aix \
-ignorePath aarch64 \
-hidePath .hg


@ -3735,12 +3735,12 @@ operand immD()
interface(CONST_INTER);
%}
// constant 'double +0.0'.
// Double Immediate: +0.0d
operand immD0()
%{
predicate((n->getd() == 0) &&
(fpclassify(n->getd()) == FP_ZERO) && (signbit(n->getd()) == 0));
predicate(jlong_cast(n->getd()) == 0);
match(ConD);
op_cost(0);
format %{ %}
interface(CONST_INTER);
@ -3765,12 +3765,12 @@ operand immF()
interface(CONST_INTER);
%}
// constant 'float +0.0'.
// Float Immediate: +0.0f.
operand immF0()
%{
predicate((n->getf() == 0) &&
(fpclassify(n->getf()) == FP_ZERO) && (signbit(n->getf()) == 0));
predicate(jint_cast(n->getf()) == 0);
match(ConF);
op_cost(0);
format %{ %}
interface(CONST_INTER);
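The rewritten predicates rely on the fact that +0.0 is the unique all-zero bit pattern, so a single integer compare of the raw bits replaces the fpclassify/signbit pair while still rejecting -0.0. A standalone sketch of the idea (my code, with bits_of standing in for HotSpot's jlong_cast):

#include <cassert>
#include <cstdint>
#include <cstring>

// Reinterpret a double's bits as a 64-bit integer without conversion.
static int64_t bits_of(double d) {
    int64_t b;
    std::memcpy(&b, &d, sizeof b);
    return b;
}

int main() {
    assert(bits_of(0.0) == 0);    // +0.0 is all-zero bits: accepted
    assert(bits_of(-0.0) != 0);   // -0.0 sets the sign bit: rejected
    assert(0.0 == -0.0);          // a floating compare alone cannot tell them apart
    return 0;
}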
@ -6056,7 +6056,7 @@ instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%}
// Store Byte
instruct storeB_volatile(iRegI src, /* sync_memory*/indirect mem)
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreB mem src));
@ -6069,7 +6069,7 @@ instruct storeB_volatile(iRegI src, /* sync_memory*/indirect mem)
%}
// Store Char/Short
instruct storeC_volatile(iRegI src, /* sync_memory*/indirect mem)
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreC mem src));
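The recurring iRegI to iRegIorL2I substitutions in this file let these int-typed rules also match the result of an L2I conversion. On AArch64, narrowing a long to an int is just reading the low 32 bits of the register (the W view of an X register), so no move instruction is needed. A rough illustration of the truncation semantics involved (my sketch, not HotSpot code):

#include <cassert>
#include <cstdint>

int main() {
    int64_t v = 0x1234567890ABCDEFLL;
    // ConvL2I in the ideal graph: keep the low 32 bits.
    int32_t w = static_cast<int32_t>(v);
    assert(static_cast<uint32_t>(w) == 0x90ABCDEFu);
    // An int op such as AddI can then consume w directly;
    // iRegIorL2I lets the matcher fold the conversion away.
    int32_t sum = w + 1;
    (void)sum;
    return 0;
}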
@ -6225,7 +6225,7 @@ instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
// ============================================================================
// Zero Count Instructions
instruct countLeadingZerosI(iRegINoSp dst, iRegI src) %{
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
match(Set dst (CountLeadingZerosI src));
ins_cost(INSN_COST);
@ -6249,7 +6249,7 @@ instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
ins_pipe(ialu_reg);
%}
instruct countTrailingZerosI(iRegINoSp dst, iRegI src) %{
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
match(Set dst (CountTrailingZerosI src));
ins_cost(INSN_COST * 2);
@ -6935,7 +6935,7 @@ instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg fla
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegI src1, iRegI src2) %{
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
ins_cost(INSN_COST * 2);
@ -6951,7 +6951,7 @@ instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegI src1, iRegI
ins_pipe(icond_reg_reg);
%}
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegI src1, iRegI src2) %{
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
ins_cost(INSN_COST * 2);
@ -6976,7 +6976,7 @@ instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegI src1, iR
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegI src) %{
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));
ins_cost(INSN_COST * 2);
@ -6992,7 +6992,7 @@ instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iReg
ins_pipe(icond_reg);
%}
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegI src) %{
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));
ins_cost(INSN_COST * 2);
@ -7008,7 +7008,7 @@ instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, i
ins_pipe(icond_reg);
%}
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegI src, immI0 zero) %{
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));
ins_cost(INSN_COST * 2);
@ -7024,7 +7024,7 @@ instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegI src, immI0
ins_pipe(icond_reg);
%}
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegI src, immI0 zero) %{
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));
ins_cost(INSN_COST * 2);
@ -7476,7 +7476,7 @@ instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
ins_pipe(ialu_reg_reg);
%}
instruct addI_reg_imm(iRegINoSp dst, iRegI src1, immIAddSub src2) %{
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
match(Set dst (AddI src1 src2));
ins_cost(INSN_COST);
@ -7869,7 +7869,7 @@ instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
ins_pipe(idiv_reg_reg);
%}
instruct signExtract(iRegINoSp dst, iRegI src1, immI_31 div1, immI_31 div2) %{
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
match(Set dst (URShiftI (RShiftI src1 div1) div2));
ins_cost(INSN_COST);
format %{ "lsrw $dst, $src1, $div1" %}
@ -7879,7 +7879,7 @@ instruct signExtract(iRegINoSp dst, iRegI src1, immI_31 div1, immI_31 div2) %{
ins_pipe(ialu_reg_shift);
%}
instruct div2Round(iRegINoSp dst, iRegI src, immI_31 div1, immI_31 div2) %{
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
ins_cost(INSN_COST);
format %{ "addw $dst, $src, LSR $div1" %}
@ -8189,7 +8189,7 @@ instruct regL_not_reg(iRegLNoSp dst,
ins_pipe(ialu_reg);
%}
instruct regI_not_reg(iRegINoSp dst,
iRegI src1, immI_M1 m1,
iRegIorL2I src1, immI_M1 m1,
rFlagsReg cr) %{
match(Set dst (XorI src1 m1));
ins_cost(INSN_COST);
@ -8206,14 +8206,14 @@ instruct regI_not_reg(iRegINoSp dst,
%}
instruct AndI_reg_not_reg(iRegINoSp dst,
iRegI src1, iRegI src2, immI_M1 m1,
iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
rFlagsReg cr) %{
match(Set dst (AndI src1 (XorI src2 m1)));
ins_cost(INSN_COST);
format %{ "bic $dst, $src1, $src2" %}
format %{ "bicw $dst, $src1, $src2" %}
ins_encode %{
__ bic(as_Register($dst$$reg),
__ bicw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL, 0);
@ -8240,14 +8240,14 @@ instruct AndL_reg_not_reg(iRegLNoSp dst,
%}
instruct OrI_reg_not_reg(iRegINoSp dst,
iRegI src1, iRegI src2, immI_M1 m1,
iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
rFlagsReg cr) %{
match(Set dst (OrI src1 (XorI src2 m1)));
ins_cost(INSN_COST);
format %{ "orn $dst, $src1, $src2" %}
format %{ "ornw $dst, $src1, $src2" %}
ins_encode %{
__ orn(as_Register($dst$$reg),
__ ornw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL, 0);
@ -8274,14 +8274,14 @@ instruct OrL_reg_not_reg(iRegLNoSp dst,
%}
instruct XorI_reg_not_reg(iRegINoSp dst,
iRegI src1, iRegI src2, immI_M1 m1,
iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
rFlagsReg cr) %{
match(Set dst (XorI m1 (XorI src2 src1)));
ins_cost(INSN_COST);
format %{ "eon $dst, $src1, $src2" %}
format %{ "eonw $dst, $src1, $src2" %}
ins_encode %{
__ eon(as_Register($dst$$reg),
__ eonw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL, 0);
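Alongside the operand changes, the bic/orn/eon encodings above switch to their w-suffixed 32-bit forms, matching the int width of the operands; inverting at 64-bit width would leave the upper half of the register set. A small demonstration of the width difference (my sketch):

#include <cassert>
#include <cstdint>

int main() {
    uint64_t x = 0x00000000F0F0F0F0ull;
    uint32_t w = ~static_cast<uint32_t>(x);   // 32-bit NOT, as bicw/ornw/eonw operate
    uint64_t q = ~x;                          // 64-bit NOT, as bic/orn/eon operate
    assert(w == 0x0F0F0F0Fu);
    assert(q == 0xFFFFFFFF0F0F0F0Full);       // high 32 bits polluted
    assert(static_cast<uint32_t>(q) == w);    // only the low halves agree
    return 0;
}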
@ -8308,7 +8308,7 @@ instruct XorL_reg_not_reg(iRegLNoSp dst,
%}
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4, rFlagsReg cr) %{
match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
@ -8344,7 +8344,7 @@ instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
%}
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4, rFlagsReg cr) %{
match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
@ -8380,7 +8380,7 @@ instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
%}
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4, rFlagsReg cr) %{
match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
@ -8416,7 +8416,7 @@ instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
%}
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4, rFlagsReg cr) %{
match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
@ -8452,7 +8452,7 @@ instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
%}
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4, rFlagsReg cr) %{
match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
@ -8488,7 +8488,7 @@ instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
%}
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4, rFlagsReg cr) %{
match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
@ -8524,7 +8524,7 @@ instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
%}
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4, rFlagsReg cr) %{
match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
@ -8560,7 +8560,7 @@ instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
%}
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4, rFlagsReg cr) %{
match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
@ -8596,7 +8596,7 @@ instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
%}
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4, rFlagsReg cr) %{
match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
@ -8632,7 +8632,7 @@ instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
%}
instruct AndI_reg_URShift_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, rFlagsReg cr) %{
match(Set dst (AndI src1 (URShiftI src2 src3)));
@ -8670,7 +8670,7 @@ instruct AndL_reg_URShift_reg(iRegLNoSp dst,
%}
instruct AndI_reg_RShift_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, rFlagsReg cr) %{
match(Set dst (AndI src1 (RShiftI src2 src3)));
@ -8708,7 +8708,7 @@ instruct AndL_reg_RShift_reg(iRegLNoSp dst,
%}
instruct AndI_reg_LShift_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, rFlagsReg cr) %{
match(Set dst (AndI src1 (LShiftI src2 src3)));
@ -8746,7 +8746,7 @@ instruct AndL_reg_LShift_reg(iRegLNoSp dst,
%}
instruct XorI_reg_URShift_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, rFlagsReg cr) %{
match(Set dst (XorI src1 (URShiftI src2 src3)));
@ -8784,7 +8784,7 @@ instruct XorL_reg_URShift_reg(iRegLNoSp dst,
%}
instruct XorI_reg_RShift_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, rFlagsReg cr) %{
match(Set dst (XorI src1 (RShiftI src2 src3)));
@ -8822,7 +8822,7 @@ instruct XorL_reg_RShift_reg(iRegLNoSp dst,
%}
instruct XorI_reg_LShift_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, rFlagsReg cr) %{
match(Set dst (XorI src1 (LShiftI src2 src3)));
@ -8860,7 +8860,7 @@ instruct XorL_reg_LShift_reg(iRegLNoSp dst,
%}
instruct OrI_reg_URShift_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, rFlagsReg cr) %{
match(Set dst (OrI src1 (URShiftI src2 src3)));
@ -8898,7 +8898,7 @@ instruct OrL_reg_URShift_reg(iRegLNoSp dst,
%}
instruct OrI_reg_RShift_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, rFlagsReg cr) %{
match(Set dst (OrI src1 (RShiftI src2 src3)));
@ -8936,7 +8936,7 @@ instruct OrL_reg_RShift_reg(iRegLNoSp dst,
%}
instruct OrI_reg_LShift_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, rFlagsReg cr) %{
match(Set dst (OrI src1 (LShiftI src2 src3)));
@ -8974,7 +8974,7 @@ instruct OrL_reg_LShift_reg(iRegLNoSp dst,
%}
instruct AddI_reg_URShift_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, rFlagsReg cr) %{
match(Set dst (AddI src1 (URShiftI src2 src3)));
@ -9012,7 +9012,7 @@ instruct AddL_reg_URShift_reg(iRegLNoSp dst,
%}
instruct AddI_reg_RShift_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, rFlagsReg cr) %{
match(Set dst (AddI src1 (RShiftI src2 src3)));
@ -9050,7 +9050,7 @@ instruct AddL_reg_RShift_reg(iRegLNoSp dst,
%}
instruct AddI_reg_LShift_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, rFlagsReg cr) %{
match(Set dst (AddI src1 (LShiftI src2 src3)));
@ -9088,7 +9088,7 @@ instruct AddL_reg_LShift_reg(iRegLNoSp dst,
%}
instruct SubI_reg_URShift_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, rFlagsReg cr) %{
match(Set dst (SubI src1 (URShiftI src2 src3)));
@ -9126,7 +9126,7 @@ instruct SubL_reg_URShift_reg(iRegLNoSp dst,
%}
instruct SubI_reg_RShift_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, rFlagsReg cr) %{
match(Set dst (SubI src1 (RShiftI src2 src3)));
@ -9164,7 +9164,7 @@ instruct SubL_reg_RShift_reg(iRegLNoSp dst,
%}
instruct SubI_reg_LShift_reg(iRegINoSp dst,
iRegI src1, iRegI src2,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, rFlagsReg cr) %{
match(Set dst (SubI src1 (LShiftI src2 src3)));
@ -9228,7 +9228,7 @@ instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmwI(iRegINoSp dst, iRegI src, immI lshift_count, immI rshift_count)
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
// Make sure we are not going to exceed what sbfmw can do.
@ -9274,7 +9274,7 @@ instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct ubfmwI(iRegINoSp dst, iRegI src, immI lshift_count, immI rshift_count)
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
// Make sure we are not going to exceed what ubfmw can do.
@ -9296,7 +9296,7 @@ instruct ubfmwI(iRegINoSp dst, iRegI src, immI lshift_count, immI rshift_count)
%}
// Bitfield extract with shift & mask
instruct ubfxwI(iRegINoSp dst, iRegI src, immI rshift, immI_bitmask mask)
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
match(Set dst (AndI (URShiftI src rshift) mask));
@ -9362,7 +9362,7 @@ instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift
ins_pipe(ialu_reg_reg_extr);
%}
instruct extrOrI(iRegINoSp dst, iRegI src1, iRegI src2, immI lshift, immI rshift, rFlagsReg cr)
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
@ -9392,7 +9392,7 @@ instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshif
ins_pipe(ialu_reg_reg_extr);
%}
instruct extrAddI(iRegINoSp dst, iRegI src1, iRegI src2, immI lshift, immI rshift, rFlagsReg cr)
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
@ -9571,7 +9571,7 @@ instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%};
instruct AddExtI_sxth(iRegINoSp dst, iRegI src1, iRegI src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
ins_cost(INSN_COST);
@ -9584,7 +9584,7 @@ instruct AddExtI_sxth(iRegINoSp dst, iRegI src1, iRegI src2, immI_16 lshift, imm
ins_pipe(ialu_reg_reg);
%}
instruct AddExtI_sxtb(iRegINoSp dst, iRegI src1, iRegI src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
ins_cost(INSN_COST);
@ -9597,7 +9597,7 @@ instruct AddExtI_sxtb(iRegINoSp dst, iRegI src1, iRegI src2, immI_24 lshift, imm
ins_pipe(ialu_reg_reg);
%}
instruct AddExtI_uxtb(iRegINoSp dst, iRegI src1, iRegI src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
ins_cost(INSN_COST);
@ -9663,7 +9663,7 @@ instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, imm
%}
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegI src1, iRegI src2, immI_255 mask, rFlagsReg cr)
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
match(Set dst (AddI src1 (AndI src2 mask)));
ins_cost(INSN_COST);
@ -9676,7 +9676,7 @@ instruct AddExtI_uxtb_and(iRegINoSp dst, iRegI src1, iRegI src2, immI_255 mask,
ins_pipe(ialu_reg_reg);
%}
instruct AddExtI_uxth_and(iRegINoSp dst, iRegI src1, iRegI src2, immI_65535 mask, rFlagsReg cr)
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
match(Set dst (AddI src1 (AndI src2 mask)));
ins_cost(INSN_COST);
@ -9728,7 +9728,7 @@ instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295
ins_pipe(ialu_reg_reg);
%}
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegI src1, iRegI src2, immI_255 mask, rFlagsReg cr)
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
match(Set dst (SubI src1 (AndI src2 mask)));
ins_cost(INSN_COST);
@ -9741,7 +9741,7 @@ instruct SubExtI_uxtb_and(iRegINoSp dst, iRegI src1, iRegI src2, immI_255 mask,
ins_pipe(ialu_reg_reg);
%}
instruct SubExtI_uxth_and(iRegINoSp dst, iRegI src1, iRegI src2, immI_65535 mask, rFlagsReg cr)
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
match(Set dst (SubI src1 (AndI src2 mask)));
ins_cost(INSN_COST);
@ -10343,7 +10343,7 @@ instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%}
// this pattern occurs in bigmath arithmetic
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegI src, immL_32bits mask)
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
match(Set dst (AndL (ConvI2L src) mask));
@ -10369,7 +10369,7 @@ instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
ins_pipe(ialu_reg);
%}
instruct convI2B(iRegINoSp dst, iRegI src, rFlagsReg cr)
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
match(Set dst (Conv2B src));
effect(KILL cr);
@ -10457,7 +10457,7 @@ instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
ins_pipe(pipe_class_default);
%}
instruct convI2F_reg_reg(vRegF dst, iRegI src) %{
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
match(Set dst (ConvI2F src));
ins_cost(INSN_COST * 5);
@ -10509,7 +10509,7 @@ instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
ins_pipe(pipe_class_default);
%}
instruct convI2D_reg_reg(vRegD dst, iRegI src) %{
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
match(Set dst (ConvI2D src));
ins_cost(INSN_COST * 5);
@ -10772,7 +10772,7 @@ instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlag
// ============================================================================
// Overflow Math Instructions
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
match(Set cr (OverflowAddI op1 op2));
@ -10785,7 +10785,7 @@ instruct overflowAddI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
ins_pipe(icmp_reg_reg);
%}
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegI op1, immIAddSub op2)
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
match(Set cr (OverflowAddI op1 op2));
@ -10824,7 +10824,7 @@ instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
ins_pipe(icmp_reg_imm);
%}
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
match(Set cr (OverflowSubI op1 op2));
@ -10837,7 +10837,7 @@ instruct overflowSubI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
ins_pipe(icmp_reg_reg);
%}
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegI op1, immIAddSub op2)
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
match(Set cr (OverflowSubI op1 op2));
@ -10876,7 +10876,7 @@ instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
ins_pipe(icmp_reg_imm);
%}
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegI op1)
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
match(Set cr (OverflowSubI zero op1));
@ -10902,7 +10902,7 @@ instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
ins_pipe(icmp_reg_imm);
%}
instruct overflowMulI_reg(rFlagsReg cr, iRegI op1, iRegI op2)
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
match(Set cr (OverflowMulI op1 op2));
@ -10923,7 +10923,7 @@ instruct overflowMulI_reg(rFlagsReg cr, iRegI op1, iRegI op2)
ins_pipe(pipe_slow);
%}
instruct overflowMulI_reg_branch(cmpOp cmp, iRegI op1, iRegI op2, label labl, rFlagsReg cr)
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
match(If cmp (OverflowMulI op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
@ -11393,7 +11393,7 @@ instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%}
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegI p, iRegI q, rFlagsReg cr)
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
match(Set dst (CmpLTMask p q));
effect(KILL cr);
@ -11414,7 +11414,7 @@ instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegI p, iRegI q, rFlagsReg cr)
ins_pipe(ialu_reg_reg);
%}
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegI src, immI0 zero, rFlagsReg cr)
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
match(Set dst (CmpLTMask src zero));
effect(KILL cr);
@ -11548,7 +11548,7 @@ instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
// shorter than (cmp; branch), have the additional benefit of not
// killing the flags.
instruct cmpI_imm0_branch(cmpOp cmp, iRegI op1, immI0 op2, label labl, rFlagsReg cr) %{
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
match(If cmp (CmpI op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq);


@ -24,11 +24,13 @@ dnl Process this file with m4 aarch64_ad.m4 to generate the arithmetic
dnl and shift patterns used in aarch64.ad.
dnl
// BEGIN This section of the file is automatically generated. Do not edit --------------
dnl
define(`ORL2I', `ifelse($1,I,orL2I)')
dnl
define(`BASE_SHIFT_INSN',
`
instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
iReg$1 src1, iReg$1 src2,
iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
immI src3, rFlagsReg cr) %{
match(Set dst ($2$1 src1 ($4$1 src2 src3)));
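A worked expansion, for illustration: ORL2I is defined as ifelse($1,I,orL2I), so in the int case iReg$1`'ORL2I($1) becomes iRegI with orL2I appended, i.e. iRegIorL2I, while in the long case the ifelse produces nothing and the operand stays iRegL. This is how the generated aarch64.ad picks up the iRegIorL2I operands seen throughout the previous file without duplicating every pattern by hand.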
@ -48,7 +50,7 @@ instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
define(`BASE_INVERTED_INSN',
`
instruct $2$1_reg_not_reg(iReg$1NoSp dst,
iReg$1 src1, iReg$1 src2, imm$1_M1 m1,
iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_M1 m1,
rFlagsReg cr) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
@ -70,7 +72,7 @@ dnl into this canonical form.
define(`INVERTED_SHIFT_INSN',
`
instruct $2$1_reg_$4_not_reg(iReg$1NoSp dst,
iReg$1 src1, iReg$1 src2,
iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
immI src3, imm$1_M1 src4, rFlagsReg cr) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
@ -92,7 +94,7 @@ dnl into this canonical form.
%}')dnl
define(`NOT_INSN',
`instruct reg$1_not_reg(iReg$1NoSp dst,
iReg$1 src1, imm$1_M1 m1,
iReg$1`'ORL2I($1) src1, imm$1_M1 m1,
rFlagsReg cr) %{
match(Set dst (Xor$1 src1 m1));
ins_cost(INSN_COST);
@ -113,7 +115,7 @@ define(`BOTH_SHIFT_INSNS',
BASE_SHIFT_INSN(L, $1, $2, $3, $4)')dnl
dnl
define(`BOTH_INVERTED_INSNS',
`BASE_INVERTED_INSN(I, $1, $2, $3, $4)
`BASE_INVERTED_INSN(I, $1, $2w, $3, $4)
BASE_INVERTED_INSN(L, $1, $2, $3, $4)')dnl
dnl
define(`BOTH_INVERTED_SHIFT_INSNS',
@ -149,7 +151,7 @@ define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)')
define(`BFM_INSN',`
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct $4$1(iReg$1NoSp dst, iReg$1 src, immI lshift_count, immI rshift_count)
instruct $4$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift_count, immI rshift_count)
%{
match(Set dst EXTEND($1, $3, src, lshift_count, rshift_count));
// Make sure we are not going to exceed what $4 can do.
@ -176,7 +178,7 @@ BFM_INSN(I, 31, URShift, ubfmw)
dnl
// Bitfield extract with shift & mask
define(`BFX_INSN',
`instruct $3$1(iReg$1NoSp dst, iReg$1 src, immI rshift, imm$1_bitmask mask)
`instruct $3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI rshift, imm$1_bitmask mask)
%{
match(Set dst (And$1 ($2$1 src rshift) mask));
@ -215,7 +217,7 @@ instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask m
// Rotations
define(`EXTRACT_INSN',
`instruct extr$3$1(iReg$1NoSp dst, iReg$1 src1, iReg$1 src2, immI lshift, immI rshift, rFlagsReg cr)
`instruct extr$3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI lshift, immI rshift, rFlagsReg cr)
%{
match(Set dst ($3$1 (LShift$1 src1 lshift) (URShift$1 src2 rshift)));
predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & $2));
@ -299,7 +301,7 @@ ROR_INSN(I, 0, ror)
// Add/subtract (extended)
dnl ADD_SUB_EXTENDED(mode, size, add node, shift node, insn, shift type, wordsize)
define(`ADD_SUB_CONV', `
instruct $3Ext$1(iReg$2NoSp dst, iReg$2 src1, iReg$1orL2I src2, rFlagsReg cr)
instruct $3Ext$1(iReg$2NoSp dst, iReg$2`'ORL2I($2) src1, iReg$1`'ORL2I($1) src2, rFlagsReg cr)
%{
match(Set dst ($3$2 src1 (ConvI2L src2)));
ins_cost(INSN_COST);
@ -315,7 +317,7 @@ ADD_SUB_CONV(I,L,Add,add,sxtw);
ADD_SUB_CONV(I,L,Sub,sub,sxtw);
dnl
define(`ADD_SUB_EXTENDED', `
instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1 src1, iReg$1 src2, immI_`'eval($7-$2) lshift, immI_`'eval($7-$2) rshift, rFlagsReg cr)
instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI_`'eval($7-$2) lshift, immI_`'eval($7-$2) rshift, rFlagsReg cr)
%{
match(Set dst ($3$1 src1 EXTEND($1, $4, src2, lshift, rshift)));
ins_cost(INSN_COST);
@ -337,7 +339,7 @@ ADD_SUB_EXTENDED(L,8,Add,URShift,add,uxtb,64)
dnl
dnl ADD_SUB_ZERO_EXTEND(mode, size, add node, insn, shift type)
define(`ADD_SUB_ZERO_EXTEND', `
instruct $3Ext$1_$5_and(iReg$1NoSp dst, iReg$1 src1, iReg$1 src2, imm$1_$2 mask, rFlagsReg cr)
instruct $3Ext$1_$5_and(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, rFlagsReg cr)
%{
match(Set dst ($3$1 src1 (And$1 src2 mask)));
ins_cost(INSN_COST);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -136,12 +136,7 @@
entry_frame_call_wrapper_offset = -8,
// we don't need a save area
arg_reg_save_area_bytes = 0,
// TODO - check that this is still correct
// Native frames
native_frame_initial_param_offset = 2
arg_reg_save_area_bytes = 0
};
@ -195,9 +190,6 @@
inline address* sender_pc_addr() const;
// return address of param, zero origin index.
inline address* native_param_addr(int idx) const;
// expression stack tos if we are nested in a java call
intptr_t* interpreter_frame_last_sp() const;


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -166,7 +166,6 @@ inline bool frame::is_older(intptr_t* id) const { assert(this->id() != NULL &&
inline intptr_t* frame::link() const { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }
inline void frame::set_link(intptr_t* addr) { *(intptr_t **)addr_at(link_offset) = addr; }
inline intptr_t* frame::unextended_sp() const { return _unextended_sp; }
@ -176,9 +175,6 @@ inline intptr_t* frame::unextended_sp() const { return _unextended_sp; }
inline address* frame::sender_pc_addr() const { return (address*) addr_at( return_addr_offset); }
inline address frame::sender_pc() const { return *sender_pc_addr(); }
// return address of param, zero origin index.
inline address* frame::native_param_addr(int idx) const { return (address*) addr_at( native_frame_initial_param_offset+idx); }
#ifdef CC_INTERP
inline interpreterState frame::get_interpreterState() const {


@ -4416,11 +4416,11 @@ operand immF() %{
interface(CONST_INTER);
%}
// constant 'float +0.0'.
// Float Immediate: +0.0f.
operand immF_0() %{
predicate((n->getf() == 0) &&
(fpclassify(n->getf()) == FP_ZERO) && (signbit(n->getf()) == 0));
predicate(jint_cast(n->getf()) == 0);
match(ConF);
op_cost(0);
format %{ %}
interface(CONST_INTER);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -69,8 +69,6 @@ inline int frame::frame_size(RegisterMap* map) const { return sender_sp() - sp()
inline intptr_t* frame::link() const { return (intptr_t *)(fp()[FP->sp_offset_in_saved_window()] + STACK_BIAS); }
inline void frame::set_link(intptr_t* addr) { assert(link()==addr, "frame nesting is controlled by hardware"); }
inline intptr_t* frame::unextended_sp() const { return sp() + _sp_adjustment_by_callee; }
// return address:


@ -3758,13 +3758,9 @@ operand immD() %{
interface(CONST_INTER);
%}
// Double Immediate: +0.0d
operand immD0() %{
#ifdef _LP64
// on 64-bit architectures this comparison is faster
predicate(jlong_cast(n->getd()) == 0);
#else
predicate((n->getd() == 0) && (fpclass(n->getd()) == FP_PZERO));
#endif
match(ConD);
op_cost(0);
@ -3781,9 +3777,9 @@ operand immF() %{
interface(CONST_INTER);
%}
// Float Immediate: 0
// Float Immediate: +0.0f
operand immF0() %{
predicate((n->getf() == 0) && (fpclass(n->getf()) == FP_PZERO));
predicate(jint_cast(n->getf()) == 0);
match(ConF);
op_cost(0);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -128,21 +128,16 @@
entry_frame_after_call_words = 28,
entry_frame_call_wrapper_offset = 2,
arg_reg_save_area_bytes = 32, // Register argument save area
arg_reg_save_area_bytes = 32 // Register argument save area
#else
entry_frame_after_call_words = 13,
entry_frame_call_wrapper_offset = -6,
arg_reg_save_area_bytes = 0,
arg_reg_save_area_bytes = 0
#endif // _WIN64
#else
entry_frame_call_wrapper_offset = 2,
entry_frame_call_wrapper_offset = 2
#endif // AMD64
// Native frames
native_frame_initial_param_offset = 2
};
intptr_t ptr_at(int offset) const {
@ -195,9 +190,6 @@
inline address* sender_pc_addr() const;
// return address of param, zero origin index.
inline address* native_param_addr(int idx) const;
// expression stack tos if we are nested in a java call
intptr_t* interpreter_frame_last_sp() const;


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -139,8 +139,6 @@ inline bool frame::is_older(intptr_t* id) const { assert(this->id() != NULL &&
inline intptr_t* frame::link() const { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }
inline void frame::set_link(intptr_t* addr) { *(intptr_t **)addr_at(link_offset) = addr; }
inline intptr_t* frame::unextended_sp() const { return _unextended_sp; }
@ -149,9 +147,6 @@ inline intptr_t* frame::unextended_sp() const { return _unextended_sp; }
inline address* frame::sender_pc_addr() const { return (address*) addr_at( return_addr_offset); }
inline address frame::sender_pc() const { return *sender_pc_addr(); }
// return address of param, zero origin index.
inline address* frame::native_param_addr(int idx) const { return (address*) addr_at( native_frame_initial_param_offset+idx); }
#ifdef CC_INTERP
inline interpreterState frame::get_interpreterState() const {

File diff suppressed because it is too large.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,12 +35,254 @@
class InterpreterMacroAssembler: public MacroAssembler {
#ifdef TARGET_ARCH_MODEL_x86_32
# include "interp_masm_x86_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_x86_64
# include "interp_masm_x86_64.hpp"
#endif
#ifndef CC_INTERP
protected:
// Interpreter specific version of call_VM_base
virtual void call_VM_leaf_base(address entry_point,
int number_of_arguments);
virtual void call_VM_base(Register oop_result,
Register java_thread,
Register last_java_sp,
address entry_point,
int number_of_arguments,
bool check_exceptions);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// base routine for all dispatches
void dispatch_base(TosState state, address* table, bool verifyoop = true);
#endif // CC_INTERP
public:
InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code),
_locals_register(LP64_ONLY(r14) NOT_LP64(rdi)),
_bcp_register(LP64_ONLY(r13) NOT_LP64(rsi)) {}
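// Editorial note: LP64_ONLY(x) expands to x on 64-bit builds and to
// nothing on 32-bit builds; NOT_LP64(x) is the reverse (HotSpot's
// standard word-size selection macros). The initializers above
// therefore pick r14/r13 on x86_64 and rdi/rsi on x86_32, which is
// what allows this single merged header to replace the separate
// interp_masm_x86_32.hpp / interp_masm_x86_64.hpp includes removed
// above.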
void load_earlyret_value(TosState state);
#ifdef CC_INTERP
void save_bcp() { /* not needed in c++ interpreter and harmless */ }
void restore_bcp() { /* not needed in c++ interpreter and harmless */ }
// Helpers for runtime call arguments/results
void get_method(Register reg);
#else
// Interpreter-specific registers
void save_bcp() {
movptr(Address(rbp, frame::interpreter_frame_bcp_offset * wordSize), _bcp_register);
}
void restore_bcp() {
movptr(_bcp_register, Address(rbp, frame::interpreter_frame_bcp_offset * wordSize));
}
void restore_locals() {
movptr(_locals_register, Address(rbp, frame::interpreter_frame_locals_offset * wordSize));
}
// Helpers for runtime call arguments/results
void get_method(Register reg) {
movptr(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
}
void get_const(Register reg) {
get_method(reg);
movptr(reg, Address(reg, Method::const_offset()));
}
void get_constant_pool(Register reg) {
get_const(reg);
movptr(reg, Address(reg, ConstMethod::constants_offset()));
}
void get_constant_pool_cache(Register reg) {
get_constant_pool(reg);
movptr(reg, Address(reg, ConstantPool::cache_offset_in_bytes()));
}
void get_cpool_and_tags(Register cpool, Register tags) {
get_constant_pool(cpool);
movptr(tags, Address(cpool, ConstantPool::tags_offset_in_bytes()));
}
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache,
Register index,
int bcp_offset,
size_t index_size = sizeof(u2));
void get_cache_and_index_and_bytecode_at_bcp(Register cache,
Register index,
Register bytecode,
int byte_no,
int bcp_offset,
size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache,
Register tmp,
int bcp_offset,
size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index,
int bcp_offset,
size_t index_size = sizeof(u2));
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);
NOT_LP64(void f2ieee();) // truncate ftos to 32bits
NOT_LP64(void d2ieee();) // truncate dtos to 64bits
// Expression stack
void pop_ptr(Register r = rax);
void pop_i(Register r = rax);
void push_ptr(Register r = rax);
void push_i(Register r = rax);
#ifdef _LP64
void pop_l(Register r = rax);
void pop_f(XMMRegister r = xmm0);
void pop_d(XMMRegister r = xmm0);
void push_l(Register r = rax);
void push_f(XMMRegister r = xmm0);
void push_d(XMMRegister r = xmm0);
#else
void pop_l(Register lo = rax, Register hi = rdx);
void pop_f();
void pop_d();
void push_l(Register lo = rax, Register hi = rdx);
void push_d(Register r = rax);
void push_f();
#endif // _LP64
void pop(Register r) { ((MacroAssembler*)this)->pop(r); }
void push(Register r) { ((MacroAssembler*)this)->push(r); }
void push(int32_t imm ) { ((MacroAssembler*)this)->push(imm); }
void pop(TosState state); // transition vtos -> state
void push(TosState state); // transition state -> vtos
// These are dummies to prevent surprise implicit conversions to Register
void pop(void* v); // Add unimplemented ambiguous method
void push(void* v); // Add unimplemented ambiguous method
void empty_expression_stack() {
movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
}
// Helpers for swap and dup
void load_ptr(int n, Register val);
void store_ptr(int n, Register val);
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass.
void gen_subtype_check( Register sub_klass, Label &ok_is_subtype );
// Dispatching
void dispatch_prolog(TosState state, int step = 0);
void dispatch_epilog(TosState state, int step = 0);
// dispatch via rbx (assume rbx is loaded already)
void dispatch_only(TosState state);
// dispatch normal table via rbx (assume rbx is loaded already)
void dispatch_only_normal(TosState state);
void dispatch_only_noverify(TosState state);
// load rbx from [_bcp_register + step] and dispatch via rbx
void dispatch_next(TosState state, int step = 0);
// load rbx from [_bcp_register] and dispatch via rbx and table
void dispatch_via (TosState state, address* table);
// jump to an invoked target
void prepare_to_jump_from_interpreted();
void jump_from_interpreted(Register method, Register temp);
// Returning from interpreted functions
//
// Removes the current activation (incl. unlocking of monitors)
// and sets up the return address. This code is also used for
// exception unwinding. In that case, we do not want to throw
// IllegalMonitorStateExceptions, since that might get us into an
// infinite rethrow exception loop.
// Additionally this code is used for popFrame and earlyReturn.
// In popFrame case we want to skip throwing an exception,
// installing an exception, and notifying jvmdi.
// In earlyReturn case we only want to skip throwing an exception
// and installing an exception.
void remove_activation(TosState state, Register ret_addr,
bool throw_monitor_exception = true,
bool install_monitor_exception = true,
bool notify_jvmdi = true);
#endif // CC_INTERP
void get_method_counters(Register method, Register mcs, Label& skip);
// Object locking
void lock_object (Register lock_reg);
void unlock_object(Register lock_reg);
#ifndef CC_INTERP
// Interpreter profiling operations
void set_method_data_pointer_for_bcp();
void test_method_data_pointer(Register mdp, Label& zero_continue);
void verify_method_data_pointer();
void set_mdp_data_at(Register mdp_in, int constant, Register value);
void increment_mdp_data_at(Address data, bool decrement = false);
void increment_mdp_data_at(Register mdp_in, int constant,
bool decrement = false);
void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
bool decrement = false);
void increment_mask_and_jump(Address counter_addr,
int increment, Address mask,
Register scratch, bool preloaded,
Condition cond, Label* where);
void set_mdp_flag_at(Register mdp_in, int flag_constant);
void test_mdp_data_at(Register mdp_in, int offset, Register value,
Register test_value_out,
Label& not_equal_continue);
void record_klass_in_profile(Register receiver, Register mdp,
Register reg2, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register mdp,
Register reg2, int start_row,
Label& done, bool is_virtual_call);
void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
void update_mdp_by_constant(Register mdp_in, int constant);
void update_mdp_for_ret(Register return_bci);
void profile_taken_branch(Register mdp, Register bumped_count);
void profile_not_taken_branch(Register mdp);
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
Register scratch2,
bool receiver_can_be_null = false);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass, Register scratch);
void profile_typecheck_failed(Register mdp);
void profile_switch_default(Register mdp);
void profile_switch_case(Register index_in_scratch, Register mdp,
Register scratch2);
// Debugging
// only if +VerifyOops && state == atos
void verify_oop(Register reg, TosState state = atos);
// only if +VerifyFPU && (state == ftos || state == dtos)
void verify_FPU(int stack_depth, TosState state = ftos);
#endif // !CC_INTERP
typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
// support for jvmti/dtrace
void notify_method_entry();
void notify_method_exit(TosState state, NotifyMethodExitMode mode);
private:

File diff suppressed because it is too large.


@ -1,223 +0,0 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CC_INTERP
protected:
// Interpreter specific version of call_VM_base
virtual void call_VM_leaf_base(
address entry_point,
int number_of_arguments
);
virtual void call_VM_base(
Register oop_result,
Register java_thread,
Register last_java_sp,
address entry_point,
int number_of_arguments,
bool check_exceptions
);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// base routine for all dispatches
void dispatch_base(TosState state, address* table, bool verifyoop = true);
#endif /* CC_INTERP */
public:
InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code), _locals_register(rdi), _bcp_register(rsi) {}
void load_earlyret_value(TosState state);
// Interpreter-specific registers
#ifdef CC_INTERP
void save_bcp() { /* not needed in c++ interpreter and harmless */ }
void restore_bcp() { /* not needed in c++ interpreter and harmless */ }
// Helpers for runtime call arguments/results
void get_method(Register reg);
#else
void save_bcp() { movptr(Address(rbp, frame::interpreter_frame_bcp_offset * wordSize), rsi); }
void restore_bcp() { movptr(rsi, Address(rbp, frame::interpreter_frame_bcp_offset * wordSize)); }
void restore_locals() { movptr(rdi, Address(rbp, frame::interpreter_frame_locals_offset * wordSize)); }
// Helpers for runtime call arguments/results
void get_method(Register reg) { movptr(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); }
void get_const(Register reg) { get_method(reg); movptr(reg, Address(reg, Method::const_offset())); }
void get_constant_pool(Register reg) { get_const(reg); movptr(reg, Address(reg, ConstMethod::constants_offset())); }
void get_constant_pool_cache(Register reg) { get_constant_pool(reg); movptr(reg, Address(reg, ConstantPool::cache_offset_in_bytes())); }
void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movptr(tags, Address(cpool, ConstantPool::tags_offset_in_bytes()));
}
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);
// Expression stack
void f2ieee(); // truncate ftos to 32bits
void d2ieee(); // truncate dtos to 64bits
void pop_ptr(Register r = rax);
void pop_i(Register r = rax);
void pop_l(Register lo = rax, Register hi = rdx);
void pop_f();
void pop_d();
void push_ptr(Register r = rax);
void push_i(Register r = rax);
void push_l(Register lo = rax, Register hi = rdx);
void push_d(Register r = rax);
void push_f();
void pop(TosState state); // transition vtos -> state
void push(TosState state); // transition state -> vtos
void pop(Register r ) { ((MacroAssembler*)this)->pop(r); }
void push(Register r ) { ((MacroAssembler*)this)->push(r); }
void push(int32_t imm ) { ((MacroAssembler*)this)->push(imm); }
// These are dummies to prevent surprise implicit conversions to Register
void pop(void* v ); // Add unimplemented ambiguous method
void push(void* v ); // Add unimplemented ambiguous method
void empty_expression_stack() {
movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
}
// Helpers for swap and dup
void load_ptr(int n, Register val);
void store_ptr(int n, Register val);
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. EAX holds the super_klass. Blows ECX
// and EDI. Register sub_klass cannot be any of the above.
void gen_subtype_check( Register sub_klass, Label &ok_is_subtype );
// Dispatching
void dispatch_prolog(TosState state, int step = 0);
void dispatch_epilog(TosState state, int step = 0);
void dispatch_only(TosState state); // dispatch via rbx, (assume rbx, is loaded already)
void dispatch_only_normal(TosState state); // dispatch normal table via rbx, (assume rbx, is loaded already)
void dispatch_only_noverify(TosState state);
void dispatch_next(TosState state, int step = 0); // load rbx, from [esi + step] and dispatch via rbx,
void dispatch_via (TosState state, address* table); // load rbx, from [esi] and dispatch via rbx, and table
// jump to an invoked target
void prepare_to_jump_from_interpreted();
void jump_from_interpreted(Register method, Register temp);
// Returning from interpreted functions
//
// Removes the current activation (incl. unlocking of monitors)
// and sets up the return address. This code is also used for
// exception unwinding. In that case, we do not want to throw
// IllegalMonitorStateExceptions, since that might get us into an
// infinite rethrow exception loop.
// Additionally this code is used for popFrame and earlyReturn.
// In popFrame case we want to skip throwing an exception,
// installing an exception, and notifying jvmdi.
// In earlyReturn case we only want to skip throwing an exception
// and installing an exception.
void remove_activation(TosState state, Register ret_addr,
bool throw_monitor_exception = true,
bool install_monitor_exception = true,
bool notify_jvmdi = true);
#endif /* !CC_INTERP */
void get_method_counters(Register method, Register mcs, Label& skip);
// Debugging
void verify_oop(Register reg, TosState state = atos); // only if +VerifyOops && state == atos
#ifndef CC_INTERP
void verify_FPU(int stack_depth, TosState state = ftos); // only if +VerifyFPU && (state == ftos || state == dtos)
#endif /* !CC_INTERP */
// Object locking
void lock_object (Register lock_reg);
void unlock_object(Register lock_reg);
#ifndef CC_INTERP
// Interpreter profiling operations
void set_method_data_pointer_for_bcp();
void test_method_data_pointer(Register mdp, Label& zero_continue);
void verify_method_data_pointer();
void set_mdp_data_at(Register mdp_in, int constant, Register value);
void increment_mdp_data_at(Address data, bool decrement = false);
void increment_mdp_data_at(Register mdp_in, int constant,
bool decrement = false);
void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
bool decrement = false);
void increment_mask_and_jump(Address counter_addr,
int increment, Address mask,
Register scratch, bool preloaded,
Condition cond, Label* where);
void set_mdp_flag_at(Register mdp_in, int flag_constant);
void test_mdp_data_at(Register mdp_in, int offset, Register value,
Register test_value_out,
Label& not_equal_continue);
void record_klass_in_profile(Register receiver, Register mdp,
Register reg2, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register mdp,
Register reg2, int start_row,
Label& done, bool is_virtual_call);
void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
void update_mdp_by_constant(Register mdp_in, int constant);
void update_mdp_for_ret(Register return_bci);
void profile_taken_branch(Register mdp, Register bumped_count);
void profile_not_taken_branch(Register mdp);
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp, Register scratch2,
bool receiver_can_be_null = false);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass, Register scratch);
void profile_typecheck_failed(Register mdp);
void profile_switch_default(Register mdp);
void profile_switch_case(Register index_in_scratch, Register mdp, Register scratch2);
#endif /* !CC_INTERP */
typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
// support for jvmti
void notify_method_entry();
void notify_method_exit(TosState state, NotifyMethodExitMode mode);
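An aside on the pop(void*)/push(void*) dummies declared above: leaving an overload declared but intentionally undefined is a standard C++ guard against surprise implicit conversions. A standalone sketch, not VM code; RegImpl and ExprStack are stand-in names:

#include <cstdint>

class RegImpl;
typedef RegImpl* Register;     // pointer-typed register handle, as in HotSpot

struct ExprStack {
  void push(Register r)  { (void)r; }
  void push(int32_t imm) { (void)imm; }
  void push(void* v);          // dummy: declared, intentionally never defined
};

int main() {
  ExprStack s;
  s.push(int32_t(42));         // ok: exact match
  s.push(Register(0));         // ok: explicit
  // s.push(nullptr);          // error: ambiguous between Register and void*
  // s.push(&s);               // would select push(void*) and fail to link
  return 0;
}

The effect is that a caller must say explicitly which overload is meant; an accidental pointer or null argument can no longer be quietly routed to push(Register).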

File diff suppressed because it is too large


@ -1,240 +0,0 @@
/*
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CC_INTERP
protected:
// Interpreter specific version of call_VM_base
virtual void call_VM_leaf_base(address entry_point,
int number_of_arguments);
virtual void call_VM_base(Register oop_result,
Register java_thread,
Register last_java_sp,
address entry_point,
int number_of_arguments,
bool check_exceptions);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// base routine for all dispatches
void dispatch_base(TosState state, address* table, bool verifyoop = true);
#endif // CC_INTERP
public:
InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code), _locals_register(r14), _bcp_register(r13) {}
void load_earlyret_value(TosState state);
#ifdef CC_INTERP
void save_bcp() { /* not needed in c++ interpreter and harmless */ }
void restore_bcp() { /* not needed in c++ interpreter and harmless */ }
// Helpers for runtime call arguments/results
void get_method(Register reg);
#else
// Interpreter-specific registers
void save_bcp() {
movptr(Address(rbp, frame::interpreter_frame_bcp_offset * wordSize), r13);
}
void restore_bcp() {
movptr(r13, Address(rbp, frame::interpreter_frame_bcp_offset * wordSize));
}
void restore_locals() {
movptr(r14, Address(rbp, frame::interpreter_frame_locals_offset * wordSize));
}
// Helpers for runtime call arguments/results
void get_method(Register reg) {
movptr(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
}
void get_const(Register reg) {
get_method(reg);
movptr(reg, Address(reg, Method::const_offset()));
}
void get_constant_pool(Register reg) {
get_const(reg);
movptr(reg, Address(reg, ConstMethod::constants_offset()));
}
void get_constant_pool_cache(Register reg) {
get_constant_pool(reg);
movptr(reg, Address(reg, ConstantPool::cache_offset_in_bytes()));
}
void get_cpool_and_tags(Register cpool, Register tags) {
get_constant_pool(cpool);
movptr(tags, Address(cpool, ConstantPool::tags_offset_in_bytes()));
}
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);
void pop_ptr(Register r = rax);
void pop_i(Register r = rax);
void pop_l(Register r = rax);
void pop_f(XMMRegister r = xmm0);
void pop_d(XMMRegister r = xmm0);
void push_ptr(Register r = rax);
void push_i(Register r = rax);
void push_l(Register r = rax);
void push_f(XMMRegister r = xmm0);
void push_d(XMMRegister r = xmm0);
void pop(Register r ) { ((MacroAssembler*)this)->pop(r); }
void push(Register r ) { ((MacroAssembler*)this)->push(r); }
void push(int32_t imm ) { ((MacroAssembler*)this)->push(imm); }
void pop(TosState state); // transition vtos -> state
void push(TosState state); // transition state -> vtos
void empty_expression_stack() {
movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
}
// Helpers for swap and dup
void load_ptr(int n, Register val);
void store_ptr(int n, Register val);
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass.
void gen_subtype_check( Register sub_klass, Label &ok_is_subtype );
// Dispatching
void dispatch_prolog(TosState state, int step = 0);
void dispatch_epilog(TosState state, int step = 0);
// dispatch via ebx (assume ebx is loaded already)
void dispatch_only(TosState state);
// dispatch normal table via ebx (assume ebx is loaded already)
void dispatch_only_normal(TosState state);
void dispatch_only_noverify(TosState state);
// load ebx from [esi + step] and dispatch via ebx
void dispatch_next(TosState state, int step = 0);
// load ebx from [esi] and dispatch via ebx and table
void dispatch_via (TosState state, address* table);
// jump to an invoked target
void prepare_to_jump_from_interpreted();
void jump_from_interpreted(Register method, Register temp);
// Returning from interpreted functions
//
// Removes the current activation (incl. unlocking of monitors)
// and sets up the return address. This code is also used for
// exception unwinding. In that case, we do not want to throw
// IllegalMonitorStateExceptions, since that might get us into an
// infinite rethrow exception loop.
// Additionally this code is used for popFrame and earlyReturn.
// In popFrame case we want to skip throwing an exception,
// installing an exception, and notifying jvmdi.
// In earlyReturn case we only want to skip throwing an exception
// and installing an exception.
void remove_activation(TosState state, Register ret_addr,
bool throw_monitor_exception = true,
bool install_monitor_exception = true,
bool notify_jvmdi = true);
#endif // CC_INTERP
void get_method_counters(Register method, Register mcs, Label& skip);
// Object locking
void lock_object (Register lock_reg);
void unlock_object(Register lock_reg);
#ifndef CC_INTERP
// Interpreter profiling operations
void set_method_data_pointer_for_bcp();
void test_method_data_pointer(Register mdp, Label& zero_continue);
void verify_method_data_pointer();
void set_mdp_data_at(Register mdp_in, int constant, Register value);
void increment_mdp_data_at(Address data, bool decrement = false);
void increment_mdp_data_at(Register mdp_in, int constant,
bool decrement = false);
void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
bool decrement = false);
void increment_mask_and_jump(Address counter_addr,
int increment, Address mask,
Register scratch, bool preloaded,
Condition cond, Label* where);
void set_mdp_flag_at(Register mdp_in, int flag_constant);
void test_mdp_data_at(Register mdp_in, int offset, Register value,
Register test_value_out,
Label& not_equal_continue);
void record_klass_in_profile(Register receiver, Register mdp,
Register reg2, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register mdp,
Register reg2, int start_row,
Label& done, bool is_virtual_call);
void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
void update_mdp_by_constant(Register mdp_in, int constant);
void update_mdp_for_ret(Register return_bci);
void profile_taken_branch(Register mdp, Register bumped_count);
void profile_not_taken_branch(Register mdp);
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
Register scratch2,
bool receiver_can_be_null = false);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass, Register scratch);
void profile_typecheck_failed(Register mdp);
void profile_switch_default(Register mdp);
void profile_switch_case(Register index_in_scratch, Register mdp,
Register scratch2);
// Debugging
// only if +VerifyOops && state == atos
void verify_oop(Register reg, TosState state = atos);
// only if +VerifyFPU && (state == ftos || state == dtos)
void verify_FPU(int stack_depth, TosState state = ftos);
#endif // !CC_INTERP
typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
// support for jvmti/dtrace
void notify_method_entry();
void notify_method_exit(TosState state, NotifyMethodExitMode mode);


@ -109,7 +109,15 @@ static void crash_handler(int sig, siginfo_t* info, void* ucVoid) {
}
sigthreadmask(SIG_UNBLOCK, &newset, NULL);
VMError err(NULL, sig, NULL, info, ucVoid);
// support safefetch faults in error handling
ucontext_t* const uc = (ucontext_t*) ucVoid;
address const pc = uc ? os::Aix::ucontext_get_pc(uc) : NULL;
if (uc && pc && StubRoutines::is_safefetch_fault(pc)) {
os::Aix::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
return;
}
VMError err(NULL, sig, pc, info, ucVoid);
err.report_and_die();
}
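The pattern added to each platform's crash_handler above checks whether the faulting pc lies in a SafeFetch stub and, if so, resumes at the stub's continuation, so the error reporter can probe arbitrary addresses without dying. A minimal standalone sketch of the same effect, not VM code: it assumes Linux and uses sigsetjmp/siglongjmp instead of stubs and pc rewriting.

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf probe_env;

static void probe_handler(int) {
  siglongjmp(probe_env, 1);                   // resume at the sigsetjmp below
}

static int safe_fetch32(int* adr, int errval) {
  struct sigaction sa = {}, old;
  sa.sa_handler = probe_handler;
  sigaction(SIGSEGV, &sa, &old);
  int result = errval;
  if (sigsetjmp(probe_env, 1) == 0) {         // first pass: try the load
    result = *(volatile int*)adr;             // may fault; handler jumps back
  }
  sigaction(SIGSEGV, &old, NULL);             // restore previous disposition
  return result;
}

int main() {
  printf("%d\n", safe_fetch32((int*)16, -1)); // prints -1 instead of crashing
}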


@ -99,6 +99,7 @@ class Bsd {
static void set_page_size(int val) { _page_size = val; }
static address ucontext_get_pc(ucontext_t* uc);
static void ucontext_set_pc(ucontext_t* uc, address pc);
static intptr_t* ucontext_get_sp(ucontext_t* uc);
static intptr_t* ucontext_get_fp(ucontext_t* uc);


@ -112,7 +112,16 @@ static void crash_handler(int sig, siginfo_t* info, void* ucVoid) {
}
pthread_sigmask(SIG_UNBLOCK, &newset, NULL);
VMError err(NULL, sig, NULL, info, ucVoid);
// support safefetch faults in error handling
ucontext_t* const uc = (ucontext_t*) ucVoid;
address const pc = uc ? os::Bsd::ucontext_get_pc(uc) : NULL;
if (uc && pc && StubRoutines::is_safefetch_fault(pc)) {
os::Bsd::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
return;
}
VMError err(NULL, sig, pc, info, ucVoid);
err.report_and_die();
}


@ -3732,14 +3732,14 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
// Does this overlap the block we wanted? Give back the overlapped
// parts and try again.
size_t top_overlap = requested_addr + (bytes + gap) - base[i];
if (top_overlap >= 0 && top_overlap < bytes) {
ptrdiff_t top_overlap = requested_addr + (bytes + gap) - base[i];
if (top_overlap >= 0 && (size_t)top_overlap < bytes) {
unmap_memory(base[i], top_overlap);
base[i] += top_overlap;
size[i] = bytes - top_overlap;
} else {
size_t bottom_overlap = base[i] + bytes - requested_addr;
if (bottom_overlap >= 0 && bottom_overlap < bytes) {
ptrdiff_t bottom_overlap = base[i] + bytes - requested_addr;
if (bottom_overlap >= 0 && (size_t)bottom_overlap < bytes) {
unmap_memory(requested_addr, bottom_overlap);
size[i] = bytes - bottom_overlap;
} else {
@ -6003,11 +6003,11 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
}
if (strlen(core_pattern) == 0) {
return 0;
return -1;
}
char *pid_pos = strstr(core_pattern, "%p");
size_t written;
int written;
if (core_pattern[0] == '/') {
written = jio_snprintf(buffer, bufferSize, core_pattern);
@ -6016,8 +6016,7 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
const char* p = get_current_directory(cwd, PATH_MAX);
if (p == NULL) {
assert(p != NULL, "failed to get current directory");
return 0;
return -1;
}
if (core_pattern[0] == '|') {
@ -6029,8 +6028,11 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
}
}
if ((written >= 0) && (written < bufferSize)
&& (pid_pos == NULL) && (core_pattern[0] != '|')) {
if (written < 0) {
return -1;
}
if (((size_t)written < bufferSize) && (pid_pos == NULL) && (core_pattern[0] != '|')) {
int core_uses_pid_file = ::open("/proc/sys/kernel/core_uses_pid", O_RDONLY);
if (core_uses_pid_file != -1) {
@ -6038,7 +6040,7 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
ssize_t ret = ::read(core_uses_pid_file, &core_uses_pid, 1);
::close(core_uses_pid_file);
if (core_uses_pid == '1'){
if (core_uses_pid == '1') {
jio_snprintf(buffer + written, bufferSize - written,
".%d", current_process_id());
}
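Both hunks above trade size_t intermediates for signed types (ptrdiff_t, int) before range checks. With an unsigned type a negative difference wraps, so the ">= 0" half of a bounds test is vacuously true. A small sketch of the pitfall; buf, base and requested_end are illustrative names:

#include <cstddef>
#include <cstdio>

int main() {
  char buf[100];
  char* base          = buf + 50;
  char* requested_end = buf + 10;         // ends below base: no overlap

  size_t    bad  = requested_end - base;  // -40 wraps to a huge value
  ptrdiff_t good = requested_end - base;  // stays -40

  printf("bad  >= 0: %d\n", bad >= 0);    // always 1, check is useless
  printf("good in range: %d\n",
         good >= 0 && (size_t)good < sizeof(buf));  // 0, as intended
  return 0;
}

Casting to size_t only after the signed ">= 0" test, as the fixed code does, keeps both halves of the check meaningful.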


@ -143,6 +143,7 @@ class Linux {
static int vm_default_page_size(void) { return _vm_default_page_size; }
static address ucontext_get_pc(ucontext_t* uc);
static void ucontext_set_pc(ucontext_t* uc, address pc);
static intptr_t* ucontext_get_sp(ucontext_t* uc);
static intptr_t* ucontext_get_fp(ucontext_t* uc);


@ -112,7 +112,16 @@ static void crash_handler(int sig, siginfo_t* info, void* ucVoid) {
}
pthread_sigmask(SIG_UNBLOCK, &newset, NULL);
VMError err(NULL, sig, NULL, info, ucVoid);
// support safefetch faults in error handling
ucontext_t* const uc = (ucontext_t*) ucVoid;
address const pc = uc ? os::Linux::ucontext_get_pc(uc) : NULL;
if (uc && pc && StubRoutines::is_safefetch_fault(pc)) {
os::Linux::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
return;
}
VMError err(NULL, sig, pc, info, ucVoid);
err.report_and_die();
}


@ -137,6 +137,7 @@ class Solaris {
// ucontext_get_fp() is only used by Solaris X86 (see note below)
static intptr_t* ucontext_get_fp(ucontext_t* uc);
static address ucontext_get_pc(ucontext_t* uc);
static void ucontext_set_pc(ucontext_t* uc, address pc);
// For Analyzer Forte AsyncGetCallTrace profiling support:
// Parameter ret_fp is only used by Solaris X86.


@ -109,7 +109,15 @@ static void crash_handler(int sig, siginfo_t* info, void* ucVoid) {
}
thr_sigsetmask(SIG_UNBLOCK, &newset, NULL);
VMError err(NULL, sig, NULL, info, ucVoid);
// support safefetch faults in error handling
ucontext_t* const uc = (ucontext_t*) ucVoid;
address const pc = uc ? os::Solaris::ucontext_get_pc(uc) : NULL;
if (uc && pc && StubRoutines::is_safefetch_fault(pc)) {
os::Solaris::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
return;
}
VMError err(NULL, sig, pc, info, ucVoid);
err.report_and_die();
}


@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,7 +48,5 @@ define_pd_global(intx, StackShadowPages, 6 DEBUG_ONLY(+2));
// Only used on 64 bit platforms
define_pd_global(uintx,HeapBaseMinAddress, 2*G);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions, false);
#endif // OS_CPU_AIX_OJDKPPC_VM_GLOBALS_AIX_PPC_HPP


@ -397,7 +397,7 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
// continue at the next instruction after the faulting read. Returning
// garbage from this read is ok.
thread->set_pending_unsafe_access_error();
uc->uc_mcontext.jmp_context.iar = ((unsigned long)pc) + 4;
os::Aix::ucontext_set_pc(uc, pc + 4);
return 1;
}
}
@ -420,7 +420,7 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
// continue at the next instruction after the faulting read. Returning
// garbage from this read is ok.
thread->set_pending_unsafe_access_error();
uc->uc_mcontext.jmp_context.iar = ((unsigned long)pc) + 4;
os::Aix::ucontext_set_pc(uc, pc + 4);
return 1;
}
}
@ -445,7 +445,7 @@ run_stub:
if (stub != NULL) {
// Save all thread context in case we need to restore it.
if (thread != NULL) thread->set_saved_exception_pc(pc);
uc->uc_mcontext.jmp_context.iar = (unsigned long)stub;
os::Aix::ucontext_set_pc(uc, stub);
return 1;
}


@ -309,6 +309,10 @@ address os::Bsd::ucontext_get_pc(ucontext_t * uc) {
return (address)uc->context_pc;
}
void os::Bsd::ucontext_set_pc(ucontext_t * uc, address pc) {
uc->context_pc = (intptr_t)pc ;
}
intptr_t* os::Bsd::ucontext_get_sp(ucontext_t * uc) {
return (intptr_t*)uc->context_sp;
}
@ -463,7 +467,7 @@ JVM_handle_bsd_signal(int sig,
pc = (address) os::Bsd::ucontext_get_pc(uc);
if (StubRoutines::is_safefetch_fault(pc)) {
uc->context_pc = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
os::Bsd::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
return 1;
}
@ -703,7 +707,7 @@ JVM_handle_bsd_signal(int sig,
// save all thread context in case we need to restore it
if (thread != NULL) thread->set_saved_exception_pc(pc);
uc->context_pc = (intptr_t)stub;
os::Bsd::ucontext_set_pc(uc, stub);
return true;
}


@ -107,6 +107,10 @@ address os::Bsd::ucontext_get_pc(ucontext_t* uc) {
return NULL;
}
void os::Bsd::ucontext_set_pc(ucontext_t * uc, address pc) {
ShouldNotCallThis();
}
ExtendedPC os::fetch_frame_from_context(void* ucVoid,
intptr_t** ret_sp,
intptr_t** ret_fp) {


@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,7 +48,5 @@ define_pd_global(intx, StackShadowPages, 6 DEBUG_ONLY(+2));
// Only used on 64 bit platforms
define_pd_global(uintx,HeapBaseMinAddress, 2*G);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions, false);
#endif // OS_CPU_LINUX_PPC_VM_GLOBALS_LINUX_PPC_HPP


@ -113,6 +113,14 @@ address os::Linux::ucontext_get_pc(ucontext_t * uc) {
return (address)uc->uc_mcontext.regs->nip;
}
// modify PC in ucontext.
// Note: Only use this for a ucontext handed down to a signal handler. See comment
// in ucontext_get_pc.
void os::Linux::ucontext_set_pc(ucontext_t * uc, address pc) {
guarantee(uc->uc_mcontext.regs != NULL, "only use ucontext_set_pc in sigaction context");
uc->uc_mcontext.regs->nip = (unsigned long)pc;
}
intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) {
return (intptr_t*)uc->uc_mcontext.regs->gpr[1/*REG_SP*/];
}
@ -213,7 +221,7 @@ JVM_handle_linux_signal(int sig,
if (uc) {
address const pc = os::Linux::ucontext_get_pc(uc);
if (pc && StubRoutines::is_safefetch_fault(pc)) {
uc->uc_mcontext.regs->nip = (unsigned long)StubRoutines::continuation_for_safefetch_fault(pc);
os::Linux::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
return true;
}
}
@ -360,7 +368,7 @@ JVM_handle_linux_signal(int sig,
// continue at the next instruction after the faulting read. Returning
// garbage from this read is ok.
thread->set_pending_unsafe_access_error();
uc->uc_mcontext.regs->nip = ((unsigned long)pc) + 4;
os::Linux::ucontext_set_pc(uc, pc + 4);
return true;
}
}
@ -379,7 +387,7 @@ JVM_handle_linux_signal(int sig,
// continue at the next instruction after the faulting read. Returning
// garbage from this read is ok.
thread->set_pending_unsafe_access_error();
uc->uc_mcontext.regs->nip = ((unsigned long)pc) + 4;
os::Linux::ucontext_set_pc(uc, pc + 4);
return true;
}
}
@ -402,7 +410,7 @@ JVM_handle_linux_signal(int sig,
if (stub != NULL) {
// Save all thread context in case we need to restore it.
if (thread != NULL) thread->set_saved_exception_pc(pc);
uc->uc_mcontext.regs->nip = (unsigned long)stub;
os::Linux::ucontext_set_pc(uc, stub);
return true;
}


@ -85,11 +85,6 @@ enum {
CON_O7,
};
static inline void set_cont_address(sigcontext* ctx, address addr) {
SIG_PC(ctx) = (intptr_t)addr;
SIG_NPC(ctx) = (intptr_t)(addr+4);
}
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF.
// os::Solaris::fetch_frame_from_ucontext() tries to skip nested
@ -351,6 +346,12 @@ address os::Linux::ucontext_get_pc(ucontext_t* uc) {
return (address) SIG_PC((sigcontext*)uc);
}
void os::Linux::ucontext_set_pc(ucontext_t* uc, address pc) {
sigcontext* ctx = (sigcontext*) uc;
SIG_PC(ctx) = (intptr_t)pc;
SIG_NPC(ctx) = (intptr_t)(pc+4);
}
intptr_t* os::Linux::ucontext_get_sp(ucontext_t *uc) {
return (intptr_t*)
((intptr_t)SIG_REGS((sigcontext*)uc).u_regs[CON_O6] + STACK_BIAS);
@ -366,7 +367,7 @@ intptr_t* os::Linux::ucontext_get_fp(ucontext_t *uc) {
inline static bool checkPrefetch(sigcontext* uc, address pc) {
if (StubRoutines::is_safefetch_fault(pc)) {
set_cont_address(uc, address(StubRoutines::continuation_for_safefetch_fault(pc)));
os::Linux::ucontext_set_pc((ucontext_t*)uc, StubRoutines::continuation_for_safefetch_fault(pc));
return true;
}
return false;
@ -666,7 +667,7 @@ JVM_handle_linux_signal(int sig,
// save all thread context in case we need to restore it
thread->set_saved_exception_pc(pc);
thread->set_saved_exception_npc(npc);
set_cont_address(uc, stub);
os::Linux::ucontext_set_pc((ucontext_t*)uc, stub);
return true;
}
}


@ -122,6 +122,10 @@ address os::Linux::ucontext_get_pc(ucontext_t * uc) {
return (address)uc->uc_mcontext.gregs[REG_PC];
}
void os::Linux::ucontext_set_pc(ucontext_t * uc, address pc) {
uc->uc_mcontext.gregs[REG_PC] = (intptr_t)pc;
}
intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) {
return (intptr_t*)uc->uc_mcontext.gregs[REG_SP];
}
@ -279,7 +283,7 @@ JVM_handle_linux_signal(int sig,
pc = (address) os::Linux::ucontext_get_pc(uc);
if (StubRoutines::is_safefetch_fault(pc)) {
uc->uc_mcontext.gregs[REG_PC] = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
os::Linux::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
return 1;
}
@ -514,7 +518,7 @@ JVM_handle_linux_signal(int sig,
// save all thread context in case we need to restore it
if (thread != NULL) thread->set_saved_exception_pc(pc);
uc->uc_mcontext.gregs[REG_PC] = (greg_t)stub;
os::Linux::ucontext_set_pc(uc, stub);
return true;
}
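The new os::*::ucontext_set_pc helpers above all wrap the same mechanism: a signal handler edits the saved pc in the ucontext, and the kernel's sigreturn resumes the thread at the new address. A hedged standalone sketch, assuming Linux/x86_64 with glibc, where the saved pc lives in gregs[REG_RIP]; "recovery" is an illustrative continuation, not a VM routine:

#define _GNU_SOURCE 1   // for REG_RIP; must precede the includes
#include <signal.h>
#include <ucontext.h>
#include <unistd.h>

extern "C" void recovery() {                  // continuation we redirect to
  const char msg[] = "recovered after fault\n";
  write(2, msg, sizeof(msg) - 1);
  _exit(0);
}

static void handler(int, siginfo_t*, void* ucVoid) {
  ucontext_t* uc = (ucontext_t*)ucVoid;
  uc->uc_mcontext.gregs[REG_RIP] = (greg_t)recovery;
}                                             // sigreturn resumes at recovery

int main() {
  struct sigaction sa = {};
  sa.sa_sigaction = handler;
  sa.sa_flags = SA_SIGINFO;
  sigaction(SIGSEGV, &sa, NULL);
  *(volatile int*)0 = 42;                     // fault: handler redirects pc
  return 1;                                   // never reached
}

On SPARC the helper also sets the "next pc" (REG_nPC) to pc + 4, which is why the Solaris and Linux SPARC variants above write two slots.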


@ -100,6 +100,10 @@ address os::Linux::ucontext_get_pc(ucontext_t* uc) {
ShouldNotCallThis();
}
void os::Linux::ucontext_set_pc(ucontext_t * uc, address pc) {
ShouldNotCallThis();
}
ExtendedPC os::fetch_frame_from_context(void* ucVoid,
intptr_t** ret_sp,
intptr_t** ret_fp) {


@ -184,6 +184,11 @@ ExtendedPC os::Solaris::ucontext_get_ExtendedPC(ucontext_t *uc) {
return ExtendedPC(pc);
}
void os::Solaris::ucontext_set_pc(ucontext_t* uc, address pc) {
uc->uc_mcontext.gregs [REG_PC] = (greg_t) pc;
uc->uc_mcontext.gregs [REG_nPC] = (greg_t) (pc + 4);
}
// Assumes ucontext is valid
intptr_t* os::Solaris::ucontext_get_sp(ucontext_t *uc) {
return (intptr_t*)((intptr_t)uc->uc_mcontext.gregs[REG_SP] + STACK_BIAS);
@ -355,8 +360,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
// SafeFetch() support
if (StubRoutines::is_safefetch_fault(pc)) {
uc->uc_mcontext.gregs[REG_PC] = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
uc->uc_mcontext.gregs[REG_nPC] = uc->uc_mcontext.gregs[REG_PC] + 4;
os::Solaris::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
return 1;
}
@ -494,8 +498,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
// simulate a branch to the stub (a "call" in the safepoint stub case)
// factor me: setPC
uc->uc_mcontext.gregs[REG_PC ] = (greg_t)stub;
uc->uc_mcontext.gregs[REG_nPC] = (greg_t)(stub + 4);
os::Solaris::ucontext_set_pc(uc, stub);
#ifndef PRODUCT
if (TraceJumps) thread->record_jump(stub, NULL, __FILE__, __LINE__);


@ -174,6 +174,10 @@ ExtendedPC os::Solaris::ucontext_get_ExtendedPC(ucontext_t *uc) {
return ExtendedPC((address)uc->uc_mcontext.gregs[REG_PC]);
}
void os::Solaris::ucontext_set_pc(ucontext_t* uc, address pc) {
uc->uc_mcontext.gregs [REG_PC] = (greg_t) pc;
}
// Assumes ucontext is valid
intptr_t* os::Solaris::ucontext_get_sp(ucontext_t *uc) {
return (intptr_t*)uc->uc_mcontext.gregs[REG_SP];
@ -411,7 +415,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
pc = (address) uc->uc_mcontext.gregs[REG_PC];
if (StubRoutines::is_safefetch_fault(pc)) {
uc->uc_mcontext.gregs[REG_PC] = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
os::Solaris::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
return true;
}
@ -614,8 +618,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
if (thread != NULL) thread->set_saved_exception_pc(pc);
// 12/02/99: On Sparc it appears that the full context is also saved
// but as yet, no one looks at or restores that saved context
// factor me: setPC
uc->uc_mcontext.gregs[REG_PC] = (greg_t)stub;
os::Solaris::ucontext_set_pc(uc, stub);
return true;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -926,9 +926,6 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
// Must already have disposed of the old blob somehow.
assert(blob() == NULL, "must be empty");
#ifdef ASSERT
#endif
// Take the new blob away from cb.
set_blob(cb->blob());
// Take over all the section pointers.


@ -4306,7 +4306,18 @@ void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool succes
log->inline_fail("reason unknown");
}
}
#if INCLUDE_TRACE
EventCompilerInlining event;
if (event.should_commit()) {
event.set_compileID(compilation()->env()->task()->compile_id());
event.set_message(msg);
event.set_succeeded(success);
event.set_bci(bci());
event.set_caller(method()->get_Method());
event.set_callee(callee->to_trace_struct());
event.commit();
}
#endif // INCLUDE_TRACE
if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
return;
}
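The EventCompilerInlining block added above follows the usual tracing idiom: stack-allocate the event, test should_commit() first so the set_* work is skipped whenever the event is not being recorded, then commit(). A mock sketch of the idiom only; the real class is generated from trace metadata, so every member here is a stand-in:

#include <cstdio>

struct EventCompilerInliningMock {
  bool enabled;                        // stand-in for "is this event recorded?"
  int  compile_id, bci;
  bool succeeded;
  const char* message;

  explicit EventCompilerInliningMock(bool on) : enabled(on) {}
  bool should_commit() const { return enabled; }   // cheap test, done first
  void set_compileID(int id)      { compile_id = id; }
  void set_bci(int b)             { bci = b; }
  void set_succeeded(bool s)      { succeeded = s; }
  void set_message(const char* m) { message = m; }
  void commit() const {
    printf("inline event: id=%d bci=%d ok=%d msg=%s\n",
           compile_id, bci, (int)succeeded, message);
  }
};

int main() {
  EventCompilerInliningMock event(true);
  if (event.should_commit()) {         // all the set_* work sits inside the guard
    event.set_compileID(42);
    event.set_message("inline");
    event.set_succeeded(true);
    event.set_bci(7);
    event.commit();
  }
  return 0;
}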


@ -48,6 +48,7 @@
#include "runtime/deoptimization.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/xmlstream.hpp"
#include "trace/tracing.hpp"
#ifdef COMPILER2
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciTypeFlow.hpp"
@ -1466,3 +1467,13 @@ void ciMethod::print_impl(outputStream* st) {
st->print(" loaded=false");
}
}
#if INCLUDE_TRACE
TraceStructCiMethod ciMethod::to_trace_struct() const {
TraceStructCiMethod result;
result.set_class(holder()->name()->as_utf8());
result.set_name(name()->as_utf8());
result.set_signature(signature()->as_symbol()->as_utf8());
return result;
}
#endif


@ -32,13 +32,14 @@
#include "compiler/methodLiveness.hpp"
#include "prims/methodHandles.hpp"
#include "utilities/bitMap.hpp"
#include "trace/tracing.hpp"
class ciMethodBlocks;
class MethodLiveness;
class BitMap;
class Arena;
class BCEscapeAnalyzer;
class InlineTree;
// ciMethod
//
@ -52,6 +53,7 @@ class ciMethod : public ciMetadata {
friend class ciBytecodeStream;
friend class ciMethodHandle;
friend class ciReplay;
friend class InlineTree;
private:
// General method information.
@ -95,12 +97,6 @@ class ciMethod : public ciMetadata {
ciMethod(methodHandle h_m, ciInstanceKlass* holder);
ciMethod(ciInstanceKlass* holder, ciSymbol* name, ciSymbol* signature, ciInstanceKlass* accessor);
Method* get_Method() const {
Method* m = (Method*)_metadata;
assert(m != NULL, "illegal use of unloaded method");
return m;
}
oop loader() const { return _holder->loader(); }
const char* type_string() { return "ciMethod"; }
@ -158,6 +154,11 @@ class ciMethod : public ciMetadata {
}
}
Method* get_Method() const {
Method* m = (Method*)_metadata;
assert(m != NULL, "illegal use of unloaded method");
return m;
}
// Method code and related information.
address code() { if (_code == NULL) load_code(); return _code; }
@ -339,6 +340,10 @@ class ciMethod : public ciMetadata {
// Print the name of this method in various incarnations.
void print_name(outputStream* st = tty);
void print_short_name(outputStream* st = tty);
#if INCLUDE_TRACE
TraceStructCiMethod to_trace_struct() const;
#endif
};
#endif // SHARE_VM_CI_CIMETHOD_HPP


@ -467,6 +467,12 @@ void ClassLoaderData::free_deallocate_list() {
} else {
ShouldNotReachHere();
}
} else {
// Metadata is alive.
// If scratch_class is on stack then it shouldn't be on this list!
assert(!m->is_klass() || !((InstanceKlass*)m)->is_scratch_class(),
"scratch classes on this list should be dead");
// Also should assert that other metadata on the list was found in handles.
}
}
}
@ -737,11 +743,22 @@ bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
// Move class loader data from main list to the unloaded list for unloading
// and deallocation later.
bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure, bool clean_alive) {
bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure,
bool clean_previous_versions) {
ClassLoaderData* data = _head;
ClassLoaderData* prev = NULL;
bool seen_dead_loader = false;
// Mark metadata seen on the stack only so we can delete unneeded entries.
// Only walk all metadata, including the expensive code cache walk, for Full GC
// and only if class redefinition and if there's previous versions of
// Klasses to delete.
bool walk_all_metadata = clean_previous_versions &&
JvmtiExport::has_redefined_a_class() &&
InstanceKlass::has_previous_versions();
MetadataOnStackMark md_on_stack(walk_all_metadata);
// Save previous _unloading pointer for CMS which may add to unloading list before
// purging and we don't want to rewalk the previously unloaded class loader data.
_saved_unloading = _unloading;
@ -749,6 +766,11 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure, boo
data = _head;
while (data != NULL) {
if (data->is_alive(is_alive_closure)) {
// clean metaspace
if (walk_all_metadata) {
data->classes_do(InstanceKlass::purge_previous_versions);
}
data->free_deallocate_list();
prev = data;
data = data->next();
continue;
@ -770,11 +792,6 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure, boo
_unloading = dead;
}
if (clean_alive) {
// Clean previous versions and the deallocate list.
ClassLoaderDataGraph::clean_metaspaces();
}
if (seen_dead_loader) {
post_class_unload_events();
}
@ -782,21 +799,6 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure, boo
return seen_dead_loader;
}
void ClassLoaderDataGraph::clean_metaspaces() {
// mark metadata seen on the stack and code cache so we can delete unneeded entries.
bool has_redefined_a_class = JvmtiExport::has_redefined_a_class();
MetadataOnStackMark md_on_stack(has_redefined_a_class);
if (has_redefined_a_class) {
for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
data->classes_do(InstanceKlass::purge_previous_versions);
}
}
// Should purge the previous version before deallocating.
free_deallocate_lists();
}
void ClassLoaderDataGraph::purge() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
ClassLoaderData* list = _unloading;
@ -829,12 +831,6 @@ void ClassLoaderDataGraph::post_class_unload_events(void) {
#endif
}
void ClassLoaderDataGraph::free_deallocate_lists() {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
cld->free_deallocate_list();
}
}
// CDS support
// Global metaspaces for writing information to the shared archive. When


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,7 +75,6 @@ class ClassLoaderDataGraph : public AllStatic {
static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
static void post_class_unload_events(void);
static void clean_metaspaces();
public:
static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
static void purge();
@ -95,7 +94,7 @@ class ClassLoaderDataGraph : public AllStatic {
static void methods_do(void f(Method*));
static void loaded_classes_do(KlassClosure* klass_closure);
static void classes_unloading_do(void f(Klass* const));
static bool do_unloading(BoolObjectClosure* is_alive, bool clean_alive);
static bool do_unloading(BoolObjectClosure* is_alive, bool clean_previous_versions);
// CMS support.
static void remember_new_clds(bool remember) { _saved_head = (remember ? _head : NULL); }
@ -114,8 +113,6 @@ class ClassLoaderDataGraph : public AllStatic {
static bool has_metaspace_oom() { return _metaspace_oom; }
static void set_metaspace_oom(bool value) { _metaspace_oom = value; }
static void free_deallocate_lists();
static void dump_on(outputStream * const out) PRODUCT_RETURN;
static void dump() { dump_on(tty); }
static void verify();


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,26 +33,31 @@
#include "services/threadService.hpp"
#include "utilities/chunkedList.hpp"
volatile MetadataOnStackBuffer* MetadataOnStackMark::_used_buffers = NULL;
volatile MetadataOnStackBuffer* MetadataOnStackMark::_free_buffers = NULL;
MetadataOnStackBuffer* MetadataOnStackMark::_used_buffers = NULL;
MetadataOnStackBuffer* MetadataOnStackMark::_free_buffers = NULL;
MetadataOnStackBuffer* MetadataOnStackMark::_current_buffer = NULL;
NOT_PRODUCT(bool MetadataOnStackMark::_is_active = false;)
// Walk metadata on the stack and mark it so that redefinition doesn't delete
// it. Class unloading also walks the previous versions and might try to
// delete it, so this class is used by class unloading also.
MetadataOnStackMark::MetadataOnStackMark(bool visit_code_cache) {
// it. Class unloading only deletes in-error class files, methods created by
// the relocator and dummy constant pools. None of these appear anywhere except
// in metadata Handles.
MetadataOnStackMark::MetadataOnStackMark(bool redefinition_walk) {
assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
assert(_used_buffers == NULL, "sanity check");
assert(!_is_active, "MetadataOnStackMarks do not nest");
NOT_PRODUCT(_is_active = true;)
Threads::metadata_do(Metadata::mark_on_stack);
if (visit_code_cache) {
Threads::metadata_handles_do(Metadata::mark_on_stack);
if (redefinition_walk) {
Threads::metadata_do(Metadata::mark_on_stack);
CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
CompileBroker::mark_on_stack();
JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack);
ThreadService::metadata_do(Metadata::mark_on_stack);
}
CompileBroker::mark_on_stack();
JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack);
ThreadService::metadata_do(Metadata::mark_on_stack);
}
MetadataOnStackMark::~MetadataOnStackMark() {
@ -60,10 +65,9 @@ MetadataOnStackMark::~MetadataOnStackMark() {
// Unmark everything that was marked. Can't do the same walk because
// redefine classes messes up the code cache so the set of methods
// might not be the same.
retire_current_buffer();
retire_buffer_for_thread(Thread::current());
MetadataOnStackBuffer* buffer = const_cast<MetadataOnStackBuffer* >(_used_buffers);
MetadataOnStackBuffer* buffer = _used_buffers;
while (buffer != NULL) {
// Clear on stack state for all metadata.
size_t size = buffer->size();
@ -77,7 +81,7 @@ MetadataOnStackMark::~MetadataOnStackMark() {
// Move the buffer to the free list.
buffer->clear();
buffer->set_next_used(NULL);
buffer->set_next_free(const_cast<MetadataOnStackBuffer*>(_free_buffers));
buffer->set_next_free(_free_buffers);
_free_buffers = buffer;
// Step to next used buffer.
@ -93,35 +97,23 @@ void MetadataOnStackMark::retire_buffer(MetadataOnStackBuffer* buffer) {
if (buffer == NULL) {
return;
}
MetadataOnStackBuffer* old_head;
do {
old_head = const_cast<MetadataOnStackBuffer*>(_used_buffers);
buffer->set_next_used(old_head);
} while (Atomic::cmpxchg_ptr(buffer, &_used_buffers, old_head) != old_head);
buffer->set_next_used(_used_buffers);
_used_buffers = buffer;
}
void MetadataOnStackMark::retire_buffer_for_thread(Thread* thread) {
retire_buffer(thread->metadata_on_stack_buffer());
thread->set_metadata_on_stack_buffer(NULL);
}
bool MetadataOnStackMark::has_buffer_for_thread(Thread* thread) {
return thread->metadata_on_stack_buffer() != NULL;
// Current buffer is full or we're ready to walk them, add it to the used list.
void MetadataOnStackMark::retire_current_buffer() {
retire_buffer(_current_buffer);
_current_buffer = NULL;
}
// Get buffer off free list.
MetadataOnStackBuffer* MetadataOnStackMark::allocate_buffer() {
MetadataOnStackBuffer* allocated;
MetadataOnStackBuffer* new_head;
MetadataOnStackBuffer* allocated = _free_buffers;
do {
allocated = const_cast<MetadataOnStackBuffer*>(_free_buffers);
if (allocated == NULL) {
break;
}
new_head = allocated->next_free();
} while (Atomic::cmpxchg_ptr(new_head, &_free_buffers, allocated) != allocated);
if (allocated != NULL) {
_free_buffers = allocated->next_free();
}
if (allocated == NULL) {
allocated = new MetadataOnStackBuffer();
@ -133,10 +125,10 @@ MetadataOnStackBuffer* MetadataOnStackMark::allocate_buffer() {
}
// Record which objects are marked so we can unmark the same objects.
void MetadataOnStackMark::record(Metadata* m, Thread* thread) {
void MetadataOnStackMark::record(Metadata* m) {
assert(_is_active, "metadata on stack marking is active");
MetadataOnStackBuffer* buffer = thread->metadata_on_stack_buffer();
MetadataOnStackBuffer* buffer = _current_buffer;
if (buffer != NULL && buffer->is_full()) {
retire_buffer(buffer);
@ -145,7 +137,7 @@ void MetadataOnStackMark::record(Metadata* m, Thread* thread) {
if (buffer == NULL) {
buffer = allocate_buffer();
thread->set_metadata_on_stack_buffer(buffer);
_current_buffer = buffer;
}
buffer->push(m);
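The rewritten buffer management above drops the Atomic::cmpxchg_ptr loops because the marking now runs on one thread at a safepoint: a current buffer fills, is retired onto a used list, and buffers recycle through a free list. A single-threaded standalone sketch of that scheme, with stand-in types (Buffer here plays the role of ChunkedList):

#include <cstddef>

struct Metadata;                       // opaque payload, as in the VM

struct Buffer {
  static const size_t kCap = 1024;
  Metadata* items[kCap];
  size_t    count;
  Buffer*   next;
  Buffer() : count(0), next(NULL) {}
  bool is_full() const   { return count == kCap; }
  void push(Metadata* m) { items[count++] = m; }
};

static Buffer* used_buffers   = NULL;
static Buffer* free_buffers   = NULL;
static Buffer* current_buffer = NULL;

static Buffer* allocate_buffer() {
  Buffer* b = free_buffers;            // plain load: single-threaded now
  if (b != NULL) { free_buffers = b->next; b->next = NULL; }
  else           { b = new Buffer(); }
  return b;
}

static void retire_buffer(Buffer* b) {
  if (b == NULL) return;
  b->next = used_buffers;              // plain list push, no cmpxchg needed
  used_buffers = b;
}

static void record(Metadata* m) {
  Buffer* b = current_buffer;
  if (b != NULL && b->is_full()) { retire_buffer(b); b = NULL; }
  if (b == NULL) { b = allocate_buffer(); current_buffer = b; }
  b->push(m);
}

int main() {
  Metadata* fake = (Metadata*)0x1000;  // placeholder pointer for the demo
  for (int i = 0; i < 3000; i++) record(fake);
  retire_buffer(current_buffer);       // i.e. retire_current_buffer()
  current_buffer = NULL;
  return 0;
}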


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,23 +36,23 @@ typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
// or executing methods, so that it can't be deleted during class redefinition
// and class unloading.
// This is also used for other things that can be deallocated, like class
// metadata during parsing, relocated methods, and methods in backtraces.
// metadata during parsing if errors occur, relocated methods, and temporary
// constant pools.
class MetadataOnStackMark : public StackObj {
NOT_PRODUCT(static bool _is_active;)
static volatile MetadataOnStackBuffer* _used_buffers;
static volatile MetadataOnStackBuffer* _free_buffers;
static MetadataOnStackBuffer* _used_buffers;
static MetadataOnStackBuffer* _free_buffers;
static MetadataOnStackBuffer* _current_buffer;
static MetadataOnStackBuffer* allocate_buffer();
static void retire_buffer(MetadataOnStackBuffer* buffer);
public:
MetadataOnStackMark(bool visit_code_cache);
MetadataOnStackMark(bool redefinition_walk);
~MetadataOnStackMark();
static void record(Metadata* m, Thread* thread);
static void retire_buffer_for_thread(Thread* thread);
static bool has_buffer_for_thread(Thread* thread);
static void record(Metadata* m);
static void retire_current_buffer();
};
#endif // SHARE_VM_CLASSFILE_METADATAONSTACKMARK_HPP


@ -1368,8 +1368,6 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) {
ClassLoaderData* loader_data = k->class_loader_data();
Handle class_loader_h(THREAD, loader_data->class_loader());
for (uintx it = 0; it < GCExpandToAllocateDelayMillis; it++){}
// for bootstrap and other parallel classloaders don't acquire lock,
// use placeholder token
// If a parallelCapable class loader calls define_instance_class instead of
@ -1690,9 +1688,11 @@ public:
// Assumes classes in the SystemDictionary are only unloaded at a safepoint
// Note: anonymous classes are not in the SD.
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive, bool clean_alive) {
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive,
bool clean_previous_versions) {
// First, mark for unload all ClassLoaderData referencing a dead class loader.
bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive, clean_alive);
bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive,
clean_previous_versions);
if (unloading_occurred) {
dictionary()->do_unloading();
constraints()->purge_loader_constraints();


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -335,7 +335,8 @@ public:
// Unload (that is, break root links to) all unmarked classes and
// loaders. Returns "true" iff something was unloaded.
static bool do_unloading(BoolObjectClosure* is_alive, bool clean_alive = true);
static bool do_unloading(BoolObjectClosure* is_alive,
bool clean_previous_versions = true);
// Used by DumpSharedSpaces only to remove classes that failed verification
static void remove_classes_in_error_state();


@ -1572,17 +1572,12 @@ void nmethod::post_compiled_method_unload() {
set_unload_reported();
}
void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive, bool mark_on_stack) {
void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
if (ic->is_icholder_call()) {
// The only exception is compiledICHolder oops which may
// yet be marked below. (We check this further below).
CompiledICHolder* cichk_oop = ic->cached_icholder();
if (mark_on_stack) {
Metadata::mark_on_stack(cichk_oop->holder_method());
Metadata::mark_on_stack(cichk_oop->holder_klass());
}
if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
return;
@ -1590,10 +1585,6 @@ void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_a
} else {
Metadata* ic_oop = ic->cached_metadata();
if (ic_oop != NULL) {
if (mark_on_stack) {
Metadata::mark_on_stack(ic_oop);
}
if (ic_oop->is_klass()) {
if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
return;
@ -1634,8 +1625,7 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred)
// The RedefineClasses() API can cause the class unloading invariant
// to no longer be true. See jvmtiExport.hpp for details.
// Also, leave a debugging breadcrumb in local flag.
bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
if (a_class_was_redefined) {
if (JvmtiExport::has_redefined_a_class()) {
// This set of the unloading_occurred flag is done before the
// call to post_compiled_method_unload() so that the unloading
// of this nmethod is reported.
@ -1654,7 +1644,7 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred)
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
CompiledIC *ic = CompiledIC_at(&iter);
clean_ic_if_metadata_is_dead(ic, is_alive, false);
clean_ic_if_metadata_is_dead(ic, is_alive);
}
}
}
@ -1741,33 +1731,6 @@ bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *i
return false;
}
void nmethod::mark_metadata_on_stack_at(RelocIterator* iter_at_metadata) {
assert(iter_at_metadata->type() == relocInfo::metadata_type, "Wrong relocation type");
metadata_Relocation* r = iter_at_metadata->metadata_reloc();
// In this metadata, we must only follow those metadatas directly embedded in
// the code. Other metadatas (oop_index>0) are seen as part of
// the metadata section below.
assert(1 == (r->metadata_is_immediate()) +
(r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
"metadata must be found in exactly one place");
if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
Metadata* md = r->metadata_value();
if (md != _method) Metadata::mark_on_stack(md);
}
}
void nmethod::mark_metadata_on_stack_non_relocs() {
// Visit the metadata section
for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
if (*p == Universe::non_oop_word() || *p == NULL) continue; // skip non-oops
Metadata* md = *p;
Metadata::mark_on_stack(md);
}
// Visit metadata not embedded in the other places.
if (_method != NULL) Metadata::mark_on_stack(_method);
}
bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
ResourceMark rm;
@ -1790,19 +1753,13 @@ bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_
// The RedefineClasses() API can cause the class unloading invariant
// to no longer be true. See jvmtiExport.hpp for details.
// Also, leave a debugging breadcrumb in local flag.
bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
if (a_class_was_redefined) {
if (JvmtiExport::has_redefined_a_class()) {
// This set of the unloading_occurred flag is done before the
// call to post_compiled_method_unload() so that the unloading
// of this nmethod is reported.
unloading_occurred = true;
}
// When class redefinition is used all metadata in the CodeCache has to be recorded,
// so that unused "previous versions" can be purged. Since walking the CodeCache can
// be expensive, the "mark on stack" is piggy-backed on this parallel unloading code.
bool mark_metadata_on_stack = a_class_was_redefined;
// Exception cache
clean_exception_cache(is_alive);
@ -1818,7 +1775,7 @@ bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_
if (unloading_occurred) {
// If class unloading occurred we first iterate over all inline caches and
// clear ICs where the cached oop is referring to an unloaded klass or method.
clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive, mark_metadata_on_stack);
clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
}
postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
@ -1839,16 +1796,10 @@ bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_
break;
case relocInfo::metadata_type:
if (mark_metadata_on_stack) {
mark_metadata_on_stack_at(&iter);
}
break; // nothing to do.
}
}
if (mark_metadata_on_stack) {
mark_metadata_on_stack_non_relocs();
}
if (is_unloaded) {
return postponed;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -593,9 +593,6 @@ public:
bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
void mark_metadata_on_stack_at(RelocIterator* iter_at_metadata);
void mark_metadata_on_stack_non_relocs();
public:
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
OopClosure* f);


@ -2170,12 +2170,13 @@ void ConcurrentMark::completeCleanup() {
g1h->secondary_free_list_add(&tmp_free_list);
SecondaryFreeList_lock->notify_all();
}
#ifndef PRODUCT
if (G1StressConcRegionFreeing) {
for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
os::sleep(Thread::current(), (jlong) 1, false);
}
}
#endif
}
}
assert(tmp_free_list.is_empty(), "post-condition");
@ -2532,11 +2533,6 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
G1CMTraceTime trace("Unloading", G1Log::finer());
if (ClassUnloadingWithConcurrentMark) {
// Cleaning of klasses depends on correct information from MetadataMarkOnStack. The CodeCache::mark_on_stack
// part is too slow to be done serially, so it is handled during the weakRefsWorkParallelPart phase.
// Defer the cleaning until we have complete on_stack data.
MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */);
bool purged_classes;
{
@ -2548,11 +2544,6 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
}
{
G1CMTraceTime trace("Deallocate Metadata", G1Log::finest());
ClassLoaderDataGraph::free_deallocate_lists();
}
}
if (G1StringDedup::is_enabled()) {


@ -853,7 +853,7 @@ public:
// Returns the card bitmap for a given task or worker id.
BitMap* count_card_bitmap_for(uint worker_id) {
assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
assert(worker_id < _max_worker_id, "oob");
assert(_count_card_bitmaps != NULL, "uninitialized");
BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
@ -863,7 +863,7 @@ public:
// Returns the array containing the marked bytes for each region,
// for the given worker or task id.
size_t* count_marked_bytes_array_for(uint worker_id) {
assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
assert(worker_id < _max_worker_id, "oob");
assert(_count_marked_bytes != NULL, "uninitialized");
size_t* marked_bytes_array = _count_marked_bytes[worker_id];
assert(marked_bytes_array != NULL, "uninitialized");

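Many hunks in this commit, including the two above, delete a leading 0 <= x conjunct from asserts on unsigned values. The test is a tautology for an unsigned type and typically draws a compiler warning; a minimal self-contained illustration:

#include <cassert>

int main() {
  unsigned int worker_id = 0u;
  const unsigned int max_worker_id = 4u;
  // For an unsigned value, (0 <= worker_id) is always true, so compilers
  // such as gcc with -Wextra/-Wtype-limits warn "comparison is always true
  // due to limited range of data type". The upper-bound check alone is the
  // whole precondition.
  assert(worker_id < max_worker_id);
  return 0;
}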

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -84,13 +84,13 @@ class G1CardCounts: public CHeapObj<mtGC> {
"_ct_bot: " PTR_FORMAT,
p2i(card_ptr), p2i(_ct_bot)));
size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
assert(card_num >= 0 && card_num < _reserved_max_card_num,
assert(card_num < _reserved_max_card_num,
err_msg("card pointer out of range: " PTR_FORMAT, p2i(card_ptr)));
return card_num;
}
jbyte* card_num_2_ptr(size_t card_num) {
assert(card_num >= 0 && card_num < _reserved_max_card_num,
assert(card_num < _reserved_max_card_num,
err_msg("card num out of range: "SIZE_FORMAT, card_num));
return (jbyte*) (_ct_bot + card_num);
}


@ -4909,10 +4909,6 @@ private:
clean_nmethod(claimed_nmethods[i]);
}
}
// The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark.
// Need to retire the buffers now that this thread has stopped cleaning nmethods.
MetadataOnStackMark::retire_buffer_for_thread(Thread::current());
}
void work_second_pass(uint worker_id) {
@ -4965,9 +4961,6 @@ public:
// G1 specific cleanup work that has
// been moved here to be done in parallel.
ik->clean_dependent_nmethods();
if (JvmtiExport::has_redefined_a_class()) {
InstanceKlass::purge_previous_versions(ik);
}
}
void work() {
@ -5002,18 +4995,8 @@ public:
_klass_cleaning_task(is_alive) {
}
void pre_work_verification() {
assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
}
void post_work_verification() {
assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
}
// The parallel work done by all worker threads.
void work(uint worker_id) {
pre_work_verification();
// Do first pass of code cache cleaning.
_code_cache_task.work_first_pass(worker_id);
@ -5032,8 +5015,6 @@ public:
// Clean all klasses that were not unloaded.
_klass_cleaning_task.work();
post_work_verification();
}
};
@ -5425,7 +5406,7 @@ public:
// limit is set using max_num_q() - which was set using ParallelGCThreads.
// So this must be true - but assert just in case someone decides to
// change the worker ids.
assert(0 <= worker_id && worker_id < limit, "sanity");
assert(worker_id < limit, "sanity");
assert(!rp->discovery_is_atomic(), "check this code");
// Select discovered lists [i, i+stride, i+2*stride,...,limit)


@ -324,9 +324,8 @@ void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
bool during_conc_mark,
size_t marked_bytes) {
assert(0 <= marked_bytes && marked_bytes <= used(),
err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
marked_bytes, used()));
assert(marked_bytes <= used(),
err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT, marked_bytes, used()));
_prev_top_at_mark_start = top();
_prev_marked_bytes = marked_bytes;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -270,7 +270,7 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, Heap
const uint n_regions = hrclaimer->n_regions();
for (uint count = 0; count < n_regions; count++) {
const uint index = (start_index + count) % n_regions;
assert(0 <= index && index < n_regions, "sanity");
assert(index < n_regions, "sanity");
// Skip over unavailable regions
if (!is_available(index)) {
continue;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -541,7 +541,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
PerRegionTable*
OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
assert(ind < _max_fine_entries, "Preconditions.");
PerRegionTable* prt = _fine_grain_regions[ind];
while (prt != NULL && prt->hr() != hr) {
prt = prt->collision_list_next();

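find_region_table above is a plain separate-chaining hash lookup: index the bucket array, then walk the collision list until the entry for this heap region turns up. A generic sketch of that shape, with toy types standing in for PerRegionTable:

#include <cstddef>

struct PerRegionTable {
  const void* hr;                  // key: the heap region this table covers
  PerRegionTable* collision_next;  // next entry in the same bucket
};

// Separate-chaining lookup, mirroring the loop in find_region_table.
PerRegionTable* find_region_table(PerRegionTable* const* buckets,
                                  std::size_t num_buckets, std::size_t ind,
                                  const void* hr) {
  if (ind >= num_buckets) return nullptr;  // the real code asserts instead
  PerRegionTable* prt = buckets[ind];
  while (prt != nullptr && prt->hr != hr) {
    prt = prt->collision_next;
  }
  return prt;  // nullptr if not present
}

int main() {
  int r1, r2;
  PerRegionTable b{&r2, nullptr}, a{&r1, &b};
  PerRegionTable* buckets[4] = {nullptr, &a, nullptr, nullptr};
  return find_region_table(buckets, 4, 1, &r2) == &b ? 0 : 1;
}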

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,7 +57,7 @@ void HeapRegionSetBase::verify() {
check_mt_safety();
guarantee(( is_empty() && length() == 0 && total_capacity_bytes() == 0) ||
(!is_empty() && length() >= 0 && total_capacity_bytes() >= 0),
(!is_empty() && length() > 0 && total_capacity_bytes() > 0) ,
hrs_ext_msg(this, "invariant"));
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,7 +58,7 @@ void PtrQueue::flush_impl() {
void PtrQueue::enqueue_known_active(void* ptr) {
assert(0 <= _index && _index <= _sz, "Invariant.");
assert(_index <= _sz, "Invariant.");
assert(_index == 0 || _buf != NULL, "invariant");
while (_index == 0) {
@ -68,7 +68,7 @@ void PtrQueue::enqueue_known_active(void* ptr) {
assert(_index > 0, "postcondition");
_index -= oopSize;
_buf[byte_index_to_index((int)_index)] = ptr;
assert(0 <= _index && _index <= _sz, "Invariant.");
assert(_index <= _sz, "Invariant.");
}
void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
@ -194,7 +194,6 @@ void PtrQueue::handle_zero_index() {
_buf = qset()->allocate_buffer();
_sz = qset()->buffer_size();
_index = _sz;
assert(0 <= _index && _index <= _sz, "Invariant.");
}
bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -195,8 +195,9 @@ process_chunk_boundaries(Space* sp,
// our closures depend on this property and do not protect against
// double scans.
uintptr_t cur_chunk_index = addr_to_chunk_index(chunk_mr.start());
cur_chunk_index = cur_chunk_index - lowest_non_clean_base_chunk_index;
uintptr_t start_chunk_index = addr_to_chunk_index(chunk_mr.start());
assert(start_chunk_index >= lowest_non_clean_base_chunk_index, "Bounds error.");
uintptr_t cur_chunk_index = start_chunk_index - lowest_non_clean_base_chunk_index;
NOISY(tty->print_cr("===========================================================================");)
NOISY(tty->print_cr(" process_chunk_boundary: Called with [" PTR_FORMAT "," PTR_FORMAT ")",
@ -242,8 +243,7 @@ process_chunk_boundaries(Space* sp,
if (first_dirty_card != NULL) {
NOISY(tty->print_cr(" LNC: Found a dirty card at " PTR_FORMAT " in current chunk",
first_dirty_card);)
assert(0 <= cur_chunk_index && cur_chunk_index < lowest_non_clean_chunk_size,
"Bounds error.");
assert(cur_chunk_index < lowest_non_clean_chunk_size, "Bounds error.");
assert(lowest_non_clean[cur_chunk_index] == NULL,
"Write exactly once : value should be stable hereafter for this round");
lowest_non_clean[cur_chunk_index] = first_dirty_card;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -103,7 +103,7 @@ class AdaptiveWeightedAverage : public CHeapObj<mtGC> {
static inline float exp_avg(float avg, float sample,
unsigned int weight) {
assert(0 <= weight && weight <= 100, "weight must be a percent");
assert(weight <= 100, "weight must be a percent");
return (100.0F - weight) * avg / 100.0F + weight * sample / 100.0F;
}
static inline size_t exp_avg(size_t avg, size_t sample,

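exp_avg treats weight as a percentage applied to the new sample. A small worked check mirroring the formula in the hunk above: with avg = 10, sample = 20 and weight = 25, the result is 0.75 * 10 + 0.25 * 20 = 12.5.

#include <cassert>

// Exponentially weighted average with 'weight' as a percent in [0, 100],
// copied in shape from the exp_avg shown above.
static float exp_avg(float avg, float sample, unsigned int weight) {
  assert(weight <= 100);
  return (100.0f - weight) * avg / 100.0f + weight * sample / 100.0f;
}

int main() {
  // 25% weight on the new sample: 0.75 * 10 + 0.25 * 20 == 12.5 exactly
  assert(exp_avg(10.0f, 20.0f, 25) == 12.5f);
  return 0;
}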

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,6 @@ public:
MemRegion::set_end(e);
}
void set_word_size(size_t ws) {
assert(ws >= 0, "should be a non-zero range");
MemRegion::set_word_size(ws);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,8 +36,7 @@
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
MutableSpace::MutableSpace(size_t alignment): ImmutableSpace(), _top(NULL), _alignment(alignment) {
assert(MutableSpace::alignment() >= 0 &&
MutableSpace::alignment() % os::vm_page_size() == 0,
assert(MutableSpace::alignment() % os::vm_page_size() == 0,
"Space should be aligned");
_mangler = new MutableSpaceMangler(this);
}


@ -562,7 +562,6 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
assert(new_size >= 0, "bad size");
if (new_size == 0) return NULL;
#ifdef ASSERT
if (UseMallocOnly) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -792,6 +792,5 @@ void BlockOffsetArrayContigSpace::zero_bottom_entry() {
}
size_t BlockOffsetArrayContigSpace::last_active_index() const {
size_t result = _next_offset_index - 1;
return result >= 0 ? result : 0;
return _next_offset_index == 0 ? 0 : _next_offset_index - 1;
}

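This is one hunk where the deleted check hid a real bug rather than a tautology: result is a size_t, so at _next_offset_index == 0 the subtraction wraps to SIZE_MAX and result >= 0 still holds. A standalone illustration of the failure and the fix, assuming the index type is size_t as in the original:

#include <cassert>
#include <cstddef>

// Buggy: for idx == 0 the subtraction wraps, and (result >= 0) is always
// true for size_t, so the clamp never fires.
std::size_t last_active_index_buggy(std::size_t idx) {
  std::size_t result = idx - 1;
  return result >= 0 ? result : 0;  // tautology; returns SIZE_MAX at idx == 0
}

// Fixed: test before subtracting, as in the hunk above.
std::size_t last_active_index_fixed(std::size_t idx) {
  return idx == 0 ? 0 : idx - 1;
}

int main() {
  assert(last_active_index_fixed(0) == 0);
  assert(last_active_index_buggy(0) != 0);  // wrapped to SIZE_MAX
  assert(last_active_index_fixed(5) == 4);
  return 0;
}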

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,7 +52,7 @@ CodeHeap::CodeHeap(const char* name, const int code_blob_type)
void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");
assert( beg < _number_of_committed_segments, "interval begin out of bounds");
assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
// setup _segmap pointers for faster indexing
address p = (address)_segmap.low() + beg;
@ -63,7 +63,7 @@ void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");
assert( beg < _number_of_committed_segments, "interval begin out of bounds");
assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
// setup _segmap pointers for faster indexing
address p = (address)_segmap.low() + beg;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -987,7 +987,7 @@ inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt)
id = next_id();
}
}
assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)");
assert(id < _max_num_q, "Id is out-of-bounds (call Freud?)");
// Get the discovered queue to which we will add
DiscoveredList* list = NULL;
@ -1345,7 +1345,7 @@ ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
}
const char* ReferenceProcessor::list_name(uint i) {
assert(i >= 0 && i <= _max_num_q * number_of_subclasses_of_ref(),
assert(i <= _max_num_q * number_of_subclasses_of_ref(),
"Out of bounds index");
int j = i / _max_num_q;


@ -1790,17 +1790,10 @@ int ConstantPool::copy_cpool_bytes(int cpool_size,
void ConstantPool::set_on_stack(const bool value) {
if (value) {
int old_flags = *const_cast<volatile int *>(&_flags);
while ((old_flags & _on_stack) == 0) {
int new_flags = old_flags | _on_stack;
int result = Atomic::cmpxchg(new_flags, &_flags, old_flags);
if (result == old_flags) {
// Succeeded.
MetadataOnStackMark::record(this, Thread::current());
return;
}
old_flags = result;
// Only record if it's not already set.
if (!on_stack()) {
_flags |= _on_stack;
MetadataOnStackMark::record(this);
}
} else {
// Clearing is done single-threadedly.

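The deleted CAS loop was only needed while several threads could race to set the on-stack bit; recording now happens from one thread at a safepoint, so a plain test-then-set is enough. A sketch of both variants, with invented names and std::atomic standing in for Atomic::cmpxchg:

#include <atomic>

static const int ON_STACK = 1 << 3;

// Racy-environment variant: CAS until we either set the bit or observe it
// already set; returns true only for the thread that actually set it, so the
// metadata is recorded exactly once.
bool set_on_stack_atomic(std::atomic<int>& flags) {
  int old_flags = flags.load();
  while ((old_flags & ON_STACK) == 0) {
    if (flags.compare_exchange_weak(old_flags, old_flags | ON_STACK)) {
      return true;  // we set it; caller records the metadata
    }
    // on failure, old_flags was refreshed; the loop re-checks the bit
  }
  return false;
}

// Single-writer variant (e.g. inside a safepoint): no atomics needed.
bool set_on_stack_plain(int& flags) {
  if ((flags & ON_STACK) == 0) {
    flags |= ON_STACK;
    return true;
  }
  return false;
}

int main() {
  std::atomic<int> f{0};
  bool first = set_on_stack_atomic(f);
  bool second = set_on_stack_atomic(f);
  return (first && !second) ? 0 : 1;
}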

@ -3492,9 +3492,11 @@ void InstanceKlass::set_init_state(ClassState state) {
#endif
// RedefineClasses() support for previous versions:
// Purge previous versions
// RedefineClasses() support for previous versions:
int InstanceKlass::_previous_version_count = 0;
// Purge previous versions before adding new previous versions of the class.
void InstanceKlass::purge_previous_versions(InstanceKlass* ik) {
if (ik->previous_versions() != NULL) {
// This klass has previous versions so see what we can cleanup
@ -3524,6 +3526,11 @@ void InstanceKlass::purge_previous_versions(InstanceKlass* ik) {
// are executing. Unlink this previous_version.
// The previous version InstanceKlass is on the ClassLoaderData deallocate list
// so will be deallocated during the next phase of class unloading.
RC_TRACE(0x00000200, ("purge: previous version " INTPTR_FORMAT " is dead",
pv_node));
// For debugging purposes.
pv_node->set_is_scratch_class();
pv_node->class_loader_data()->add_to_deallocate_list(pv_node);
pv_node = pv_node->previous_versions();
last->link_previous_versions(pv_node);
deleted_count++;
@ -3537,7 +3544,7 @@ void InstanceKlass::purge_previous_versions(InstanceKlass* ik) {
live_count++;
}
// At least one method is live in this previous version so clean its MethodData.
// At least one method is live in this previous version.
// Reset dead EMCP methods not to get breakpoints.
// All methods are deallocated when all of the methods for this class are no
// longer running.
@ -3561,12 +3568,6 @@ void InstanceKlass::purge_previous_versions(InstanceKlass* ik) {
("purge: %s(%s): prev method @%d in version @%d is alive",
method->name()->as_C_string(),
method->signature()->as_C_string(), j, version));
#ifdef ASSERT
if (method->method_data() != NULL) {
// Verify MethodData for running methods don't refer to old methods.
method->method_data()->verify_clean_weak_method_links();
}
#endif // ASSERT
}
}
}
@ -3579,18 +3580,6 @@ void InstanceKlass::purge_previous_versions(InstanceKlass* ik) {
("purge: previous version stats: live=%d, deleted=%d", live_count,
deleted_count));
}
#ifdef ASSERT
// Verify clean MethodData for this class's methods, e.g. they don't refer to
// old methods that are no longer running.
Array<Method*>* methods = ik->methods();
int num_methods = methods->length();
for (int index = 0; index < num_methods; ++index) {
if (methods->at(index)->method_data() != NULL) {
methods->at(index)->method_data()->verify_clean_weak_method_links();
}
}
#endif // ASSERT
}
void InstanceKlass::mark_newly_obsolete_methods(Array<Method*>* old_methods,
@ -3677,6 +3666,11 @@ void InstanceKlass::add_previous_version(instanceKlassHandle scratch_class,
ConstantPool* cp_ref = scratch_class->constants();
if (!cp_ref->on_stack()) {
RC_TRACE(0x00000400, ("add: scratch class not added; no methods are running"));
// For debugging purposes.
scratch_class->set_is_scratch_class();
scratch_class->class_loader_data()->add_to_deallocate_list(scratch_class());
// Update count for class unloading.
_previous_version_count--;
return;
}
@ -3688,8 +3682,8 @@ void InstanceKlass::add_previous_version(instanceKlassHandle scratch_class,
// if EMCP method (not obsolete) is on the stack, mark as EMCP so that
// we can add breakpoints for it.
// We set the method->on_stack bit during safepoints for class redefinition and
// class unloading and use this bit to set the is_running_emcp bit.
// We set the method->on_stack bit during safepoints for class redefinition
// and use this bit to set the is_running_emcp bit.
// After the safepoint, the on_stack bit is cleared and the running emcp
// method may exit. If so, we would set a breakpoint in a method that
// is never reached, but this won't be noticeable to the programmer.
@ -3708,6 +3702,8 @@ void InstanceKlass::add_previous_version(instanceKlassHandle scratch_class,
assert(scratch_class->previous_versions() == NULL, "shouldn't have a previous version");
scratch_class->link_previous_versions(previous_versions());
link_previous_versions(scratch_class());
// Update count for class unloading.
_previous_version_count++;
} // end add_previous_version()

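purge_previous_versions above walks the singly linked list of previous class versions and unlinks nodes whose constant pool is no longer on any thread's stack, while the new _previous_version_count lets has_previous_versions() answer without walking anything. A self-contained sketch of the unlink-while-walking part (toy struct, not InstanceKlass):

struct Version {
  bool on_stack;  // stands in for cp_ref->on_stack()
  Version* prev;  // next older version in the chain
};

// Unlink dead versions behind 'head'; returns how many were removed.
int purge_previous_versions(Version* head) {
  int deleted = 0;
  Version* last = head;
  Version* pv = head->prev;
  while (pv != nullptr) {
    Version* next = pv->prev;
    if (!pv->on_stack) {
      last->prev = next;  // unlink; the real code queues pv for deallocation
      ++deleted;
    } else {
      last = pv;          // at least one method still running; keep it
    }
    pv = next;
  }
  return deleted;
}

int main() {
  Version v3{false, nullptr};
  Version v2{true, &v3};
  Version v1{false, &v2};
  Version head{true, &v1};  // the current class, always live
  int removed = purge_previous_versions(&head);
  return (removed == 2 && head.prev == &v2 && v2.prev == nullptr) ? 0 : 1;
}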

@ -206,7 +206,8 @@ class InstanceKlass: public Klass {
_misc_is_contended = 1 << 4, // marked with contended annotation
_misc_has_default_methods = 1 << 5, // class/superclass/implemented interfaces has default methods
_misc_declares_default_methods = 1 << 6, // directly declares default methods (any access)
_misc_has_been_redefined = 1 << 7 // class has been redefined
_misc_has_been_redefined = 1 << 7, // class has been redefined
_misc_is_scratch_class = 1 << 8 // class is the redefined scratch class
};
u2 _misc_flags;
u2 _minor_version; // minor version number of class file
@ -626,11 +627,23 @@ class InstanceKlass: public Klass {
_misc_flags |= _misc_has_been_redefined;
}
bool is_scratch_class() const {
return (_misc_flags & _misc_is_scratch_class) != 0;
}
void set_is_scratch_class() {
_misc_flags |= _misc_is_scratch_class;
}
void init_previous_versions() {
_previous_versions = NULL;
}
private:
static int _previous_version_count;
public:
static void purge_previous_versions(InstanceKlass* ik);
static bool has_previous_versions() { return _previous_version_count > 0; }
// JVMTI: Support for caching a class file before it is modified by an agent that can do retransformation
void set_cached_class_file(JvmtiCachedClassFileData *data) {


@ -1970,9 +1970,10 @@ void Method::set_on_stack(const bool value) {
// on stack means some method referring to it is also on the stack.
constants()->set_on_stack(value);
bool succeeded = _access_flags.set_on_stack(value);
if (value && succeeded) {
MetadataOnStackMark::record(this, Thread::current());
bool already_set = on_stack();
_access_flags.set_on_stack(value);
if (value && !already_set) {
MetadataOnStackMark::record(this);
}
}


@ -33,6 +33,7 @@
#include "opto/callGenerator.hpp"
#include "opto/parse.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/events.hpp"
//=============================================================================
//------------------------------InlineTree-------------------------------------
@ -490,7 +491,7 @@ const char* InlineTree::check_can_parse(ciMethod* callee) {
//------------------------------print_inlining---------------------------------
void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci,
bool success) const {
ciMethod* caller_method, bool success) const {
const char* inline_msg = msg();
assert(inline_msg != NULL, "just checking");
if (C->log() != NULL) {
@ -509,6 +510,18 @@ void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci,
//tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count());
}
}
#if INCLUDE_TRACE
EventCompilerInlining event;
if (event.should_commit()) {
event.set_compileID(C->compile_id());
event.set_message(inline_msg);
event.set_succeeded(success);
event.set_bci(caller_bci);
event.set_caller(caller_method->get_Method());
event.set_callee(callee_method->to_trace_struct());
event.commit();
}
#endif // INCLUDE_TRACE
}
//------------------------------ok_to_inline-----------------------------------
@ -531,14 +544,14 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
// Do some initial checks.
if (!pass_initial_checks(caller_method, caller_bci, callee_method)) {
set_msg("failed initial checks");
print_inlining(callee_method, caller_bci, false /* !success */);
print_inlining(callee_method, caller_bci, caller_method, false /* !success */);
return NULL;
}
// Do some parse checks.
set_msg(check_can_parse(callee_method));
if (msg() != NULL) {
print_inlining(callee_method, caller_bci, false /* !success */);
print_inlining(callee_method, caller_bci, caller_method, false /* !success */);
return NULL;
}
@ -580,10 +593,11 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
if (msg() == NULL) {
set_msg("inline (hot)");
}
print_inlining(callee_method, caller_bci, true /* success */);
print_inlining(callee_method, caller_bci, caller_method, true /* success */);
build_inline_tree_for_callee(callee_method, jvms, caller_bci);
if (InlineWarmCalls && !wci.is_hot())
if (InlineWarmCalls && !wci.is_hot()) {
return new (C) WarmCallInfo(wci); // copy to heap
}
return WarmCallInfo::always_hot();
}
@ -591,7 +605,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
if (msg() == NULL) {
set_msg("too cold to inline");
}
print_inlining(callee_method, caller_bci, false /* !success */ );
print_inlining(callee_method, caller_bci, caller_method, false /* !success */ );
return NULL;
}

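The new trace code in print_inlining follows the usual event idiom: create the event, skip all field setup if the recorder will not commit it, otherwise fill the fields and commit. A toy rendering of that shape; the real EventCompilerInlining fields match the trace.xml hunk further down:

#include <cstdio>

// Toy event type modeling the should_commit()/set_*/commit() idiom.
struct ToyInliningEvent {
  int compile_id = 0;
  const char* message = "";
  bool succeeded = false;
  static bool recording_enabled;
  bool should_commit() const { return recording_enabled; }
  void commit() const {
    std::printf("inline: id=%d ok=%d msg=%s\n",
                compile_id, (int)succeeded, message);
  }
};
bool ToyInliningEvent::recording_enabled = true;

void report_inline(int id, const char* msg, bool ok) {
  ToyInliningEvent event;
  if (event.should_commit()) {  // cheap check before doing any field work
    event.compile_id = id;
    event.message = msg;
    event.succeeded = ok;
    event.commit();
  }
}

int main() { report_inline(42, "inline (hot)", true); return 0; }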

@ -602,7 +602,7 @@ void PhaseChaitin::Register_Allocate() {
// This frame must preserve the required fp alignment
_framesize = round_to(_framesize, Matcher::stack_alignment_in_slots());
assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" );
assert(_framesize <= 1000000, "sanity check");
#ifndef PRODUCT
_total_framesize += _framesize;
if ((int)_framesize > _max_framesize) {


@ -3027,12 +3027,7 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
// We may not have profiling here or it may not help us. If we have
// a speculative type use it to perform an exact cast.
ciKlass* spec_obj_type = obj_type->speculative_type();
if (spec_obj_type != NULL ||
(data != NULL &&
// Counter has never been decremented (due to cast failure).
// ...This is a reasonable thing to expect. It is true of
// all casts inserted by javac to implement generic types.
data->as_CounterData()->count() >= 0)) {
if (spec_obj_type != NULL || data != NULL) {
cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
if (cast_obj != NULL) {
if (failure_control != NULL) // failure is now impossible


@ -87,7 +87,7 @@ protected:
JVMState* jvms,
WarmCallInfo* wci_result);
void print_inlining(ciMethod* callee_method, int caller_bci,
bool success) const;
ciMethod* caller_method, bool success) const;
InlineTree* caller_tree() const { return _caller_tree; }
InlineTree* callee_at(int bci, ciMethod* m) const;


@ -142,14 +142,11 @@ void VM_RedefineClasses::doit() {
for (int i = 0; i < _class_count; i++) {
redefine_single_class(_class_defs[i].klass, _scratch_classes[i], thread);
ClassLoaderData* cld = _scratch_classes[i]->class_loader_data();
// Free the memory for this class at class unloading time. Not before
// because CMS might think this is still live.
cld->add_to_deallocate_list((InstanceKlass*)_scratch_classes[i]);
_scratch_classes[i] = NULL;
}
// Clean out MethodData pointing to old Method*
// Have to do this after all classes are redefined and all methods that
// are redefined are marked as old.
MethodDataCleaner clean_weak_method_links;
ClassLoaderDataGraph::classes_do(&clean_weak_method_links);
@ -2902,18 +2899,13 @@ void VM_RedefineClasses::rewrite_cp_refs_in_stack_map_table(
// }
assert(stackmap_p + 1 <= stackmap_end, "no room for frame_type");
// The Linux compiler does not like frame_type to be u1 or u2. It
// issues the following warning for the first if-statement below:
//
// "warning: comparison is always true due to limited range of data type"
//
u4 frame_type = *stackmap_p;
u1 frame_type = *stackmap_p;
stackmap_p++;
// same_frame {
// u1 frame_type = SAME; /* 0-63 */
// }
if (frame_type >= 0 && frame_type <= 63) {
if (frame_type <= 63) {
// nothing more to do for same_frame
}


@ -364,10 +364,6 @@ WB_ENTRY(void, WB_NMTReleaseMemory(JNIEnv* env, jobject o, jlong addr, jlong siz
os::release_memory((char *)(uintptr_t)addr, size);
WB_END
WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
return MemTracker::tracking_level() == NMT_detail;
WB_END
WB_ENTRY(jboolean, WB_NMTChangeTrackingLevel(JNIEnv* env))
// Test that we can downgrade NMT levels but not upgrade them.
if (MemTracker::tracking_level() == NMT_off) {
@ -1321,7 +1317,6 @@ static JNINativeMethod methods[] = {
{CC"NMTCommitMemory", CC"(JJ)V", (void*)&WB_NMTCommitMemory },
{CC"NMTUncommitMemory", CC"(JJ)V", (void*)&WB_NMTUncommitMemory },
{CC"NMTReleaseMemory", CC"(JJ)V", (void*)&WB_NMTReleaseMemory },
{CC"NMTIsDetailSupported",CC"()Z", (void*)&WB_NMTIsDetailSupported},
{CC"NMTChangeTrackingLevel", CC"()Z", (void*)&WB_NMTChangeTrackingLevel},
{CC"NMTGetHashSize", CC"()I", (void*)&WB_NMTGetHashSize },
#endif // INCLUDE_NMT


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -194,7 +194,6 @@ class frame VALUE_OBJ_CLASS_SPEC {
public:
// Link (i.e., the pointer to the previous frame)
intptr_t* link() const;
void set_link(intptr_t* addr);
// Return address
address sender_pc() const;


@ -921,6 +921,9 @@ class CommandLineFlags {
"If > 0, provokes an error inside VM error handler (a secondary " \
"crash). see test_error_handler() in debug.cpp.") \
\
notproduct(bool, TestSafeFetchInErrorHandler, false, \
"If true, tests SafeFetch inside error handler.") \
\
develop(bool, Verbose, false, \
"Print additional debugging information from other modes") \
\


@ -210,8 +210,36 @@ static void test_arraycopy_func(address func, int alignment) {
assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
}
}
#endif
// simple test for SafeFetch32
static void test_safefetch32() {
int dummy = 17;
int* const p_invalid = (int*) get_segfault_address();
int* const p_valid = &dummy;
int result_invalid = SafeFetch32(p_invalid, 0xABC);
assert(result_invalid == 0xABC, "SafeFetch32 error");
int result_valid = SafeFetch32(p_valid, 0xABC);
assert(result_valid == 17, "SafeFetch32 error");
}
// simple test for SafeFetchN
static void test_safefetchN() {
#ifdef _LP64
const intptr_t v1 = UCONST64(0xABCD00000000ABCD);
const intptr_t v2 = UCONST64(0xDEFD00000000DEFD);
#else
const intptr_t v1 = 0xABCDABCD;
const intptr_t v2 = 0xDEFDDEFD;
#endif
intptr_t dummy = v1;
intptr_t* const p_invalid = (intptr_t*) get_segfault_address();
intptr_t* const p_valid = &dummy;
intptr_t result_invalid = SafeFetchN(p_invalid, v2);
assert(result_invalid == v2, "SafeFetchN error");
intptr_t result_valid = SafeFetchN(p_valid, v2);
assert(result_valid == v1, "SafeFetchN error");
}
#endif
void StubRoutines::initialize2() {
if (_code2 == NULL) {
@ -300,6 +328,13 @@ void StubRoutines::initialize2() {
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_conjoint_words), sizeof(jlong));
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_disjoint_words), sizeof(jlong));
// test safefetch routines
// Not on Windows 32bit until 8074860 is fixed
#if ! (defined(_WIN32) && defined(_M_IX86))
test_safefetch32();
test_safefetchN();
#endif
#endif
}


@ -448,4 +448,9 @@ inline intptr_t SafeFetchN(intptr_t* adr, intptr_t errValue) {
return StubRoutines::SafeFetchN_stub()(adr, errValue);
}
// returns true if SafeFetch32 and SafeFetchN can be used safely (stubroutines are already generated)
inline bool CanUseSafeFetch32() { return StubRoutines::SafeFetch32_stub() ? true : false; }
inline bool CanUseSafeFetchN() { return StubRoutines::SafeFetchN_stub() ? true : false; }
#endif // SHARE_VM_RUNTIME_STUBROUTINES_HPP

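CanUseSafeFetch32 simply checks that the stub has been generated. To make the semantics concrete, here is a minimal POSIX sketch of a SafeFetch32-like guarded read; HotSpot implements this with a generated stub and its signal handler, not with sigsetjmp, so treat this purely as an illustration:

#include <csetjmp>
#include <csignal>

static sigjmp_buf g_env;
static void on_segv(int) { siglongjmp(g_env, 1); }

// Toy analogue of SafeFetch32: return *adr, or err_value if the read faults.
static int safe_fetch32(const int* adr, int err_value) {
  struct sigaction sa = {}, old_sa = {};
  sa.sa_handler = on_segv;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_NODEFER;         // allow re-entry for later faults
  sigaction(SIGSEGV, &sa, &old_sa);
  volatile int result = err_value;  // volatile: must survive the longjmp
  if (sigsetjmp(g_env, 1) == 0) {
    result = *adr;                  // may fault; the handler jumps back
  }
  sigaction(SIGSEGV, &old_sa, nullptr);
  return result;
}

int main() {
  int ok = 17;
  if (safe_fetch32(&ok, -1) != 17) return 1;
  // Dereferencing a bad address is undefined behavior in portable C++; it is
  // exactly the case the guarded fetch exists for, and faults on typical OSes.
  const int* bad = reinterpret_cast<const int*>(16);
  return safe_fetch32(bad, -1) == -1 ? 0 : 1;
}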

@ -203,8 +203,6 @@ Thread::Thread() {
// This initial value ==> never claimed.
_oops_do_parity = 0;
_metadata_on_stack_buffer = NULL;
// the handle mark links itself to last_handle_mark
new HandleMark(this);
@ -776,7 +774,8 @@ void Thread::nmethods_do(CodeBlobClosure* cf) {
// no nmethods in a generic thread...
}
void Thread::metadata_do(void f(Metadata*)) {
void Thread::metadata_handles_do(void f(Metadata*)) {
// Only walk the Handles in Thread.
if (metadata_handles() != NULL) {
for (int i = 0; i< metadata_handles()->length(); i++) {
f(metadata_handles()->at(i));
@ -2713,7 +2712,6 @@ void JavaThread::nmethods_do(CodeBlobClosure* cf) {
}
void JavaThread::metadata_do(void f(Metadata*)) {
Thread::metadata_do(f);
if (has_last_Java_frame()) {
// Traverse the execution stack to call f() on the methods in the stack
for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
@ -4104,6 +4102,21 @@ void Threads::metadata_do(void f(Metadata*)) {
}
}
class ThreadHandlesClosure : public ThreadClosure {
void (*_f)(Metadata*);
public:
ThreadHandlesClosure(void f(Metadata*)) : _f(f) {}
virtual void do_thread(Thread* thread) {
thread->metadata_handles_do(_f);
}
};
void Threads::metadata_handles_do(void f(Metadata*)) {
// Only walk the Handles in Thread.
ThreadHandlesClosure handles_closure(f);
threads_do(&handles_closure);
}
void Threads::deoptimized_wrt_marked_nmethods() {
ALL_JAVA_THREADS(p) {
p->deoptimized_wrt_marked_nmethods();


@ -255,9 +255,6 @@ class Thread: public ThreadShadow {
jlong _allocated_bytes; // Cumulative number of bytes allocated on
// the Java heap
// Thread-local buffer used by MetadataOnStackMark.
MetadataOnStackBuffer* _metadata_on_stack_buffer;
TRACE_DATA _trace_data; // Thread-local data for tracing
ThreadExt _ext;
@ -478,7 +475,7 @@ class Thread: public ThreadShadow {
void nmethods_do(CodeBlobClosure* cf);
// jvmtiRedefineClasses support
void metadata_do(void f(Metadata*));
void metadata_handles_do(void f(Metadata*));
// Used by fast lock support
virtual bool is_lock_owned(address adr) const;
@ -494,9 +491,6 @@ class Thread: public ThreadShadow {
// creation fails due to lack of memory, too many threads etc.
bool set_as_starting_thread();
void set_metadata_on_stack_buffer(MetadataOnStackBuffer* buffer) { _metadata_on_stack_buffer = buffer; }
MetadataOnStackBuffer* metadata_on_stack_buffer() const { return _metadata_on_stack_buffer; }
protected:
// OS data associated with the thread
OSThread* _osthread; // Platform-specific thread information
@ -1915,6 +1909,7 @@ class Threads: AllStatic {
// RedefineClasses support
static void metadata_do(void f(Metadata*));
static void metadata_handles_do(void f(Metadata*));
#ifdef ASSERT
static bool is_vm_complete() { return _vm_complete; }


@ -519,12 +519,13 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
// Calc address range within we try to attach (range of possible start addresses).
char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
// SS10 and SS12u1 cannot compile "(char *)UnscaledOopHeapMax - size" on solaris sparc 32-bit:
// "Cannot use int to initialize char*." Introduce aux variable.
char *unscaled_end = (char *)UnscaledOopHeapMax;
unscaled_end -= size;
char *lowest_start = (size < UnscaledOopHeapMax) ?
MAX2(unscaled_end, aligned_heap_base_min_address) : aligned_heap_base_min_address;
// Need to be careful about size being guaranteed to be less
// than UnscaledOopHeapMax due to type constraints.
char *lowest_start = aligned_heap_base_min_address;
uint64_t unscaled_end = UnscaledOopHeapMax - size;
if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
lowest_start = MAX2(lowest_start, (char*)unscaled_end);
}
lowest_start = (char *)align_ptr_up(lowest_start, attach_point_alignment);
try_reserve_range(highest_start, lowest_start, attach_point_alignment,
aligned_heap_base_min_address, zerobased_max, size, alignment, large);

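The rewritten lowest_start computation replaces a 32-bit compiler workaround with an explicit wrap check: UnscaledOopHeapMax - size is an unsigned subtraction, so if size is at least UnscaledOopHeapMax the result wraps high and the < test rejects it. A compact standalone version of that guard:

#include <cstdint>

// Sketch of the lowest_start logic above: (unscaled_max - size) may wrap,
// and (unscaled_end < unscaled_max) filters the wrapped case out.
uint64_t lowest_start(uint64_t base_min, uint64_t unscaled_max, uint64_t size) {
  uint64_t lowest = base_min;
  uint64_t unscaled_end = unscaled_max - size;  // wraps when size > unscaled_max
  if (unscaled_end < unscaled_max) {            // false if wrapped (or size == 0)
    lowest = unscaled_end > lowest ? unscaled_end : lowest;
  }
  return lowest;
}

int main() {
  const uint64_t max = 1ull << 32, base = 1ull << 20;
  if (lowest_start(base, max, 1ull << 16) != max - (1ull << 16)) return 1;
  if (lowest_start(base, max, max + 1) != base) return 2;  // wrapped case
  return 0;
}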

@ -2584,7 +2584,6 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
/**********************/ \
/* frame */ \
/**********************/ \
NOT_ZERO(PPC64_ONLY(declare_constant(frame::abi_minframe_size))) \
NOT_ZERO(PPC64_ONLY(declare_constant(frame::entry_frame_locals_size))) \
\
NOT_ZERO(X86_ONLY(declare_constant(frame::entry_frame_call_wrapper_offset))) \


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,11 +53,7 @@ NMT_TrackingLevel MemTracker::init_tracking_level() {
if (strcmp(nmt_option, "summary") == 0) {
level = NMT_summary;
} else if (strcmp(nmt_option, "detail") == 0) {
#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
level = NMT_detail;
#else
level = NMT_summary;
#endif // PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
} else if (strcmp(nmt_option, "off") != 0) {
// The option value is invalid
_is_nmt_env_valid = false;
@ -95,17 +91,9 @@ void MemTracker::init() {
bool MemTracker::check_launcher_nmt_support(const char* value) {
if (strcmp(value, "=detail") == 0) {
#if !PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
jio_fprintf(defaultStream::error_stream(),
"NMT detail is not supported on this platform. Using NMT summary instead.\n");
if (MemTracker::tracking_level() != NMT_summary) {
return false;
}
#else
if (MemTracker::tracking_level() != NMT_detail) {
return false;
}
#endif
} else if (strcmp(value, "=summary") == 0) {
if (MemTracker::tracking_level() != NMT_summary) {
return false;


@ -400,6 +400,22 @@ Declares a structure type that can be used in other events.
<value type="UINT" field="compileID" label="Compilation ID" relation="COMP_ID"/>
</event>
<struct id="CiMethod">
<value type="UTF8" field="class" label="Class name"/>
<value type="UTF8" field="name" label="Method name"/>
<value type="UTF8" field="signature" label="Method signature"/>
</struct>
<event id="CompilerInlining" path="vm/compiler/optimization/inlining" label="Method Inlining"
has_thread="true" is_instant="true">
<value type="UINT" field="compileID" label="Compilation ID" relation="COMP_ID"/>
<value type="METHOD" field="caller" label="Caller Method"/>
<structvalue type="CiMethod" field="callee" label="Callee Method"/>
<value type="BOOLEAN" field="succeeded" label="Succeeded"/>
<value type="UTF8" field="message" label="Message"/>
<value type="INTEGER" field="bci" label="Byte Code Index"/>
</event>
<!-- Code sweeper events -->
<event id="SweepCodeCache" path="vm/code_sweeper/sweep" label="Sweep Code Cache"


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,20 +47,6 @@ void AccessFlags::atomic_clear_bits(jint bits) {
} while(f != old_flags);
}
// Returns true iff this thread succeeded setting the bit.
bool AccessFlags::atomic_set_one_bit(jint bit) {
// Atomically update the flags with the bit given
jint old_flags, new_flags, f;
bool is_setting_bit = false;
do {
old_flags = _flags;
new_flags = old_flags | bit;
is_setting_bit = old_flags != new_flags;
f = Atomic::cmpxchg(new_flags, &_flags, old_flags);
} while(f != old_flags);
return is_setting_bit;
}
#if !defined(PRODUCT) || INCLUDE_JVMTI


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -172,7 +172,6 @@ class AccessFlags VALUE_OBJ_CLASS_SPEC {
// Atomic update of flags
void atomic_set_bits(jint bits);
bool atomic_set_one_bit(jint bit);
void atomic_clear_bits(jint bits);
private:
@ -234,13 +233,12 @@ class AccessFlags VALUE_OBJ_CLASS_SPEC {
atomic_set_bits(JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE);
}
bool set_on_stack(const bool value)
void set_on_stack(const bool value)
{
if (value) {
return atomic_set_one_bit(JVM_ACC_ON_STACK);
atomic_set_bits(JVM_ACC_ON_STACK);
} else {
atomic_clear_bits(JVM_ACC_ON_STACK);
return true; // Ignored
}
}
// Conversion


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,7 +33,6 @@ BitMap::BitMap(bm_word_t* map, idx_t size_in_bits) :
_map(map), _size(size_in_bits), _map_allocator(false)
{
assert(sizeof(bm_word_t) == BytesPerWord, "Implementation assumption.");
assert(size_in_bits >= 0, "just checking");
}
@ -45,7 +44,6 @@ BitMap::BitMap(idx_t size_in_bits, bool in_resource_area) :
}
void BitMap::resize(idx_t size_in_bits, bool in_resource_area) {
assert(size_in_bits >= 0, "just checking");
idx_t old_size_in_words = size_in_words();
bm_word_t* old_map = map();


@ -446,15 +446,6 @@ enum RTMState {
# include "globalDefinitions_aarch64.hpp"
#endif
/*
* If a platform does not support native stack walking
* the platform specific globalDefinitions (above)
* can set PLATFORM_NATIVE_STACK_WALKING_SUPPORTED to 0
*/
#ifndef PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
#define PLATFORM_NATIVE_STACK_WALKING_SUPPORTED 1
#endif
// To assure the IRIW property on processors that are not multiple copy
// atomic, sync instructions must be issued between volatile reads to
// assure their ordering, instead of after volatile stores.


@ -44,14 +44,6 @@
#endif // SOLARIS
#include <math.h>
#ifndef FP_PZERO
// Linux doesn't have positive/negative zero
#define FP_PZERO FP_ZERO
#endif
#if (!defined fpclass) && ((!defined SPARC) || (!defined SOLARIS))
#define fpclass fpclassify
#endif
#include <time.h>
#include <fcntl.h>
#include <dlfcn.h>

Some files were not shown because too many files have changed in this diff.