commit fd974c3567
Phil Race, 2015-06-16 13:00:37 -07:00
345 changed files with 11854 additions and 5674 deletions

View File

@@ -310,3 +310,4 @@ e7dbbef69d12b6a74dfad331b7188e7f893e8d29 jdk9-b62
 4915246064b2f89d5f00c96e758686b7fdad36a6 jdk9-b65
 ff3fc75f3214ad7e03595be1b0d0f38d887b6f0e jdk9-b66
 56166ce66037952fa21e9f680b31bf8eb47312c0 jdk9-b67
+5b500c93ce4822d47061cd518ff3f72d9d8cb5b5 jdk9-b68

View File

@@ -310,3 +310,4 @@ ea38728b4f4bdd8fd0d7a89b18069f521cf05013 jdk9-b61
 7c31f9d7b932f7924f1258d52885b1c7c3e078c2 jdk9-b65
 dc6e8336f51bb6b67b7245766179eab5ca7720b4 jdk9-b66
 f546760134eb861fcfecd4ce611b0040b0d25a6a jdk9-b67
+70e4272790b6199e9ca89df2758ff9cb58ec4125 jdk9-b68

View File

@@ -310,3 +310,4 @@ d27f7e0a7aca129969de23e9934408a31b4abf4c jdk9-b62
 afc1e295c4bf83f9a5dd539c29914edd4a754a3f jdk9-b65
 44ee68f7dbacab24a45115fd6a8ccdc7eb6e8f0b jdk9-b66
 4418697e56f1f43597f55c7cb6573549c6117868 jdk9-b67
+8efad64f40eb8cd4df376c0a5275892eeb396bbd jdk9-b68

View File

@@ -0,0 +1,97 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.omg.CORBA;
/**
* This Helper class is used to facilitate the marshalling of <tt>Bounds</tt>.
* For more information on Helper files, see
* <a href="doc-files/generatedfiles.html#helper">
* "Generated Files: Helper Files"</a>.<P>
*/
abstract public class BoundsHelper
{
private static String _id = "IDL:omg.org/CORBA/Bounds:1.0";
public static void insert (org.omg.CORBA.Any a, org.omg.CORBA.Bounds that)
{
org.omg.CORBA.portable.OutputStream out = a.create_output_stream ();
a.type (type ());
write (out, that);
a.read_value (out.create_input_stream (), type ());
}
public static org.omg.CORBA.Bounds extract (org.omg.CORBA.Any a)
{
return read (a.create_input_stream ());
}
private static org.omg.CORBA.TypeCode __typeCode = null;
private static boolean __active = false;
synchronized public static org.omg.CORBA.TypeCode type ()
{
if (__typeCode == null)
{
synchronized (org.omg.CORBA.TypeCode.class)
{
if (__typeCode == null)
{
if (__active)
{
return org.omg.CORBA.ORB.init().create_recursive_tc ( _id );
}
__active = true;
org.omg.CORBA.StructMember[] _members0 = new org.omg.CORBA.StructMember [0];
org.omg.CORBA.TypeCode _tcOf_members0 = null;
__typeCode = org.omg.CORBA.ORB.init ().create_exception_tc (org.omg.CORBA.BoundsHelper.id (), "Bounds", _members0);
__active = false;
}
}
}
return __typeCode;
}
public static String id ()
{
return _id;
}
public static org.omg.CORBA.Bounds read (org.omg.CORBA.portable.InputStream istream)
{
org.omg.CORBA.Bounds value = new org.omg.CORBA.Bounds ();
// read and discard the repository ID
istream.read_string ();
return value;
}
public static void write (org.omg.CORBA.portable.OutputStream ostream, org.omg.CORBA.Bounds value)
{
// write the repository ID
ostream.write_string (id ());
}
}
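For readers unfamiliar with generated CORBA Helpers, the sketch below shows the round trip this class enables: insert() wraps a Bounds exception into an org.omg.CORBA.Any together with its TypeCode, and extract() reads it back. It is an illustration against the API shown above, not part of this change; the class name and the no-argument ORB bootstrap are assumptions made only for the example.

import org.omg.CORBA.Any;
import org.omg.CORBA.Bounds;
import org.omg.CORBA.BoundsHelper;
import org.omg.CORBA.ORB;

public class BoundsHelperDemo {
    public static void main(String[] args) {
        // Assumption: a default ORB is available; real deployments pass ORB properties here.
        ORB orb = ORB.init(args, null);

        // Wrap a Bounds user exception into an Any via the generated Helper.
        Any any = orb.create_any();
        BoundsHelper.insert(any, new Bounds());

        // The Helper publishes the repository ID it records in the TypeCode.
        System.out.println(BoundsHelper.id());    // IDL:omg.org/CORBA/Bounds:1.0

        // Read the exception back out of the Any.
        Bounds roundTripped = BoundsHelper.extract(any);
        System.out.println(roundTripped != null); // true
    }
}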

View File

@@ -0,0 +1,98 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.omg.CORBA.ORBPackage;
/**
* This Helper class is used to facilitate the marshalling of
* <tt>ORBPackage/InvalidName</tt>.
* For more information on Helper files, see
* <a href="doc-files/generatedfiles.html#helper">
* "Generated Files: Helper Files"</a>.<P>
*/
abstract public class InvalidNameHelper
{
private static String _id = "IDL:omg.org.CORBA/ORB/InvalidName:1.0";
public static void insert (org.omg.CORBA.Any a, org.omg.CORBA.ORBPackage.InvalidName that)
{
org.omg.CORBA.portable.OutputStream out = a.create_output_stream ();
a.type (type ());
write (out, that);
a.read_value (out.create_input_stream (), type ());
}
public static org.omg.CORBA.ORBPackage.InvalidName extract (org.omg.CORBA.Any a)
{
return read (a.create_input_stream ());
}
private static org.omg.CORBA.TypeCode __typeCode = null;
private static boolean __active = false;
synchronized public static org.omg.CORBA.TypeCode type ()
{
if (__typeCode == null)
{
synchronized (org.omg.CORBA.TypeCode.class)
{
if (__typeCode == null)
{
if (__active)
{
return org.omg.CORBA.ORB.init().create_recursive_tc ( _id );
}
__active = true;
org.omg.CORBA.StructMember[] _members0 = new org.omg.CORBA.StructMember [0];
org.omg.CORBA.TypeCode _tcOf_members0 = null;
__typeCode = org.omg.CORBA.ORB.init ().create_exception_tc (org.omg.CORBA.ORBPackage.InvalidNameHelper.id (), "InvalidName", _members0);
__active = false;
}
}
}
return __typeCode;
}
public static String id ()
{
return _id;
}
public static org.omg.CORBA.ORBPackage.InvalidName read (org.omg.CORBA.portable.InputStream istream)
{
org.omg.CORBA.ORBPackage.InvalidName value = new org.omg.CORBA.ORBPackage.InvalidName ();
// read and discard the repository ID
istream.read_string ();
return value;
}
public static void write (org.omg.CORBA.portable.OutputStream ostream, org.omg.CORBA.ORBPackage.InvalidName value)
{
// write the repository ID
ostream.write_string (id ());
}
}

View File

@@ -0,0 +1,98 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.omg.CORBA.TypeCodePackage;
/**
* This Helper class is used to facilitate the marshalling of
* <tt>TypeCodePackage/BadKind</tt>.
* For more information on Helper files, see
* <a href="doc-files/generatedfiles.html#helper">
* "Generated Files: Helper Files"</a>.<P>
*/
abstract public class BadKindHelper
{
private static String _id = "IDL:omg.org.CORBA/TypeCode/BadKind:1.0";
public static void insert (org.omg.CORBA.Any a, org.omg.CORBA.TypeCodePackage.BadKind that)
{
org.omg.CORBA.portable.OutputStream out = a.create_output_stream ();
a.type (type ());
write (out, that);
a.read_value (out.create_input_stream (), type ());
}
public static org.omg.CORBA.TypeCodePackage.BadKind extract (org.omg.CORBA.Any a)
{
return read (a.create_input_stream ());
}
private static org.omg.CORBA.TypeCode __typeCode = null;
private static boolean __active = false;
synchronized public static org.omg.CORBA.TypeCode type ()
{
if (__typeCode == null)
{
synchronized (org.omg.CORBA.TypeCode.class)
{
if (__typeCode == null)
{
if (__active)
{
return org.omg.CORBA.ORB.init().create_recursive_tc ( _id );
}
__active = true;
org.omg.CORBA.StructMember[] _members0 = new org.omg.CORBA.StructMember [0];
org.omg.CORBA.TypeCode _tcOf_members0 = null;
__typeCode = org.omg.CORBA.ORB.init ().create_exception_tc (org.omg.CORBA.TypeCodePackage.BadKindHelper.id (), "BadKind", _members0);
__active = false;
}
}
}
return __typeCode;
}
public static String id ()
{
return _id;
}
public static org.omg.CORBA.TypeCodePackage.BadKind read (org.omg.CORBA.portable.InputStream istream)
{
org.omg.CORBA.TypeCodePackage.BadKind value = new org.omg.CORBA.TypeCodePackage.BadKind ();
// read and discard the repository ID
istream.read_string ();
return value;
}
public static void write (org.omg.CORBA.portable.OutputStream ostream, org.omg.CORBA.TypeCodePackage.BadKind value)
{
// write the repository ID
ostream.write_string (id ());
}
}

View File

@@ -0,0 +1,98 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.omg.CORBA.TypeCodePackage;
/**
* This Helper class is used to facilitate the marshalling of
* <tt>TypeCodePackage/Bounds</tt>.
* For more information on Helper files, see
* <a href="doc-files/generatedfiles.html#helper">
* "Generated Files: Helper Files"</a>.<P>
*/
abstract public class BoundsHelper
{
private static String _id = "IDL:omg.org.CORBA/TypeCode/Bounds:1.0";
public static void insert (org.omg.CORBA.Any a, org.omg.CORBA.TypeCodePackage.Bounds that)
{
org.omg.CORBA.portable.OutputStream out = a.create_output_stream ();
a.type (type ());
write (out, that);
a.read_value (out.create_input_stream (), type ());
}
public static org.omg.CORBA.TypeCodePackage.Bounds extract (org.omg.CORBA.Any a)
{
return read (a.create_input_stream ());
}
private static org.omg.CORBA.TypeCode __typeCode = null;
private static boolean __active = false;
synchronized public static org.omg.CORBA.TypeCode type ()
{
if (__typeCode == null)
{
synchronized (org.omg.CORBA.TypeCode.class)
{
if (__typeCode == null)
{
if (__active)
{
return org.omg.CORBA.ORB.init().create_recursive_tc ( _id );
}
__active = true;
org.omg.CORBA.StructMember[] _members0 = new org.omg.CORBA.StructMember [0];
org.omg.CORBA.TypeCode _tcOf_members0 = null;
__typeCode = org.omg.CORBA.ORB.init ().create_exception_tc (org.omg.CORBA.TypeCodePackage.BoundsHelper.id (), "Bounds", _members0);
__active = false;
}
}
}
return __typeCode;
}
public static String id ()
{
return _id;
}
public static org.omg.CORBA.TypeCodePackage.Bounds read (org.omg.CORBA.portable.InputStream istream)
{
org.omg.CORBA.TypeCodePackage.Bounds value = new org.omg.CORBA.TypeCodePackage.Bounds ();
// read and discard the repository ID
istream.read_string ();
return value;
}
public static void write (org.omg.CORBA.portable.OutputStream ostream, org.omg.CORBA.TypeCodePackage.Bounds value)
{
// write the repository ID
ostream.write_string (id ());
}
}

View File

@@ -470,3 +470,4 @@ bf92b8db249cdfa5651ef954b6c0743a7e0ea4cd jdk9-b64
 e7ae94c4f35e940ea423fc1dd260435df34a77c0 jdk9-b65
 197e94e0dacddd16816f101d24fc0442ab518326 jdk9-b66
 d47dfabd16d48eb96a451edd1b61194a39ee0eb5 jdk9-b67
+11af3990d56c97b40318bc1f20608e86f051a3f7 jdk9-b68

View File

@@ -31,8 +31,8 @@ ifndef OPENJDK
 REASON = "This JDK does not support SDT probes"
 else
-# We need a recent GCC for the default
-ifeq "$(shell expr \( $(CC_VER_MAJOR) \>= 4 \) \& \( $(CC_VER_MINOR) \>= 4 \) )" "0"
+# We need a recent GCC for the default (4.4 or later)
+ifeq "$(shell expr \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 4 \) \) \| \( $(CC_VER_MAJOR) \>= 5 \) )" "0"
 REASON = "gcc version is too old"
 else
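The point of the expr change above: the old test required both CC_VER_MAJOR >= 4 and CC_VER_MINOR >= 4, so GCC 5.0 or 5.1 (minor 0 or 1) was wrongly reported as "too old"; the new test accepts 4.4 and later within the 4.x series, or any major version of 5 and above. A minimal Java sketch of the same predicate, with names chosen only for illustration and not part of the build:

final class GccVersionCheck {
    // Old check: (major >= 4) && (minor >= 4)  ... rejects 5.0, 5.1, ...
    // New check: 4.4+ within the 4.x series, or any 5.x and later.
    static boolean supportsSdtProbes(int major, int minor) {
        return (major == 4 && minor >= 4) || major >= 5;
    }

    public static void main(String[] args) {
        System.out.println(supportsSdtProbes(4, 3)); // false
        System.out.println(supportsSdtProbes(4, 4)); // true
        System.out.println(supportsSdtProbes(5, 0)); // true (the old check returned false here)
    }
}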

View File

@@ -44,6 +44,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
 $(HOTSPOT_TOPDIR)/test/native_sanity \
 $(HOTSPOT_TOPDIR)/test/runtime/jni/8025979 \
 $(HOTSPOT_TOPDIR)/test/runtime/jni/8033445 \
+$(HOTSPOT_TOPDIR)/test/runtime/jni/ToStringInInterfaceTest \
 #
 BUILD_HOTSPOT_JTREG_OUTPUT_DIR := $(BUILD_OUTPUT)/support/test/hotspot/jtreg/native

View File

@@ -3372,6 +3372,25 @@ operand immI() %{
 interface(CONST_INTER);
 %}
+// Integer Immediate: 0-bit
+operand immI0() %{
+predicate(n->get_int() == 0);
+match(ConI);
+op_cost(0);
+format %{ %}
+interface(CONST_INTER);
+%}
+// Integer Immediate: 5-bit
+operand immI5() %{
+predicate(Assembler::is_simm5(n->get_int()));
+match(ConI);
+op_cost(0);
+format %{ %}
+interface(CONST_INTER);
+%}
 // Integer Immediate: 8-bit
 operand immI8() %{
 predicate(Assembler::is_simm8(n->get_int()));
@@ -3381,6 +3400,25 @@ operand immI8() %{
 interface(CONST_INTER);
 %}
+// Integer Immediate: the value 10
+operand immI10() %{
+predicate(n->get_int() == 10);
+match(ConI);
+op_cost(0);
+format %{ %}
+interface(CONST_INTER);
+%}
+// Integer Immediate: 11-bit
+operand immI11() %{
+predicate(Assembler::is_simm11(n->get_int()));
+match(ConI);
+op_cost(0);
+format %{ %}
+interface(CONST_INTER);
+%}
 // Integer Immediate: 13-bit
 operand immI13() %{
 predicate(Assembler::is_simm13(n->get_int()));
@@ -3410,84 +3448,6 @@ operand immI16() %{
 interface(CONST_INTER);
 %}
-// Unsigned Integer Immediate: 12-bit (non-negative that fits in simm13)
-operand immU12() %{
-predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int()));
-match(ConI);
-op_cost(0);
-format %{ %}
-interface(CONST_INTER);
-%}
-// Integer Immediate: 6-bit
-operand immU6() %{
-predicate(n->get_int() >= 0 && n->get_int() <= 63);
-match(ConI);
-op_cost(0);
-format %{ %}
-interface(CONST_INTER);
-%}
-// Integer Immediate: 11-bit
-operand immI11() %{
-predicate(Assembler::is_simm11(n->get_int()));
-match(ConI);
-op_cost(0);
-format %{ %}
-interface(CONST_INTER);
-%}
-// Integer Immediate: 5-bit
-operand immI5() %{
-predicate(Assembler::is_simm5(n->get_int()));
-match(ConI);
-op_cost(0);
-format %{ %}
-interface(CONST_INTER);
-%}
-// Int Immediate non-negative
-operand immU31()
-%{
-predicate(n->get_int() >= 0);
-match(ConI);
-op_cost(0);
-format %{ %}
-interface(CONST_INTER);
-%}
-// Integer Immediate: 0-bit
-operand immI0() %{
-predicate(n->get_int() == 0);
-match(ConI);
-op_cost(0);
-format %{ %}
-interface(CONST_INTER);
-%}
-// Integer Immediate: the value 10
-operand immI10() %{
-predicate(n->get_int() == 10);
-match(ConI);
-op_cost(0);
-format %{ %}
-interface(CONST_INTER);
-%}
-// Integer Immediate: the values 0-31
-operand immU5() %{
-predicate(n->get_int() >= 0 && n->get_int() <= 31);
-match(ConI);
-op_cost(0);
-format %{ %}
-interface(CONST_INTER);
-%}
 // Integer Immediate: the values 1-31
 operand immI_1_31() %{
 predicate(n->get_int() >= 1 && n->get_int() <= 31);
@@ -3529,7 +3489,6 @@ operand immI_24() %{
 format %{ %}
 interface(CONST_INTER);
 %}
 // Integer Immediate: the value 255
 operand immI_255() %{
 predicate( n->get_int() == 255 );
@@ -3550,6 +3509,46 @@ operand immI_65535() %{
 interface(CONST_INTER);
 %}
+// Integer Immediate: the values 0-31
+operand immU5() %{
+predicate(n->get_int() >= 0 && n->get_int() <= 31);
+match(ConI);
+op_cost(0);
+format %{ %}
+interface(CONST_INTER);
+%}
+// Integer Immediate: 6-bit
+operand immU6() %{
+predicate(n->get_int() >= 0 && n->get_int() <= 63);
+match(ConI);
+op_cost(0);
+format %{ %}
+interface(CONST_INTER);
+%}
+// Unsigned Integer Immediate: 12-bit (non-negative that fits in simm13)
+operand immU12() %{
+predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int()));
+match(ConI);
+op_cost(0);
+format %{ %}
+interface(CONST_INTER);
+%}
+// Integer Immediate non-negative
+operand immU31()
+%{
+predicate(n->get_int() >= 0);
+match(ConI);
+op_cost(0);
+format %{ %}
+interface(CONST_INTER);
+%}
 // Long Immediate: the value FF
 operand immL_FF() %{
 predicate( n->get_long() == 0xFFL );
@@ -5653,17 +5652,17 @@ instruct loadUB2L(iRegL dst, memory mem) %{
 ins_pipe(iload_mem);
 %}
-// Load Unsigned Byte (8 bit UNsigned) with 8-bit mask into Long Register
-instruct loadUB2L_immI8(iRegL dst, memory mem, immI8 mask) %{
+// Load Unsigned Byte (8 bit UNsigned) with 32-bit mask into Long Register
+instruct loadUB2L_immI(iRegL dst, memory mem, immI mask) %{
 match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
 ins_cost(MEMORY_REF_COST + DEFAULT_COST);
 size(2*4);
-format %{ "LDUB $mem,$dst\t# ubyte & 8-bit mask -> long\n\t"
-"AND $dst,$mask,$dst" %}
+format %{ "LDUB $mem,$dst\t# ubyte & 32-bit mask -> long\n\t"
+"AND $dst,right_n_bits($mask, 8),$dst" %}
 ins_encode %{
 __ ldub($mem$$Address, $dst$$Register);
-__ and3($dst$$Register, $mask$$constant, $dst$$Register);
+__ and3($dst$$Register, $mask$$constant & right_n_bits(8), $dst$$Register);
 %}
 ins_pipe(iload_mem);
 %}
@@ -5776,20 +5775,20 @@ instruct loadUS2L_immI13(iRegL dst, memory mem, immI13 mask) %{
 ins_pipe(iload_mem);
 %}
-// Load Unsigned Short/Char (16bit UNsigned) with a 16-bit mask into a Long Register
-instruct loadUS2L_immI16(iRegL dst, memory mem, immI16 mask, iRegL tmp) %{
+// Load Unsigned Short/Char (16bit UNsigned) with a 32-bit mask into a Long Register
+instruct loadUS2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{
 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
 effect(TEMP dst, TEMP tmp);
 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
-format %{ "LDUH $mem,$dst\t! ushort/char & 16-bit mask -> long\n\t"
-"SET $mask,$tmp\n\t"
+format %{ "LDUH $mem,$dst\t! ushort/char & 32-bit mask -> long\n\t"
+"SET right_n_bits($mask, 16),$tmp\n\t"
 "AND $dst,$tmp,$dst" %}
 ins_encode %{
 Register Rdst = $dst$$Register;
 Register Rtmp = $tmp$$Register;
 __ lduh($mem$$Address, Rdst);
-__ set($mask$$constant, Rtmp);
+__ set($mask$$constant & right_n_bits(16), Rtmp);
 __ and3(Rdst, Rtmp, Rdst);
 %}
 ins_pipe(iload_mem);
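The rewritten instructs above (and the x86 variants in the next two files) accept any 32-bit immediate mask and truncate it to the low 8 or 16 bits with right_n_bits() before the AND; since LDUB/LDUH (and MOVZX on x86) already zero-extend the loaded value, only those low bits of the mask can affect the result. A small Java sketch of the arithmetic, assuming right_n_bits(n) is the usual (1 << n) - 1 low-order mask as defined in HotSpot's globalDefinitions:

final class RightNBitsDemo {
    // Low-order mask of n ones: 0xFF for n == 8, 0xFFFF for n == 16.
    static int rightNBits(int n) {
        return (1 << n) - 1;
    }

    public static void main(String[] args) {
        int mask = 0x12345678;   // arbitrary 32-bit immediate mask
        int ubyte = 0xAB;        // value already zero-extended by the byte load
        int ushort = 0xABCD;     // value already zero-extended by the short/char load
        // Masking with the full constant and with its truncated low bits agree.
        System.out.println((ubyte & mask) == (ubyte & (mask & rightNBits(8))));    // true
        System.out.println((ushort & mask) == (ushort & (mask & rightNBits(16)))); // true
    }
}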

View File

@@ -5431,18 +5431,18 @@ instruct loadUB2L(eRegL dst, memory mem, eFlagsReg cr) %{
 %}
 // Load Unsigned Byte (8 bit UNsigned) with mask into Long Register
-instruct loadUB2L_immI8(eRegL dst, memory mem, immI8 mask, eFlagsReg cr) %{
+instruct loadUB2L_immI(eRegL dst, memory mem, immI mask, eFlagsReg cr) %{
 match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
 effect(KILL cr);
-format %{ "MOVZX8 $dst.lo,$mem\t# ubyte & 8-bit mask -> long\n\t"
+format %{ "MOVZX8 $dst.lo,$mem\t# ubyte & 32-bit mask -> long\n\t"
 "XOR $dst.hi,$dst.hi\n\t"
-"AND $dst.lo,$mask" %}
+"AND $dst.lo,right_n_bits($mask, 8)" %}
 ins_encode %{
 Register Rdst = $dst$$Register;
 __ movzbl(Rdst, $mem$$Address);
 __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
-__ andl(Rdst, $mask$$constant);
+__ andl(Rdst, $mask$$constant & right_n_bits(8));
 %}
 ins_pipe(ialu_reg_mem);
 %}
@@ -5550,19 +5550,19 @@ instruct loadUS2L_immI_255(eRegL dst, memory mem, immI_255 mask, eFlagsReg cr) %
 ins_pipe(ialu_reg_mem);
 %}
-// Load Unsigned Short/Char (16 bit UNsigned) with a 16-bit mask into Long Register
-instruct loadUS2L_immI16(eRegL dst, memory mem, immI16 mask, eFlagsReg cr) %{
+// Load Unsigned Short/Char (16 bit UNsigned) with a 32-bit mask into Long Register
+instruct loadUS2L_immI(eRegL dst, memory mem, immI mask, eFlagsReg cr) %{
 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
 effect(KILL cr);
-format %{ "MOVZX $dst.lo, $mem\t# ushort/char & 16-bit mask -> long\n\t"
+format %{ "MOVZX $dst.lo, $mem\t# ushort/char & 32-bit mask -> long\n\t"
 "XOR $dst.hi,$dst.hi\n\t"
-"AND $dst.lo,$mask" %}
+"AND $dst.lo,right_n_bits($mask, 16)" %}
 ins_encode %{
 Register Rdst = $dst$$Register;
 __ movzwl(Rdst, $mem$$Address);
 __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
-__ andl(Rdst, $mask$$constant);
+__ andl(Rdst, $mask$$constant & right_n_bits(16));
 %}
 ins_pipe(ialu_reg_mem);
 %}

View File

@@ -4753,17 +4753,17 @@ instruct loadUB2L(rRegL dst, memory mem)
 ins_pipe(ialu_reg_mem);
 %}
-// Load Unsigned Byte (8 bit UNsigned) with a 8-bit mask into Long Register
-instruct loadUB2L_immI8(rRegL dst, memory mem, immI8 mask, rFlagsReg cr) %{
+// Load Unsigned Byte (8 bit UNsigned) with 32-bit mask into Long Register
+instruct loadUB2L_immI(rRegL dst, memory mem, immI mask, rFlagsReg cr) %{
 match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
 effect(KILL cr);
-format %{ "movzbq $dst, $mem\t# ubyte & 8-bit mask -> long\n\t"
-"andl $dst, $mask" %}
+format %{ "movzbq $dst, $mem\t# ubyte & 32-bit mask -> long\n\t"
+"andl $dst, right_n_bits($mask, 8)" %}
 ins_encode %{
 Register Rdst = $dst$$Register;
 __ movzbq(Rdst, $mem$$Address);
-__ andl(Rdst, $mask$$constant);
+__ andl(Rdst, $mask$$constant & right_n_bits(8));
 %}
 ins_pipe(ialu_reg_mem);
 %}
@@ -4863,17 +4863,17 @@ instruct loadUS2L_immI_255(rRegL dst, memory mem, immI_255 mask) %{
 ins_pipe(ialu_reg_mem);
 %}
-// Load Unsigned Short/Char (16 bit UNsigned) with mask into Long Register
-instruct loadUS2L_immI16(rRegL dst, memory mem, immI16 mask, rFlagsReg cr) %{
+// Load Unsigned Short/Char (16 bit UNsigned) with 32-bit mask into Long Register
+instruct loadUS2L_immI(rRegL dst, memory mem, immI mask, rFlagsReg cr) %{
 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
 effect(KILL cr);
-format %{ "movzwq $dst, $mem\t# ushort/char & 16-bit mask -> long\n\t"
-"andl $dst, $mask" %}
+format %{ "movzwq $dst, $mem\t# ushort/char & 32-bit mask -> long\n\t"
+"andl $dst, right_n_bits($mask, 16)" %}
 ins_encode %{
 Register Rdst = $dst$$Register;
 __ movzwq(Rdst, $mem$$Address);
-__ andl(Rdst, $mask$$constant);
+__ andl(Rdst, $mask$$constant & right_n_bits(16));
 %}
 ins_pipe(ialu_reg_mem);
 %}

View File

@@ -1267,10 +1267,6 @@ void os::shutdown() {
 // Note: os::abort() might be called very early during initialization, or
 // called from signal handler. Before adding something to os::abort(), make
 // sure it is async-safe and can handle partially initialized VM.
-void os::abort(bool dump_core) {
-abort(dump_core, NULL, NULL);
-}
 void os::abort(bool dump_core, void* siginfo, void* context) {
 os::shutdown();
 if (dump_core) {

View File

@@ -1131,10 +1131,6 @@ void os::shutdown() {
 // Note: os::abort() might be called very early during initialization, or
 // called from signal handler. Before adding something to os::abort(), make
 // sure it is async-safe and can handle partially initialized VM.
-void os::abort(bool dump_core) {
-abort(dump_core, NULL, NULL);
-}
 void os::abort(bool dump_core, void* siginfo, void* context) {
 os::shutdown();
 if (dump_core) {

View File

@@ -1478,10 +1478,6 @@ void os::shutdown() {
 // Note: os::abort() might be called very early during initialization, or
 // called from signal handler. Before adding something to os::abort(), make
 // sure it is async-safe and can handle partially initialized VM.
-void os::abort(bool dump_core) {
-abort(dump_core, NULL, NULL);
-}
 void os::abort(bool dump_core, void* siginfo, void* context) {
 os::shutdown();
 if (dump_core) {

View File

@@ -1520,10 +1520,6 @@ void os::shutdown() {
 // Note: os::abort() might be called very early during initialization, or
 // called from signal handler. Before adding something to os::abort(), make
 // sure it is async-safe and can handle partially initialized VM.
-void os::abort(bool dump_core) {
-abort(dump_core, NULL, NULL);
-}
 void os::abort(bool dump_core, void* siginfo, void* context) {
 os::shutdown();
 if (dump_core) {

View File

@@ -997,7 +997,16 @@ void os::check_dump_limit(char* buffer, size_t buffsz) {
 if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
 jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
 status = false;
-} else {
+}
+#ifndef ASSERT
+if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
+jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
+status = false;
+}
+#endif
+if (status) {
 const char* cwd = get_current_directory(NULL, 0);
 int pid = current_process_id();
 if (cwd != NULL) {
@@ -1086,10 +1095,6 @@ void os::abort(bool dump_core, void* siginfo, void* context) {
 win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
 }
-void os::abort(bool dump_core) {
-abort(dump_core, NULL, NULL);
-}
 // Die immediately, no exit hook, no abort hook, no cleanup.
 void os::die() {
 win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);

View File

@@ -49,25 +49,6 @@ ciMethodHandle* ciCallSite::get_target() const {
 return CURRENT_ENV->get_object(method_handle_oop)->as_method_handle();
 }
-// ------------------------------------------------------------------
-// ciCallSite::get_context
-//
-// Return the target MethodHandle of this CallSite.
-ciKlass* ciCallSite::get_context() {
-assert(!is_constant_call_site(), "");
-VM_ENTRY_MARK;
-oop call_site_oop = get_oop();
-InstanceKlass* ctxk = MethodHandles::get_call_site_context(call_site_oop);
-if (ctxk == NULL) {
-// The call site doesn't have a context associated. Set it to the default context.
-oop def_context_oop = java_lang_invoke_CallSite::default_context();
-java_lang_invoke_CallSite::set_context_cas(call_site_oop, def_context_oop, /*expected=*/NULL);
-ctxk = MethodHandles::get_call_site_context(call_site_oop);
-}
-return (CURRENT_ENV->get_metadata(ctxk))->as_klass();
-}
 // ------------------------------------------------------------------
 // ciCallSite::print
 //

View File

@@ -43,7 +43,6 @@ public:
 // Return the target MethodHandle of this CallSite.
 ciMethodHandle* get_target() const;
-ciKlass* get_context();
 void print();
 };

View File

@@ -709,24 +709,23 @@ Method* ciEnv::lookup_method(InstanceKlass* accessor,
 KlassHandle h_holder(THREAD, holder);
 LinkResolver::check_klass_accessability(h_accessor, h_holder, KILL_COMPILE_ON_FATAL_(NULL));
 methodHandle dest_method;
+LinkInfo link_info(h_holder, name, sig, h_accessor, /*check_access*/true);
 switch (bc) {
 case Bytecodes::_invokestatic:
 dest_method =
-LinkResolver::resolve_static_call_or_null(h_holder, name, sig, h_accessor);
+LinkResolver::resolve_static_call_or_null(link_info);
 break;
 case Bytecodes::_invokespecial:
 dest_method =
-LinkResolver::resolve_special_call_or_null(h_holder, name, sig, h_accessor);
+LinkResolver::resolve_special_call_or_null(link_info);
 break;
 case Bytecodes::_invokeinterface:
 dest_method =
-LinkResolver::linktime_resolve_interface_method_or_null(h_holder, name, sig,
-h_accessor, true);
+LinkResolver::linktime_resolve_interface_method_or_null(link_info);
 break;
 case Bytecodes::_invokevirtual:
 dest_method =
-LinkResolver::linktime_resolve_virtual_method_or_null(h_holder, name, sig,
-h_accessor, true);
+LinkResolver::linktime_resolve_virtual_method_or_null(link_info);
 break;
 default: ShouldNotReachHere();
 }

View File

@@ -352,11 +352,11 @@ bool ciField::will_link(ciInstanceKlass* accessing_klass,
 }
 }
+LinkInfo link_info(_holder->get_instanceKlass(),
+_name->get_symbol(), _signature->get_symbol(),
+accessing_klass->get_Klass());
 fieldDescriptor result;
-LinkResolver::resolve_field(result, _holder->get_instanceKlass(),
-_name->get_symbol(), _signature->get_symbol(),
-accessing_klass->get_Klass(), bc, true, false,
-KILL_COMPILE_ON_FATAL_(false));
+LinkResolver::resolve_field(result, link_info, bc, false, KILL_COMPILE_ON_FATAL_(false));
 // update the hit-cache, unless there is a problem with memory scoping:
 if (accessing_klass->is_shared() || !is_shared()) {
View File

@@ -453,8 +453,12 @@ int ciInstanceKlass::compute_nonstatic_fields() {
 if (fields == NULL) {
 // This can happen if this class (java.lang.Class) has invisible fields.
-_nonstatic_fields = super_fields;
-return super_fields->length();
+if (super_fields != NULL) {
+_nonstatic_fields = super_fields;
+return super_fields->length();
+} else {
+return 0;
+}
 }
 int flen = fields->length();

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -786,6 +786,7 @@ ciMethod* ciMethod::resolve_invoke(ciKlass* caller, ciKlass* exact_receiver, boo
 Symbol* h_name = name()->get_symbol();
 Symbol* h_signature = signature()->get_symbol();
+LinkInfo link_info(h_resolved, h_name, h_signature, caller_klass, check_access);
 methodHandle m;
 // Only do exact lookup if receiver klass has been linked. Otherwise,
 // the vtable has not been setup, and the LinkResolver will fail.
@@ -793,9 +794,9 @@ ciMethod* ciMethod::resolve_invoke(ciKlass* caller, ciKlass* exact_receiver, boo
 ||
 InstanceKlass::cast(h_recv())->is_linked() && !exact_receiver->is_interface()) {
 if (holder()->is_interface()) {
-m = LinkResolver::resolve_interface_call_or_null(h_recv, h_resolved, h_name, h_signature, caller_klass, check_access);
+m = LinkResolver::resolve_interface_call_or_null(h_recv, link_info);
 } else {
-m = LinkResolver::resolve_virtual_call_or_null(h_recv, h_resolved, h_name, h_signature, caller_klass, check_access);
+m = LinkResolver::resolve_virtual_call_or_null(h_recv, link_info);
 }
 }
@@ -839,7 +840,8 @@ int ciMethod::resolve_vtable_index(ciKlass* caller, ciKlass* receiver) {
 Symbol* h_name = name()->get_symbol();
 Symbol* h_signature = signature()->get_symbol();
-vtable_index = LinkResolver::resolve_virtual_vtable_index(h_recv, h_recv, h_name, h_signature, caller_klass);
+LinkInfo link_info(h_recv, h_name, h_signature, caller_klass);
+vtable_index = LinkResolver::resolve_virtual_vtable_index(h_recv, link_info);
 if (vtable_index == Method::nonvirtual_vtable_index) {
 // A statically bound method. Return "no such index".
 vtable_index = Method::invalid_vtable_index;
@@ -1285,10 +1287,8 @@ bool ciMethod::check_call(int refinfo_index, bool is_static) const {
 EXCEPTION_MARK;
 HandleMark hm(THREAD);
 constantPoolHandle pool (THREAD, get_Method()->constants());
-methodHandle spec_method;
-KlassHandle spec_klass;
 Bytecodes::Code code = (is_static ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual);
-LinkResolver::resolve_method_statically(spec_method, spec_klass, code, pool, refinfo_index, THREAD);
+methodHandle spec_method = LinkResolver::resolve_method_statically(code, pool, refinfo_index, THREAD);
 if (HAS_PENDING_EXCEPTION) {
 CLEAR_PENDING_EXCEPTION;
 return false;

View File

@@ -2967,47 +2967,42 @@ int java_lang_invoke_MethodType::rtype_slot_count(oop mt) {
 int java_lang_invoke_CallSite::_target_offset;
 int java_lang_invoke_CallSite::_context_offset;
-int java_lang_invoke_CallSite::_default_context_offset;
 void java_lang_invoke_CallSite::compute_offsets() {
 Klass* k = SystemDictionary::CallSite_klass();
 if (k != NULL) {
 compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_lang_invoke_MethodHandle_signature());
-compute_offset(_context_offset, k, vmSymbols::context_name(), vmSymbols::sun_misc_Cleaner_signature());
-compute_offset(_default_context_offset, k,
-vmSymbols::DEFAULT_CONTEXT_name(), vmSymbols::sun_misc_Cleaner_signature(),
-/*is_static=*/true, /*allow_super=*/false);
+compute_offset(_context_offset, k, vmSymbols::context_name(),
+vmSymbols::java_lang_invoke_MethodHandleNatives_CallSiteContext_signature());
 }
 }
-oop java_lang_invoke_CallSite::context_volatile(oop call_site) {
+oop java_lang_invoke_CallSite::context(oop call_site) {
 assert(java_lang_invoke_CallSite::is_instance(call_site), "");
-oop dep_oop = call_site->obj_field_volatile(_context_offset);
+oop dep_oop = call_site->obj_field(_context_offset);
 return dep_oop;
 }
-void java_lang_invoke_CallSite::set_context_volatile(oop call_site, oop context) {
-assert(java_lang_invoke_CallSite::is_instance(call_site), "");
-call_site->obj_field_put_volatile(_context_offset, context);
-}
-bool java_lang_invoke_CallSite::set_context_cas(oop call_site, oop context, oop expected) {
-assert(java_lang_invoke_CallSite::is_instance(call_site), "");
-HeapWord* context_addr = call_site->obj_field_addr<HeapWord>(_context_offset);
-oop res = oopDesc::atomic_compare_exchange_oop(context, context_addr, expected, true);
-bool success = (res == expected);
-if (success) {
-update_barrier_set((void*)context_addr, context);
-}
-return success;
-}
-oop java_lang_invoke_CallSite::default_context() {
-InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::CallSite_klass());
-oop def_context_oop = ik->java_mirror()->obj_field(_default_context_offset);
-assert(!oopDesc::is_null(def_context_oop), "");
-return def_context_oop;
-}
+// Support for java_lang_invoke_MethodHandleNatives_CallSiteContext
+int java_lang_invoke_MethodHandleNatives_CallSiteContext::_vmdependencies_offset;
+void java_lang_invoke_MethodHandleNatives_CallSiteContext::compute_offsets() {
+Klass* k = SystemDictionary::Context_klass();
+if (k != NULL) {
+CALLSITECONTEXT_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
+}
+}
+nmethodBucket* java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(oop call_site) {
+assert(java_lang_invoke_MethodHandleNatives_CallSiteContext::is_instance(call_site), "");
+return (nmethodBucket*) (address) call_site->long_field(_vmdependencies_offset);
+}
+void java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(oop call_site, nmethodBucket* context) {
+assert(java_lang_invoke_MethodHandleNatives_CallSiteContext::is_instance(call_site), "");
+call_site->long_field_put(_vmdependencies_offset, (jlong) (address) context);
+}
 // Support for java_security_AccessControlContext
@@ -3403,6 +3398,7 @@ void JavaClasses::compute_offsets() {
 java_lang_invoke_LambdaForm::compute_offsets();
 java_lang_invoke_MethodType::compute_offsets();
 java_lang_invoke_CallSite::compute_offsets();
+java_lang_invoke_MethodHandleNatives_CallSiteContext::compute_offsets();
 java_security_AccessControlContext::compute_offsets();
 // Initialize reflection classes. The layouts of these classes
 // changed with the new reflection implementation in JDK 1.4, and

View File

@@ -1170,8 +1170,6 @@ class java_lang_invoke_CallSite: AllStatic {
 private:
 static int _target_offset;
 static int _context_offset;
-static int _default_context_offset;
 static void compute_offsets();
@@ -1181,11 +1179,7 @@ public:
 static void set_target( oop site, oop target);
 static void set_target_volatile( oop site, oop target);
-static oop context_volatile(oop site);
-static void set_context_volatile(oop site, oop context);
-static bool set_context_cas (oop site, oop context, oop expected);
-static oop default_context();
+static oop context(oop site);
 // Testers
 static bool is_subclass(Klass* klass) {
@@ -1197,6 +1191,31 @@ public:
 static int target_offset_in_bytes() { return _target_offset; }
 };
+// Interface to java.lang.invoke.MethodHandleNatives$CallSiteContext objects
+#define CALLSITECONTEXT_INJECTED_FIELDS(macro) \
+macro(java_lang_invoke_MethodHandleNatives_CallSiteContext, vmdependencies, intptr_signature, false)
+class java_lang_invoke_MethodHandleNatives_CallSiteContext : AllStatic {
+friend class JavaClasses;
+private:
+static int _vmdependencies_offset;
+static void compute_offsets();
+public:
+// Accessors
+static nmethodBucket* vmdependencies(oop context);
+static void set_vmdependencies(oop context, nmethodBucket* bucket);
+// Testers
+static bool is_subclass(Klass* klass) {
+return klass->is_subclass_of(SystemDictionary::Context_klass());
+}
+static bool is_instance(oop obj);
+};
 // Interface to java.security.AccessControlContext objects
 class java_security_AccessControlContext: AllStatic {
@@ -1406,7 +1425,8 @@ class InjectedField {
 #define ALL_INJECTED_FIELDS(macro) \
 CLASS_INJECTED_FIELDS(macro) \
 CLASSLOADER_INJECTED_FIELDS(macro) \
-MEMBERNAME_INJECTED_FIELDS(macro)
+MEMBERNAME_INJECTED_FIELDS(macro) \
+CALLSITECONTEXT_INJECTED_FIELDS(macro)
 // Interface to hard-coded offset checking

View File

@@ -49,6 +49,10 @@ inline bool java_lang_invoke_CallSite::is_instance(oop obj) {
 return obj != NULL && is_subclass(obj->klass());
 }
+inline bool java_lang_invoke_MethodHandleNatives_CallSiteContext::is_instance(oop obj) {
+return obj != NULL && is_subclass(obj->klass());
+}
 inline bool java_lang_invoke_MemberName::is_instance(oop obj) {
 return obj != NULL && is_subclass(obj->klass());
 }

View File

@@ -159,6 +159,7 @@ class Ticks;
 do_klass(MethodType_klass, java_lang_invoke_MethodType, Pre ) \
 do_klass(BootstrapMethodError_klass, java_lang_BootstrapMethodError, Pre ) \
 do_klass(CallSite_klass, java_lang_invoke_CallSite, Pre ) \
+do_klass(Context_klass, java_lang_invoke_MethodHandleNatives_CallSiteContext, Pre ) \
 do_klass(ConstantCallSite_klass, java_lang_invoke_ConstantCallSite, Pre ) \
 do_klass(MutableCallSite_klass, java_lang_invoke_MutableCallSite, Pre ) \
 do_klass(VolatileCallSite_klass, java_lang_invoke_VolatileCallSite, Pre ) \

View File

@@ -45,6 +45,8 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
+#include "runtime/thread.hpp"
+#include "services/threadService.hpp"
 #include "utilities/bytes.hpp"
 #define NOFAILOVER_MAJOR_VERSION 51
@@ -130,6 +132,16 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul
 return true;
 }
+// Timer includes any side effects of class verification (resolution,
+// etc), but not recursive calls to Verifier::verify().
+JavaThread* jt = (JavaThread*)THREAD;
+PerfClassTraceTime timer(ClassLoader::perf_class_verify_time(),
+ClassLoader::perf_class_verify_selftime(),
+ClassLoader::perf_classes_verified(),
+jt->get_thread_stat()->perf_recursion_counts_addr(),
+jt->get_thread_stat()->perf_timers_addr(),
+PerfClassTraceTime::CLASS_VERIFY);
 // If the class should be verified, first see if we can use the split
 // verifier. If not, or if verification fails and FailOverToOldVerifier
 // is set, then call the inference verifier.

View File

@@ -274,12 +274,14 @@
 /* internal classes known only to the JVM: */ \
 template(java_lang_invoke_MemberName, "java/lang/invoke/MemberName") \
 template(java_lang_invoke_MethodHandleNatives, "java/lang/invoke/MethodHandleNatives") \
+template(java_lang_invoke_MethodHandleNatives_CallSiteContext, "java/lang/invoke/MethodHandleNatives$CallSiteContext") \
 template(java_lang_invoke_LambdaForm, "java/lang/invoke/LambdaForm") \
 template(java_lang_invoke_ForceInline_signature, "Ljava/lang/invoke/ForceInline;") \
 template(java_lang_invoke_DontInline_signature, "Ljava/lang/invoke/DontInline;") \
 template(java_lang_invoke_Stable_signature, "Ljava/lang/invoke/Stable;") \
 template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \
 template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;") \
+template(java_lang_invoke_MethodHandleNatives_CallSiteContext_signature, "Ljava/lang/invoke/MethodHandleNatives$CallSiteContext;") \
 /* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */ \
 template(findMethodHandleType_name, "findMethodHandleType") \
 template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \
@@ -401,7 +403,7 @@
 template(protection_domain_name, "protection_domain") \
 template(signers_name, "signers_name") \
 template(loader_data_name, "loader_data") \
-template(dependencies_name, "dependencies") \
+template(vmdependencies_name, "vmdependencies") \
 template(input_stream_void_signature, "(Ljava/io/InputStream;)V") \
 template(getFileURL_name, "getFileURL") \
 template(getFileURL_signature, "(Ljava/io/File;)Ljava/net/URL;") \

View File

@@ -1047,40 +1047,6 @@ void CodeCache::flush_dependents_on(instanceKlassHandle dependee) {
 }
 }
-// Flushes compiled methods dependent on a particular CallSite
-// instance when its target is different than the given MethodHandle.
-void CodeCache::flush_dependents_on(Handle call_site, Handle method_handle) {
-assert_lock_strong(Compile_lock);
-if (number_of_nmethods_with_dependencies() == 0) return;
-// CodeCache can only be updated by a thread_in_VM and they will all be
-// stopped during the safepoint so CodeCache will be safe to update without
-// holding the CodeCache_lock.
-CallSiteDepChange changes(call_site(), method_handle());
-// Compute the dependent nmethods that have a reference to a
-// CallSite object. We use InstanceKlass::mark_dependent_nmethod
-// directly instead of CodeCache::mark_for_deoptimization because we
-// want dependents on the call site class only not all classes in
-// the ContextStream.
-int marked = 0;
-{
-MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-InstanceKlass* ctxk = MethodHandles::get_call_site_context(call_site());
-if (ctxk == NULL) {
-return; // No dependencies to invalidate yet.
-}
-marked = ctxk->mark_dependent_nmethods(changes);
-}
-if (marked > 0) {
-// At least one nmethod has been marked for deoptimization
-VM_Deoptimize op;
-VMThread::execute(&op);
-}
-}
 #ifdef HOTSWAP
 // Flushes compiled methods dependent on dependee in the evolutionary sense
 void CodeCache::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {

View File

@@ -224,7 +224,6 @@ class CodeCache : AllStatic {
 // Flushing and deoptimization
 static void flush_dependents_on(instanceKlassHandle dependee);
-static void flush_dependents_on(Handle call_site, Handle method_handle);
 #ifdef HOTSWAP
 // Flushing and deoptimization in case of evolution
 static void flush_evol_dependents_on(instanceKlassHandle dependee);

View File

@ -117,9 +117,7 @@ void Dependencies::assert_has_no_finalizable_subclasses(ciKlass* ctxk) {
} }
void Dependencies::assert_call_site_target_value(ciCallSite* call_site, ciMethodHandle* method_handle) { void Dependencies::assert_call_site_target_value(ciCallSite* call_site, ciMethodHandle* method_handle) {
ciKlass* ctxk = call_site->get_context(); assert_common_2(call_site_target_value, call_site, method_handle);
check_ctxk(ctxk);
assert_common_3(call_site_target_value, ctxk, call_site, method_handle);
} }
// Helper function. If we are adding a new dep. under ctxk2, // Helper function. If we are adding a new dep. under ctxk2,
@ -175,7 +173,6 @@ void Dependencies::assert_common_2(DepType dept,
} }
} }
} else { } else {
assert(dep_implicit_context_arg(dept) == 0, "sanity");
if (note_dep_seen(dept, x0) && note_dep_seen(dept, x1)) { if (note_dep_seen(dept, x0) && note_dep_seen(dept, x1)) {
// look in this bucket for redundant assertions // look in this bucket for redundant assertions
const int stride = 2; const int stride = 2;
@ -389,7 +386,7 @@ int Dependencies::_dep_args[TYPE_LIMIT] = {
3, // unique_concrete_subtypes_2 ctxk, k1, k2 3, // unique_concrete_subtypes_2 ctxk, k1, k2
3, // unique_concrete_methods_2 ctxk, m1, m2 3, // unique_concrete_methods_2 ctxk, m1, m2
1, // no_finalizable_subclasses ctxk 1, // no_finalizable_subclasses ctxk
3 // call_site_target_value ctxk, call_site, method_handle 2 // call_site_target_value call_site, method_handle
}; };
const char* Dependencies::dep_name(Dependencies::DepType dept) { const char* Dependencies::dep_name(Dependencies::DepType dept) {
@ -1515,16 +1512,11 @@ Klass* Dependencies::check_has_no_finalizable_subclasses(Klass* ctxk, KlassDepCh
return find_finalizable_subclass(search_at); return find_finalizable_subclass(search_at);
} }
Klass* Dependencies::check_call_site_target_value(Klass* recorded_ctxk, oop call_site, oop method_handle, CallSiteDepChange* changes) { Klass* Dependencies::check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes) {
assert(call_site->is_a(SystemDictionary::CallSite_klass()), "sanity"); assert(!oopDesc::is_null(call_site), "sanity");
assert(!oopDesc::is_null(method_handle), "sanity"); assert(!oopDesc::is_null(method_handle), "sanity");
assert(call_site->is_a(SystemDictionary::CallSite_klass()), "sanity");
Klass* call_site_ctxk = MethodHandles::get_call_site_context(call_site);
assert(!Klass::is_null(call_site_ctxk), "call site context should be initialized already");
if (recorded_ctxk != call_site_ctxk) {
// Stale context
return recorded_ctxk;
}
if (changes == NULL) { if (changes == NULL) {
// Validate all CallSites // Validate all CallSites
if (java_lang_invoke_CallSite::target(call_site) != method_handle) if (java_lang_invoke_CallSite::target(call_site) != method_handle)
@ -1599,7 +1591,7 @@ Klass* Dependencies::DepStream::check_call_site_dependency(CallSiteDepChange* ch
Klass* witness = NULL; Klass* witness = NULL;
switch (type()) { switch (type()) {
case call_site_target_value: case call_site_target_value:
witness = check_call_site_target_value(context_type(), argument_oop(1), argument_oop(2), changes); witness = check_call_site_target_value(argument_oop(0), argument_oop(1), changes);
break; break;
default: default:
witness = NULL; witness = NULL;
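Reconstructed from the right-hand (new) side of the hunks above, the two touched Dependencies functions now read as follows; the elided checker body is unchanged from the hunk:

    void Dependencies::assert_call_site_target_value(ciCallSite* call_site,
                                                     ciMethodHandle* method_handle) {
      // The context klass is no longer recorded; only two arguments remain.
      assert_common_2(call_site_target_value, call_site, method_handle);
    }

    Klass* Dependencies::check_call_site_target_value(oop call_site, oop method_handle,
                                                      CallSiteDepChange* changes) {
      assert(!oopDesc::is_null(call_site), "sanity");
      assert(!oopDesc::is_null(method_handle), "sanity");
      assert(call_site->is_a(SystemDictionary::CallSite_klass()), "sanity");
      // ... body continues as shown in the hunk above ...
    }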

View File

@ -173,7 +173,7 @@ class Dependencies: public ResourceObj {
non_klass_types = (1 << call_site_target_value), non_klass_types = (1 << call_site_target_value),
klass_types = all_types & ~non_klass_types, klass_types = all_types & ~non_klass_types,
non_ctxk_types = (1 << evol_method), non_ctxk_types = (1 << evol_method) | (1 << call_site_target_value),
implicit_ctxk_types = 0, implicit_ctxk_types = 0,
explicit_ctxk_types = all_types & ~(non_ctxk_types | implicit_ctxk_types), explicit_ctxk_types = all_types & ~(non_ctxk_types | implicit_ctxk_types),
@ -330,7 +330,7 @@ class Dependencies: public ResourceObj {
static Klass* check_exclusive_concrete_methods(Klass* ctxk, Method* m1, Method* m2, static Klass* check_exclusive_concrete_methods(Klass* ctxk, Method* m1, Method* m2,
KlassDepChange* changes = NULL); KlassDepChange* changes = NULL);
static Klass* check_has_no_finalizable_subclasses(Klass* ctxk, KlassDepChange* changes = NULL); static Klass* check_has_no_finalizable_subclasses(Klass* ctxk, KlassDepChange* changes = NULL);
static Klass* check_call_site_target_value(Klass* recorded_ctxk, oop call_site, oop method_handle, CallSiteDepChange* changes = NULL); static Klass* check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes = NULL);
// A returned Klass* is NULL if the dependency assertion is still // A returned Klass* is NULL if the dependency assertion is still
// valid. A non-NULL Klass* is a 'witness' to the assertion // valid. A non-NULL Klass* is a 'witness' to the assertion
// failure, a point in the class hierarchy where the assertion has // failure, a point in the class hierarchy where the assertion has
@ -496,7 +496,7 @@ class Dependencies: public ResourceObj {
bool next(); bool next();
DepType type() { return _type; } DepType type() { return _type; }
bool is_oop_argument(int i) { return type() == call_site_target_value && i > 0; } bool is_oop_argument(int i) { return type() == call_site_target_value; }
uintptr_t get_identifier(int i); uintptr_t get_identifier(int i);
int argument_count() { return dep_args(type()); } int argument_count() { return dep_args(type()); }

View File

@ -565,13 +565,18 @@ nmethod* nmethod::new_nmethod(methodHandle method,
// the number of methods compiled. For applications with a lot // the number of methods compiled. For applications with a lot
// classes the slow way is too slow. // classes the slow way is too slow.
for (Dependencies::DepStream deps(nm); deps.next(); ) { for (Dependencies::DepStream deps(nm); deps.next(); ) {
Klass* klass = deps.context_type(); if (deps.type() == Dependencies::call_site_target_value) {
if (klass == NULL) { // CallSite dependencies are managed on per-CallSite instance basis.
continue; // ignore things like evol_method oop call_site = deps.argument_oop(0);
MethodHandles::add_dependent_nmethod(call_site, nm);
} else {
Klass* klass = deps.context_type();
if (klass == NULL) {
continue; // ignore things like evol_method
}
// record this nmethod as dependent on this klass
InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
} }
// record this nmethod as dependent on this klass
InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
} }
NOT_PRODUCT(nmethod_stats.note_nmethod(nm)); NOT_PRODUCT(nmethod_stats.note_nmethod(nm));
if (PrintAssembly || CompilerOracle::has_option_string(method, "PrintAssembly")) { if (PrintAssembly || CompilerOracle::has_option_string(method, "PrintAssembly")) {
@ -1464,13 +1469,20 @@ void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
if (!has_flushed_dependencies()) { if (!has_flushed_dependencies()) {
set_has_flushed_dependencies(); set_has_flushed_dependencies();
for (Dependencies::DepStream deps(this); deps.next(); ) { for (Dependencies::DepStream deps(this); deps.next(); ) {
Klass* klass = deps.context_type(); if (deps.type() == Dependencies::call_site_target_value) {
if (klass == NULL) continue; // ignore things like evol_method // CallSite dependencies are managed on per-CallSite instance basis.
oop call_site = deps.argument_oop(0);
// During GC the is_alive closure is non-NULL, and is used to MethodHandles::remove_dependent_nmethod(call_site, this);
// determine liveness of dependees that need to be updated. } else {
if (is_alive == NULL || klass->is_loader_alive(is_alive)) { Klass* klass = deps.context_type();
InstanceKlass::cast(klass)->remove_dependent_nmethod(this); if (klass == NULL) {
continue; // ignore things like evol_method
}
// During GC the is_alive closure is non-NULL, and is used to
// determine liveness of dependees that need to be updated.
if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
}
} }
} }
} }

View File

@ -254,9 +254,9 @@ void VM_GenCollectFullConcurrent::doit_epilogue() {
if (_gc_cause != GCCause::_gc_locker && if (_gc_cause != GCCause::_gc_locker &&
gch->total_full_collections_completed() <= _full_gc_count_before) { gch->total_full_collections_completed() <= _full_gc_count_before) {
// maybe we should change the condition to test _gc_cause == // maybe we should change the condition to test _gc_cause ==
// GCCause::_java_lang_system_gc, instead of // GCCause::_java_lang_system_gc or GCCause::_dcmd_gc_run,
// _gc_cause != GCCause::_gc_locker // instead of _gc_cause != GCCause::_gc_locker
assert(_gc_cause == GCCause::_java_lang_system_gc, assert(GCCause::is_user_requested_gc(_gc_cause),
"the only way to get here if this was a System.gc()-induced GC"); "the only way to get here if this was a System.gc()-induced GC");
assert(ExplicitGCInvokesConcurrent, "Error"); assert(ExplicitGCInvokesConcurrent, "Error");
// Now, wait for witnessing concurrent gc cycle to complete, // Now, wait for witnessing concurrent gc cycle to complete,

View File

@ -43,7 +43,7 @@ GangWorker* YieldingFlexibleWorkGang::allocate_worker(uint which) {
} }
// Run a task; returns when the task is done, or the workers yield, // Run a task; returns when the task is done, or the workers yield,
// or the task is aborted, or the work gang is terminated via stop(). // or the task is aborted.
// A task that has been yielded can be continued via this interface // A task that has been yielded can be continued via this interface
// by using the same task repeatedly as the argument to the call. // by using the same task repeatedly as the argument to the call.
// It is expected that the YieldingFlexibleGangTask carries the appropriate // It is expected that the YieldingFlexibleGangTask carries the appropriate
@ -297,16 +297,9 @@ void YieldingFlexibleGangWorker::loop() {
WorkData data; WorkData data;
int id; int id;
while (true) { while (true) {
// Check if there is work to do or if we have been asked // Check if there is work to do.
// to terminate
gang()->internal_worker_poll(&data); gang()->internal_worker_poll(&data);
if (data.terminate()) { if (data.task() != NULL && data.sequence_number() != previous_sequence_number) {
// We have been asked to terminate.
assert(gang()->task() == NULL, "No task binding");
// set_status(TERMINATED);
return;
} else if (data.task() != NULL &&
data.sequence_number() != previous_sequence_number) {
// There is work to be done. // There is work to be done.
// First check if we need to become active or if there // First check if we need to become active or if there
// are already the requisite number of workers // are already the requisite number of workers
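Reconstructed from the right-hand side above, the simplified polling loop in YieldingFlexibleGangWorker::loop() now begins:

    while (true) {
      // Check if there is work to do; there is no separate terminate
      // flag to test any more.
      gang()->internal_worker_poll(&data);
      if (data.task() != NULL && data.sequence_number() != previous_sequence_number) {
        // There is work to be done.
        // ... (continues as in the hunk above)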

View File

@ -176,7 +176,7 @@ public:
GangWorker* allocate_worker(uint which); GangWorker* allocate_worker(uint which);
// Run a task; returns when the task is done, or the workers yield, // Run a task; returns when the task is done, or the workers yield,
// or the task is aborted, or the work gang is terminated via stop(). // or the task is aborted.
// A task that has been yielded can be continued via this same interface // A task that has been yielded can be continued via this same interface
// by using the same task repeatedly as the argument to the call. // by using the same task repeatedly as the argument to the call.
// It is expected that the YieldingFlexibleGangTask carries the appropriate // It is expected that the YieldingFlexibleGangTask carries the appropriate

View File

@ -1183,7 +1183,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
IsGCActiveMark x; IsGCActiveMark x;
// Timing // Timing
assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant"); assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
{ {
@ -2199,6 +2199,7 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
switch (cause) { switch (cause) {
case GCCause::_gc_locker: return GCLockerInvokesConcurrent; case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent; case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
case GCCause::_g1_humongous_allocation: return true; case GCCause::_g1_humongous_allocation: return true;
case GCCause::_update_allocation_context_stats_inc: return true; case GCCause::_update_allocation_context_stats_inc: return true;
case GCCause::_wb_conc_mark: return true; case GCCause::_wb_conc_mark: return true;

View File

@ -324,7 +324,8 @@ private:
// explicitly started if: // explicitly started if:
// (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
// (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent. // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
// (c) cause == _g1_humongous_allocation // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
// (d) cause == _g1_humongous_allocation
bool should_do_concurrent_full_gc(GCCause::Cause cause); bool should_do_concurrent_full_gc(GCCause::Cause cause);
// Keeps track of how many "old marking cycles" (i.e., Full GCs or // Keeps track of how many "old marking cycles" (i.e., Full GCs or

View File

@ -168,7 +168,7 @@ void VM_G1IncCollectionPause::doit_epilogue() {
// +ExplicitGCInvokesConcurrent, we have to wait here for the cycle // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
// that just started (or maybe one that was already in progress) to // that just started (or maybe one that was already in progress) to
// finish. // finish.
if (_gc_cause == GCCause::_java_lang_system_gc && if (GCCause::is_user_requested_gc(_gc_cause) &&
_should_initiate_conc_mark) { _should_initiate_conc_mark) {
assert(ExplicitGCInvokesConcurrent, assert(ExplicitGCInvokesConcurrent,
"the only way to be here is if ExplicitGCInvokesConcurrent is set"); "the only way to be here is if ExplicitGCInvokesConcurrent is set");

View File

@ -130,7 +130,7 @@ void PSAdaptiveSizePolicy::major_collection_end(size_t amount_live,
// Update the pause time. // Update the pause time.
_major_timer.stop(); _major_timer.stop();
if (gc_cause != GCCause::_java_lang_system_gc || if (!GCCause::is_user_requested_gc(gc_cause) ||
UseAdaptiveSizePolicyWithSystemGC) { UseAdaptiveSizePolicyWithSystemGC) {
double major_pause_in_seconds = _major_timer.seconds(); double major_pause_in_seconds = _major_timer.seconds();
double major_pause_in_ms = major_pause_in_seconds * MILLIUNITS; double major_pause_in_ms = major_pause_in_seconds * MILLIUNITS;

View File

@ -272,7 +272,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
// Don't check if the size_policy is ready here. Let // Don't check if the size_policy is ready here. Let
// the size_policy check that internally. // the size_policy check that internally.
if (UseAdaptiveGenerationSizePolicyAtMajorCollection && if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
((gc_cause != GCCause::_java_lang_system_gc) || (!GCCause::is_user_requested_gc(gc_cause) ||
UseAdaptiveSizePolicyWithSystemGC)) { UseAdaptiveSizePolicyWithSystemGC)) {
// Swap the survivor spaces if from_space is empty. The // Swap the survivor spaces if from_space is empty. The
// resize_young_gen() called below is normally used after // resize_young_gen() called below is normally used after

View File

@ -2053,7 +2053,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer); marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
bool max_on_system_gc = UseMaximumCompactionOnSystemGC bool max_on_system_gc = UseMaximumCompactionOnSystemGC
&& gc_cause == GCCause::_java_lang_system_gc; && GCCause::is_user_requested_gc(gc_cause);
summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc); summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity")); COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
@ -2089,7 +2089,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
// Don't check if the size_policy is ready here. Let // Don't check if the size_policy is ready here. Let
// the size_policy check that internally. // the size_policy check that internally.
if (UseAdaptiveGenerationSizePolicyAtMajorCollection && if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
((gc_cause != GCCause::_java_lang_system_gc) || (!GCCause::is_user_requested_gc(gc_cause) ||
UseAdaptiveSizePolicyWithSystemGC)) { UseAdaptiveSizePolicyWithSystemGC)) {
// Swap the survivor spaces if from_space is empty. The // Swap the survivor spaces if from_space is empty. The
// resize_young_gen() called below is normally used after // resize_young_gen() called below is normally used after

View File

@ -290,7 +290,7 @@ bool PSScavenge::invoke_no_policy() {
AdaptiveSizePolicyOutput(size_policy, heap->total_collections()); AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
if ((gc_cause != GCCause::_java_lang_system_gc) || if (!GCCause::is_user_requested_gc(gc_cause) ||
UseAdaptiveSizePolicyWithSystemGC) { UseAdaptiveSizePolicyWithSystemGC) {
// Gather the feedback data for eden occupancy. // Gather the feedback data for eden occupancy.
young_gen->eden_space()->accumulate_statistics(); young_gen->eden_space()->accumulate_statistics();

View File

@ -960,7 +960,7 @@ void DefNewGeneration::gc_epilogue(bool full) {
GCCause::to_string(gch->gc_cause())); GCCause::to_string(gch->gc_cause()));
} }
assert(gch->gc_cause() == GCCause::_scavenge_alot || assert(gch->gc_cause() == GCCause::_scavenge_alot ||
(gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) || (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
!gch->incremental_collection_failed(), !gch->incremental_collection_failed(),
"Twice in a row"); "Twice in a row");
seen_incremental_collection_failed = false; seen_incremental_collection_failed = false;

View File

@ -244,7 +244,7 @@ void AdaptiveSizePolicy::minor_collection_end(GCCause::Cause gc_cause) {
// Update the pause time. // Update the pause time.
_minor_timer.stop(); _minor_timer.stop();
if (gc_cause != GCCause::_java_lang_system_gc || if (!GCCause::is_user_requested_gc(gc_cause) ||
UseAdaptiveSizePolicyWithSystemGC) { UseAdaptiveSizePolicyWithSystemGC) {
double minor_pause_in_seconds = _minor_timer.seconds(); double minor_pause_in_seconds = _minor_timer.seconds();
double minor_pause_in_ms = minor_pause_in_seconds * MILLIUNITS; double minor_pause_in_ms = minor_pause_in_seconds * MILLIUNITS;

View File

@ -103,6 +103,9 @@ const char* GCCause::to_string(GCCause::Cause cause) {
case _last_ditch_collection: case _last_ditch_collection:
return "Last ditch collection"; return "Last ditch collection";
case _dcmd_gc_run:
return "Diagnostic Command";
case _last_gc_cause: case _last_gc_cause:
return "ILLEGAL VALUE - last gc cause - ILLEGAL VALUE"; return "ILLEGAL VALUE - last gc cause - ILLEGAL VALUE";

View File

@ -74,12 +74,15 @@ class GCCause : public AllStatic {
_g1_humongous_allocation, _g1_humongous_allocation,
_last_ditch_collection, _last_ditch_collection,
_dcmd_gc_run,
_last_gc_cause _last_gc_cause
}; };
inline static bool is_user_requested_gc(GCCause::Cause cause) { inline static bool is_user_requested_gc(GCCause::Cause cause) {
return (cause == GCCause::_java_lang_system_gc || return (cause == GCCause::_java_lang_system_gc ||
cause == GCCause::_jvmti_force_gc); cause == GCCause::_dcmd_gc_run);
} }
inline static bool is_serviceability_requested_gc(GCCause::Cause inline static bool is_serviceability_requested_gc(GCCause::Cause
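After this hunk the user-requested test reads as follows (taken from the right-hand side above); the gc_cause != GCCause::_java_lang_system_gc checks that earlier hunks replace with !GCCause::is_user_requested_gc(gc_cause) therefore now also cover the new Diagnostic Command cause:

    inline static bool is_user_requested_gc(GCCause::Cause cause) {
      return (cause == GCCause::_java_lang_system_gc ||
              cause == GCCause::_dcmd_gc_run);
    }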

View File

@ -304,9 +304,16 @@ bool GenCollectedHeap::must_clear_all_soft_refs() {
} }
bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
return UseConcMarkSweepGC && if (!UseConcMarkSweepGC) {
((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || return false;
(cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); }
switch (cause) {
case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
case GCCause::_java_lang_system_gc:
case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
default: return false;
}
} }
void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size, void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,

View File

@ -47,7 +47,6 @@ AbstractWorkGang::AbstractWorkGang(const char* name,
/* allow_vm_block */ are_GC_task_threads, /* allow_vm_block */ are_GC_task_threads,
Monitor::_safepoint_check_sometimes); Monitor::_safepoint_check_sometimes);
assert(monitor() != NULL, "Failed to allocate monitor"); assert(monitor() != NULL, "Failed to allocate monitor");
_terminate = false;
_task = NULL; _task = NULL;
_sequence_number = 0; _sequence_number = 0;
_started_workers = 0; _started_workers = 0;
@ -106,18 +105,6 @@ bool WorkGang::initialize_workers() {
return true; return true;
} }
AbstractWorkGang::~AbstractWorkGang() {
if (TraceWorkGang) {
tty->print_cr("Destructing work gang %s", name());
}
stop(); // stop all the workers
for (uint worker = 0; worker < total_workers(); worker += 1) {
delete gang_worker(worker);
}
delete gang_workers();
delete monitor();
}
GangWorker* AbstractWorkGang::gang_worker(uint i) const { GangWorker* AbstractWorkGang::gang_worker(uint i) const {
// Array index bounds checking. // Array index bounds checking.
GangWorker* result = NULL; GangWorker* result = NULL;
@ -175,28 +162,9 @@ void FlexibleWorkGang::run_task(AbstractGangTask* task) {
WorkGang::run_task(task, (uint) active_workers()); WorkGang::run_task(task, (uint) active_workers());
} }
void AbstractWorkGang::stop() {
// Tell all workers to terminate, then wait for them to become inactive.
MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
if (TraceWorkGang) {
tty->print_cr("Stopping work gang %s task %s", name(), task()->name());
}
_task = NULL;
_terminate = true;
monitor()->notify_all();
while (finished_workers() < active_workers()) {
if (TraceWorkGang) {
tty->print_cr("Waiting in work gang %s: %u/%u finished",
name(), finished_workers(), active_workers());
}
monitor()->wait(/* no_safepoint_check */ true);
}
}
void AbstractWorkGang::internal_worker_poll(WorkData* data) const { void AbstractWorkGang::internal_worker_poll(WorkData* data) const {
assert(monitor()->owned_by_self(), "worker_poll is an internal method"); assert(monitor()->owned_by_self(), "worker_poll is an internal method");
assert(data != NULL, "worker data is null"); assert(data != NULL, "worker data is null");
data->set_terminate(terminate());
data->set_task(task()); data->set_task(task());
data->set_sequence_number(sequence_number()); data->set_sequence_number(sequence_number());
} }
@ -259,7 +227,7 @@ void GangWorker::initialize() {
void GangWorker::loop() { void GangWorker::loop() {
int previous_sequence_number = 0; int previous_sequence_number = 0;
Monitor* gang_monitor = gang()->monitor(); Monitor* gang_monitor = gang()->monitor();
for ( ; /* !terminate() */; ) { for ( ; ; ) {
WorkData data; WorkData data;
int part; // Initialized below. int part; // Initialized below.
{ {
@ -272,8 +240,6 @@ void GangWorker::loop() {
if (TraceWorkGang) { if (TraceWorkGang) {
tty->print("Polled outside for work in gang %s worker %u", tty->print("Polled outside for work in gang %s worker %u",
gang()->name(), id()); gang()->name(), id());
tty->print(" terminate: %s",
data.terminate() ? "true" : "false");
tty->print(" sequence: %d (prev: %d)", tty->print(" sequence: %d (prev: %d)",
data.sequence_number(), previous_sequence_number); data.sequence_number(), previous_sequence_number);
if (data.task() != NULL) { if (data.task() != NULL) {
@ -283,13 +249,7 @@ void GangWorker::loop() {
} }
tty->cr(); tty->cr();
} }
for ( ; /* break or return */; ) { for ( ; /* break */; ) {
// Terminate if requested.
if (data.terminate()) {
gang()->internal_note_finish();
gang_monitor->notify_all();
return;
}
// Check for new work. // Check for new work.
if ((data.task() != NULL) && if ((data.task() != NULL) &&
(data.sequence_number() != previous_sequence_number)) { (data.sequence_number() != previous_sequence_number)) {
@ -306,8 +266,6 @@ void GangWorker::loop() {
if (TraceWorkGang) { if (TraceWorkGang) {
tty->print("Polled inside for work in gang %s worker %u", tty->print("Polled inside for work in gang %s worker %u",
gang()->name(), id()); gang()->name(), id());
tty->print(" terminate: %s",
data.terminate() ? "true" : "false");
tty->print(" sequence: %d (prev: %d)", tty->print(" sequence: %d (prev: %d)",
data.sequence_number(), previous_sequence_number); data.sequence_number(), previous_sequence_number);
if (data.task() != NULL) { if (data.task() != NULL) {

View File

@ -103,16 +103,15 @@ class AbstractGangTaskWOopQueues : public AbstractGangTask {
// An abstract class representing a gang of workers. // An abstract class representing a gang of workers.
// You subclass this to supply an implementation of run_task(). // You subclass this to supply an implementation of run_task().
class AbstractWorkGang: public CHeapObj<mtInternal> { class AbstractWorkGang: public CHeapObj<mtInternal> {
// Here's the public interface to this class. protected:
// Work gangs are never deleted, so no need to cleanup.
~AbstractWorkGang() { ShouldNotReachHere(); }
public: public:
// Constructor and destructor. // Constructor.
AbstractWorkGang(const char* name, bool are_GC_task_threads, AbstractWorkGang(const char* name, bool are_GC_task_threads,
bool are_ConcurrentGC_threads); bool are_ConcurrentGC_threads);
~AbstractWorkGang();
// Run a task, returns when the task is done (or terminated). // Run a task, returns when the task is done (or terminated).
virtual void run_task(AbstractGangTask* task) = 0; virtual void run_task(AbstractGangTask* task) = 0;
// Stop and terminate all workers.
virtual void stop();
// Return true if more workers should be applied to the task. // Return true if more workers should be applied to the task.
virtual bool needs_more_workers() const { return true; } virtual bool needs_more_workers() const { return true; }
public: public:
@ -129,8 +128,6 @@ protected:
Monitor* _monitor; Monitor* _monitor;
// The count of the number of workers in the gang. // The count of the number of workers in the gang.
uint _total_workers; uint _total_workers;
// Whether the workers should terminate.
bool _terminate;
// The array of worker threads for this gang. // The array of worker threads for this gang.
// This is only needed for cleaning up. // This is only needed for cleaning up.
GangWorker** _gang_workers; GangWorker** _gang_workers;
@ -153,9 +150,6 @@ public:
virtual uint active_workers() const { virtual uint active_workers() const {
return _total_workers; return _total_workers;
} }
bool terminate() const {
return _terminate;
}
GangWorker** gang_workers() const { GangWorker** gang_workers() const {
return _gang_workers; return _gang_workers;
} }
@ -205,21 +199,16 @@ protected:
class WorkData: public StackObj { class WorkData: public StackObj {
// This would be a struct, but I want accessor methods. // This would be a struct, but I want accessor methods.
private: private:
bool _terminate;
AbstractGangTask* _task; AbstractGangTask* _task;
int _sequence_number; int _sequence_number;
public: public:
// Constructor and destructor // Constructor and destructor
WorkData() { WorkData() {
_terminate = false;
_task = NULL; _task = NULL;
_sequence_number = 0; _sequence_number = 0;
} }
~WorkData() { ~WorkData() {
} }
// Accessors and modifiers
bool terminate() const { return _terminate; }
void set_terminate(bool value) { _terminate = value; }
AbstractGangTask* task() const { return _task; } AbstractGangTask* task() const { return _task; }
void set_task(AbstractGangTask* value) { _task = value; } void set_task(AbstractGangTask* value) { _task = value; }
int sequence_number() const { return _sequence_number; } int sequence_number() const { return _sequence_number; }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -147,13 +147,10 @@ BasicType Bytecode_member_ref::result_type() const {
methodHandle Bytecode_invoke::static_target(TRAPS) { methodHandle Bytecode_invoke::static_target(TRAPS) {
methodHandle m;
KlassHandle resolved_klass;
constantPoolHandle constants(THREAD, this->constants()); constantPoolHandle constants(THREAD, this->constants());
Bytecodes::Code bc = invoke_code(); Bytecodes::Code bc = invoke_code();
LinkResolver::resolve_method_statically(m, resolved_klass, bc, constants, index(), CHECK_(methodHandle())); return LinkResolver::resolve_method_statically(bc, constants, index(), THREAD);
return m;
} }
Handle Bytecode_invoke::appendix(TRAPS) { Handle Bytecode_invoke::appendix(TRAPS) {
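The flattened hunk above is easier to follow as the post-change function, reconstructed from its right-hand side:

    methodHandle Bytecode_invoke::static_target(TRAPS) {
      constantPoolHandle constants(THREAD, this->constants());
      Bytecodes::Code bc = invoke_code();
      // The resolved method is now returned directly instead of being
      // passed back through out-parameters.
      return LinkResolver::resolve_method_statically(bc, constants, index(), THREAD);
    }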

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -36,7 +36,7 @@
// that method. If the info is invalid, the link has not been resolved // that method. If the info is invalid, the link has not been resolved
// successfully. // successfully.
class CallInfo VALUE_OBJ_CLASS_SPEC { class CallInfo : public StackObj {
public: public:
// Ways that a method call might be selected (or not) based on receiver type. // Ways that a method call might be selected (or not) based on receiver type.
// Note that an invokevirtual instruction might be linked with no_dispatch, // Note that an invokevirtual instruction might be linked with no_dispatch,
@ -58,11 +58,22 @@ class CallInfo VALUE_OBJ_CLASS_SPEC {
Handle _resolved_appendix; // extra argument in constant pool (if CPCE::has_appendix) Handle _resolved_appendix; // extra argument in constant pool (if CPCE::has_appendix)
Handle _resolved_method_type; // MethodType (for invokedynamic and invokehandle call sites) Handle _resolved_method_type; // MethodType (for invokedynamic and invokehandle call sites)
void set_static( KlassHandle resolved_klass, methodHandle resolved_method , TRAPS); void set_static(KlassHandle resolved_klass, const methodHandle& resolved_method, TRAPS);
void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int itable_index , TRAPS); void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass,
void set_virtual( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index , TRAPS); const methodHandle& resolved_method,
void set_handle( methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS); const methodHandle& selected_method,
void set_common( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, CallKind kind, int index, TRAPS); int itable_index, TRAPS);
void set_virtual(KlassHandle resolved_klass, KlassHandle selected_klass,
const methodHandle& resolved_method,
const methodHandle& selected_method,
int vtable_index, TRAPS);
void set_handle(const methodHandle& resolved_method,
Handle resolved_appendix, Handle resolved_method_type, TRAPS);
void set_common(KlassHandle resolved_klass, KlassHandle selected_klass,
const methodHandle& resolved_method,
const methodHandle& selected_method,
CallKind kind,
int index, TRAPS);
friend class LinkResolver; friend class LinkResolver;
@ -113,6 +124,37 @@ class CallInfo VALUE_OBJ_CLASS_SPEC {
void print() PRODUCT_RETURN; void print() PRODUCT_RETURN;
}; };
// Condensed information from constant pool to use to resolve the method or field.
// resolved_klass = specified class (i.e., static receiver class)
// current_klass = sending method holder (i.e., class containing the method
// containing the call being resolved)
class LinkInfo : public StackObj {
Symbol* _name; // extracted from JVM_CONSTANT_NameAndType
Symbol* _signature;
KlassHandle _resolved_klass; // class that the constant pool entry points to
KlassHandle _current_klass; // class that owns the constant pool
bool _check_access;
public:
LinkInfo(constantPoolHandle pool, int index, TRAPS);
// Condensed information from other call sites within the vm.
LinkInfo(KlassHandle resolved_klass, Symbol* name, Symbol* signature,
KlassHandle current_klass, bool check_access = true) :
_resolved_klass(resolved_klass),
_name(name), _signature(signature), _current_klass(current_klass),
_check_access(check_access) {}
// accessors
Symbol* name() const { return _name; }
Symbol* signature() const { return _signature; }
KlassHandle resolved_klass() const { return _resolved_klass; }
KlassHandle current_klass() const { return _current_klass; }
bool check_access() const { return _check_access; }
char* method_string() const;
void print() PRODUCT_RETURN;
};
// Link information for getfield/putfield & getstatic/putstatic bytecodes // Link information for getfield/putfield & getstatic/putstatic bytecodes
// is represented using a fieldDescriptor. // is represented using a fieldDescriptor.
@ -124,85 +166,136 @@ class LinkResolver: AllStatic {
friend class klassItable; friend class klassItable;
private: private:
static void lookup_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, bool checkpolymorphism, bool in_imethod_resolve, TRAPS);
static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
static void lookup_method_in_interfaces (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
static void lookup_polymorphic_method (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature,
KlassHandle current_klass, Handle *appendix_result_or_null, Handle *method_type_result, TRAPS);
static void resolve_klass (KlassHandle& result, constantPoolHandle pool, int index, TRAPS); static methodHandle lookup_method_in_klasses(const LinkInfo& link_info,
bool checkpolymorphism,
bool in_imethod_resolve, TRAPS);
static methodHandle lookup_method_in_interfaces(const LinkInfo& link_info, TRAPS);
static methodHandle lookup_polymorphic_method(const LinkInfo& link_info,
Handle *appendix_result_or_null,
Handle *method_type_result, TRAPS);
// Not Linktime so doesn't take LinkInfo
static methodHandle lookup_instance_method_in_klasses (
KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
static void resolve_pool (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS); // Similar loader constraint checking functions that throw
// LinkageError with descriptive message.
static void check_method_loader_constraints(const LinkInfo& link_info,
const methodHandle& resolved_method,
const char* method_type, TRAPS);
static void check_field_loader_constraints(Symbol* field, Symbol* sig,
KlassHandle current_klass,
KlassHandle sel_klass, TRAPS);
static void resolve_interface_method(methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, bool nostatics, TRAPS); static methodHandle resolve_interface_method(const LinkInfo& link_info, bool nostatics, TRAPS);
static void resolve_method (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, bool require_methodref, TRAPS); static methodHandle resolve_method (const LinkInfo& link_info, bool require_methodref, TRAPS);
static void linktime_resolve_static_method (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS); static methodHandle linktime_resolve_static_method (const LinkInfo& link_info, TRAPS);
static void linktime_resolve_special_method (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS); static methodHandle linktime_resolve_special_method (const LinkInfo& link_info, TRAPS);
static void linktime_resolve_virtual_method (methodHandle &resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature,KlassHandle current_klass, bool check_access, TRAPS); static methodHandle linktime_resolve_virtual_method (const LinkInfo& link_info, TRAPS);
static void linktime_resolve_interface_method (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS); static methodHandle linktime_resolve_interface_method (const LinkInfo& link_info, TRAPS);
static void runtime_resolve_special_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, KlassHandle current_klass, bool check_access, TRAPS); static void runtime_resolve_special_method (CallInfo& result,
static void runtime_resolve_virtual_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS); const methodHandle& resolved_method,
static void runtime_resolve_interface_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS); KlassHandle resolved_klass,
KlassHandle current_klass,
bool check_access, TRAPS);
static void runtime_resolve_virtual_method (CallInfo& result,
const methodHandle& resolved_method,
KlassHandle resolved_klass,
Handle recv,
KlassHandle recv_klass,
bool check_null_and_abstract, TRAPS);
static void runtime_resolve_interface_method (CallInfo& result,
const methodHandle& resolved_method,
KlassHandle resolved_klass,
Handle recv,
KlassHandle recv_klass,
bool check_null_and_abstract, TRAPS);
static void check_field_accessability (KlassHandle ref_klass, KlassHandle resolved_klass, KlassHandle sel_klass, fieldDescriptor& fd, TRAPS); static void check_field_accessability(KlassHandle ref_klass,
static void check_method_accessability (KlassHandle ref_klass, KlassHandle resolved_klass, KlassHandle sel_klass, methodHandle sel_method, TRAPS); KlassHandle resolved_klass,
KlassHandle sel_klass,
const fieldDescriptor& fd, TRAPS);
static void check_method_accessability(KlassHandle ref_klass,
KlassHandle resolved_klass,
KlassHandle sel_klass,
const methodHandle& sel_method, TRAPS);
// runtime resolving from constant pool
static void resolve_invokestatic (CallInfo& result,
constantPoolHandle pool, int index, TRAPS);
static void resolve_invokespecial (CallInfo& result,
constantPoolHandle pool, int index, TRAPS);
static void resolve_invokevirtual (CallInfo& result, Handle recv,
constantPoolHandle pool, int index, TRAPS);
static void resolve_invokeinterface(CallInfo& result, Handle recv,
constantPoolHandle pool, int index, TRAPS);
static void resolve_invokedynamic (CallInfo& result,
constantPoolHandle pool, int index, TRAPS);
static void resolve_invokehandle (CallInfo& result,
constantPoolHandle pool, int index, TRAPS);
public: public:
// constant pool resolving // constant pool resolving
static void check_klass_accessability(KlassHandle ref_klass, KlassHandle sel_klass, TRAPS); static void check_klass_accessability(KlassHandle ref_klass, KlassHandle sel_klass, TRAPS);
// static resolving calls (will not run any Java code); used only from Bytecode_invoke::static_target // static resolving calls (will not run any Java code);
static void resolve_method_statically(methodHandle& method_result, KlassHandle& klass_result, // used only from Bytecode_invoke::static_target
Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS); static methodHandle resolve_method_statically(Bytecodes::Code code,
constantPoolHandle pool,
int index, TRAPS);
// runtime/static resolving for fields static void resolve_field_access(fieldDescriptor& result,
static void resolve_field_access(fieldDescriptor& result, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS); constantPoolHandle pool,
static void resolve_field(fieldDescriptor& result, KlassHandle resolved_klass, Symbol* field_name, Symbol* field_signature, int index, Bytecodes::Code byte, TRAPS);
KlassHandle current_klass, Bytecodes::Code access_kind, bool check_access, bool initialize_class, TRAPS); static void resolve_field(fieldDescriptor& result, const LinkInfo& link_info,
Bytecodes::Code access_kind,
bool initialize_class, TRAPS);
// source of access_kind codes: static void resolve_static_call (CallInfo& result,
static Bytecodes::Code field_access_kind(bool is_static, bool is_put) { const LinkInfo& link_info,
return (is_static bool initialize_klass, TRAPS);
? (is_put ? Bytecodes::_putstatic : Bytecodes::_getstatic) static void resolve_special_call (CallInfo& result,
: (is_put ? Bytecodes::_putfield : Bytecodes::_getfield )); const LinkInfo& link_info,
} TRAPS);
static void resolve_virtual_call (CallInfo& result, Handle recv, KlassHandle recv_klass,
const LinkInfo& link_info,
bool check_null_and_abstract, TRAPS);
static void resolve_interface_call(CallInfo& result, Handle recv, KlassHandle recv_klass,
const LinkInfo& link_info,
bool check_null_and_abstract, TRAPS);
static void resolve_handle_call (CallInfo& result,
const LinkInfo& link_info, TRAPS);
static void resolve_dynamic_call (CallInfo& result, Handle bootstrap_specifier,
Symbol* method_name, Symbol* method_signature,
KlassHandle current_klass, TRAPS);
// runtime resolving: // same as above for compile-time resolution; but returns null handle instead of throwing
// resolved_klass = specified class (i.e., static receiver class) // an exception on error also, does not initialize klass (i.e., no side effects)
// current_klass = sending method holder (i.e., class containing the method containing the call being resolved) static methodHandle resolve_virtual_call_or_null (KlassHandle receiver_klass,
static void resolve_static_call (CallInfo& result, KlassHandle& resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, bool initialize_klass, TRAPS); const LinkInfo& link_info);
static void resolve_special_call (CallInfo& result, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS); static methodHandle resolve_interface_call_or_null(KlassHandle receiver_klass,
static void resolve_virtual_call (CallInfo& result, Handle recv, KlassHandle recv_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, bool check_null_and_abstract, TRAPS); const LinkInfo& link_info);
static void resolve_interface_call(CallInfo& result, Handle recv, KlassHandle recv_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, bool check_null_and_abstract, TRAPS); static methodHandle resolve_static_call_or_null (const LinkInfo& link_info);
static void resolve_handle_call (CallInfo& result, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, TRAPS); static methodHandle resolve_special_call_or_null (const LinkInfo& link_info);
static void resolve_dynamic_call (CallInfo& result, Handle bootstrap_specifier, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, TRAPS);
// same as above for compile-time resolution; but returns null handle instead of throwing an exception on error static int vtable_index_of_interface_method(KlassHandle klass, const methodHandle& resolved_method);
// also, does not initialize klass (i.e., no side effects)
static methodHandle resolve_virtual_call_or_null (KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access = true);
static methodHandle resolve_interface_call_or_null(KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access = true);
static methodHandle resolve_static_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access = true);
static methodHandle resolve_special_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access = true);
static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method);
// same as above for compile-time resolution; returns vtable_index if current_klass if linked // same as above for compile-time resolution; returns vtable_index if current_klass if linked
static int resolve_virtual_vtable_index (KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass); static int resolve_virtual_vtable_index (KlassHandle receiver_klass,
const LinkInfo& link_info);
// static resolving for compiler (does not throw exceptions, returns null handle if unsuccessful) // static resolving for compiler (does not throw exceptions, returns null handle if unsuccessful)
static methodHandle linktime_resolve_virtual_method_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access); static methodHandle linktime_resolve_virtual_method_or_null (const LinkInfo& link_info);
static methodHandle linktime_resolve_interface_method_or_null(KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access); static methodHandle linktime_resolve_interface_method_or_null(const LinkInfo& link_info);
// runtime resolving from constant pool // runtime resolving from constant pool
static void resolve_invokestatic (CallInfo& result, constantPoolHandle pool, int index, TRAPS); static void resolve_invoke(CallInfo& result, Handle recv,
static void resolve_invokespecial (CallInfo& result, constantPoolHandle pool, int index, TRAPS); constantPoolHandle pool, int index,
static void resolve_invokevirtual (CallInfo& result, Handle recv, constantPoolHandle pool, int index, TRAPS); Bytecodes::Code byte, TRAPS);
static void resolve_invokeinterface(CallInfo& result, Handle recv, constantPoolHandle pool, int index, TRAPS); private:
static void resolve_invokedynamic (CallInfo& result, constantPoolHandle pool, int index, TRAPS); static void trace_method_resolution(const char* prefix, KlassHandle klass,
static void resolve_invokehandle (CallInfo& result, constantPoolHandle pool, int index, TRAPS); KlassHandle resolved_klass,
const methodHandle& method) PRODUCT_RETURN;
static void resolve_invoke (CallInfo& result, Handle recv, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS);
}; };
#endif // SHARE_VM_INTERPRETER_LINKRESOLVER_HPP #endif // SHARE_VM_INTERPRETER_LINKRESOLVER_HPP
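For illustration only, a hypothetical VM-internal caller under the new header; the variable names are placeholders, and only the LinkInfo constructor and the resolver signatures come from the declarations above:

    // Bundle the former (resolved_klass, name, signature, current_klass,
    // check_access) parameter list into a single LinkInfo value ...
    LinkInfo link_info(resolved_klass, method_name, method_signature, current_klass);
    // ... and hand it to a compile-time resolver, which returns a null
    // methodHandle instead of throwing on failure.
    methodHandle m = LinkResolver::resolve_static_call_or_null(link_info);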

View File

@ -614,8 +614,7 @@ class SpaceManager : public CHeapObj<mtClass> {
Metachunk* _chunks_in_use[NumberOfInUseLists]; Metachunk* _chunks_in_use[NumberOfInUseLists];
Metachunk* _current_chunk; Metachunk* _current_chunk;
// Number of small chunks to allocate to a manager // Maximum number of small chunks to allocate to a SpaceManager
// If class space manager, small chunks are unlimited
static uint const _small_chunk_limit; static uint const _small_chunk_limit;
// Sum of all space in allocated chunks // Sum of all space in allocated chunks
@ -730,6 +729,8 @@ class SpaceManager : public CHeapObj<mtClass> {
// Block allocation and deallocation. // Block allocation and deallocation.
// Allocates a block from the current chunk // Allocates a block from the current chunk
MetaWord* allocate(size_t word_size); MetaWord* allocate(size_t word_size);
// Allocates a block from a small chunk
MetaWord* get_small_chunk_and_allocate(size_t word_size);
// Helper for allocations // Helper for allocations
MetaWord* allocate_work(size_t word_size); MetaWord* allocate_work(size_t word_size);
@ -2011,9 +2012,8 @@ void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
size_t SpaceManager::calc_chunk_size(size_t word_size) { size_t SpaceManager::calc_chunk_size(size_t word_size) {
// Decide between a small chunk and a medium chunk. Up to // Decide between a small chunk and a medium chunk. Up to
// _small_chunk_limit small chunks can be allocated but // _small_chunk_limit small chunks can be allocated.
// once a medium chunk has been allocated, no more small // After that a medium chunk is preferred.
// chunks will be allocated.
size_t chunk_word_size; size_t chunk_word_size;
if (chunks_in_use(MediumIndex) == NULL && if (chunks_in_use(MediumIndex) == NULL &&
sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) { sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
@ -2081,7 +2081,7 @@ MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
word_size, words_used, words_left); word_size, words_used, words_left);
} }
// Get another chunk out of the virtual space // Get another chunk
size_t grow_chunks_by_words = calc_chunk_size(word_size); size_t grow_chunks_by_words = calc_chunk_size(word_size);
Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words); Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
@ -2412,6 +2412,43 @@ Metachunk* SpaceManager::get_new_chunk(size_t word_size,
return next; return next;
} }
/*
* The policy is to allocate up to _small_chunk_limit small chunks
* after which only medium chunks are allocated. This is done to
* reduce fragmentation. In some cases, this can result in a lot
* of small chunks being allocated to the point where it's not
* possible to expand. If this happens, there may be no medium chunks
* available and OOME would be thrown. Instead of doing that,
* if the allocation request size fits in a small chunk, an attempt
* will be made to allocate a small chunk.
*/
MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
if (word_size + Metachunk::overhead() > small_chunk_size()) {
return NULL;
}
MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
MetaWord* mem = NULL;
if (chunk != NULL) {
// Add chunk to the in-use chunk list and do an allocation from it.
// Add to this manager's list of chunks in use.
add_chunk(chunk, false);
mem = chunk->allocate(word_size);
inc_used_metrics(word_size);
// Track metaspace memory usage statistic.
track_metaspace_memory_usage();
}
return mem;
}
MetaWord* SpaceManager::allocate(size_t word_size) { MetaWord* SpaceManager::allocate(size_t word_size) {
MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
@ -3560,7 +3597,18 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
} }
if (result == NULL) { if (result == NULL) {
report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL); SpaceManager* sm;
if (is_class_space_allocation(mdtype)) {
sm = loader_data->metaspace_non_null()->class_vsm();
} else {
sm = loader_data->metaspace_non_null()->vsm();
}
result = sm->get_small_chunk_and_allocate(word_size);
if (result == NULL) {
report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
}
} }
// Zero initialize. // Zero initialize.
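Reconstructed from the right-hand side of the last hunk, the new out-of-memory fallback in Metaspace::allocate is:

    if (result == NULL) {
      SpaceManager* sm;
      if (is_class_space_allocation(mdtype)) {
        sm = loader_data->metaspace_non_null()->class_vsm();
      } else {
        sm = loader_data->metaspace_non_null()->vsm();
      }
      // Last attempt: satisfy the request from a small chunk before
      // reporting a metadata OOME.
      result = sm->get_small_chunk_and_allocate(word_size);
      if (result == NULL) {
        report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
      }
    }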

View File

@ -622,14 +622,6 @@ bool InstanceKlass::link_class_impl(
if (!this_k->is_linked()) { if (!this_k->is_linked()) {
if (!this_k->is_rewritten()) { if (!this_k->is_rewritten()) {
{ {
// Timer includes any side effects of class verification (resolution,
// etc), but not recursive entry into verify_code().
PerfClassTraceTime timer(ClassLoader::perf_class_verify_time(),
ClassLoader::perf_class_verify_selftime(),
ClassLoader::perf_classes_verified(),
jt->get_thread_stat()->perf_recursion_counts_addr(),
jt->get_thread_stat()->perf_timers_addr(),
PerfClassTraceTime::CLASS_VERIFY);
bool verify_ok = verify_code(this_k, throw_verifyerror, THREAD); bool verify_ok = verify_code(this_k, throw_verifyerror, THREAD);
if (!verify_ok) { if (!verify_ok) {
return false; return false;
@ -1830,11 +1822,10 @@ int nmethodBucket::decrement() {
// are dependent on the changes that were passed in and mark them for // are dependent on the changes that were passed in and mark them for
// deoptimization. Returns the number of nmethods found. // deoptimization. Returns the number of nmethods found.
// //
int InstanceKlass::mark_dependent_nmethods(DepChange& changes) { int nmethodBucket::mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes) {
assert_locked_or_safepoint(CodeCache_lock); assert_locked_or_safepoint(CodeCache_lock);
int found = 0; int found = 0;
nmethodBucket* b = _dependencies; for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
while (b != NULL) {
nmethod* nm = b->get_nmethod(); nmethod* nm = b->get_nmethod();
// since dependencies aren't removed until an nmethod becomes a zombie, // since dependencies aren't removed until an nmethod becomes a zombie,
// the dependency list may contain nmethods which aren't alive. // the dependency list may contain nmethods which aren't alive.
@ -1842,7 +1833,6 @@ int InstanceKlass::mark_dependent_nmethods(DepChange& changes) {
if (TraceDependencies) { if (TraceDependencies) {
ResourceMark rm; ResourceMark rm;
tty->print_cr("Marked for deoptimization"); tty->print_cr("Marked for deoptimization");
tty->print_cr(" context = %s", this->external_name());
changes.print(); changes.print();
nm->print(); nm->print();
nm->print_dependencies(); nm->print_dependencies();
@ -1850,36 +1840,119 @@ int InstanceKlass::mark_dependent_nmethods(DepChange& changes) {
nm->mark_for_deoptimization(); nm->mark_for_deoptimization();
found++; found++;
} }
b = b->next();
} }
return found; return found;
} }
//
// Add an nmethodBucket to the list of dependencies for this nmethod.
// It's possible that an nmethod has multiple dependencies on this klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent. Returns new head of the list.
//
nmethodBucket* nmethodBucket::add_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
if (nm == b->get_nmethod()) {
b->increment();
return deps;
}
}
return new nmethodBucket(nm, deps);
}
//
// Decrement count of the nmethod in the dependency list and remove
// the bucket completely when the count goes to 0. This method must
// find a corresponding bucket otherwise there's a bug in the
// recording of dependencies. Returns true if the bucket is ready for reclamation.
//
bool nmethodBucket::remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
if (nm == b->get_nmethod()) {
int val = b->decrement();
guarantee(val >= 0, err_msg("Underflow: %d", val));
return (val == 0);
}
}
#ifdef ASSERT
tty->print_raw_cr("### can't find dependent nmethod");
nm->print();
#endif // ASSERT
ShouldNotReachHere();
return false;
}
//
// Reclaim all unused buckets. Returns new head of the list.
//
nmethodBucket* nmethodBucket::clean_dependent_nmethods(nmethodBucket* deps) {
nmethodBucket* first = deps;
nmethodBucket* last = NULL;
nmethodBucket* b = first;
while (b != NULL) {
assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
nmethodBucket* next = b->next();
if (b->count() == 0) {
if (last == NULL) {
first = next;
} else {
last->set_next(next);
}
delete b;
// last stays the same.
} else {
last = b;
}
b = next;
}
return first;
}
#ifndef PRODUCT
void nmethodBucket::print_dependent_nmethods(nmethodBucket* deps, bool verbose) {
int idx = 0;
for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
nmethod* nm = b->get_nmethod();
tty->print("[%d] count=%d { ", idx++, b->count());
if (!verbose) {
nm->print_on(tty, "nmethod");
tty->print_cr(" } ");
} else {
nm->print();
nm->print_dependencies();
tty->print_cr("--- } ");
}
}
}
bool nmethodBucket::is_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
if (nm == b->get_nmethod()) {
#ifdef ASSERT
int count = b->count();
assert(count >= 0, err_msg("count shouldn't be negative: %d", count));
#endif
return true;
}
}
return false;
}
#endif //PRODUCT
int InstanceKlass::mark_dependent_nmethods(DepChange& changes) {
assert_locked_or_safepoint(CodeCache_lock);
return nmethodBucket::mark_dependent_nmethods(_dependencies, changes);
}
 void InstanceKlass::clean_dependent_nmethods() {
   assert_locked_or_safepoint(CodeCache_lock);

   if (has_unloaded_dependent()) {
-    nmethodBucket* b = _dependencies;
-    nmethodBucket* last = NULL;
-    while (b != NULL) {
-      assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
-      nmethodBucket* next = b->next();
-      if (b->count() == 0) {
-        if (last == NULL) {
-          _dependencies = next;
-        } else {
-          last->set_next(next);
-        }
-        delete b;
-        // last stays the same.
-      } else {
-        last = b;
-      }
-      b = next;
-    }
+    _dependencies = nmethodBucket::clean_dependent_nmethods(_dependencies);
     set_has_unloaded_dependent(false);
   }
 #ifdef ASSERT
@@ -1893,90 +1966,26 @@ void InstanceKlass::clean_dependent_nmethods() {
 #endif
 }

-//
-// Add an nmethodBucket to the list of dependencies for this nmethod.
-// It's possible that an nmethod has multiple dependencies on this klass
-// so a count is kept for each bucket to guarantee that creation and
-// deletion of dependencies is consistent.
-//
 void InstanceKlass::add_dependent_nmethod(nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
-  nmethodBucket* b = _dependencies;
-  nmethodBucket* last = NULL;
-  while (b != NULL) {
-    if (nm == b->get_nmethod()) {
-      b->increment();
-      return;
-    }
-    b = b->next();
-  }
-  _dependencies = new nmethodBucket(nm, _dependencies);
+  _dependencies = nmethodBucket::add_dependent_nmethod(_dependencies, nm);
 }

-//
-// Decrement count of the nmethod in the dependency list and remove
-// the bucket competely when the count goes to 0.  This method must
-// find a corresponding bucket otherwise there's a bug in the
-// recording of dependecies.
-//
 void InstanceKlass::remove_dependent_nmethod(nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
-  nmethodBucket* b = _dependencies;
-  nmethodBucket* last = NULL;
-  while (b != NULL) {
-    if (nm == b->get_nmethod()) {
-      int val = b->decrement();
-      guarantee(val >= 0, err_msg("Underflow: %d", val));
-      if (val == 0) {
-        set_has_unloaded_dependent(true);
-      }
-      return;
-    }
-    last = b;
-    b = b->next();
-  }
-#ifdef ASSERT
-  tty->print_cr("### %s can't find dependent nmethod:", this->external_name());
-  nm->print();
-#endif // ASSERT
-  ShouldNotReachHere();
+  if (nmethodBucket::remove_dependent_nmethod(_dependencies, nm)) {
+    set_has_unloaded_dependent(true);
+  }
 }

 #ifndef PRODUCT
 void InstanceKlass::print_dependent_nmethods(bool verbose) {
-  nmethodBucket* b = _dependencies;
-  int idx = 0;
-  while (b != NULL) {
-    nmethod* nm = b->get_nmethod();
-    tty->print("[%d] count=%d { ", idx++, b->count());
-    if (!verbose) {
-      nm->print_on(tty, "nmethod");
-      tty->print_cr(" } ");
-    } else {
-      nm->print();
-      nm->print_dependencies();
-      tty->print_cr("--- } ");
-    }
-    b = b->next();
-  }
+  nmethodBucket::print_dependent_nmethods(_dependencies, verbose);
 }

 bool InstanceKlass::is_dependent_nmethod(nmethod* nm) {
-  nmethodBucket* b = _dependencies;
-  while (b != NULL) {
-    if (nm == b->get_nmethod()) {
-#ifdef ASSERT
-      int count = b->count();
-      assert(count >= 0, err_msg("count shouldn't be negative: %d", count));
-#endif
-      return true;
-    }
-    b = b->next();
-  }
-  return false;
+  return nmethodBucket::is_dependent_nmethod(_dependencies, nm);
 }
 #endif //PRODUCT

View File

@@ -1290,6 +1290,15 @@ class nmethodBucket: public CHeapObj<mtClass> {
   nmethodBucket* next()                   { return _next; }
   void  set_next(nmethodBucket* b)        { _next = b; }
   nmethod* get_nmethod()                  { return _nmethod; }
+
+  static int mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes);
+  static nmethodBucket* add_dependent_nmethod(nmethodBucket* deps, nmethod* nm);
+  static bool remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm);
+  static nmethodBucket* clean_dependent_nmethods(nmethodBucket* deps);
+#ifndef PRODUCT
+  static void print_dependent_nmethods(nmethodBucket* deps, bool verbose);
+  static bool is_dependent_nmethod(nmethodBucket* deps, nmethod* nm);
+#endif //PRODUCT
 };

 // An iterator that's used to access the inner classes indices in the

View File

@@ -1136,7 +1136,7 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Klass
     if (m->has_itable_index()) {
       // This search must match the runtime resolution, i.e. selection search for invokeinterface
       // to correctly enforce loader constraints for interface method inheritance
-      LinkResolver::lookup_instance_method_in_klasses(target, _klass, m->name(), m->signature(), CHECK);
+      target = LinkResolver::lookup_instance_method_in_klasses(_klass, m->name(), m->signature(), CHECK);
     }
     if (target == NULL || !target->is_public() || target->is_abstract()) {
       // Entry does not resolve.  Leave it empty for AbstractMethodError.

View File

@@ -599,10 +599,14 @@ Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 }

 bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
-  const TypeOopPtr* dest_t = phase->type(in(ArrayCopyNode::Dest))->is_oopptr();
+  Node* dest = in(ArrayCopyNode::Dest);
+  if (dest->is_top()) {
+    return false;
+  }
+  const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
   assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
-  const TypeOopPtr* src_t = phase->type(in(ArrayCopyNode::Src))->is_oopptr();
-  assert(!src_t->is_known_instance() || _src_type->is_known_instance(), "result of EA not recorded");
+  assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
+         _src_type->is_known_instance(), "result of EA not recorded");

   if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
     assert(_dest_type == TypeOopPtr::BOTTOM || _dest_type->is_known_instance(), "result of EA is known instance");

View File

@@ -1946,7 +1946,7 @@ bool CallLeafNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
       }
     }
   }
-  if (may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
+  if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
     return true;
   }
   return false;

View File

@@ -1457,18 +1457,18 @@ void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
 // factory methods in "int adr_idx"
 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                           int adr_idx,
-                          MemNode::MemOrd mo, bool require_atomic_access) {
+                          MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency, bool require_atomic_access) {
   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
   const TypePtr* adr_type = NULL; // debug-mode-only argument
   debug_only(adr_type = C->get_adr_type(adr_idx));
   Node* mem = memory(adr_idx);
   Node* ld;
   if (require_atomic_access && bt == T_LONG) {
-    ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo);
+    ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency);
   } else if (require_atomic_access && bt == T_DOUBLE) {
-    ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo);
+    ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency);
   } else {
-    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo);
+    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency);
   }
   ld = _gvn.transform(ld);
   if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) {

View File

@@ -512,21 +512,24 @@ class GraphKit : public Phase {
   // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
   // of volatile fields.
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
-                  MemNode::MemOrd mo, bool require_atomic_access = false) {
+                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
+                  bool require_atomic_access = false) {
     // This version computes alias_index from bottom_type
     return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
-                     mo, require_atomic_access);
+                     mo, control_dependency, require_atomic_access);
   }
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
-                  MemNode::MemOrd mo, bool require_atomic_access = false) {
+                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
+                  bool require_atomic_access = false) {
     // This version computes alias_index from an address type
     assert(adr_type != NULL, "use other make_load factory");
     return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
-                     mo, require_atomic_access);
+                     mo, control_dependency, require_atomic_access);
   }
   // This is the base version which is given an alias index.
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
-                  MemNode::MemOrd mo, bool require_atomic_access = false);
+                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
+                  bool require_atomic_access = false);

   // Create & transform a StoreNode and store the effect into the
   // parser's memory state.

View File

@@ -2631,7 +2631,9 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
   if (!is_store) {
     MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
-    Node* p = make_load(control(), adr, value_type, type, adr_type, mo, is_volatile);
+    // To be valid, unsafe loads may depend on other conditions than
+    // the one that guards them: pin the Load node
+    Node* p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile);
     // load value
     switch (type) {
     case T_BOOLEAN:
@@ -5488,7 +5490,7 @@ Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * field
   }
   // Build the load.
   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
-  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, is_vol);
+  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);

   // If reference is volatile, prevent following memory ops from
   // floating up past the volatile read.  Also prevents commoning
   // another volatile read.
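
The comment added in the first hunk is the heart of this change: an unsafe load may be guarded by a condition the compiler does not model as the load's controlling test, so the load must be pinned rather than allowed to float. A contrived standalone C++ sketch of the hazard at the source level (ordinary code, nothing HotSpot-specific; lookup is an invented helper):

#include <cstdio>

// Returns a pointer that is only valid when 'present' is true.
static int* lookup(bool present) {
  static int slot = 42;
  return present ? &slot : nullptr;
}

int main() {
  bool present = false;
  int* p = lookup(present);
  int v = 0;
  if (present) {   // the test that actually guards the access
    v = *p;        // this load is only safe under the guard; an optimizer that
                   // assumed it depends only on some other dominating test and
                   // hoisted it above this one would dereference a null pointer
  }
  printf("%d\n", v);
  return 0;
}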

View File

@@ -437,7 +437,13 @@ class Invariance : public StackObj {
         }
       }
       if (all_inputs_invariant) {
-        _invariant.set(n->_idx); // I am a invariant too
+        // If n's control is a predicate that was moved out of the
+        // loop, it was marked invariant but n is only invariant if
+        // it depends only on that test. Otherwise, unless that test
+        // is out of the loop, it's not invariant.
+        if (n->is_CFG() || n->depends_only_on_test() || n->in(0) == NULL || !_phase->is_member(_lpt, n->in(0))) {
+          _invariant.set(n->_idx); // I am a invariant too
+        }
       }
     } else { // process next input
       _stack.set_index(idx + 1);

View File

@@ -1582,13 +1582,36 @@ void PhaseIdealLoop::mark_reductions(IdealLoopTree *loop) {
       if (opc != ReductionNode::opcode(opc, def_node->bottom_type()->basic_type())) {
         if (!def_node->is_reduction()) { // Not marked yet
           // To be a reduction, the arithmetic node must have the phi as input and provide a def to it
+          bool ok = false;
           for (unsigned j = 1; j < def_node->req(); j++) {
             Node* in = def_node->in(j);
             if (in == phi) {
-              def_node->add_flag(Node::Flag_is_reduction);
+              ok = true;
               break;
             }
           }
+
+          // do nothing if we did not match the initial criteria
+          if (ok == false) {
+            continue;
+          }
+
+          // The result of the reduction must not be used in the loop
+          for (DUIterator_Fast imax, i = def_node->fast_outs(imax); i < imax && ok; i++) {
+            Node* u = def_node->fast_out(i);
+            if (has_ctrl(u) && !loop->is_member(get_loop(get_ctrl(u)))) {
+              continue;
+            }
+            if (u == phi) {
+              continue;
+            }
+            ok = false;
+          }
+
+          // iff the uses conform
+          if (ok) {
+            def_node->add_flag(Node::Flag_is_reduction);
+          }
         }
       }
     }
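
The added checks require that the arithmetic node both takes the loop phi as an input and has no other in-loop uses of its partial result. A small standalone illustration of loops that do and do not satisfy that rule (plain C++, invented helper names, not compiler code):

#include <cstdio>

// A loop the logic above would accept as a reduction: the running sum feeds
// only back into itself (the "phi") and is consumed after the loop.
static int sum_reduction(const int* a, int n) {
  int sum = 0;                 // the phi
  for (int i = 0; i < n; i++) {
    sum = sum + a[i];          // def_node: has the phi as input
  }
  return sum;                  // only use of the result is outside the loop
}

// Not a reduction under the new check: the partial sum is also used inside
// the loop body, so flagging it as a reduction would be unsafe.
static int prefix_sum_last(const int* a, int* out, int n) {
  int sum = 0;
  for (int i = 0; i < n; i++) {
    sum = sum + a[i];
    out[i] = sum;              // extra in-loop use of the partial result
  }
  return sum;
}

int main() {
  int a[4] = {1, 2, 3, 4};
  int out[4];
  printf("%d %d\n", sum_reduction(a, 4), prefix_sum_last(a, out, 4));
  return 0;
}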

View File

@@ -718,7 +718,7 @@ Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
   }

   // Use same limit as split_if_with_blocks_post
-  if( C->unique() > 35000 ) return n; // Method too big
+  if( C->live_nodes() > 35000 ) return n; // Method too big

   // Split 'n' through the merge point if it is profitable
   Node *phi = split_thru_phi( n, n_blk, policy );
@@ -802,7 +802,7 @@ void PhaseIdealLoop::split_if_with_blocks_post( Node *n ) {
   // Cloning Cmp through Phi's involves the split-if transform.
   // FastLock is not used by an If
   if( n->is_Cmp() && !n->is_FastLock() ) {
-    if( C->unique() > 35000 ) return; // Method too big
+    if( C->live_nodes() > 35000 ) return; // Method too big

     // Do not do 'split-if' if irreducible loops are present.
     if( _has_irreducible_loops )

View File

@@ -844,7 +844,7 @@ void Matcher::init_spill_mask( Node *ret ) {
   MachNode *spillCP = match_tree(new LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
 #endif
   MachNode *spillI  = match_tree(new LoadINode(NULL,mem,fp,atp,TypeInt::INT,MemNode::unordered));
-  MachNode *spillL  = match_tree(new LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered,false));
+  MachNode *spillL  = match_tree(new LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered, LoadNode::DependsOnlyOnTest, false));
   MachNode *spillF  = match_tree(new LoadFNode(NULL,mem,fp,atp,Type::FLOAT,MemNode::unordered));
   MachNode *spillD  = match_tree(new LoadDNode(NULL,mem,fp,atp,Type::DOUBLE,MemNode::unordered));
   MachNode *spillP  = match_tree(new LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));

View File

@@ -784,6 +784,9 @@ void LoadNode::dump_spec(outputStream *st) const {
     // standard dump does this in Verbose and WizardMode
     st->print(" #"); _type->dump_on(st);
   }
+  if (!_depends_only_on_test) {
+    st->print(" (does not depend only on test)");
+  }
 }
 #endif
@@ -800,7 +803,7 @@ bool LoadNode::is_immutable_value(Node* adr) {

 //----------------------------LoadNode::make-----------------------------------
 // Polymorphic factory method:
-Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo) {
+Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo, ControlDependency control_dependency) {
   Compile* C = gvn.C;

   // sanity check the alias category against the created node type
@@ -816,39 +819,39 @@ Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypeP
           rt->isa_oopptr() || is_immutable_value(adr),
           "raw memory operations should have control edge");
   switch (bt) {
-  case T_BOOLEAN: return new LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo);
-  case T_BYTE:    return new LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo);
-  case T_INT:     return new LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo);
-  case T_CHAR:    return new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo);
-  case T_SHORT:   return new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo);
-  case T_LONG:    return new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo);
-  case T_FLOAT:   return new LoadFNode (ctl, mem, adr, adr_type, rt,            mo);
-  case T_DOUBLE:  return new LoadDNode (ctl, mem, adr, adr_type, rt,            mo);
-  case T_ADDRESS: return new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo);
+  case T_BOOLEAN: return new LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency);
+  case T_BYTE:    return new LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency);
+  case T_INT:     return new LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency);
+  case T_CHAR:    return new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency);
+  case T_SHORT:   return new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency);
+  case T_LONG:    return new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency);
+  case T_FLOAT:   return new LoadFNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency);
+  case T_DOUBLE:  return new LoadDNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency);
+  case T_ADDRESS: return new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo, control_dependency);
   case T_OBJECT:
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
-      Node* load  = gvn.transform(new LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo));
+      Node* load  = gvn.transform(new LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency));
       return new DecodeNNode(load, load->bottom_type()->make_ptr());
     } else
 #endif
     {
       assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
-      return new LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo);
+      return new LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo, control_dependency);
     }
   }
   ShouldNotReachHere();
   return (LoadNode*)NULL;
 }

-LoadLNode* LoadLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
+LoadLNode* LoadLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo, ControlDependency control_dependency) {
   bool require_atomic = true;
-  return new LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, require_atomic);
+  return new LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic);
 }

-LoadDNode* LoadDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
+LoadDNode* LoadDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo, ControlDependency control_dependency) {
   bool require_atomic = true;
-  return new LoadDNode(ctl, mem, adr, adr_type, rt, mo, require_atomic);
+  return new LoadDNode(ctl, mem, adr, adr_type, rt, mo, control_dependency, require_atomic);
 }

View File

@@ -137,7 +137,33 @@ public:
 //------------------------------LoadNode---------------------------------------
 // Load value; requires Memory and Address
 class LoadNode : public MemNode {
+public:
+  // Some loads (from unsafe) should be pinned: they don't depend only
+  // on the dominating test. The boolean field _depends_only_on_test
+  // below records whether that node depends only on the dominating
+  // test.
+  // Methods used to build LoadNodes pass an argument of type enum
+  // ControlDependency instead of a boolean because those methods
+  // typically have multiple boolean parameters with default values:
+  // passing the wrong boolean to one of these parameters by mistake
+  // goes easily unnoticed. Using an enum, the compiler can check that
+  // the type of a value and the type of the parameter match.
+  enum ControlDependency {
+    Pinned,
+    DependsOnlyOnTest
+  };
 private:
+  // LoadNode::hash() doesn't take the _depends_only_on_test field
+  // into account: If the graph already has a non-pinned LoadNode and
+  // we add a pinned LoadNode with the same inputs, it's safe for GVN
+  // to replace the pinned LoadNode with the non-pinned LoadNode,
+  // otherwise it wouldn't be safe to have a non pinned LoadNode with
+  // those inputs in the first place. If the graph already has a
+  // pinned LoadNode and we add a non pinned LoadNode with the same
+  // inputs, it's safe (but suboptimal) for GVN to replace the
+  // non-pinned LoadNode by the pinned LoadNode.
+  bool _depends_only_on_test;
+
   // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
   // loads that can be reordered, and such requiring acquire semantics to
   // adhere to the Java specification.  The required behaviour is stored in
@@ -154,8 +180,8 @@ protected:
   virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
 public:

-  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo)
-    : MemNode(c,mem,adr,at), _type(rt), _mo(mo) {
+  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
+    : MemNode(c,mem,adr,at), _type(rt), _mo(mo), _depends_only_on_test(control_dependency == DependsOnlyOnTest) {
     init_class_id(Class_Load);
   }
   inline bool is_unordered() const { return !is_acquire(); }
@@ -166,7 +192,8 @@ public:
   // Polymorphic factory method:
   static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
-                    const TypePtr* at, const Type *rt, BasicType bt, MemOrd mo);
+                    const TypePtr* at, const Type *rt, BasicType bt,
+                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);

   virtual uint hash()   const;  // Check the type
@@ -234,16 +261,15 @@ protected:
   // which produce results (new raw memory state) inside of loops preventing all
   // manner of other optimizations).  Basically, it's ugly but so is the alternative.
   // See comment in macro.cpp, around line 125 expand_allocate_common().
-  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
+  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
 };

 //------------------------------LoadBNode--------------------------------------
 // Load a byte (8bits signed) from memory
 class LoadBNode : public LoadNode {
 public:
-  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
-    : LoadNode(c, mem, adr, at, ti, mo) {}
+  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@@ -256,8 +282,8 @@ public:
 // Load a unsigned byte (8bits unsigned) from memory
 class LoadUBNode : public LoadNode {
 public:
-  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo)
-    : LoadNode(c, mem, adr, at, ti, mo) {}
+  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
@@ -270,8 +296,8 @@ public:
 // Load an unsigned short/char (16bits unsigned) from memory
 class LoadUSNode : public LoadNode {
 public:
-  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
-    : LoadNode(c, mem, adr, at, ti, mo) {}
+  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@@ -284,8 +310,8 @@ public:
 // Load a short (16bits signed) from memory
 class LoadSNode : public LoadNode {
 public:
-  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
-    : LoadNode(c, mem, adr, at, ti, mo) {}
+  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@@ -298,8 +324,8 @@ public:
 // Load an integer from memory
 class LoadINode : public LoadNode {
 public:
-  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
-    : LoadNode(c, mem, adr, at, ti, mo) {}
+  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual int store_Opcode() const { return Op_StoreI; }
@@ -331,15 +357,15 @@ class LoadLNode : public LoadNode {
 public:
   LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
-            MemOrd mo, bool require_atomic_access = false)
-    : LoadNode(c, mem, adr, at, tl, mo), _require_atomic_access(require_atomic_access) {}
+            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
+    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegL; }
   virtual int store_Opcode() const { return Op_StoreL; }
   virtual BasicType memory_type() const { return T_LONG; }
   bool require_atomic_access() const { return _require_atomic_access; }
   static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
-                                const Type* rt, MemOrd mo);
+                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     LoadNode::dump_spec(st);
@@ -352,8 +378,8 @@ public:
 // Load a long from unaligned memory
 class LoadL_unalignedNode : public LoadLNode {
 public:
-  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo)
-    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo) {}
+  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
   virtual int Opcode() const;
 };
@@ -361,8 +387,8 @@ public:
 // Load a float (64 bits) from memory
 class LoadFNode : public LoadNode {
 public:
-  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo)
-    : LoadNode(c, mem, adr, at, t, mo) {}
+  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegF; }
   virtual int store_Opcode() const { return Op_StoreF; }
@@ -382,15 +408,15 @@ class LoadDNode : public LoadNode {
 public:
   LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
-            MemOrd mo, bool require_atomic_access = false)
-    : LoadNode(c, mem, adr, at, t, mo), _require_atomic_access(require_atomic_access) {}
+            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
+    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegD; }
   virtual int store_Opcode() const { return Op_StoreD; }
   virtual BasicType memory_type() const { return T_DOUBLE; }
   bool require_atomic_access() const { return _require_atomic_access; }
   static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
-                                const Type* rt, MemOrd mo);
+                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     LoadNode::dump_spec(st);
@@ -403,8 +429,8 @@ public:
 // Load a double from unaligned memory
 class LoadD_unalignedNode : public LoadDNode {
 public:
-  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo)
-    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo) {}
+  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
   virtual int Opcode() const;
 };
@@ -412,8 +438,8 @@ public:
 // Load a pointer from memory (either object or array)
 class LoadPNode : public LoadNode {
 public:
-  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo)
-    : LoadNode(c, mem, adr, at, t, mo) {}
+  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegP; }
   virtual int store_Opcode() const { return Op_StoreP; }
@@ -425,8 +451,8 @@ public:
 // Load a narrow oop from memory (either object or array)
 class LoadNNode : public LoadNode {
 public:
-  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo)
-    : LoadNode(c, mem, adr, at, t, mo) {}
+  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegN; }
   virtual int store_Opcode() const { return Op_StoreN; }
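
The block comment introducing ControlDependency argues for an enum over a boolean precisely because these factories already take several defaulted booleans. A minimal standalone sketch of the failure mode it guards against (invented function names, not the real LoadNode factories):

// With two defaulted booleans, a caller can silently bind a value to the wrong one.
enum ControlDependency { Pinned, DependsOnlyOnTest };

static void make_load_bools(bool pinned = false, bool require_atomic = false) {
  (void)pinned; (void)require_atomic;
}

// With an enum, the pinning argument cannot be confused with the unrelated bool.
static void make_load_enum(ControlDependency dep = DependsOnlyOnTest,
                           bool require_atomic = false) {
  (void)dep; (void)require_atomic;
}

int main() {
  make_load_bools(true);        // meant "require_atomic", actually set "pinned"
  make_load_enum(Pinned, true); // intent is explicit and type-checked
  // make_load_enum(true, true); // would not compile: bool is not a ControlDependency
  return 0;
}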

View File

@@ -235,7 +235,7 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
   //
   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
   bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
-  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, needs_atomic_access);
+  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);

   // Adjust Java stack
   if (type2size[bt] == 1)

View File

@@ -1573,11 +1573,12 @@ void PhaseCCP::analyze() {
         set_type(n, t);
         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
           Node* m = n->fast_out(i); // Get user
-          if( m->is_Region() ) {  // New path to Region?  Must recheck Phis too
+          if (m->is_Region()) {  // New path to Region?  Must recheck Phis too
             for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
               Node* p = m->fast_out(i2); // Propagate changes to uses
-              if( p->bottom_type() != type(p) ) // If not already bottomed out
+              if (p->bottom_type() != type(p)) { // If not already bottomed out
                 worklist.push(p); // Propagate change to user
+              }
             }
           }
           // If we changed the receiver type to a call, we need to revisit
@@ -1587,12 +1588,31 @@ void PhaseCCP::analyze() {
           if (m->is_Call()) {
             for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
               Node* p = m->fast_out(i2); // Propagate changes to uses
-              if (p->is_Proj() && p->as_Proj()->_con == TypeFunc::Control && p->outcnt() == 1)
+              if (p->is_Proj() && p->as_Proj()->_con == TypeFunc::Control && p->outcnt() == 1) {
                 worklist.push(p->unique_out());
+              }
             }
           }
-          if( m->bottom_type() != type(m) ) // If not already bottomed out
+          if (m->bottom_type() != type(m)) { // If not already bottomed out
             worklist.push(m); // Propagate change to user
+          }
+
+          // CmpU nodes can get their type information from two nodes up in the
+          // graph (instead of from the nodes immediately above). Make sure they
+          // are added to the worklist if nodes they depend on are updated, since
+          // they could be missed and get wrong types otherwise.
+          uint m_op = m->Opcode();
+          if (m_op == Op_AddI || m_op == Op_SubI) {
+            for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
+              Node* p = m->fast_out(i2); // Propagate changes to uses
+              if (p->Opcode() == Op_CmpU) {
+                // Got a CmpU which might need the new type information from node n.
+                if(p->bottom_type() != type(p)) { // If not already bottomed out
+                  worklist.push(p); // Propagate change to user
+                }
+              }
+            }
+          }
         }
       }
     }
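
The new CmpU handling exists because an unsigned compare's value can be implied by the ranges of an AddI/SubI input's own inputs, two levels up the graph. A rough standalone illustration of the source-level shape involved (hypothetical example, not HotSpot code):

#include <cstdio>
#include <cstdint>

// Once 0 <= i < limit is known, (i + 1) <= limit always holds, so the check
// could be folded, but only if the analysis revisits the compare after it
// narrows the range of 'i', which feeds the compare through the addition.
static bool in_bounds(uint32_t i, uint32_t limit) {
  return (i + 1) <= limit;   // roughly CmpU(AddI(i, 1), limit) in ideal-graph terms
}

int main() {
  uint32_t limit = 10;
  for (uint32_t i = 0; i < limit; i++) {   // narrows the range of i
    if (!in_bounds(i, limit)) {
      printf("unexpected\n");
      return 1;
    }
  }
  printf("every check is redundant given the loop bound\n");
  return 0;
}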

View File

@@ -1631,7 +1631,7 @@ void SuperWord::output() {
         }
         Node* adr = low_adr->in(MemNode::Address);
         const TypePtr* atyp = n->adr_type();
-        vn = LoadVectorNode::make(opc, ctl, mem, adr, atyp, vlen, velt_basic_type(n));
+        vn = LoadVectorNode::make(opc, ctl, mem, adr, atyp, vlen, velt_basic_type(n), control_dependency(p));
         vlen_in_bytes = vn->as_LoadVector()->memory_size();
       } else if (n->is_Store()) {
         // Promote value to be stored to vector
@@ -2280,6 +2280,19 @@ Node* SuperWord::executed_last(Node_List* p) {
   return n;
 }

+LoadNode::ControlDependency SuperWord::control_dependency(Node_List* p) {
+  LoadNode::ControlDependency dep = LoadNode::DependsOnlyOnTest;
+  for (uint i = 0; i < p->size(); i++) {
+    Node* n = p->at(i);
+    assert(n->is_Load(), "only meaningful for loads");
+    if (!n->depends_only_on_test()) {
+      dep = LoadNode::Pinned;
+    }
+  }
+  return dep;
+}
+
 //----------------------------align_initial_loop_index---------------------------
 // Adjust pre-loop limit so that in main loop, a load/store reference
 // to align_to_ref will be a position zero in the vector.

View File

@@ -428,6 +428,7 @@ class SuperWord : public ResourceObj {
   Node* executed_first(Node_List* p);
   // Return the node executed last in pack p.
   Node* executed_last(Node_List* p);
+  static LoadNode::ControlDependency control_dependency(Node_List* p);
   // Alignment within a vector memory reference
   int memory_alignment(MemNode* s, int iv_adjust);
   // (Start, end] half-open range defining which operands are vector

View File

@@ -406,9 +406,11 @@ PackNode* PackNode::binary_tree_pack(int lo, int hi) {

 // Return the vector version of a scalar load node.
 LoadVectorNode* LoadVectorNode::make(int opc, Node* ctl, Node* mem,
-                                     Node* adr, const TypePtr* atyp, uint vlen, BasicType bt) {
+                                     Node* adr, const TypePtr* atyp,
+                                     uint vlen, BasicType bt,
+                                     ControlDependency control_dependency) {
   const TypeVect* vt = TypeVect::make(bt, vlen);
-  return new LoadVectorNode(ctl, mem, adr, atyp, vt);
+  return new LoadVectorNode(ctl, mem, adr, atyp, vt, control_dependency);
 }

 // Return the vector version of a scalar store node.
View File

@@ -454,8 +454,8 @@ class XorVNode : public VectorNode {
 // Load Vector from memory
 class LoadVectorNode : public LoadNode {
  public:
-  LoadVectorNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeVect* vt)
-    : LoadNode(c, mem, adr, at, vt, MemNode::unordered) {
+  LoadVectorNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeVect* vt, ControlDependency control_dependency = LoadNode::DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, vt, MemNode::unordered, control_dependency) {
     init_class_id(Class_LoadVector);
   }
@@ -471,7 +471,9 @@ class LoadVectorNode : public LoadNode {
   virtual int store_Opcode() const { return Op_StoreVector; }

   static LoadVectorNode* make(int opc, Node* ctl, Node* mem,
-                              Node* adr, const TypePtr* atyp, uint vlen, BasicType bt);
+                              Node* adr, const TypePtr* atyp,
+                              uint vlen, BasicType bt,
+                              ControlDependency control_dependency = LoadNode::DependsOnlyOnTest);
 };

 //------------------------------StoreVectorNode--------------------------------

View File

@@ -1126,39 +1126,32 @@ static void jni_invoke_nonstatic(JNIEnv *env, JavaValue* result, jobject receive
     Method* m = Method::resolve_jmethod_id(method_id);
     number_of_parameters = m->size_of_parameters();
     Klass* holder = m->method_holder();
-    if (!(holder)->is_interface()) {
+    if (call_type != JNI_VIRTUAL) {
+      selected_method = m;
+    } else if (!m->has_itable_index()) {
       // non-interface call -- for that little speed boost, don't handlize
       debug_only(No_Safepoint_Verifier nosafepoint;)
-      if (call_type == JNI_VIRTUAL) {
-        // jni_GetMethodID makes sure class is linked and initialized
-        // so m should have a valid vtable index.
-        assert(!m->has_itable_index(), "");
-        int vtbl_index = m->vtable_index();
-        if (vtbl_index != Method::nonvirtual_vtable_index) {
-          Klass* k = h_recv->klass();
-          // k might be an arrayKlassOop but all vtables start at
-          // the same place. The cast is to avoid virtual call and assertion.
-          InstanceKlass *ik = (InstanceKlass*)k;
-          selected_method = ik->method_at_vtable(vtbl_index);
-        } else {
-          // final method
-          selected_method = m;
-        }
+      // jni_GetMethodID makes sure class is linked and initialized
+      // so m should have a valid vtable index.
+      assert(m->valid_vtable_index(), "no valid vtable index");
+      int vtbl_index = m->vtable_index();
+      if (vtbl_index != Method::nonvirtual_vtable_index) {
+        Klass* k = h_recv->klass();
+        // k might be an arrayKlassOop but all vtables start at
+        // the same place. The cast is to avoid virtual call and assertion.
+        InstanceKlass *ik = (InstanceKlass*)k;
+        selected_method = ik->method_at_vtable(vtbl_index);
       } else {
-        // JNI_NONVIRTUAL call
+        // final method
         selected_method = m;
       }
     } else {
       // interface call
       KlassHandle h_holder(THREAD, holder);
-      if (call_type == JNI_VIRTUAL) {
-        int itbl_index = m->itable_index();
-        Klass* k = h_recv->klass();
-        selected_method = InstanceKlass::cast(k)->method_at_itable(h_holder(), itbl_index, CHECK);
-      } else {
-        selected_method = m;
-      }
+
+      int itbl_index = m->itable_index();
+      Klass* k = h_recv->klass();
+      selected_method = InstanceKlass::cast(k)->method_at_itable(h_holder(), itbl_index, CHECK);
     }
   }
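
The restructured selection logic distinguishes three cases: an explicitly non-virtual call, a virtual call to a final method with no real vtable slot, and a true vtable or itable dispatch through the receiver. A toy standalone sketch of that decision (invented types, itable case omitted):

#include <cstdio>
#include <vector>

typedef void (*Fn)();
struct Method { int vtable_index; Fn direct; };   // index < 0 means non-virtual/final
struct Klass  { std::vector<Fn> vtable; };

static void base_hello() { printf("base\n"); }
static void sub_hello()  { printf("sub\n"); }
static void final_bye()  { printf("bye\n"); }

static Fn select(const Method& m, const Klass& receiver_klass, bool virtual_call) {
  if (!virtual_call) return m.direct;            // non-virtual call: use m as resolved
  if (m.vtable_index < 0) return m.direct;       // final method: no table lookup
  return receiver_klass.vtable[m.vtable_index];  // virtual: index the receiver's vtable
}

int main() {
  Klass sub;
  sub.vtable = { sub_hello };                    // slot 0 overridden by the subclass
  Method hello = { 0, base_hello };
  Method bye   = { -1, final_bye };
  select(hello, sub, true)();    // prints "sub": resolved through the vtable
  select(hello, sub, false)();   // prints "base": explicit non-virtual call
  select(bye,   sub, true)();    // prints "bye": final, dispatched directly
  return 0;
}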

View File

@@ -842,7 +842,7 @@ JvmtiEnvBase::get_stack_trace(JavaThread *java_thread,
     // optimize to limit the number of times that java_sender() is called
     javaVFrame *jvf_cursor = jvf;
     javaVFrame *jvf_prev = NULL;
-    javaVFrame *jvf_prev_prev;
+    javaVFrame *jvf_prev_prev = NULL;
     int j = 0;
     while (jvf_cursor != NULL) {
       jvf_prev_prev = jvf_prev;

View File

@@ -677,24 +677,24 @@ Handle MethodHandles::resolve_MemberName(Handle mname, KlassHandle caller, TRAPS
   case IS_METHOD:
     {
       CallInfo result;
+      LinkInfo link_info(defc, name, type, caller, caller.not_null());
       {
         assert(!HAS_PENDING_EXCEPTION, "");
         if (ref_kind == JVM_REF_invokeStatic) {
           LinkResolver::resolve_static_call(result,
-                        defc, name, type, caller, caller.not_null(), false, THREAD);
+                        link_info, false, THREAD);
         } else if (ref_kind == JVM_REF_invokeInterface) {
           LinkResolver::resolve_interface_call(result, Handle(), defc,
-                        defc, name, type, caller, caller.not_null(), false, THREAD);
+                        link_info, false, THREAD);
         } else if (mh_invoke_id != vmIntrinsics::_none) {
           assert(!is_signature_polymorphic_static(mh_invoke_id), "");
-          LinkResolver::resolve_handle_call(result,
-                        defc, name, type, caller, THREAD);
+          LinkResolver::resolve_handle_call(result, link_info, THREAD);
         } else if (ref_kind == JVM_REF_invokeSpecial) {
           LinkResolver::resolve_special_call(result,
-                        defc, name, type, caller, caller.not_null(), THREAD);
+                        link_info, THREAD);
         } else if (ref_kind == JVM_REF_invokeVirtual) {
           LinkResolver::resolve_virtual_call(result, Handle(), defc,
-                        defc, name, type, caller, caller.not_null(), false, THREAD);
+                        link_info, false, THREAD);
         } else {
           assert(false, err_msg("ref_kind=%d", ref_kind));
         }
@@ -714,11 +714,11 @@ Handle MethodHandles::resolve_MemberName(Handle mname, KlassHandle caller, TRAPS
   case IS_CONSTRUCTOR:
     {
       CallInfo result;
+      LinkInfo link_info(defc, name, type, caller, caller.not_null());
       {
         assert(!HAS_PENDING_EXCEPTION, "");
         if (name == vmSymbols::object_initializer_name()) {
-          LinkResolver::resolve_special_call(result,
-                        defc, name, type, caller, caller.not_null(), THREAD);
+          LinkResolver::resolve_special_call(result, link_info, THREAD);
         } else {
           break;                // will throw after end of switch
         }
@@ -735,7 +735,8 @@ Handle MethodHandles::resolve_MemberName(Handle mname, KlassHandle caller, TRAPS
       fieldDescriptor result; // find_field initializes fd if found
       {
         assert(!HAS_PENDING_EXCEPTION, "");
-        LinkResolver::resolve_field(result, defc, name, type, caller, Bytecodes::_nop, false, false, THREAD);
+        LinkInfo link_info(defc, name, type, caller, /*check_access*/false);
+        LinkResolver::resolve_field(result, link_info, Bytecodes::_nop, false, THREAD);
         if (HAS_PENDING_EXCEPTION) {
           return empty;
         }
@@ -942,22 +943,56 @@ int MethodHandles::find_MemberNames(KlassHandle k,
   return rfill + overflow;
 }

-// Get context class for a CallSite instance: either extract existing context or use default one.
-InstanceKlass* MethodHandles::get_call_site_context(oop call_site) {
-  // In order to extract a context the following traversal is performed:
-  //   CallSite.context => Cleaner.referent => Class._klass => Klass
-  assert(java_lang_invoke_CallSite::is_instance(call_site), "");
-  oop context_oop = java_lang_invoke_CallSite::context_volatile(call_site);
-  if (oopDesc::is_null(context_oop)) {
-    return NULL; // The context hasn't been initialized yet.
-  }
-  oop context_class_oop = java_lang_ref_Reference::referent(context_oop);
-  if (oopDesc::is_null(context_class_oop)) {
-    // The context reference was cleared by GC, so current dependency context
-    // isn't usable anymore. Context should be fetched from CallSite again.
-    return NULL;
-  }
-  return InstanceKlass::cast(java_lang_Class::as_Klass(context_class_oop));
+void MethodHandles::add_dependent_nmethod(oop call_site, nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+
+  oop context = java_lang_invoke_CallSite::context(call_site);
+  nmethodBucket* deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
+
+  nmethodBucket* new_deps = nmethodBucket::add_dependent_nmethod(deps, nm);
+  if (deps != new_deps) {
+    java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(context, new_deps);
+  }
+}
+
+void MethodHandles::remove_dependent_nmethod(oop call_site, nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+
+  oop context = java_lang_invoke_CallSite::context(call_site);
+  nmethodBucket* deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
+
+  if (nmethodBucket::remove_dependent_nmethod(deps, nm)) {
+    nmethodBucket* new_deps = nmethodBucket::clean_dependent_nmethods(deps);
+    if (deps != new_deps) {
+      java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(context, new_deps);
+    }
+  }
+}
+
+void MethodHandles::flush_dependent_nmethods(Handle call_site, Handle target) {
+  assert_lock_strong(Compile_lock);
+
+  int marked = 0;
+  CallSiteDepChange changes(call_site(), target());
+  {
+    MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+
+    oop context = java_lang_invoke_CallSite::context(call_site());
+    nmethodBucket* deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
+
+    marked = nmethodBucket::mark_dependent_nmethods(deps, changes);
+    if (marked > 0) {
+      nmethodBucket* new_deps = nmethodBucket::clean_dependent_nmethods(deps);
+      if (deps != new_deps) {
+        java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(context, new_deps);
+      }
+    }
+  }
+  if (marked > 0) {
+    // At least one nmethod has been marked for deoptimization
+    VM_Deoptimize op;
+    VMThread::execute(&op);
+  }
 }

 //------------------------------------------------------------------------------
@@ -1276,7 +1311,7 @@ JVM_ENTRY(void, MHN_setCallSiteTargetNormal(JNIEnv* env, jobject igcls, jobject
   {
     // Walk all nmethods depending on this call site.
     MutexLocker mu(Compile_lock, thread);
-    CodeCache::flush_dependents_on(call_site, target);
+    MethodHandles::flush_dependent_nmethods(call_site, target);
     java_lang_invoke_CallSite::set_target(call_site(), target());
   }
 }
@@ -1288,30 +1323,34 @@ JVM_ENTRY(void, MHN_setCallSiteTargetVolatile(JNIEnv* env, jobject igcls, jobjec
   {
     // Walk all nmethods depending on this call site.
     MutexLocker mu(Compile_lock, thread);
-    CodeCache::flush_dependents_on(call_site, target);
+    MethodHandles::flush_dependent_nmethods(call_site, target);
     java_lang_invoke_CallSite::set_target_volatile(call_site(), target());
   }
 }
 JVM_END

-JVM_ENTRY(void, MHN_invalidateDependentNMethods(JNIEnv* env, jobject igcls, jobject call_site_jh)) {
-  Handle call_site(THREAD, JNIHandles::resolve_non_null(call_site_jh));
+JVM_ENTRY(void, MHN_clearCallSiteContext(JNIEnv* env, jobject igcls, jobject context_jh)) {
+  Handle context(THREAD, JNIHandles::resolve_non_null(context_jh));
   {
     // Walk all nmethods depending on this call site.
     MutexLocker mu1(Compile_lock, thread);
-    CallSiteDepChange changes(call_site(), Handle());
-
-    InstanceKlass* ctxk = MethodHandles::get_call_site_context(call_site());
-    if (ctxk == NULL) {
-      return; // No dependencies to invalidate yet.
-    }

     int marked = 0;
     {
       MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-      marked = ctxk->mark_dependent_nmethods(changes);
+      nmethodBucket* b = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context());
+      while(b != NULL) {
+        nmethod* nm = b->get_nmethod();
+        if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
+          nm->mark_for_deoptimization();
+          marked++;
+        }
+        nmethodBucket* next = b->next();
+        delete b;
+        b = next;
+      }
+      java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(context(), NULL); // reset context
     }
-    java_lang_invoke_CallSite::set_context_volatile(call_site(), NULL); // Reset call site to initial state
     if (marked > 0) {
       // At least one nmethod has been marked for deoptimization
       VM_Deoptimize op;
@@ -1357,6 +1396,7 @@ JVM_END
 #define MT    JLINV"MethodType;"
 #define MH    JLINV"MethodHandle;"
 #define MEM   JLINV"MemberName;"
+#define CTX   JLINV"MethodHandleNatives$CallSiteContext;"

 #define CC (char*)  /*cast a literal from (const char*)*/
 #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
@@ -1374,7 +1414,7 @@ static JNINativeMethod MHN_methods[] = {
   {CC"objectFieldOffset",         CC"("MEM")J",            FN_PTR(MHN_objectFieldOffset)},
   {CC"setCallSiteTargetNormal",   CC"("CS""MH")V",         FN_PTR(MHN_setCallSiteTargetNormal)},
   {CC"setCallSiteTargetVolatile", CC"("CS""MH")V",         FN_PTR(MHN_setCallSiteTargetVolatile)},
-  {CC"invalidateDependentNMethods", CC"("CS")V",           FN_PTR(MHN_invalidateDependentNMethods)},
+  {CC"clearCallSiteContext",      CC"("CTX")V",            FN_PTR(MHN_clearCallSiteContext)},
   {CC"staticFieldOffset",         CC"("MEM")J",            FN_PTR(MHN_staticFieldOffset)},
   {CC"staticFieldBase",           CC"("MEM")"OBJ,          FN_PTR(MHN_staticFieldBase)},
   {CC"getMemberVMInfo",           CC"("MEM")"OBJ,          FN_PTR(MHN_getMemberVMInfo)}

View File

@@ -69,7 +69,10 @@ class MethodHandles: AllStatic {
   enum { _suppress_defc = 1, _suppress_name = 2, _suppress_type = 4 };

   // CallSite support
-  static InstanceKlass* get_call_site_context(oop call_site);
+  static void add_dependent_nmethod(oop call_site, nmethod* nm);
+  static void remove_dependent_nmethod(oop call_site, nmethod* nm);
+
+  static void flush_dependent_nmethods(Handle call_site, Handle target);

   // Generate MethodHandles adapters.
   static bool generate_adapters();

@@ -64,7 +64,7 @@ void AdvancedThresholdPolicy::initialize() {
   }
 #endif
-#ifdef SPARC
+#if defined SPARC || defined AARCH64
   if (FLAG_IS_DEFAULT(InlineSmallCode)) {
     FLAG_SET_DEFAULT(InlineSmallCode, 2500);
   }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -179,9 +179,9 @@ void JavaCalls::call_virtual(JavaValue* result, KlassHandle spec_klass, Symbol*
   CallInfo callinfo;
   Handle receiver = args->receiver();
   KlassHandle recvrKlass(THREAD, receiver.is_null() ? (Klass*)NULL : receiver->klass());
+  LinkInfo link_info(spec_klass, name, signature, KlassHandle(), /*check_access*/false);
   LinkResolver::resolve_virtual_call(
-          callinfo, receiver, recvrKlass, spec_klass, name, signature,
-          KlassHandle(), false, true, CHECK);
+          callinfo, receiver, recvrKlass, link_info, true, CHECK);
   methodHandle method = callinfo.selected_method();
   assert(method.not_null(), "should have thrown exception");
@@ -216,7 +216,8 @@ void JavaCalls::call_virtual(JavaValue* result, Handle receiver, KlassHandle spe
 void JavaCalls::call_special(JavaValue* result, KlassHandle klass, Symbol* name, Symbol* signature, JavaCallArguments* args, TRAPS) {
   CallInfo callinfo;
-  LinkResolver::resolve_special_call(callinfo, klass, name, signature, KlassHandle(), false, CHECK);
+  LinkInfo link_info(klass, name, signature, KlassHandle(), /*check_access*/false);
+  LinkResolver::resolve_special_call(callinfo, link_info, CHECK);
   methodHandle method = callinfo.selected_method();
   assert(method.not_null(), "should have thrown exception");
@@ -250,7 +251,8 @@ void JavaCalls::call_special(JavaValue* result, Handle receiver, KlassHandle kla
 void JavaCalls::call_static(JavaValue* result, KlassHandle klass, Symbol* name, Symbol* signature, JavaCallArguments* args, TRAPS) {
   CallInfo callinfo;
-  LinkResolver::resolve_static_call(callinfo, klass, name, signature, KlassHandle(), false, true, CHECK);
+  LinkInfo link_info(klass, name, signature, KlassHandle(), /*check_access*/false);
+  LinkResolver::resolve_static_call(callinfo, link_info, true, CHECK);
   methodHandle method = callinfo.selected_method();
   assert(method.not_null(), "should have thrown exception");

@@ -775,6 +775,10 @@ void os::start_thread(Thread* thread) {
   pd_start_thread(thread);
 }
+void os::abort(bool dump_core) {
+  abort(dump_core && CreateCoredumpOnCrash, NULL, NULL);
+}
 //---------------------------------------------------------------------------
 // Helper functions for fatal error handler

@@ -831,9 +831,9 @@ methodHandle Reflection::resolve_interface_call(instanceKlassHandle klass, metho
   CallInfo info;
   Symbol* signature = method->signature();
   Symbol* name = method->name();
-  LinkResolver::resolve_interface_call(info, receiver, recv_klass, klass,
-                                       name, signature,
-                                       KlassHandle(), false, true, true,
+  LinkResolver::resolve_interface_call(info, receiver, recv_klass,
+                                       LinkInfo(klass, name, signature, KlassHandle(), false),
+                                       true,
                                        CHECK_(methodHandle()));
   return info.selected_method();
 }

@@ -315,7 +315,7 @@ int VMUptimeDCmd::num_arguments() {
 void SystemGCDCmd::execute(DCmdSource source, TRAPS) {
   if (!DisableExplicitGC) {
-    Universe::heap()->collect(GCCause::_java_lang_system_gc);
+    Universe::heap()->collect(GCCause::_dcmd_gc_run);
   } else {
     output()->print_cr("Explicit GC is disabled, no GC has been performed.");
   }

@@ -89,11 +89,11 @@ private:
     return ((uintx)1) << validate_tag(tag);
   }
-  static TagType validate_tag(uintx tag) {
-    // Type of tag is not TagType to dodge useless MacOSX compiler warning.
-    assert(tag < (sizeof(uintx) * BitsPerByte),
-           err_msg("Tag " UINTX_FORMAT " is too large", tag));
-    return static_cast<TagType>(tag);
+  static TagType validate_tag(TagType tag) {
+    assert(0 <= tag, err_msg("Tag " INTX_FORMAT " is negative", (intx)tag));
+    assert(tag < BitsPerWord,
+           err_msg("Tag " UINTX_FORMAT " is too large", (uintx)tag));
+    return tag;
   }
 };

@@ -0,0 +1,65 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8080699
* @summary eliminated arraycopy node still reachable through exception edges
* @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation TestDeadArrayCopyOnMemChain
*
*/
public class TestDeadArrayCopyOnMemChain {
static class A {
int f;
}
static void test_helper(Object o) {
}
static void test(int src_off, boolean flag) {
// dst is eliminated first. Eliminating dst causes src to be
// eliminated. When working on the safepoint at the uncommon
// trap in the exception handler, the eliminated ArrayCopyNode
// is reached through the exception edges.
Object[] dst = new Object[10];
Object[] src = new Object[10];
// src_off causes the exception handler to be run sometimes
try {
System.arraycopy(src, src_off, dst, 0, 10);
} catch (IndexOutOfBoundsException ioobe) {
// flag always false so test becomes uncommon trap. Make
// sure src is live at the unc.
if (flag) {
test_helper(src);
}
}
}
static public void main(String[] args) {
for (int i = 0; i < 20000; i++) {
test((i%2) == 0 ? 0 : -1, false);
}
}
}

@@ -24,12 +24,15 @@
 /**
  * @test
  * @bug 8057967
- * @ignore 8079205
- * @run main/bootclasspath -Xbatch java.lang.invoke.CallSiteDepContextTest
+ * @run main/bootclasspath -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+TraceClassUnloading
+ *                         -XX:+PrintCompilation -XX:+TraceDependencies -XX:+TraceReferenceGC
+ *                         -verbose:gc java.lang.invoke.CallSiteDepContextTest
  */
 package java.lang.invoke;
 import java.lang.ref.*;
+import java.lang.reflect.Field;
 import jdk.internal.org.objectweb.asm.*;
 import sun.misc.Unsafe;
@@ -96,6 +99,13 @@ public class CallSiteDepContextTest {
         }
     }
+    public static void testHiddenDepField() throws Exception {
+        try {
+            Field f = MethodHandleNatives.CallSiteContext.class.getDeclaredField("vmdependencies");
+            throw new AssertionError("Context.dependencies field should be hidden");
+        } catch(NoSuchFieldException e) { /* expected */ }
+    }
     public static void testSharedCallSite() throws Throwable {
         Class<?> cls1 = UNSAFE.defineAnonymousClass(Object.class, getClassFile("CS_1"), null);
         Class<?> cls2 = UNSAFE.defineAnonymousClass(Object.class, getClassFile("CS_2"), null);
@@ -132,12 +142,14 @@ public class CallSiteDepContextTest {
     static ReferenceQueue rq = new ReferenceQueue();
     static PhantomReference ref;
-    public static void testGC() throws Throwable {
+    public static void testGC(boolean clear, boolean precompile) throws Throwable {
+        String id = "_" + clear + "_" + precompile;
         mcs = new MutableCallSite(LOOKUP.findStatic(T.class, "f1", TYPE));
         Class<?>[] cls = new Class[] {
-            UNSAFE.defineAnonymousClass(Object.class, getClassFile("GC_1"), null),
-            UNSAFE.defineAnonymousClass(Object.class, getClassFile("GC_2"), null),
+            UNSAFE.defineAnonymousClass(Object.class, getClassFile("GC_1" + id), null),
+            UNSAFE.defineAnonymousClass(Object.class, getClassFile("GC_2" + id), null),
         };
         MethodHandle[] mhs = new MethodHandle[] {
@@ -151,30 +163,38 @@ public class CallSiteDepContextTest {
         execute(1, mhs);
         ref = new PhantomReference<>(cls[0], rq);
-        cls[0] = UNSAFE.defineAnonymousClass(Object.class, getClassFile("GC_3"), null);
+        cls[0] = UNSAFE.defineAnonymousClass(Object.class, getClassFile("GC_3" + id), null);
         mhs[0] = LOOKUP.findStatic(cls[0], METHOD_NAME, TYPE);
         do {
             System.gc();
            try {
-                Reference ref1 = rq.remove(1000);
+                Reference ref1 = rq.remove(100);
                 if (ref1 == ref) {
-                    ref1.clear();
-                    System.gc(); // Ensure that the stale context is cleared
                     break;
                 }
             } catch(InterruptedException e) { /* ignore */ }
         } while (true);
-        execute(1, mhs);
+        if (clear) {
+            ref.clear();
+            System.gc(); // Ensure that the stale context is unloaded
+        }
+        if (precompile) {
+            execute(1, mhs);
+        }
         mcs.setTarget(LOOKUP.findStatic(T.class, "f2", TYPE));
         execute(2, mhs);
     }
     public static void main(String[] args) throws Throwable {
+        testHiddenDepField();
         testSharedCallSite();
         testNonBoundCallSite();
-        testGC();
+        testGC(false, false);
+        testGC(false, true);
+        testGC( true, false);
+        testGC( true, true);
         System.out.println("TEST PASSED");
     }
 }

@@ -0,0 +1,68 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8080976
* @summary Loop variant use in reduction should prevent vectorization
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestReductionWithLoopVariantUse
*
*/
public class TestReductionWithLoopVariantUse {
static int m(int[] array) {
int c = 0;
for (int i = 0; i < 256; i++) {
c += array[i];
array[i] = c;
}
return c;
}
static public void main(String[] args) {
int[] array = new int[256];
int[] array2 = new int[256];
for (int j = 0; j < 256; j++) {
array2[j] = j;
}
for (int i = 0; i < 20000; i++) {
System.arraycopy(array2, 0, array, 0, 256);
int res = m(array);
boolean success = true;
int c = 0;
for (int j = 0; j < 256; j++) {
c += array2[j];
if (array[j] != c) {
System.out.println("Failed for " + j + " : " + array[j] + " != " + c);
success = false;
}
}
if (c != res) {
System.out.println("Failed for sum: " + c + " != " + res);
}
if (!success) {
throw new RuntimeException("Test failed");
}
}
}
}

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8080156 8060036
* @summary Test correctness of type propagation to CmpUNodes.
* @run main TestTypePropagationToCmpU
*/
public class TestTypePropagationToCmpU {
public static void main(String[] args) {
try {
// Trigger compilation
for (int i = 0; i < 100_000; ++i) {
test();
}
} catch (NullPointerException e) {
// Test should never throw a NullPointerException
throw new RuntimeException("Test failed");
}
}
static int global = 42;
public static void test() {
int a = Integer.MIN_VALUE;
int b = global;
char[] buf = { 0 };
for (int i = 0; i <= b; ++i) {
a = i - b;
}
// C2 adds a range check and an uncommon trap here to ensure that the array index
// is in bounds. If type information is not propagated correctly to the corresponding
// CmpUNode, this trap may be always taken. Because C2 also removes the unnecessary
// allocation of 'buf', a NullPointerException is thrown in this case.
char c = buf[(a * 11) / 2 - a]; // a is 0 here if global >= 0
buf[0] = 0;
}
}

@@ -0,0 +1,103 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8077504
* @summary Unsafe load can loose control dependency and cause crash
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestUnsafeLoadControl
*
*/
import java.lang.reflect.Field;
import sun.misc.Unsafe;
public class TestUnsafeLoadControl {
private static final Unsafe UNSAFE;
static {
try {
Field unsafeField = Unsafe.class.getDeclaredField("theUnsafe");
unsafeField.setAccessible(true);
UNSAFE = (Unsafe) unsafeField.get(null);
} catch(Exception e) {
throw new RuntimeException(e);
}
}
static int val;
static void test1(int[] a, boolean[] flags, boolean flag, long j) {
for (int i = 0; i < 10; i++) {
if (flags[i]) {
if (flag) {
long address = (j << 2) + UNSAFE.ARRAY_INT_BASE_OFFSET;
int v = UNSAFE.getInt(a, address);
val = v;
}
}
}
}
static int test2(int[] a, boolean[] flags, boolean flag, long j) {
int sum = 0;
for (int i = 0; i < 10; i++) {
if (flags[i]) {
if (flag) {
long address = (j << 2) + UNSAFE.ARRAY_INT_BASE_OFFSET;
int v = UNSAFE.getInt(a, address);
if (v == 0) {
sum++;
}
}
}
}
return sum;
}
static public void main(String[] args) {
boolean[] flags = new boolean[10];
for (int i = 0; i < flags.length; i++) {
flags[i] = true;
}
int[] array = new int[10];
for (int i = 0; i < 20000; i++) {
test1(array, flags, true, 0);
}
for (int i = 0; i < flags.length; i++) {
flags[i] = false;
}
test1(array, flags, true, Long.MAX_VALUE/4);
for (int i = 0; i < flags.length; i++) {
flags[i] = true;
}
for (int i = 0; i < 20000; i++) {
test2(array, flags, true, 0);
}
for (int i = 0; i < flags.length; i++) {
flags[i] = false;
}
test2(array, flags, true, Long.MAX_VALUE/4);
}
}

@@ -27,6 +27,7 @@
  * @requires vm.gc=="null"
  * @requires (vm.opt.AggressiveOpts=="null") | (vm.opt.AggressiveOpts=="false")
  * @requires vm.compMode != "Xcomp"
+ * @requires vm.opt.UseCompressedOops != false
  * @summary Verify that starting the VM with a small heap works
  * @library /testlibrary /../../test/lib
  * @modules java.management/sun.management

@@ -1,12 +1,10 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
  *
  * This code is distributed in the hope that it will be useful, but WITHOUT
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@@ -22,12 +20,27 @@
  * or visit www.oracle.com if you need additional information or have any
  * questions.
  */
-package com.sun.tools.sjavac.comp.dependencies;
-import java.util.Set;
-import com.sun.tools.javac.code.Symbol.PackageSymbol;
-interface Dependency {
-    Set<PackageSymbol> getPackages();
+/**
+ * Implementation of InterfaceWithToString.
+ */
+public class ImplementationOfWithToString implements InterfaceWithToString {
+    /**
+     * @see InterfaceWithToString#someMethod()
+     * {@inheritDoc}
+     */
+    @Override
+    public void someMethod() {
+        // May do something here.
+    }
+    /**
+     * @see java.lang.Object#toString()
+     * {@inheritDoc}
+     */
+    @Override
+    public String toString() {
+        return "toString() from " + getClass().getName();
+    }
 }

@@ -1,12 +1,10 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
  *
  * This code is distributed in the hope that it will be useful, but WITHOUT
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@@ -22,7 +20,18 @@
  * or visit www.oracle.com if you need additional information or have any
  * questions.
  */
-package pkg25;
-public class Cls25 extends Throwable {
+/**
+ * Interface with toString declared.
+ */
+public interface InterfaceWithToString {
+    void someMethod();
+    /**
+     * Same as Object.toString().
+     *
+     * @return some custom string.
+     */
+    String toString();
 }

@@ -1,12 +1,10 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
  *
  * This code is distributed in the hope that it will be useful, but WITHOUT
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@@ -22,9 +20,22 @@
  * or visit www.oracle.com if you need additional information or have any
  * questions.
  */
-package pkg13;
-public class Cls13 {
-    public pkg14.Cls14 getCls14() {
-        return null;
+/* @test
+ * @bug 8072588
+ * @build InterfaceWithToString
+ * @build ImplementationOfWithToString
+ * @run main/native ToStringTest
+ */
+public final class ToStringTest {
+    static {
+        System.loadLibrary("ToStringTest");
+    }
+    native static void nTest();
+    public static void main(String[] args) throws Exception {
+        nTest();
     }
 }

@@ -0,0 +1,82 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* Native test for ToStringInInterfaceTest.
*/
#include "jni.h"
#define checkException(env) if ((*env)->ExceptionCheck(env)) { return; }
jstring callStringMethod(JNIEnv* env, jobject jobj, jmethodID id, ...)
{
jstring value;
va_list ap;
va_start(ap, id);
value = (jstring)(*env)->CallObjectMethodV(env, jobj, id, ap);
va_end(ap);
return value;
}
JNIEXPORT void JNICALL Java_ToStringTest_nTest(JNIEnv* env, jclass jclazz)
{
jclass classOfInterfaceWithToString;
jclass classOfImplementationOfWithToString;
jmethodID constructorOfImplementationOfWithToString;
jobject instanceOfImplementationOfWithToString;
jmethodID toStringOfInterfaceWithToString;
jmethodID toStringOfImplementationOfWithToString;
jstring jstr;
const char *chars;
classOfInterfaceWithToString = (*env)->FindClass(env, "InterfaceWithToString");
checkException(env);
classOfImplementationOfWithToString = (*env)->FindClass(env, "ImplementationOfWithToString");
checkException(env);
constructorOfImplementationOfWithToString = (*env)->GetMethodID(env, classOfImplementationOfWithToString, "<init>", "()V");
checkException(env);
instanceOfImplementationOfWithToString = (*env)->NewObject(env, classOfImplementationOfWithToString, constructorOfImplementationOfWithToString);
checkException(env);
toStringOfInterfaceWithToString = (*env)->GetMethodID(env, classOfInterfaceWithToString, "toString", "()Ljava/lang/String;");
checkException(env);
toStringOfImplementationOfWithToString = (*env)->GetMethodID(env, classOfImplementationOfWithToString, "toString", "()Ljava/lang/String;");
checkException(env);
jstr = callStringMethod(env, instanceOfImplementationOfWithToString, toStringOfImplementationOfWithToString);
checkException(env);
chars = (*env)->GetStringUTFChars(env, jstr, NULL);
(*env)->ReleaseStringUTFChars(env, jstr, chars);
jstr = callStringMethod(env, instanceOfImplementationOfWithToString, toStringOfInterfaceWithToString);
checkException(env);
chars = (*env)->GetStringUTFChars(env, jstr, NULL);
(*env)->ReleaseStringUTFChars(env, jstr, chars);
}

@@ -59,7 +59,7 @@ public class RunGCTest {
     }
     OutputAnalyzer output = new OutputAnalyzer(gcLog, "");
-    output.shouldMatch(".*\\[Full GC \\(System(\\.gc\\(\\))?.*");
+    output.shouldContain("[Full GC (Diagnostic Command)");
 }
 @Test

@@ -24,11 +24,14 @@
 import jdk.test.lib.Platform;
 import jdk.test.lib.ProcessTools;
 import jdk.test.lib.OutputAnalyzer;
+import jdk.test.lib.apps.LingeredApp;
 /*
  * @test
+ * @library /../../test/lib/share/classes
  * @library /testlibrary
  * @build jdk.test.lib.*
+ * @build jdk.test.lib.apps.*
  * @run main TestClassLoaderStats
  */
 public class TestClassLoaderStats {
@@ -39,19 +42,27 @@ public class TestClassLoaderStats {
             return;
         }
-        ProcessBuilder processBuilder = ProcessTools.createJavaProcessBuilder(
-                "-XX:+UsePerfData",
-                "sun.jvm.hotspot.tools.ClassLoaderStats",
-                Integer.toString(ProcessTools.getProcessId()));
-        OutputAnalyzer output = ProcessTools.executeProcess(processBuilder);
-        System.out.println(output.getOutput());
-        output.shouldHaveExitValue(0);
-        output.shouldContain("Debugger attached successfully.");
-        // The class loader stats header needs to be presented in the output:
-        output.shouldMatch("class_loader\\W+classes\\W+bytes\\W+parent_loader\\W+alive?\\W+type");
-        output.stderrShouldNotMatch("[E|e]xception");
-        output.stderrShouldNotMatch("[E|e]rror");
+        LingeredApp app = null;
+        try {
+            app = LingeredApp.startApp();
+            System.out.println("Attaching sun.jvm.hotspot.tools.ClassLoaderStats to " + app.getPid());
+            ProcessBuilder processBuilder = ProcessTools.createJavaProcessBuilder(
+                    "-XX:+UsePerfData",
+                    "sun.jvm.hotspot.tools.ClassLoaderStats",
+                    Long.toString(app.getPid()));
+            OutputAnalyzer output = ProcessTools.executeProcess(processBuilder);
+            System.out.println(output.getOutput());
+            output.shouldHaveExitValue(0);
+            output.shouldContain("Debugger attached successfully.");
+            // The class loader stats header needs to be presented in the output:
+            output.shouldMatch("class_loader\\W+classes\\W+bytes\\W+parent_loader\\W+alive?\\W+type");
+            output.stderrShouldNotMatch("[E|e]xception");
+            output.stderrShouldNotMatch("[E|e]rror");
+        } finally {
+            app.stopApp();
+        }
     }
 }

@@ -24,11 +24,14 @@
 import jdk.test.lib.OutputAnalyzer;
 import jdk.test.lib.Platform;
 import jdk.test.lib.ProcessTools;
+import jdk.test.lib.apps.LingeredApp;
 /*
  * @test
+ * @library /../../test/lib/share/classes
  * @library /testlibrary
  * @build jdk.test.lib.*
+ * @build jdk.test.lib.apps.*
  * @run main TestStackTrace
  */
 public class TestStackTrace {
@@ -39,17 +42,25 @@ public class TestStackTrace {
             return;
         }
-        ProcessBuilder processBuilder = ProcessTools.createJavaProcessBuilder(
-                "-XX:+UsePerfData",
-                "sun.jvm.hotspot.tools.StackTrace",
-                Integer.toString(ProcessTools.getProcessId()));
-        OutputAnalyzer output = ProcessTools.executeProcess(processBuilder);
-        System.out.println(output.getOutput());
-        output.shouldHaveExitValue(0);
-        output.shouldContain("Debugger attached successfully.");
-        output.stderrShouldNotMatch("[E|e]xception");
-        output.stderrShouldNotMatch("[E|e]rror");
+        LingeredApp app = null;
+        try {
+            app = LingeredApp.startApp();
+            System.out.println("Attaching sun.jvm.hotspot.tools.StackTrace to " + app.getPid());
+            ProcessBuilder processBuilder = ProcessTools.createJavaProcessBuilder(
+                    "-XX:+UsePerfData",
+                    "sun.jvm.hotspot.tools.StackTrace",
+                    Long.toString(app.getPid()));
+            OutputAnalyzer output = ProcessTools.executeProcess(processBuilder);
+            System.out.println(output.getOutput());
+            output.shouldHaveExitValue(0);
+            output.shouldContain("Debugger attached successfully.");
+            output.stderrShouldNotMatch("[E|e]xception");
+            output.stderrShouldNotMatch("[E|e]rror");
+        } finally {
+            app.stopApp();
+        }
     }
 }

@@ -310,3 +310,4 @@ f4a4a54620370f077c2e830a5561c8cfa811712b jdk9-b61
 ae7406e82828fe1c245ac7507a9da5fd5b1c9529 jdk9-b65
 d5963ccce28d7a3e96ee3e2dc8a8676e61699b70 jdk9-b66
 78c2685daabafae827c686ca2d1bb2e451faed2b jdk9-b67
+82aae947938ec9b0119fdd78a616d0b7263072ee jdk9-b68

Some files were not shown because too many files have changed in this diff.