Rachel Protacio 2016-10-26 20:13:29 +00:00
commit e715dd214c
627 changed files with 16760 additions and 5789 deletions

View File

@ -382,3 +382,5 @@ e384420383a5b79fa0012ebcb25d8f83cff7f777 jdk-9+135
9cb87c88ed851c0575b8ead753ea238ed5b544e9 jdk-9+137
d273dfe9a126d3bffe92072547fef2cd1361b0eb jdk-9+138
65477538bec32963dc41153d89c4417eb46c45fc jdk-9+139
0875007901f7d364a08220b052f0c81003e9c8c5 jdk-9+140
9aadd2163b568d76f8969ad2fb404a63733da359 jdk-9+141

View File

@ -382,3 +382,5 @@ be1218f792a450dfb5d4b1f82616b9d95a6a732e jdk-9+133
d7f519b004254b19e384131d9f0d0e40e31a0fd3 jdk-9+137
67c4388142bdf58aec8fefa4475faaa8a5d7380c jdk-9+138
7dcf453eacae79ee86a6bcc75fd0b546fc99b48a jdk-9+139
a5815c6098a241d3a1df64d22b84b3524e4a77df jdk-9+140
f64afae7f1a5608e438585bbf0bc23785e69cba0 jdk-9+141

View File

@ -428,9 +428,10 @@ AC_DEFUN([BASIC_SETUP_TOOL],
# Call BASIC_SETUP_TOOL with AC_PATH_PROGS to locate the tool
# $1: variable to set
# $2: executable name (or list of names) to look for
# $3: [path]
AC_DEFUN([BASIC_PATH_PROGS],
[
BASIC_SETUP_TOOL($1, [AC_PATH_PROGS($1, $2)])
BASIC_SETUP_TOOL($1, [AC_PATH_PROGS($1, $2, , $3)])
])
# Call BASIC_SETUP_TOOL with AC_CHECK_TOOLS to locate the tool
@ -444,9 +445,10 @@ AC_DEFUN([BASIC_CHECK_TOOLS],
# Like BASIC_PATH_PROGS but fails if no tool was found.
# $1: variable to set
# $2: executable name (or list of names) to look for
# $3: [path]
AC_DEFUN([BASIC_REQUIRE_PROGS],
[
BASIC_PATH_PROGS($1, $2)
BASIC_PATH_PROGS($1, $2, , $3)
BASIC_CHECK_NONEMPTY($1)
])
@ -1065,7 +1067,9 @@ AC_DEFUN_ONCE([BASIC_SETUP_COMPLEX_TOOLS],
BASIC_PATH_PROGS(HG, hg)
BASIC_PATH_PROGS(STAT, stat)
BASIC_PATH_PROGS(TIME, time)
BASIC_PATH_PROGS(DTRACE, dtrace)
# Dtrace is usually found in /usr/sbin on Solaris, but that directory may not
# be in the user path.
BASIC_PATH_PROGS(DTRACE, dtrace, $PATH:/usr/sbin)
BASIC_PATH_PROGS(PATCH, [gpatch patch])
# Check if it's GNU time
IS_GNU_TIME=`$TIME --version 2>&1 | $GREP 'GNU time'`

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

View File

@ -168,7 +168,6 @@ AC_DEFUN([BPERF_SETUP_CCACHE],
[AS_HELP_STRING([--enable-ccache],
[enable using ccache to speed up recompilations @<:@disabled@:>@])])
CCACHE=
CCACHE_STATUS=
AC_MSG_CHECKING([is ccache enabled])
if test "x$enable_ccache" = xyes; then

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -156,7 +156,3 @@ JVM_VARIANT_KERNEL := false
JVM_VARIANT_ZERO := false
JVM_VARIANT_ZEROSHARK := false
JVM_VARIANT_CORE := false
# Sneak this in via the spec.gmk file, since we don't want to mess around too much with the Hotspot make files.
# This is needed to get the LOG setting to work properly.
include $(SRC_ROOT)/make/common/MakeBase.gmk

View File

@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

View File

@ -895,7 +895,7 @@ AC_DEFUN([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK_HELPER],
elif test "x$OPENJDK_$1_OS" = xsolaris; then
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -DSOLARIS"
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -template=no%extdef -features=no%split_init \
-D_Crun_inline_placement -library=%none -KPIC -mt -xwe -features=no%except"
-D_Crun_inline_placement -library=%none -KPIC -mt -features=no%except"
elif test "x$OPENJDK_$1_OS" = xmacosx; then
$2COMMON_CCXXFLAGS_JDK="[$]$2COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE -D_DARWIN_UNLIMITED_SELECT"
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -D_ALLBSD_SOURCE"

View File

@ -3657,6 +3657,7 @@ ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
# Call BASIC_SETUP_TOOL with AC_PATH_PROGS to locate the tool
# $1: variable to set
# $2: executable name (or list of names) to look for
# $3: [path]
# Call BASIC_SETUP_TOOL with AC_CHECK_TOOLS to locate the tool
@ -3667,6 +3668,7 @@ ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
# Like BASIC_PATH_PROGS but fails if no tool was found.
# $1: variable to set
# $2: executable name (or list of names) to look for
# $3: [path]
# Like BASIC_SETUP_TOOL but fails if no tool was found.
@ -3733,7 +3735,7 @@ ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
#
# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -3792,7 +3794,7 @@ ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
# ... then the rest
#
# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -4403,7 +4405,7 @@ VALID_JVM_VARIANTS="server client minimal core zero zeroshark custom"
#
# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -4498,7 +4500,7 @@ VALID_JVM_VARIANTS="server client minimal core zero zeroshark custom"
#
# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -4818,7 +4820,7 @@ VALID_JVM_VARIANTS="server client minimal core zero zeroshark custom"
#
# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -5091,7 +5093,7 @@ VS_SDK_PLATFORM_NAME_2013=
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1475218974
DATE_WHEN_GENERATED=1477108079
###############################################################################
#
@ -22769,6 +22771,8 @@ $as_echo "$tool_specified" >&6; }
fi
# Dtrace is usually found in /usr/sbin on Solaris, but that directory may not
# be in the user path.
# Publish this variable in the help.
@ -22791,7 +22795,8 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
as_dummy="$PATH:/usr/sbin"
for as_dir in $as_dummy
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@ -22849,7 +22854,8 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
as_dummy="$PATH:/usr/sbin"
for as_dir in $as_dummy
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@ -50071,7 +50077,7 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
elif test "x$OPENJDK_TARGET_OS" = xsolaris; then
JVM_CFLAGS="$JVM_CFLAGS -DSOLARIS"
JVM_CFLAGS="$JVM_CFLAGS -template=no%extdef -features=no%split_init \
-D_Crun_inline_placement -library=%none -KPIC -mt -xwe -features=no%except"
-D_Crun_inline_placement -library=%none -KPIC -mt -features=no%except"
elif test "x$OPENJDK_TARGET_OS" = xmacosx; then
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE -D_DARWIN_UNLIMITED_SELECT"
JVM_CFLAGS="$JVM_CFLAGS -D_ALLBSD_SOURCE"
@ -50886,7 +50892,7 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
elif test "x$OPENJDK_BUILD_OS" = xsolaris; then
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -DSOLARIS"
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -template=no%extdef -features=no%split_init \
-D_Crun_inline_placement -library=%none -KPIC -mt -xwe -features=no%except"
-D_Crun_inline_placement -library=%none -KPIC -mt -features=no%except"
elif test "x$OPENJDK_BUILD_OS" = xmacosx; then
OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK="$OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE -D_DARWIN_UNLIMITED_SELECT"
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -D_ALLBSD_SOURCE"
@ -65239,7 +65245,6 @@ if test "${enable_ccache+set}" = set; then :
fi
CCACHE=
CCACHE_STATUS=
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking is ccache enabled" >&5
$as_echo_n "checking is ccache enabled... " >&6; }

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

View File

@ -175,7 +175,7 @@ VERSION_NUMBER_FOUR_POSITIONS := @VERSION_NUMBER_FOUR_POSITIONS@
VERSION_STRING := @VERSION_STRING@
# The short version string, without trailing zeroes and just PRE, if present.
VERSION_SHORT := @VERSION_SHORT@
# The Java specification version. It usually equals to the major version number.
# The Java specification version. It usually equals the major version number.
VERSION_SPECIFICATION := @VERSION_MAJOR@
# A GA version is defined by the PRE string being empty. Rather than testing for
# that, this variable defines it with true/false.
@ -244,9 +244,6 @@ USE_PRECOMPILED_HEADER := @USE_PRECOMPILED_HEADER@
# Only build headless support or not
ENABLE_HEADLESS_ONLY := @ENABLE_HEADLESS_ONLY@
# Legacy support
USE_NEW_HOTSPOT_BUILD:=@USE_NEW_HOTSPOT_BUILD@
# JDK_OUTPUTDIR specifies where a working jvm is built.
# You can run $(JDK_OUTPUTDIR)/bin/java
# Though the layout of the contents of $(JDK_OUTPUTDIR) is not

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

View File

@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

View File

@ -1,6 +1,6 @@
#!/bin/sh
#
# Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

View File

@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -287,7 +287,8 @@ var getJibProfilesProfiles = function (input, common) {
target_os: "solaris",
target_cpu: "x64",
dependencies: concat(common.dependencies, "devkit", "cups"),
configure_args: concat(common.configure_args, "--with-zlib=system"),
configure_args: concat(common.configure_args, "--with-zlib=system",
"--enable-dtrace"),
default_make_targets: common.default_make_targets
},
@ -295,7 +296,8 @@ var getJibProfilesProfiles = function (input, common) {
target_os: "solaris",
target_cpu: "sparcv9",
dependencies: concat(common.dependencies, "devkit", "cups"),
configure_args: concat(common.configure_args, "--with-zlib=system"),
configure_args: concat(common.configure_args, "--with-zlib=system",
"--enable-dtrace"),
default_make_targets: common.default_make_targets
},

View File

@ -382,3 +382,5 @@ aa053a3faf266c12b4fd5272da431a3e08e4a3e3 jdk-9+136
258cf18fa7fc59359b874f8743b7168dc48baf73 jdk-9+137
27bb44be32076861a0951bcefb07a1d92509a4b6 jdk-9+138
8c9da7fc5b07c606afd571c7012441b77dda83b2 jdk-9+139
9f3fc931bc230f44f2a58d75f7f6360af98bb113 jdk-9+140
b32f998da32b488ec7c4e9dbb3c750841b48e74d jdk-9+141

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

View File

@ -31,22 +31,17 @@
package com.sun.corba.se.impl.io;
import java.io.InputStream;
import java.io.IOException;
import java.io.StreamCorruptedException;
import java.io.ObjectInputValidation;
import java.io.NotActiveException;
import java.io.InvalidObjectException;
import java.io.InvalidClassException;
import java.io.DataInputStream;
import java.io.OptionalDataException;
import java.io.WriteAbortedException;
import java.io.Externalizable;
import java.io.EOFException;
import java.lang.reflect.*;
import java.util.Vector;
import java.util.Stack;
import java.util.Hashtable;
import java.util.Enumeration;
import sun.corba.Bridge ;
@ -54,7 +49,6 @@ import sun.corba.Bridge ;
import java.security.AccessController ;
import java.security.PrivilegedAction ;
import com.sun.corba.se.impl.io.ObjectStreamClass;
import com.sun.corba.se.impl.util.Utility;
import org.omg.CORBA.portable.ValueInputStream;
@ -71,14 +65,12 @@ import org.omg.CORBA.TypeCode;
import com.sun.org.omg.CORBA.ValueDefPackage.FullValueDescription;
import com.sun.org.omg.SendingContext.CodeBase;
import javax.rmi.PortableRemoteObject;
import javax.rmi.CORBA.Util;
import javax.rmi.CORBA.ValueHandler;
import java.security.*;
import java.util.*;
import com.sun.corba.se.impl.orbutil.ObjectUtility ;
import com.sun.corba.se.impl.logging.OMGSystemException ;
import com.sun.corba.se.impl.logging.UtilSystemException ;
@ -182,75 +174,6 @@ public class IIOPInputStream
private byte streamFormatVersion;
// Since java.io.OptionalDataException's constructors are
// package private, but we need to throw it in some special
// cases, we try to do it by reflection.
private static final Constructor OPT_DATA_EXCEPTION_CTOR;
private Object[] readObjectArgList = { this } ;
static {
OPT_DATA_EXCEPTION_CTOR = getOptDataExceptionCtor();
}
// Grab the OptionalDataException boolean ctor and make
// it accessible. Note that any exceptions
// will be wrapped in ExceptionInInitializerErrors.
private static Constructor getOptDataExceptionCtor() {
try {
Constructor result =
(Constructor) AccessController.doPrivileged(
new PrivilegedExceptionAction() {
public java.lang.Object run()
throws NoSuchMethodException,
SecurityException {
Constructor boolCtor
= OptionalDataException.class.getDeclaredConstructor(
new Class[] {
Boolean.TYPE });
boolCtor.setAccessible(true);
return boolCtor;
}});
if (result == null)
// XXX I18N, logging needed.
throw new Error("Unable to find OptionalDataException constructor");
return result;
} catch (Exception ex) {
// XXX I18N, logging needed.
throw new ExceptionInInitializerError(ex);
}
}
// Create a new OptionalDataException with the EOF marker
// set to true. See handleOptionalDataMarshalException.
private OptionalDataException createOptionalDataException() {
try {
OptionalDataException result
= (OptionalDataException)
OPT_DATA_EXCEPTION_CTOR.newInstance(new Object[] {
Boolean.TRUE });
if (result == null)
// XXX I18N, logging needed.
throw new Error("Created null OptionalDataException");
return result;
} catch (Exception ex) {
// XXX I18N, logging needed.
throw new Error("Couldn't create OptionalDataException", ex);
}
}
// Return the stream format version currently being used
// to deserialize an object
protected byte getStreamFormatVersion() {
@ -395,7 +318,6 @@ public class IIOPInputStream
int offset)
/* throws OptionalDataException, ClassNotFoundException, IOException */
{
/* Save the current state and get ready to read an object. */
Object prevObject = currentObject;
ObjectStreamClass prevClassDesc = currentClassDesc;
@ -947,7 +869,7 @@ public class IIOPInputStream
if (!objectRead)
result = new EOFException("No more optional data");
else
result = createOptionalDataException();
result = bridge.newOptionalDataExceptionForSerialization(true);
result.initCause(marshalException);
@ -1230,8 +1152,7 @@ public class IIOPInputStream
readObjectState.beginUnmarshalCustomValue(this,
calledDefaultWriteObject,
(currentClassDesc.readObjectMethod
!= null));
currentClassDesc.hasReadObject());
} else {
if (currentClassDesc.hasReadObject())
setState(IN_READ_OBJECT_REMOTE_NOT_CUSTOM_MARSHALED);
@ -1556,8 +1477,7 @@ public class IIOPInputStream
readObjectState.beginUnmarshalCustomValue(this,
calledDefaultWriteObject,
(currentClassDesc.readObjectMethod
!= null));
currentClassDesc.hasReadObject());
}
boolean usedReadObject = false;
@ -1714,13 +1634,8 @@ public class IIOPInputStream
throws InvalidClassException, StreamCorruptedException,
ClassNotFoundException, IOException
{
if (osc.readObjectMethod == null) {
return false;
}
try {
osc.readObjectMethod.invoke( obj, readObjectArgList ) ;
return true;
return osc.invokeReadObject( obj, this ) ;
} catch (InvocationTargetException e) {
Throwable t = e.getTargetException();
if (t instanceof ClassNotFoundException)
@ -1734,8 +1649,6 @@ public class IIOPInputStream
else
// XXX I18N, logging needed.
throw new Error("internal error");
} catch (IllegalAccessException e) {
return false;
}
}

View File

@ -31,33 +31,23 @@
package com.sun.corba.se.impl.io;
import org.omg.CORBA.INTERNAL;
import org.omg.CORBA.portable.OutputStream;
import java.security.AccessController ;
import java.security.PrivilegedAction ;
import java.io.IOException;
import java.io.DataOutputStream;
import java.io.Serializable;
import java.io.InvalidClassException;
import java.io.StreamCorruptedException;
import java.io.Externalizable;
import java.io.ObjectStreamException;
import java.io.NotSerializableException;
import java.io.NotActiveException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Field;
import java.util.Stack;
import javax.rmi.CORBA.Util;
import javax.rmi.CORBA.ValueHandlerMultiFormat;
import sun.corba.Bridge ;
import com.sun.corba.se.impl.io.ObjectStreamClass;
import com.sun.corba.se.impl.util.Utility;
import com.sun.corba.se.impl.util.RepositoryId;
@ -621,7 +611,7 @@ public class IIOPOutputStream
writeObjectState.enterWriteObject(this);
// writeObject(obj, c, this);
osc.writeObjectMethod.invoke( obj, writeObjectArgList ) ;
osc.invokeWriteObject( obj, this ) ;
writeObjectState.exitWriteObject(this);
@ -636,8 +626,6 @@ public class IIOPOutputStream
else
// XXX I18N, Logging needed.
throw new Error("invokeObjectWriter internal error",e);
} catch (IllegalAccessException e) {
// cannot happen
}
}
@ -761,59 +749,52 @@ public class IIOPOutputStream
*/
private void outputClassFields(Object o, Class cl,
ObjectStreamField[] fields)
throws IOException, InvalidClassException {
throws IOException {
for (int i = 0; i < fields.length; i++) {
if (fields[i].getField() == null)
// XXX I18N, Logging needed.
throw new InvalidClassException(cl.getName(),
"Nonexistent field " + fields[i].getName());
try {
switch (fields[i].getTypeCode()) {
case 'B':
byte byteValue = fields[i].getField().getByte( o ) ;
orbStream.write_octet(byteValue);
break;
case 'C':
char charValue = fields[i].getField().getChar( o ) ;
orbStream.write_wchar(charValue);
break;
case 'F':
float floatValue = fields[i].getField().getFloat( o ) ;
orbStream.write_float(floatValue);
break;
case 'D' :
double doubleValue = fields[i].getField().getDouble( o ) ;
orbStream.write_double(doubleValue);
break;
case 'I':
int intValue = fields[i].getField().getInt( o ) ;
orbStream.write_long(intValue);
break;
case 'J':
long longValue = fields[i].getField().getLong( o ) ;
orbStream.write_longlong(longValue);
break;
case 'S':
short shortValue = fields[i].getField().getShort( o ) ;
orbStream.write_short(shortValue);
break;
case 'Z':
boolean booleanValue = fields[i].getField().getBoolean( o ) ;
orbStream.write_boolean(booleanValue);
break;
case '[':
case 'L':
Object objectValue = fields[i].getField().get( o ) ;
writeObjectField(fields[i], objectValue);
break;
default:
// XXX I18N, Logging needed.
throw new InvalidClassException(cl.getName());
}
} catch (IllegalAccessException exc) {
throw wrapper.illegalFieldAccess( exc, fields[i].getName() ) ;
switch (fields[i].getTypeCode()) {
case 'B':
byte byteValue = bridge.getByte(o, fields[i].getFieldID()) ;
orbStream.write_octet(byteValue);
break;
case 'C':
char charValue = bridge.getChar(o, fields[i].getFieldID()) ;
orbStream.write_wchar(charValue);
break;
case 'F':
float floatValue = bridge.getFloat(o, fields[i].getFieldID()) ;
orbStream.write_float(floatValue);
break;
case 'D' :
double doubleValue = bridge.getDouble(o, fields[i].getFieldID()) ;
orbStream.write_double(doubleValue);
break;
case 'I':
int intValue = bridge.getInt(o, fields[i].getFieldID()) ;
orbStream.write_long(intValue);
break;
case 'J':
long longValue = bridge.getLong(o, fields[i].getFieldID()) ;
orbStream.write_longlong(longValue);
break;
case 'S':
short shortValue = bridge.getShort(o, fields[i].getFieldID()) ;
orbStream.write_short(shortValue);
break;
case 'Z':
boolean booleanValue = bridge.getBoolean(o, fields[i].getFieldID()) ;
orbStream.write_boolean(booleanValue);
break;
case '[':
case 'L':
Object objectValue = bridge.getObject(o, fields[i].getFieldID()) ;
writeObjectField(fields[i], objectValue);
break;
default:
throw new InvalidClassException(cl.getName());
}
}
}
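The rewritten outputClassFields above replaces Field.getByte/getChar/... plus setAccessible with offset-based reads through the Bridge. Below is a minimal standalone sketch of that idiom (illustrative only, not part of this changeset), using sun.misc.Unsafe directly; the Sample class and field names are invented, and fields[i].getFieldID() is assumed to cache an offset obtained roughly this way:

```java
import java.lang.reflect.Field;
import sun.misc.Unsafe;

public class OffsetReadDemo {
    static class Sample { int count = 7; }   // stand-in for a serializable field owner

    public static void main(String[] args) throws Exception {
        // Obtain the Unsafe singleton the same way the updated corba code does.
        Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
        theUnsafe.setAccessible(true);
        Unsafe unsafe = (Unsafe) theUnsafe.get(null);

        // Resolve the field offset once, then read through it without touching
        // setAccessible on the field itself.
        Field f = Sample.class.getDeclaredField("count");
        long offset = unsafe.objectFieldOffset(f);
        Sample s = new Sample();
        System.out.println(unsafe.getInt(s, offset));   // prints 7
    }
}
```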

View File

@ -31,16 +31,16 @@
package com.sun.corba.se.impl.io;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.lang.invoke.MethodHandle;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.security.DigestOutputStream;
import java.security.AccessController;
import java.security.PrivilegedExceptionAction;
import java.security.PrivilegedActionException;
import java.security.PrivilegedAction;
import java.lang.reflect.Modifier;
import java.lang.reflect.Array;
import java.lang.reflect.Field;
import java.lang.reflect.Member;
import java.lang.reflect.Method;
@ -80,9 +80,6 @@ public class ObjectStreamClass implements java.io.Serializable {
public static final long kDefaultUID = -1;
private static Object noArgsList[] = {};
private static Class<?> noTypesList[] = {};
/** true if represents enum type */
private boolean isEnum;
@ -311,12 +308,37 @@ public class ObjectStreamClass implements java.io.Serializable {
return null;
}
public final boolean invokeWriteObject(Object obj, ObjectOutputStream ois) throws InvocationTargetException {
if (!hasWriteObject()) {
return false;
}
try {
writeObjectMethod.invoke(obj, ois);
} catch (Throwable t) {
throw new InvocationTargetException(t, "writeObject");
}
return true;
}
public final boolean invokeReadObject(Object obj, ObjectInputStream ois) throws InvocationTargetException {
if (hasReadObject()) {
try {
readObjectMethod.invoke(obj, ois);
return true;
} catch (Throwable t) {
throw new InvocationTargetException(t, "readObject");
}
} else {
return false;
}
}
public Serializable writeReplace(Serializable value) {
if (writeReplaceObjectMethod != null) {
try {
return (Serializable) writeReplaceObjectMethod.invoke(value,noArgsList);
} catch(Throwable t) {
throw new RuntimeException(t);
return (Serializable) writeReplaceObjectMethod.invoke(value);
} catch (Throwable t) {
throw new InternalError("unexpected error", t);
}
}
else return value;
@ -325,9 +347,9 @@ public class ObjectStreamClass implements java.io.Serializable {
public Object readResolve(Object value) {
if (readResolveObjectMethod != null) {
try {
return readResolveObjectMethod.invoke(value,noArgsList);
} catch(Throwable t) {
throw new RuntimeException(t);
return readResolveObjectMethod.invoke(value);
} catch (Throwable t) {
throw new InternalError("unexpected error", t);
}
}
else return value;
@ -382,31 +404,34 @@ public class ObjectStreamClass implements java.io.Serializable {
*/
}
private static final class PersistentFieldsValue
static final class PersistentFieldsValue
extends ClassValue<ObjectStreamField[]> {
PersistentFieldsValue() { }
protected ObjectStreamField[] computeValue(Class<?> type) {
try {
bridge.ensureClassInitialized(type);
Field pf = type.getDeclaredField("serialPersistentFields");
int mods = pf.getModifiers();
if (Modifier.isPrivate(mods) && Modifier.isStatic(mods) &&
Modifier.isFinal(mods)) {
pf.setAccessible(true);
long offset = bridge.staticFieldOffset(pf);
java.io.ObjectStreamField[] fields =
(java.io.ObjectStreamField[])pf.get(type);
(java.io.ObjectStreamField[])bridge.getObject(type, offset);
return translateFields(fields);
}
} catch (NoSuchFieldException | IllegalAccessException |
} catch (NoSuchFieldException |
IllegalArgumentException | ClassCastException e) {
}
return null;
}
private static ObjectStreamField[] translateFields(
java.io.ObjectStreamField[] fields) {
private static ObjectStreamField[] translateFields(java.io.ObjectStreamField[] fields) {
if (fields == null) {
return null;
}
ObjectStreamField[] translation =
new ObjectStreamField[fields.length];
new ObjectStreamField[fields.length];
for (int i = 0; i < fields.length; i++) {
translation[i] = new ObjectStreamField(fields[i].getName(),
fields[i].getType());
@ -450,13 +475,11 @@ public class ObjectStreamClass implements java.io.Serializable {
* If it is declared, use the declared serialPersistentFields.
* Otherwise, extract the fields from the class itself.
*/
fields = persistentFieldsValue.get(cl);
fields = persistentFieldsValue.get(cl);
if (fields == null) {
/* Get all of the declared fields for this
* Class. setAccessible on all fields so they
* can be accessed later. Create a temporary
* ObjectStreamField array to hold each
/* Get all of the declared fields for this Class.
* Create a temporary ObjectStreamField array to hold each
* non-static, non-transient field. Then copy the
* temporary array into an array of the correct
* size once the number of fields is known.
@ -471,7 +494,6 @@ public class ObjectStreamClass implements java.io.Serializable {
int modifiers = fld.getModifiers();
if (!Modifier.isStatic(modifiers) &&
!Modifier.isTransient(modifiers)) {
fld.setAccessible(true) ;
tempFields[numFields++] = new ObjectStreamField(fld);
}
}
@ -487,7 +509,6 @@ public class ObjectStreamClass implements java.io.Serializable {
try {
Field reflField = cl.getDeclaredField(fields[j].getName());
if (fields[j].getType() == reflField.getType()) {
reflField.setAccessible(true);
fields[j].setField(reflField);
}
} catch (NoSuchFieldException e) {
@ -527,8 +548,8 @@ public class ObjectStreamClass implements java.io.Serializable {
int mods = f.getModifiers();
// SerialBug 5: static final SUID should be read
if (Modifier.isStatic(mods) && Modifier.isFinal(mods) ) {
f.setAccessible(true);
suid = f.getLong(cl);
long offset = bridge.staticFieldOffset(f);
suid = bridge.getLong(cl, offset);
// SerialBug 2: should be computed after writeObject
// actualSuid = computeStructuralUID(cl);
} else {
@ -540,16 +561,12 @@ public class ObjectStreamClass implements java.io.Serializable {
suid = _computeSerialVersionUID(cl);
// SerialBug 2: should be computed after writeObject
// actualSuid = computeStructuralUID(cl);
} catch (IllegalAccessException ex) {
suid = _computeSerialVersionUID(cl);
}
}
writeReplaceObjectMethod = ObjectStreamClass.getInheritableMethod(cl,
"writeReplace", noTypesList, Object.class);
writeReplaceObjectMethod = bridge.writeReplaceForSerialization(cl);
readResolveObjectMethod = ObjectStreamClass.getInheritableMethod(cl,
"readResolve", noTypesList, Object.class);
readResolveObjectMethod = bridge.readResolveForSerialization(cl);
if (externalizable)
cons = getExternalizableConstructor(cl) ;
@ -557,14 +574,8 @@ public class ObjectStreamClass implements java.io.Serializable {
cons = getSerializableConstructor(cl) ;
if (serializable && !forProxyClass) {
/* Look for the writeObject method
* Set the accessible flag on it here. ObjectOutputStream
* will call it as necessary.
*/
writeObjectMethod = getPrivateMethod( cl, "writeObject",
new Class<?>[] { java.io.ObjectOutputStream.class }, Void.TYPE ) ;
readObjectMethod = getPrivateMethod( cl, "readObject",
new Class<?>[] { java.io.ObjectInputStream.class }, Void.TYPE ) ;
writeObjectMethod = bridge.writeObjectForSerialization(cl) ;
readObjectMethod = bridge.readObjectForSerialization(cl);
}
return null;
}
@ -585,27 +596,6 @@ public class ObjectStreamClass implements java.io.Serializable {
}
}
/**
* Returns non-static private method with given signature defined by given
* class, or null if none found. Access checks are disabled on the
* returned method (if any).
*/
private static Method getPrivateMethod(Class<?> cl, String name,
Class<?>[] argTypes,
Class<?> returnType)
{
try {
Method meth = cl.getDeclaredMethod(name, argTypes);
meth.setAccessible(true);
int mods = meth.getModifiers();
return ((meth.getReturnType() == returnType) &&
((mods & Modifier.STATIC) == 0) &&
((mods & Modifier.PRIVATE) != 0)) ? meth : null;
} catch (NoSuchMethodException ex) {
return null;
}
}
// Specific to RMI-IIOP
/**
* Java to IDL ptc-02-01-12 1.5.1
@ -848,6 +838,22 @@ public class ObjectStreamClass implements java.io.Serializable {
return writeObjectMethod != null ;
}
/**
* Returns true if represented class is serializable or externalizable and
* defines a conformant writeReplace method. Otherwise, returns false.
*/
boolean hasWriteReplaceMethod() {
return (writeReplaceObjectMethod != null);
}
/**
* Returns true if represented class is serializable or externalizable and
* defines a conformant readResolve method. Otherwise, returns false.
*/
boolean hasReadResolveMethod() {
return (readResolveObjectMethod != null);
}
/**
* Returns when or not this class should be custom
* marshaled (use chunking). This should happen if
@ -904,7 +910,7 @@ public class ObjectStreamClass implements java.io.Serializable {
{
if (cons != null) {
try {
return cons.newInstance(new Object[0]);
return cons.newInstance();
} catch (IllegalAccessException ex) {
// should not occur, as access checks have been suppressed
InternalError ie = new InternalError();
@ -912,7 +918,7 @@ public class ObjectStreamClass implements java.io.Serializable {
throw ie ;
}
} else {
throw new UnsupportedOperationException();
throw new UnsupportedOperationException("no constructor for " + ofClass);
}
}
@ -921,15 +927,8 @@ public class ObjectStreamClass implements java.io.Serializable {
* Access checks are disabled on the returned constructor (if any), since
* the defining class may still be non-public.
*/
private static Constructor getExternalizableConstructor(Class<?> cl) {
try {
Constructor cons = cl.getDeclaredConstructor(new Class<?>[0]);
cons.setAccessible(true);
return ((cons.getModifiers() & Modifier.PUBLIC) != 0) ?
cons : null;
} catch (NoSuchMethodException ex) {
return null;
}
private static Constructor<?> getExternalizableConstructor(Class<?> cl) {
return bridge.newConstructorForExternalization(cl);
}
/**
@ -937,28 +936,8 @@ public class ObjectStreamClass implements java.io.Serializable {
* superclass, or null if none found. Access checks are disabled on the
* returned constructor (if any).
*/
private static Constructor getSerializableConstructor(Class<?> cl) {
Class<?> initCl = cl;
while (Serializable.class.isAssignableFrom(initCl)) {
if ((initCl = initCl.getSuperclass()) == null) {
return null;
}
}
try {
Constructor cons = initCl.getDeclaredConstructor(new Class<?>[0]);
int mods = cons.getModifiers();
if ((mods & Modifier.PRIVATE) != 0 ||
((mods & (Modifier.PUBLIC | Modifier.PROTECTED)) == 0 &&
!packageEquals(cl, initCl)))
{
return null;
}
cons = bridge.newConstructorForSerialization(cl, cons);
cons.setAccessible(true);
return cons;
} catch (NoSuchMethodException ex) {
return null;
}
private static Constructor<?> getSerializableConstructor(Class<?> cl) {
return bridge.newConstructorForSerialization(cl);
}
/*
@ -1522,11 +1501,11 @@ public class ObjectStreamClass implements java.io.Serializable {
* @since JDK 1.2
*/
private boolean hasExternalizableBlockData;
Method writeObjectMethod;
Method readObjectMethod;
private transient Method writeReplaceObjectMethod;
private transient Method readResolveObjectMethod;
private Constructor cons ;
private transient MethodHandle writeObjectMethod;
private transient MethodHandle readObjectMethod;
private transient MethodHandle writeReplaceObjectMethod;
private transient MethodHandle readResolveObjectMethod;
private transient Constructor<?> cons;
/**
* Beginning in Java to IDL ptc/02-01-12, RMI-IIOP has a
@ -1543,44 +1522,12 @@ public class ObjectStreamClass implements java.io.Serializable {
*/
private ObjectStreamClass localClassDesc;
/* Find out if the class has a static class initializer <clinit> */
private static Method hasStaticInitializerMethod = null;
/**
* Returns true if the given class defines a static initializer method,
* false otherwise.
*/
private static boolean hasStaticInitializer(Class<?> cl) {
if (hasStaticInitializerMethod == null) {
Class<?> classWithThisMethod = null;
try {
if (classWithThisMethod == null)
classWithThisMethod = java.io.ObjectStreamClass.class;
hasStaticInitializerMethod =
classWithThisMethod.getDeclaredMethod("hasStaticInitializer",
new Class<?>[] { Class.class });
} catch (NoSuchMethodException ex) {
}
if (hasStaticInitializerMethod == null) {
// XXX I18N, logging needed
throw new InternalError("Can't find hasStaticInitializer method on "
+ classWithThisMethod.getName());
}
hasStaticInitializerMethod.setAccessible(true);
}
try {
Boolean retval = (Boolean)
hasStaticInitializerMethod.invoke(null, new Object[] { cl });
return retval.booleanValue();
} catch (Exception ex) {
// XXX I18N, logging needed
InternalError ie = new InternalError( "Error invoking hasStaticInitializer" ) ;
ie.initCause( ex ) ;
throw ie ;
}
return bridge.hasStaticInitializerForSerialization(cl);
}
@ -1754,7 +1701,6 @@ public class ObjectStreamClass implements java.io.Serializable {
if ((meth == null) || (meth.getReturnType() != returnType)) {
return null;
}
meth.setAccessible(true);
int mods = meth.getModifiers();
if ((mods & (Modifier.STATIC | Modifier.ABSTRACT)) != 0) {
return null;
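The new invokeWriteObject/invokeReadObject helpers above wrap a MethodHandle invocation and rewrap any failure as InvocationTargetException, matching what the old reflective Method.invoke calls reported to callers. The following self-contained sketch shows only that wrapping pattern; it uses a public method handle because the real readObject/writeObject handles come from the Bridge, and all names below are illustrative:

```java
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.lang.reflect.InvocationTargetException;

public class HandleInvokeDemo {
    public static class Target {
        public void hello(String who) { System.out.println("hello " + who); }
    }

    // Same shape as invokeReadObject/invokeWriteObject: a null handle means
    // "no such method", and any Throwable is rewrapped as InvocationTargetException.
    static boolean invoke(MethodHandle mh, Object obj, String arg)
            throws InvocationTargetException {
        if (mh == null) {
            return false;
        }
        try {
            mh.invoke(obj, arg);
            return true;
        } catch (Throwable t) {
            throw new InvocationTargetException(t, "hello");
        }
    }

    public static void main(String[] args) throws Exception {
        MethodHandle mh = MethodHandles.lookup().findVirtual(
                Target.class, "hello", MethodType.methodType(void.class, String.class));
        System.out.println(invoke(mh, new Target(), "world"));   // hello world, then true
    }
}
```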

View File

@ -32,6 +32,7 @@ module java.corba {
requires java.logging;
requires java.naming;
requires java.transaction;
requires jdk.unsupported;
exports javax.activity;
exports javax.rmi;
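The added "requires jdk.unsupported" is what makes sun.misc.Unsafe and sun.reflect.ReflectionFactory (used by the rewritten Bridge and SharedSecrets later in this commit) readable from java.corba. A hypothetical consumer module that needed the same classes would declare it the same way; the module name below is made up:

```java
// module-info.java (illustrative module, not part of this commit)
module demo.serialization {
    requires jdk.unsupported;   // home of sun.misc.Unsafe and sun.reflect.ReflectionFactory
}
```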

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2004, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,25 +25,24 @@
package sun.corba ;
import java.io.OptionalDataException;
import java.lang.invoke.MethodHandle;
import java.lang.reflect.Field ;
import java.lang.reflect.Method ;
import java.lang.reflect.Constructor ;
import java.lang.reflect.InvocationTargetException ;
import java.io.ObjectInputStream ;
import java.security.AccessController;
import java.security.Permission;
import java.security.PrivilegedAction;
import jdk.internal.misc.Unsafe ;
import jdk.internal.reflect.ReflectionFactory;
import sun.misc.Unsafe;
import sun.reflect.ReflectionFactory;
/** This class provides the methods for fundamental JVM operations
* needed in the ORB that are not part of the public Java API. This includes:
* <ul>
* <li>throwException, which can throw undeclared checked exceptions.
* This is needed to handle throwing arbitrary exceptions across a standardized OMG interface that (incorrectly) does not specify appropriate exceptions.</li>
* This is needed to handle throwing arbitrary exceptions across a standardized
* OMG interface that (incorrectly) does not specify appropriate exceptions.</li>
* <li>putXXX/getXXX methods that allow unchecked access to fields of objects.
* This is used for setting uninitialzed non-static final fields (which is
* impossible with reflection) and for speed.</li>
@ -71,88 +70,28 @@ import jdk.internal.reflect.ReflectionFactory;
*/
public final class Bridge
{
private static final Class[] NO_ARGS = new Class[] {};
private static final Permission getBridgePermission =
new BridgePermission( "getBridge" ) ;
new BridgePermission("getBridge");
private static Bridge bridge = null ;
// latestUserDefinedLoader() is a private static method
// in ObjectInputStream in JDK 1.3 through 1.5.
// We use reflection in a doPrivileged block to get a
// Method reference and make it accessible.
private final Method latestUserDefinedLoaderMethod ;
private final Unsafe unsafe ;
/** Access to Unsafe to read/write fields. */
private static final Unsafe unsafe = AccessController.doPrivileged(
(PrivilegedAction<Unsafe>)() -> {
try {
Field field = Unsafe.class.getDeclaredField("theUnsafe");
field.setAccessible(true);
return (Unsafe)field.get(null);
} catch (NoSuchFieldException |IllegalAccessException ex) {
throw new InternalError("Unsafe.theUnsafe field not available", ex);
}
}
) ;
private final ReflectionFactory reflectionFactory ;
private Method getLatestUserDefinedLoaderMethod()
{
return (Method) AccessController.doPrivileged(
new PrivilegedAction()
{
public Object run()
{
Method result = null;
try {
Class io = ObjectInputStream.class;
result = io.getDeclaredMethod(
"latestUserDefinedLoader", NO_ARGS);
result.setAccessible(true);
} catch (NoSuchMethodException nsme) {
Error err = new Error( "java.io.ObjectInputStream" +
" latestUserDefinedLoader " + nsme );
err.initCause(nsme) ;
throw err ;
}
return result;
}
}
);
}
private Unsafe getUnsafe() {
Field fld = (Field)AccessController.doPrivileged(
new PrivilegedAction()
{
public Object run()
{
Field fld = null ;
try {
Class unsafeClass = jdk.internal.misc.Unsafe.class ;
fld = unsafeClass.getDeclaredField( "theUnsafe" ) ;
fld.setAccessible( true ) ;
return fld ;
} catch (NoSuchFieldException exc) {
Error err = new Error( "Could not access Unsafe" ) ;
err.initCause( exc ) ;
throw err ;
}
}
}
) ;
Unsafe unsafe = null;
try {
unsafe = (Unsafe)(fld.get( null )) ;
} catch (Throwable t) {
Error err = new Error( "Could not access Unsafe" ) ;
err.initCause( t ) ;
throw err ;
}
return unsafe ;
}
private Bridge()
{
latestUserDefinedLoaderMethod = getLatestUserDefinedLoaderMethod();
unsafe = getUnsafe() ;
reflectionFactory = (ReflectionFactory)AccessController.doPrivileged(
new ReflectionFactory.GetReflectionFactoryAction());
private Bridge() {
reflectionFactory = ReflectionFactory.getReflectionFactory();
}
/** Fetch the Bridge singleton. This requires the following
@ -182,23 +121,8 @@ public final class Bridge
/** Obtain the latest user defined ClassLoader from the call stack.
* This is required by the RMI-IIOP specification.
*/
public final ClassLoader getLatestUserDefinedLoader()
{
try {
// Invoke the ObjectInputStream.latestUserDefinedLoader method
return (ClassLoader)latestUserDefinedLoaderMethod.invoke(null,
(Object[])NO_ARGS);
} catch (InvocationTargetException ite) {
Error err = new Error(
"sun.corba.Bridge.latestUserDefinedLoader: " + ite ) ;
err.initCause( ite ) ;
throw err ;
} catch (IllegalAccessException iae) {
Error err = new Error(
"sun.corba.Bridge.latestUserDefinedLoader: " + iae ) ;
err.initCause( iae ) ;
throw err ;
}
public final ClassLoader getLatestUserDefinedLoader() {
return jdk.internal.misc.VM.latestUserDefinedLoader();
}
/**
@ -345,6 +269,23 @@ public final class Bridge
return unsafe.objectFieldOffset( f ) ;
}
/**
* Returns the offset of a static field.
*/
public final long staticFieldOffset(Field f)
{
return unsafe.staticFieldOffset( f ) ;
}
/**
* Ensure that the class has been initalized.
* @param cl the class to ensure is initialized
*/
public final void ensureClassInitialized(Class<?> cl) {
unsafe.ensureClassInitialized(cl);
}
/** Throw the exception.
* The exception may be an undeclared checked exception.
*/
@ -353,16 +294,55 @@ public final class Bridge
unsafe.throwException( ee ) ;
}
/** Obtain a constructor for Class cl using constructor cons which
* may be the constructor defined in a superclass of cl. This is
* used to create a constructor for Serializable classes that
* constructs an instance of the Serializable class using the
/**
* Obtain a constructor for Class cl.
* This is used to create a constructor for Serializable classes that
* construct an instance of the Serializable class using the
* no args constructor of the first non-Serializable superclass
* of the Serializable class.
*/
public final Constructor newConstructorForSerialization( Class cl,
Constructor cons )
{
return reflectionFactory.newConstructorForSerialization( cl, cons ) ;
public final Constructor<?> newConstructorForSerialization( Class<?> cl ) {
return reflectionFactory.newConstructorForSerialization( cl ) ;
}
public final Constructor<?> newConstructorForExternalization(Class<?> cl) {
return reflectionFactory.newConstructorForExternalization( cl ) ;
}
/**
* Returns true if the given class defines a static initializer method,
* false otherwise.
*/
public final boolean hasStaticInitializerForSerialization(Class<?> cl) {
return reflectionFactory.hasStaticInitializerForSerialization(cl);
}
public final MethodHandle writeObjectForSerialization(Class<?> cl) {
return reflectionFactory.writeObjectForSerialization(cl);
}
public final MethodHandle readObjectForSerialization(Class<?> cl) {
return reflectionFactory.readObjectForSerialization(cl);
}
public final MethodHandle readObjectNoDataForSerialization(Class<?> cl) {
return reflectionFactory.readObjectNoDataForSerialization(cl);
}
public final MethodHandle readResolveForSerialization(Class<?> cl) {
return reflectionFactory.readResolveForSerialization(cl);
}
public final MethodHandle writeReplaceForSerialization(Class<?> cl) {
return reflectionFactory.writeReplaceForSerialization(cl);
}
/**
* Return a new OptionalDataException instance.
* @return a new OptionalDataException instance
*/
public final OptionalDataException newOptionalDataExceptionForSerialization(boolean bool) {
return reflectionFactory.newOptionalDataExceptionForSerialization(bool);
}
}
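Bridge.newConstructorForSerialization now takes only the target class and defers the superclass-constructor lookup to ReflectionFactory. The sketch below shows what that call produces, assuming the single-argument overload exposed by the jdk.unsupported sun.reflect.ReflectionFactory facade in JDK 9; the Base/Point classes are invented for the demo and setAccessible is called defensively:

```java
import java.io.Serializable;
import java.lang.reflect.Constructor;
import sun.reflect.ReflectionFactory;

public class SerialCtorDemo {
    static class Base {
        Base() { System.out.println("Base() runs"); }
    }
    static class Point extends Base implements Serializable {
        int x = 42;                  // field initializer is skipped below
        Point() { System.out.println("Point() runs"); }
    }

    public static void main(String[] args) throws Exception {
        ReflectionFactory rf = ReflectionFactory.getReflectionFactory();
        // Constructor that runs Base() -- the no-arg constructor of the first
        // non-Serializable superclass -- while skipping Point's own initialization.
        Constructor<?> ctor = rf.newConstructorForSerialization(Point.class);
        ctor.setAccessible(true);
        Point p = (Point) ctor.newInstance();
        System.out.println("x = " + p.x);   // 0, not 42
    }
}
```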

View File

@ -25,13 +25,13 @@
package sun.corba;
import com.sun.corba.se.impl.io.ValueUtility;
import jdk.internal.misc.Unsafe;
import java.lang.reflect.Field;
import java.security.AccessController;
import java.security.PrivilegedAction;
import com.sun.corba.se.impl.io.ValueUtility;
import sun.misc.Unsafe;
/** A repository of "shared secrets", which are a mechanism for
calling implementation-private methods in another package without
using reflection. A package-private class implements a public
@ -43,22 +43,22 @@ import java.security.PrivilegedAction;
// SharedSecrets cloned in corba repo to avoid build issues
public class SharedSecrets {
private static final Unsafe unsafe = getUnsafe();
private static JavaCorbaAccess javaCorbaAccess;
private static Unsafe getUnsafe() {
PrivilegedAction<Unsafe> pa = () -> {
Class<?> unsafeClass = jdk.internal.misc.Unsafe.class ;
try {
Field f = unsafeClass.getDeclaredField("theUnsafe");
f.setAccessible(true);
return (Unsafe) f.get(null);
} catch (Exception e) {
throw new Error(e);
}
};
return AccessController.doPrivileged(pa);
}
/** Access to Unsafe to read/write fields. */
private static final Unsafe unsafe = AccessController.doPrivileged(
(PrivilegedAction<Unsafe>)() -> {
try {
Field field = Unsafe.class.getDeclaredField("theUnsafe");
field.setAccessible(true);
return (Unsafe)field.get(null);
} catch (NoSuchFieldException |IllegalAccessException ex) {
throw new InternalError("Unsafe.theUnsafe field not available", ex);
}
}
);
private static JavaCorbaAccess javaCorbaAccess;
public static JavaCorbaAccess getJavaCorbaAccess() {
if (javaCorbaAccess == null) {

View File

@ -542,3 +542,5 @@ a20da289f646ee44440695b81abc0548330e4ca7 jdk-9+136
dfcbf839e299e7e2bba1da69bdb347617ea4c7e8 jdk-9+137
fc0956308c7a586267c5dd35dff74f773aa9c3eb jdk-9+138
08492e67bf3226784dab3bf9ae967382ddbc1af5 jdk-9+139
fec31089c2ef5a12dd64f401b0bf2e00f56ee0d0 jdk-9+140
160a00bc6ed0af1fdf8418fc65e6bddbbc0c536d jdk-9+141

View File

@ -1,11 +1,9 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?>
<pydev_project>
<?eclipse-pydev version="1.0"?><pydev_project>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/mx.jvmci</path>
<path>/.mx.jvmci</path>
</pydev_pathproperty>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/mx</path>

View File

@ -61,9 +61,8 @@ ifeq ($(OPENJDK_TARGET_OS), linux)
else ifeq ($(OPENJDK_TARGET_OS), solaris)
SA_TOOLCHAIN := TOOLCHAIN_LINK_CXX
COMMON_CFLAGS := -DSOLARIS_11_B159_OR_LATER
SA_CFLAGS := $(CFLAGS_JDKLIB) $(COMMON_CFLAGS)
SA_CXXFLAGS := $(CXXFLAGS_JDKLIB) $(COMMON_CFLAGS)
SA_CFLAGS := $(CFLAGS_JDKLIB)
SA_CXXFLAGS := $(CXXFLAGS_JDKLIB)
SA_LDFLAGS := $(subst -Wl$(COMMA)-z$(COMMA)defs,, $(LDFLAGS_JDKLIB)) \
-mt $(LDFLAGS_CXX_JDK)
SA_LIBS := -ldl -ldemangle -lthread -lc
@ -75,7 +74,7 @@ else ifeq ($(OPENJDK_TARGET_OS), macosx)
-mstack-alignment=16 -fPIC
SA_LDFLAGS := $(LDFLAGS_JDKLIB)
SA_LIBS := -framework Foundation -framework JavaNativeFoundation \
-framework Security -framework CoreFoundation
-framework JavaRuntimeSupport -framework Security -framework CoreFoundation
else ifeq ($(OPENJDK_TARGET_OS), windows)
SA_NAME := sawindbg

View File

@ -142,6 +142,10 @@ bool frame::safe_for_sender(JavaThread *thread) {
}
sender_sp = _unextended_sp + _cb->frame_size();
// Is sender_sp safe?
if ((address)sender_sp >= thread->stack_base()) {
return false;
}
sender_unextended_sp = sender_sp;
sender_pc = (address) *(sender_sp-1);
// Note: frame::sender_sp_offset is only valid for compiled frame
@ -200,8 +204,15 @@ bool frame::safe_for_sender(JavaThread *thread) {
}
// construct the potential sender
frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);
return sender.is_entry_frame_valid(thread);
// Validate the JavaCallWrapper an entry frame must have
address jcw = (address)sender.entry_frame_call_wrapper();
bool jcw_safe = (jcw < thread->stack_base()) && (jcw > (address)sender.fp());
return jcw_safe;
}
CompiledMethod* nm = sender_blob->as_compiled_method_or_null();

View File

@ -1962,6 +1962,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// due to cache line collision.
__ serialize_memory(rthread, r2);
}
} else {
__ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
}
// check for safepoint operation in progress and/or pending suspend requests

View File

@ -454,8 +454,9 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
__ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
#if INCLUDE_JVMCI
// Check if we need to take lock at entry of synchronized method.
if (UseJVMCICompiler) {
// Check if we need to take lock at entry of synchronized method. This can
// only occur on method entry so emit it only for vtos with step 0.
if (UseJVMCICompiler && state == vtos && step == 0) {
Label L;
__ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
__ cbz(rscratch1, L);
@ -464,6 +465,16 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
// Take lock.
lock_method();
__ bind(L);
} else {
#ifdef ASSERT
if (UseJVMCICompiler) {
Label L;
__ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
__ cbz(rscratch1, L);
__ stop("unexpected pending monitor in deopt entry");
__ bind(L);
}
#endif
}
#endif
// handle exceptions

View File

@ -2102,7 +2102,9 @@ class Assembler : public AbstractAssembler {
inline void mfvscr( VectorRegister d);
// Vector-Scalar (VSX) instructions.
inline void lxvd2x( VectorSRegister d, Register a);
inline void lxvd2x( VectorSRegister d, Register a, Register b);
inline void stxvd2x( VectorSRegister d, Register a);
inline void stxvd2x( VectorSRegister d, Register a, Register b);
inline void mtvrd( VectorRegister d, Register a);
inline void mfvrd( Register a, VectorRegister d);

View File

@ -734,8 +734,10 @@ inline void Assembler::lvsl( VectorRegister d, Register s1, Register s2) { emit
inline void Assembler::lvsr( VectorRegister d, Register s1, Register s2) { emit_int32( LVSR_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
// Vector-Scalar (VSX) instructions.
inline void Assembler::lxvd2x (VectorSRegister d, Register s1, Register s2) { emit_int32( LXVD2X_OPCODE | vsrt(d) | ra(s1) | rb(s2)); }
inline void Assembler::stxvd2x(VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra(s1) | rb(s2)); }
inline void Assembler::lxvd2x (VectorSRegister d, Register s1) { emit_int32( LXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
inline void Assembler::lxvd2x (VectorSRegister d, Register s1, Register s2) { emit_int32( LXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::stxvd2x(VectorSRegister d, Register s1) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
inline void Assembler::stxvd2x(VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::mtvrd( VectorRegister d, Register a) { emit_int32( MTVSRD_OPCODE | vrt(d) | ra(a) | 1u); } // 1u: d is treated as Vector (VMX/Altivec).
inline void Assembler::mfvrd( Register a, VectorRegister d) { emit_int32( MFVSRD_OPCODE | vrt(d) | ra(a) | 1u); } // 1u: d is treated as Vector (VMX/Altivec).

View File

@ -1894,6 +1894,22 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ beq(combined_check, slow);
}
if (flags & LIR_OpArrayCopy::type_check) {
if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(tmp, dst);
__ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
__ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
__ bge(CCR0, slow);
}
if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) {
__ load_klass(tmp, src);
__ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
__ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
__ bge(CCR0, slow);
}
}
// Higher 32bits must be null.
__ extsw(length, length);

View File

@ -1220,8 +1220,8 @@ class StubGenerator: public StubCodeGenerator {
__ bind(l_10);
// Use loop with VSX load/store instructions to
// copy 32 elements a time.
__ lxvd2x(tmp_vsr1, 0, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr1, 0, R4_ARG2); // Store to dst
__ lxvd2x(tmp_vsr1, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst
__ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src + 16
__ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
__ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32
@ -1486,8 +1486,8 @@ class StubGenerator: public StubCodeGenerator {
__ bind(l_9);
// Use loop with VSX load/store instructions to
// copy 16 elements a time.
__ lxvd2x(tmp_vsr1, 0, R3_ARG1); // Load from src.
__ stxvd2x(tmp_vsr1, 0, R4_ARG2); // Store to dst.
__ lxvd2x(tmp_vsr1, R3_ARG1); // Load from src.
__ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst.
__ lxvd2x(tmp_vsr2, R3_ARG1, tmp1); // Load from src + 16.
__ stxvd2x(tmp_vsr2, R4_ARG2, tmp1); // Store to dst + 16.
__ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32.
@ -1677,8 +1677,8 @@ class StubGenerator: public StubCodeGenerator {
__ bind(l_7);
// Use loop with VSX load/store instructions to
// copy 8 elements a time.
__ lxvd2x(tmp_vsr1, 0, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr1, 0, R4_ARG2); // Store to dst
__ lxvd2x(tmp_vsr1, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst
__ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src + 16
__ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
__ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32
@ -1745,13 +1745,16 @@ class StubGenerator: public StubCodeGenerator {
// Do reverse copy. We assume the case of actual overlap is rare enough
// that we don't have to optimize it.
Label l_1, l_2, l_3, l_4, l_5, l_6;
Label l_1, l_2, l_3, l_4, l_5, l_6, l_7;
Register tmp1 = R6_ARG4;
Register tmp2 = R7_ARG5;
Register tmp3 = R8_ARG6;
Register tmp4 = R0;
VectorSRegister tmp_vsr1 = VSR1;
VectorSRegister tmp_vsr2 = VSR2;
{ // FasterArrayCopy
__ cmpwi(CCR0, R5_ARG3, 0);
__ beq(CCR0, l_6);
@ -1761,6 +1764,25 @@ class StubGenerator: public StubCodeGenerator {
__ add(R4_ARG2, R4_ARG2, R5_ARG3);
__ srdi(R5_ARG3, R5_ARG3, 2);
if (!aligned) {
// check if arrays have same alignment mod 8.
__ xorr(tmp1, R3_ARG1, R4_ARG2);
__ andi_(R0, tmp1, 7);
// Not the same alignment, but ld and std just need to be 4 byte aligned.
__ bne(CCR0, l_7); // to OR from is 8 byte aligned -> copy 2 at a time
// copy 1 element to align to and from on an 8 byte boundary
__ andi_(R0, R3_ARG1, 7);
__ beq(CCR0, l_7);
__ addi(R3_ARG1, R3_ARG1, -4);
__ addi(R4_ARG2, R4_ARG2, -4);
__ addi(R5_ARG3, R5_ARG3, -1);
__ lwzx(tmp2, R3_ARG1);
__ stwx(tmp2, R4_ARG2);
__ bind(l_7);
}
__ cmpwi(CCR0, R5_ARG3, 7);
__ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain
@ -1768,6 +1790,7 @@ class StubGenerator: public StubCodeGenerator {
__ andi(R5_ARG3, R5_ARG3, 7);
__ mtctr(tmp1);
if (!VM_Version::has_vsx()) {
__ bind(l_4);
// Use unrolled version for mass copying (copy 4 elements a time).
// Load feeding store gets zero latency on Power6, however not on Power5.
@ -1783,6 +1806,40 @@ class StubGenerator: public StubCodeGenerator {
__ std(tmp2, 8, R4_ARG2);
__ std(tmp1, 0, R4_ARG2);
__ bdnz(l_4);
} else { // Processor supports VSX, so use it to mass copy.
// Prefetch the data into the L2 cache.
__ dcbt(R3_ARG1, 0);
// If supported set DSCR pre-fetch to deepest.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32-byte. Not 16-byte align as
// loop contains < 8 instructions that fit inside a single
// i-cache sector.
__ align(32);
__ bind(l_4);
// Use loop with VSX load/store instructions to
// copy 8 elements a time.
__ addi(R3_ARG1, R3_ARG1, -32); // Update src-=32
__ addi(R4_ARG2, R4_ARG2, -32); // Update dsc-=32
__ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src+16
__ lxvd2x(tmp_vsr1, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16
__ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst
__ bdnz(l_4);
// Restore DSCR pre-fetch value.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
}
__ cmpwi(CCR0, R5_ARG3, 0);
__ beq(CCR0, l_6);
@ -1892,8 +1949,8 @@ class StubGenerator: public StubCodeGenerator {
__ bind(l_5);
// Use loop with VSX load/store instructions to
// copy 4 elements a time.
__ lxvd2x(tmp_vsr1, 0, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr1, 0, R4_ARG2); // Store to dst
__ lxvd2x(tmp_vsr1, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst
__ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src + 16
__ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
__ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32
@ -1962,6 +2019,9 @@ class StubGenerator: public StubCodeGenerator {
Register tmp3 = R8_ARG6;
Register tmp4 = R0;
VectorSRegister tmp_vsr1 = VSR1;
VectorSRegister tmp_vsr2 = VSR2;
Label l_1, l_2, l_3, l_4, l_5;
__ cmpwi(CCR0, R5_ARG3, 0);
@ -1980,6 +2040,7 @@ class StubGenerator: public StubCodeGenerator {
__ andi(R5_ARG3, R5_ARG3, 3);
__ mtctr(tmp1);
if (!VM_Version::has_vsx()) {
__ bind(l_4);
// Use unrolled version for mass copying (copy 4 elements a time).
// Load feeding store gets zero latency on Power6, however not on Power5.
@ -1995,6 +2056,40 @@ class StubGenerator: public StubCodeGenerator {
__ std(tmp2, 8, R4_ARG2);
__ std(tmp1, 0, R4_ARG2);
__ bdnz(l_4);
} else { // Processor supports VSX, so use it to mass copy.
// Prefetch the data into the L2 cache.
__ dcbt(R3_ARG1, 0);
// If supported set DSCR pre-fetch to deepest.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
__ // Backbranch target aligned to 32 bytes. Not 16-byte aligned, as the
// loop contains < 8 instructions that fit inside a single
// i-cache sector.
__ align(32);
__ bind(l_4);
// Use loop with VSX load/store instructions to
// copy 4 elements a time.
__ addi(R3_ARG1, R3_ARG1, -32); // Update src-=32
__ addi(R4_ARG2, R4_ARG2, -32); // Update dst-=32
__ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src+16
__ lxvd2x(tmp_vsr1, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16
__ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst
__ bdnz(l_4);
// Restore DSCR pre-fetch value.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
}
__ cmpwi(CCR0, R5_ARG3, 0);
__ beq(CCR0, l_1);
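Note on the VSX copy loops above: lxvd2x/stxvd2x each move 16 bytes, so every loop iteration transfers a 32-byte block, and the conjoint (reverse) variants first advance both pointers past the end of the arrays and then step backwards, after an optional one-element peel that brings the source onto an 8-byte boundary. A minimal scalar sketch of the reverse 32-byte-block loop, with illustrative names only (this is not HotSpot code, just the data movement the stub performs):

#include <cstdint>
#include <cstring>

// Each iteration mirrors one pass of the bdnz loop: step both end pointers
// back by 32 bytes, load two 16-byte chunks (the lxvd2x pair), then store
// them (the stxvd2x pair). Both loads complete before the stores, as in the
// generated code, which keeps the backwards copy correct when dst > src.
static void reverse_copy_32byte_blocks(const uint8_t*& src_end, uint8_t*& dst_end, size_t blocks) {
  uint8_t hi[16], lo[16];
  while (blocks-- > 0) {
    src_end -= 32;
    dst_end -= 32;
    std::memcpy(hi, src_end + 16, 16);   // lxvd2x tmp_vsr2, tmp1(=16), src
    std::memcpy(lo, src_end,      16);   // lxvd2x tmp_vsr1, src
    std::memcpy(dst_end + 16, hi, 16);   // stxvd2x tmp_vsr2, tmp1, dst
    std::memcpy(dst_end,      lo, 16);   // stxvd2x tmp_vsr1, dst
  }
}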

View File

@ -656,7 +656,7 @@ void VM_Version::determine_features() {
a->vpmsumb(VR0, VR1, VR2); // code[11] -> vpmsumb
a->tcheck(0); // code[12] -> tcheck
a->mfdscr(R0); // code[13] -> mfdscr
a->lxvd2x(VSR0, 0, R3_ARG1); // code[14] -> vsx
a->lxvd2x(VSR0, R3_ARG1); // code[14] -> vsx
a->blr();
// Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.

View File

@ -2034,6 +2034,27 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ delayed()->nop();
}
// If the compiler was not able to prove that exact type of the source or the destination
// of the arraycopy is an array type, check at runtime if the source or the destination is
// an instance type.
if (flags & LIR_OpArrayCopy::type_check) {
if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(dst, tmp);
__ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2);
__ cmp(tmp2, Klass::_lh_neutral_value);
__ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry());
__ delayed()->nop();
}
if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) {
__ load_klass(src, tmp);
__ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2);
__ cmp(tmp2, Klass::_lh_neutral_value);
__ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry());
__ delayed()->nop();
}
}
if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
// test src_pos register
__ cmp_zero_and_br(Assembler::less, src_pos, *stub->entry());

View File

@ -279,7 +279,13 @@ bool frame::safe_for_sender(JavaThread *thread) {
}
if (sender.is_entry_frame()) {
return sender.is_entry_frame_valid(thread);
// Validate the JavaCallWrapper an entry frame must have
address jcw = (address)sender.entry_frame_call_wrapper();
bool jcw_safe = (jcw <= thread->stack_base()) && (jcw > sender_fp);
return jcw_safe;
}
// If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size

View File

@ -384,8 +384,9 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
address entry = __ pc();
__ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
#if INCLUDE_JVMCI
// Check if we need to take lock at entry of synchronized method.
if (UseJVMCICompiler) {
// Check if we need to take lock at entry of synchronized method. This can
// only occur on method entry so emit it only for vtos with step 0.
if (UseJVMCICompiler && state == vtos && step == 0) {
Label L;
Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
__ ldbool(pending_monitor_enter_addr, Gtemp); // Load if pending monitor enter
@ -395,6 +396,17 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
// Take lock.
lock_method();
__ bind(L);
} else {
#ifdef ASSERT
if (UseJVMCICompiler) {
Label L;
Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
__ ldbool(pending_monitor_enter_addr, Gtemp); // Load if pending monitor enter
__ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
__ stop("unexpected pending monitor in deopt entry");
__ bind(L);
}
#endif
}
#endif
{ Label L;

View File

@ -457,9 +457,10 @@ void VM_Version::revert() {
unsigned int VM_Version::calc_parallel_worker_threads() {
unsigned int result;
if (is_M_series()) {
// for now, use same gc thread calculation for M-series as for niagara-plus
// in future, we may want to tweak parameters for nof_parallel_worker_thread
if (is_M_series() || is_S_series()) {
// for now, use same gc thread calculation for M-series and S-series as for
// niagara-plus. In future, we may want to tweak parameters for
// nof_parallel_worker_thread
result = nof_parallel_worker_threads(5, 16, 8);
} else if (is_niagara_plus()) {
result = nof_parallel_worker_threads(5, 16, 8);
@ -483,6 +484,9 @@ int VM_Version::parse_features(const char* implementation) {
} else if (strstr(impl, "SPARC-M") != NULL) {
// M-series SPARC is based on T-series.
features |= (M_family_m | T_family_m);
} else if (strstr(impl, "SPARC-S") != NULL) {
// S-series SPARC is based on T-series.
features |= (S_family_m | T_family_m);
} else if (strstr(impl, "SPARC-T") != NULL) {
features |= T_family_m;
if (strstr(impl, "SPARC-T1") != NULL) {

View File

@ -49,14 +49,15 @@ protected:
cbcond_instructions = 12,
sparc64_family = 13,
M_family = 14,
T_family = 15,
T1_model = 16,
sparc5_instructions = 17,
aes_instructions = 18,
sha1_instruction = 19,
sha256_instruction = 20,
sha512_instruction = 21,
crc32c_instruction = 22
S_family = 15,
T_family = 16,
T1_model = 17,
sparc5_instructions = 18,
aes_instructions = 19,
sha1_instruction = 20,
sha256_instruction = 21,
sha512_instruction = 22,
crc32c_instruction = 23
};
enum Feature_Flag_Set {
@ -78,6 +79,7 @@ protected:
cbcond_instructions_m = 1 << cbcond_instructions,
sparc64_family_m = 1 << sparc64_family,
M_family_m = 1 << M_family,
S_family_m = 1 << S_family,
T_family_m = 1 << T_family,
T1_model_m = 1 << T1_model,
sparc5_instructions_m = 1 << sparc5_instructions,
@ -105,6 +107,7 @@ protected:
// Returns true if the platform is in the niagara line (T series)
static bool is_M_family(int features) { return (features & M_family_m) != 0; }
static bool is_S_family(int features) { return (features & S_family_m) != 0; }
static bool is_T_family(int features) { return (features & T_family_m) != 0; }
static bool is_niagara() { return is_T_family(_features); }
#ifdef ASSERT
@ -153,6 +156,7 @@ public:
static bool is_niagara_plus() { return is_T_family(_features) && !is_T1_model(_features); }
static bool is_M_series() { return is_M_family(_features); }
static bool is_S_series() { return is_S_family(_features); }
static bool is_T4() { return is_T_family(_features) && has_cbcond(); }
static bool is_T7() { return is_T_family(_features) && has_sparc5_instr(); }

View File

@ -2461,6 +2461,7 @@ void Assembler::movdqu(Address dst, XMMRegister src) {
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
attributes.reset_is_clear_context();
simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x7F);
emit_operand(src, dst);
@ -2490,6 +2491,7 @@ void Assembler::vmovdqu(Address dst, XMMRegister src) {
InstructionMark im(this);
InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
attributes.reset_is_clear_context();
// swap src<->dst for encoding
assert(src != xnoreg, "sanity");
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
@ -2590,6 +2592,7 @@ void Assembler::evmovdquw(Address dst, KRegister mask, XMMRegister src, int vect
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
attributes.reset_is_clear_context();
attributes.set_embedded_opmask_register_specifier(mask);
attributes.set_is_evex_instruction();
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@ -2623,6 +2626,7 @@ void Assembler::evmovdqul(Address dst, XMMRegister src, int vector_len) {
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
attributes.reset_is_clear_context();
attributes.set_is_evex_instruction();
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x7F);
@ -2655,6 +2659,7 @@ void Assembler::evmovdquq(Address dst, XMMRegister src, int vector_len) {
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
attributes.reset_is_clear_context();
attributes.set_is_evex_instruction();
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x7F);
@ -2794,6 +2799,7 @@ void Assembler::movsd(Address dst, XMMRegister src) {
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
attributes.reset_is_clear_context();
attributes.set_rex_vex_w_reverted();
simd_prefix(src, xnoreg, dst, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x11);
@ -2823,6 +2829,7 @@ void Assembler::movss(Address dst, XMMRegister src) {
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
attributes.reset_is_clear_context();
simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x11);
emit_operand(src, dst);
@ -3291,6 +3298,15 @@ void Assembler::vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, i
emit_int8(imm8);
}
void Assembler::vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
assert(VM_Version::supports_avx(), "");
InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x06);
emit_int8(0xC0 | encode);
emit_int8(imm8);
}
void Assembler::pause() {
emit_int8((unsigned char)0xF3);
@ -3362,6 +3378,7 @@ void Assembler::evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Addre
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
attributes.reset_is_clear_context();
attributes.set_embedded_opmask_register_specifier(mask);
attributes.set_is_evex_instruction();
int dst_enc = kdst->encoding();
@ -3384,6 +3401,7 @@ void Assembler::evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMReg
assert(is_vector_masking(), "");
assert(VM_Version::supports_avx512vlbw(), "");
InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.reset_is_clear_context();
attributes.set_embedded_opmask_register_specifier(mask);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@ -3423,6 +3441,7 @@ void Assembler::evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Addre
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_reg_mask */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
attributes.reset_is_clear_context();
attributes.set_embedded_opmask_register_specifier(mask);
attributes.set_is_evex_instruction();
vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@ -3493,6 +3512,7 @@ void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int
assert(VM_Version::supports_evex(), "");
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_is_evex_instruction();
attributes.reset_is_clear_context();
int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
emit_int8(0x76);
emit_int8((unsigned char)(0xC0 | encode));
@ -3503,6 +3523,7 @@ void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vect
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
attributes.reset_is_clear_context();
attributes.set_is_evex_instruction();
int dst_enc = kdst->encoding();
vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@ -3532,6 +3553,7 @@ void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int
void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
assert(VM_Version::supports_evex(), "");
InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.reset_is_clear_context();
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8(0x29);
@ -3543,6 +3565,7 @@ void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vect
assert(VM_Version::supports_evex(), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.reset_is_clear_context();
attributes.set_is_evex_instruction();
attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
int dst_enc = kdst->encoding();
@ -3763,6 +3786,7 @@ void Assembler::evpmovwb(Address dst, KRegister mask, XMMRegister src, int vecto
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
attributes.reset_is_clear_context();
attributes.set_embedded_opmask_register_specifier(mask);
attributes.set_is_evex_instruction();
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
@ -6208,6 +6232,7 @@ void Assembler::vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
attributes.reset_is_clear_context();
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x39);
emit_operand(src, dst);
@ -6238,6 +6263,7 @@ void Assembler::vextracti32x4(Address dst, XMMRegister src, uint8_t imm8) {
InstructionMark im(this);
InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
attributes.reset_is_clear_context();
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x39);
emit_operand(src, dst);
@ -6298,6 +6324,7 @@ void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) {
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
attributes.reset_is_clear_context();
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x19);
emit_operand(src, dst);
@ -6328,6 +6355,7 @@ void Assembler::vextractf32x4(Address dst, XMMRegister src, uint8_t imm8) {
InstructionMark im(this);
InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
attributes.reset_is_clear_context();
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x19);
emit_operand(src, dst);
@ -6371,6 +6399,7 @@ void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) {
InstructionMark im(this);
InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T4,/* input_size_in_bits */ EVEX_64bit);
attributes.reset_is_clear_context();
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x1B);
emit_operand(src, dst);
@ -7181,7 +7210,9 @@ void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, boo
// fourth EVEX.L'L for vector length : 0 is 128, 1 is 256, 2 is 512, currently we do not support 1024
byte4 |= ((_attributes->get_vector_len())& 0x3) << 5;
// last is EVEX.z for zero/merge actions
byte4 |= (_attributes->is_clear_context() ? EVEX_Z : 0);
if (_attributes->is_no_reg_mask() == false) {
byte4 |= (_attributes->is_clear_context() ? EVEX_Z : 0);
}
emit_int8(byte4);
}
@ -7337,7 +7368,7 @@ void Assembler::cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop
emit_int8((unsigned char)(0xF & cop));
}
void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
void Assembler::blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
assert(VM_Version::supports_avx(), "");
assert(!VM_Version::supports_evex(), "");
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
@ -7348,6 +7379,15 @@ void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMM
emit_int8((unsigned char)(0xF0 & src2_enc<<4));
}
void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
assert(VM_Version::supports_avx2(), "");
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8((unsigned char)0x02);
emit_int8((unsigned char)(0xC0 | encode));
emit_int8((unsigned char)imm8);
}
void Assembler::shlxl(Register dst, Register src1, Register src2) {
assert(VM_Version::supports_bmi2(), "");
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);

View File

@ -1550,6 +1550,7 @@ private:
void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
void vpermq(XMMRegister dst, XMMRegister src, int imm8);
void vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
void vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
void pause();
@ -2105,7 +2106,8 @@ private:
// AVX support for vectorized conditional move (double). The following two instructions used only coupled.
void cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len);
void vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);
void blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);
void vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len);
protected:
// Next instructions require address alignment 16 bytes SSE mode.
@ -2139,7 +2141,7 @@ public:
_input_size_in_bits(Assembler::EVEX_NObit),
_is_evex_instruction(false),
_evex_encoding(0),
_is_clear_context(false),
_is_clear_context(true),
_is_extended_context(false),
_current_assembler(NULL),
_embedded_opmask_register_specifier(1) { // hard code k1, it will be initialized for now
@ -2205,7 +2207,7 @@ public:
void set_evex_encoding(int value) { _evex_encoding = value; }
// Set the Evex.Z field to be used to clear all non directed XMM/YMM/ZMM components
void set_is_clear_context(void) { _is_clear_context = true; }
void reset_is_clear_context(void) { _is_clear_context = false; }
// Map back to the current assembler so that we can manage object level association
void set_current_assembler(Assembler *current_assembler) { _current_assembler = current_assembler; }
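For context on the _is_clear_context default flip and the new reset_is_clear_context() hook: they feed the EVEX.z (zeroing vs. merging) bit emitted in evex_prefix(), which after this change is only set when a real opmask register is in play. A small standalone sketch of that decision, assuming EVEX_Z is the top bit of the P2 byte (illustrative only, not the HotSpot encoder):

#include <cstdint>

constexpr uint8_t EVEX_Z = 0x80;  // assumption: z occupies bit 7 of the EVEX P2 byte

// corresponds to the guarded "byte4 |= is_clear_context() ? EVEX_Z : 0" above
static uint8_t evex_p2_z_bit(bool has_reg_mask, bool clear_context) {
  uint8_t byte4 = 0;
  if (has_reg_mask) {               // i.e. is_no_reg_mask() == false
    byte4 |= clear_context ? EVEX_Z : 0;
  }
  return byte4;
}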

View File

@ -3146,6 +3146,23 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ jcc(Assembler::zero, *stub->entry());
}
// If the compiler was not able to prove that exact type of the source or the destination
// of the arraycopy is an array type, check at runtime if the source or the destination is
// an instance type.
if (flags & LIR_OpArrayCopy::type_check) {
if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(tmp, dst);
__ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
__ jcc(Assembler::greaterEqual, *stub->entry());
}
if (!(flags & LIR_OpArrayCopy::src_objarray)) {
__ load_klass(tmp, src);
__ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
__ jcc(Assembler::greaterEqual, *stub->entry());
}
}
// check if negative
if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
__ testl(src_pos, src_pos);

View File

@ -140,6 +140,10 @@ bool frame::safe_for_sender(JavaThread *thread) {
}
sender_sp = _unextended_sp + _cb->frame_size();
// Is sender_sp safe?
if ((address)sender_sp >= thread->stack_base()) {
return false;
}
sender_unextended_sp = sender_sp;
// On Intel the return_address is always the word on the stack
sender_pc = (address) *(sender_sp-1);
@ -199,8 +203,15 @@ bool frame::safe_for_sender(JavaThread *thread) {
}
// construct the potential sender
frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);
return sender.is_entry_frame_valid(thread);
// Validate the JavaCallWrapper an entry frame must have
address jcw = (address)sender.entry_frame_call_wrapper();
bool jcw_safe = (jcw < thread->stack_base()) && (jcw > (address)sender.fp());
return jcw_safe;
}
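The added JavaCallWrapper check mirrors the SPARC change earlier in this commit: the wrapper address of an entry frame has to fall inside the sender's stack region, below the thread's stack base and above the saved frame pointer. As a hedged one-line predicate (names illustrative):

// Stacks grow downward, so a plausible JavaCallWrapper lies strictly between
// the sender's frame pointer and the thread's stack base.
static bool jcw_is_plausible(const char* jcw, const char* stack_base, const char* sender_fp) {
  return jcw < stack_base && jcw > sender_fp;
}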
CompiledMethod* nm = sender_blob->as_compiled_method_or_null();

View File

@ -4309,6 +4309,15 @@ void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int v
}
}
void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
if (reachable(src)) {
Assembler::vpand(dst, nds, as_Address(src), vector_len);
} else {
lea(rscratch1, src);
Assembler::vpand(dst, nds, Address(rscratch1, 0), vector_len);
}
}
void MacroAssembler::vpbroadcastw(XMMRegister dst, XMMRegister src) {
int dst_enc = dst->encoding();
int src_enc = src->encoding();

View File

@ -943,6 +943,23 @@ class MacroAssembler: public Assembler {
bool multi_block, XMMRegister shuf_mask);
#endif
#ifdef _LP64
private:
void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
Register e, Register f, Register g, Register h, int iteration);
void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
Register a, Register b, Register c, Register d, Register e, Register f,
Register g, Register h, int iteration);
void addmq(int disp, Register r1, Register r2);
public:
void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
XMMRegister shuf_mask);
#endif
void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
Register buf, Register state, Register ofs, Register limit, Register rsp,
@ -1177,6 +1194,10 @@ public:
void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
void vpbroadcastw(XMMRegister dst, XMMRegister src);
void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

View File

@ -674,6 +674,11 @@ void MacroAssembler::addm(int disp, Register r1, Register r2) {
movl(Address(r1, disp), r2);
}
void MacroAssembler::addmq(int disp, Register r1, Register r2) {
addq(r2, Address(r1, disp));
movq(Address(r1, disp), r2);
}
void MacroAssembler::sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
Register buf, Register state, Register ofs, Register limit, Register rsp,
@ -1026,4 +1031,488 @@ bind(compute_size1);
bind(compute_size_end1);
}
}
void MacroAssembler::sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c,
Register d, Register e, Register f, Register g, Register h,
int iteration)
{
const Register& y0 = r13;
const Register& y1 = r14;
const Register& y2 = r15;
#ifdef _WIN64
const Register& y3 = rcx;
#else
const Register& y3 = rdi;
#endif
const Register& T1 = r12;
if (iteration % 4 > 0) {
addq(old_h, y2); //h = k + w + h + S0 + S1 + CH = t1 + S0;
}
movq(y2, f); //y2 = f; CH
rorxq(y0, e, 41); //y0 = e >> 41; S1A
rorxq(y1, e, 18); //y1 = e >> 18; S1B
xorq(y2, g); //y2 = f^g; CH
xorq(y0, y1); //y0 = (e >> 41) ^ (e >> 18); S1
rorxq(y1, e, 14); //y1 = (e >> 14); S1
andq(y2, e); //y2 = (f^g)&e; CH
if (iteration % 4 > 0 ) {
addq(old_h, y3); //h = t1 + S0 + MAJ
}
xorq(y0, y1); //y0 = (e >> 41) ^ (e >> 18) ^ (e >> 14); S1
rorxq(T1, a, 34); //T1 = a >> 34; S0B
xorq(y2, g); //y2 = CH = ((f^g)&e) ^g; CH
rorxq(y1, a, 39); //y1 = a >> 39; S0A
movq(y3, a); //y3 = a; MAJA
xorq(y1, T1); //y1 = (a >> 39) ^ (a >> 34); S0
rorxq(T1, a, 28); //T1 = (a >> 28); S0
addq(h, Address(rsp, (8 * iteration))); //h = k + w + h; --
orq(y3, c); //y3 = a | c; MAJA
xorq(y1, T1); //y1 = (a >> 39) ^ (a >> 34) ^ (a >> 28); S0
movq(T1, a); //T1 = a; MAJB
andq(y3, b); //y3 = (a | c)&b; MAJA
andq(T1, c); //T1 = a&c; MAJB
addq(y2, y0); //y2 = S1 + CH; --
addq(d, h); //d = k + w + h + d; --
orq(y3, T1); //y3 = MAJ = (a | c)&b) | (a&c); MAJ
addq(h, y1); //h = k + w + h + S0; --
addq(d, y2); //d = k + w + h + d + S1 + CH = d + t1; --
if (iteration % 4 == 3) {
addq(h, y2); //h = k + w + h + S0 + S1 + CH = t1 + S0; --
addq(h, y3); //h = t1 + S0 + MAJ; --
}
}
void MacroAssembler::sha512_AVX2_one_round_and_schedule(
XMMRegister xmm4, // ymm4
XMMRegister xmm5, // ymm5
XMMRegister xmm6, // ymm6
XMMRegister xmm7, // ymm7
Register a, //rax
Register b, //rbx
Register c, //rdi
Register d, //rsi
Register e, //r8
Register f, //r9
Register g, //r10
Register h, //r11
int iteration)
{
const Register& y0 = r13;
const Register& y1 = r14;
const Register& y2 = r15;
#ifdef _WIN64
const Register& y3 = rcx;
#else
const Register& y3 = rdi;
#endif
const Register& T1 = r12;
if (iteration % 4 == 0) {
// Extract w[t - 7]
// xmm0 = W[-7]
vperm2f128(xmm0, xmm7, xmm6, 3);
vpalignr(xmm0, xmm0, xmm6, 8, AVX_256bit);
// Calculate w[t - 16] + w[t - 7]
vpaddq(xmm0, xmm0, xmm4, AVX_256bit); //xmm0 = W[-7] + W[-16]
// Extract w[t - 15]
//xmm1 = W[-15]
vperm2f128(xmm1, xmm5, xmm4, 3);
vpalignr(xmm1, xmm1, xmm4, 8, AVX_256bit);
// Calculate sigma0
// Calculate w[t - 15] ror 1
vpsrlq(xmm2, xmm1, 1, AVX_256bit);
vpsllq(xmm3, xmm1, (64 - 1), AVX_256bit);
vpor(xmm3, xmm3, xmm2, AVX_256bit); //xmm3 = W[-15] ror 1
// Calculate w[t - 15] shr 7
vpsrlq(xmm8, xmm1, 7, AVX_256bit); //xmm8 = W[-15] >> 7
} else if (iteration % 4 == 1) {
//Calculate w[t - 15] ror 8
vpsrlq(xmm2, xmm1, 8, AVX_256bit);
vpsllq(xmm1, xmm1, (64 - 8), AVX_256bit);
vpor(xmm1, xmm1, xmm2, AVX_256bit); //xmm1 = W[-15] ror 8
//XOR the three components
vpxor(xmm3, xmm3, xmm8, AVX_256bit); //xmm3 = W[-15] ror 1 ^ W[-15] >> 7
vpxor(xmm1, xmm3, xmm1, AVX_256bit); //xmm1 = s0
//Add three components, w[t - 16], w[t - 7] and sigma0
vpaddq(xmm0, xmm0, xmm1, AVX_256bit); //xmm0 = W[-16] + W[-7] + s0
// Move to appropriate lanes for calculating w[16] and w[17]
vperm2f128(xmm4, xmm0, xmm0, 0); //xmm4 = W[-16] + W[-7] + s0{ BABA }
address MASK_YMM_LO = StubRoutines::x86::pshuffle_byte_flip_mask_addr_sha512();
//Move to appropriate lanes for calculating w[18] and w[19]
vpand(xmm0, xmm0, ExternalAddress(MASK_YMM_LO + 32), AVX_256bit); //xmm0 = W[-16] + W[-7] + s0{ DC00 }
//Calculate w[16] and w[17] in both 128 bit lanes
//Calculate sigma1 for w[16] and w[17] on both 128 bit lanes
vperm2f128(xmm2, xmm7, xmm7, 17); //xmm2 = W[-2] {BABA}
vpsrlq(xmm8, xmm2, 6, AVX_256bit); //xmm8 = W[-2] >> 6 {BABA}
} else if (iteration % 4 == 2) {
vpsrlq(xmm3, xmm2, 19, AVX_256bit); //xmm3 = W[-2] >> 19 {BABA}
vpsllq(xmm1, xmm2, (64 - 19), AVX_256bit); //xmm1 = W[-2] << 19 {BABA}
vpor(xmm3, xmm3, xmm1, AVX_256bit); //xmm3 = W[-2] ror 19 {BABA}
vpxor(xmm8, xmm8, xmm3, AVX_256bit);// xmm8 = W[-2] ror 19 ^ W[-2] >> 6 {BABA}
vpsrlq(xmm3, xmm2, 61, AVX_256bit); //xmm3 = W[-2] >> 61 {BABA}
vpsllq(xmm1, xmm2, (64 - 61), AVX_256bit); //xmm1 = W[-2] << 61 {BABA}
vpor(xmm3, xmm3, xmm1, AVX_256bit); //xmm3 = W[-2] ror 61 {BABA}
vpxor(xmm8, xmm8, xmm3, AVX_256bit); //xmm8 = s1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6) { BABA }
//Add sigma1 to the other components to get w[16] and w[17]
vpaddq(xmm4, xmm4, xmm8, AVX_256bit); //xmm4 = { W[1], W[0], W[1], W[0] }
//Calculate sigma1 for w[18] and w[19] for upper 128 bit lane
vpsrlq(xmm8, xmm4, 6, AVX_256bit); //xmm8 = W[-2] >> 6 {DC--}
} else if (iteration % 4 == 3){
vpsrlq(xmm3, xmm4, 19, AVX_256bit); //xmm3 = W[-2] >> 19 {DC--}
vpsllq(xmm1, xmm4, (64 - 19), AVX_256bit); //xmm1 = W[-2] << 19 {DC--}
vpor(xmm3, xmm3, xmm1, AVX_256bit); //xmm3 = W[-2] ror 19 {DC--}
vpxor(xmm8, xmm8, xmm3, AVX_256bit); //xmm8 = W[-2] ror 19 ^ W[-2] >> 6 {DC--}
vpsrlq(xmm3, xmm4, 61, AVX_256bit); //xmm3 = W[-2] >> 61 {DC--}
vpsllq(xmm1, xmm4, (64 - 61), AVX_256bit); //xmm1 = W[-2] << 61 {DC--}
vpor(xmm3, xmm3, xmm1, AVX_256bit); //xmm3 = W[-2] ror 61 {DC--}
vpxor(xmm8, xmm8, xmm3, AVX_256bit); //xmm8 = s1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6) { DC-- }
//Add the sigma0 + w[t - 7] + w[t - 16] for w[18] and w[19] to newly calculated sigma1 to get w[18] and w[19]
vpaddq(xmm2, xmm0, xmm8, AVX_256bit); //xmm2 = { W[3], W[2], --, -- }
//Form w[19, w[18], w17], w[16]
vpblendd(xmm4, xmm4, xmm2, 0xF0, AVX_256bit); //xmm4 = { W[3], W[2], W[1], W[0] }
}
movq(y3, a); //y3 = a; MAJA
rorxq(y0, e, 41); // y0 = e >> 41; S1A
rorxq(y1, e, 18); //y1 = e >> 18; S1B
addq(h, Address(rsp, (iteration * 8))); //h = k + w + h; --
orq(y3, c); //y3 = a | c; MAJA
movq(y2, f); //y2 = f; CH
xorq(y2, g); //y2 = f^g; CH
rorxq(T1, a, 34); //T1 = a >> 34; S0B
xorq(y0, y1); //y0 = (e >> 41) ^ (e >> 18); S1
rorxq(y1, e, 14); //y1 = (e >> 14); S1
andq(y2, e); //y2 = (f^g) & e; CH
addq(d, h); //d = k + w + h + d; --
andq(y3, b); //y3 = (a | c)&b; MAJA
xorq(y0, y1); //y0 = (e >> 41) ^ (e >> 18) ^ (e >> 14); S1
rorxq(y1, a, 39); //y1 = a >> 39; S0A
xorq(y1, T1); //y1 = (a >> 39) ^ (a >> 34); S0
rorxq(T1, a, 28); //T1 = (a >> 28); S0
xorq(y2, g); //y2 = CH = ((f^g)&e) ^ g; CH
xorq(y1, T1); //y1 = (a >> 39) ^ (a >> 34) ^ (a >> 28); S0
movq(T1, a); //T1 = a; MAJB
andq(T1, c); //T1 = a&c; MAJB
addq(y2, y0); //y2 = S1 + CH; --
orq(y3, T1); //y3 = MAJ = (a | c)&b) | (a&c); MAJ
addq(h, y1); //h = k + w + h + S0; --
addq(d, y2); //d = k + w + h + d + S1 + CH = d + t1; --
addq(h, y2); //h = k + w + h + S0 + S1 + CH = t1 + S0; --
addq(h, y3); //h = t1 + S0 + MAJ; --
}
void MacroAssembler::sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
Register buf, Register state, Register ofs, Register limit, Register rsp,
bool multi_block, XMMRegister shuf_mask)
{
Label loop0, loop1, loop2, done_hash,
compute_block_size, compute_size,
compute_block_size_end, compute_size_end;
address K512_W = StubRoutines::x86::k512_W_addr();
address pshuffle_byte_flip_mask_sha512 = StubRoutines::x86::pshuffle_byte_flip_mask_addr_sha512();
address pshuffle_byte_flip_mask_addr = 0;
const XMMRegister& XFER = xmm0; // YTMP0
const XMMRegister& BYTE_FLIP_MASK = xmm9; // ymm9
#ifdef _WIN64
const Register& INP = rcx; //1st arg
const Register& CTX = rdx; //2nd arg
const Register& NUM_BLKS = r8; //3rd arg
const Register& c = rdi;
const Register& d = rsi;
const Register& e = r8;
const Register& y3 = rcx;
const Register& offset = r8;
const Register& input_limit = r9;
#else
const Register& INP = rdi; //1st arg
const Register& CTX = rsi; //2nd arg
const Register& NUM_BLKS = rdx; //3rd arg
const Register& c = rcx;
const Register& d = r8;
const Register& e = rdx;
const Register& y3 = rdi;
const Register& offset = rdx;
const Register& input_limit = rcx;
#endif
const Register& TBL = rbp;
const Register& a = rax;
const Register& b = rbx;
const Register& f = r9;
const Register& g = r10;
const Register& h = r11;
//Local variables as defined in assembly file.
enum
{
_XFER_SIZE = 4 * 8, // resq 4 => reserve 4 quadwords. Hence 4 * 8
_SRND_SIZE = 8, // resq 1
_INP_SIZE = 8,
_INP_END_SIZE = 8,
_RSP_SAVE_SIZE = 8, // defined as resq 1
#ifdef _WIN64
_GPR_SAVE_SIZE = 8 * 8, // defined as resq 8
#else
_GPR_SAVE_SIZE = 6 * 8 // resq 6
#endif
};
enum
{
_XFER = 0,
_SRND = _XFER + _XFER_SIZE, // 32
_INP = _SRND + _SRND_SIZE, // 40
_INP_END = _INP + _INP_SIZE, // 48
_RSP = _INP_END + _INP_END_SIZE, // 56
_GPR = _RSP + _RSP_SAVE_SIZE, // 64
_STACK_SIZE = _GPR + _GPR_SAVE_SIZE // 128 for windows and 112 for linux.
};
//Saving offset and limit as it will help with blocksize calculation for multiblock SHA512.
#ifdef _WIN64
push(r8); // win64: this is ofs
push(r9); // win64: this is limit, we need them again at the very end.
#else
push(rdx); // linux : this is ofs, need at the end for multiblock calculation
push(rcx); // linux: This is the limit.
#endif
//Allocate Stack Space
movq(rax, rsp);
subq(rsp, _STACK_SIZE);
andq(rsp, -32);
movq(Address(rsp, _RSP), rax);
//Save GPRs
movq(Address(rsp, _GPR), rbp);
movq(Address(rsp, (_GPR + 8)), rbx);
movq(Address(rsp, (_GPR + 16)), r12);
movq(Address(rsp, (_GPR + 24)), r13);
movq(Address(rsp, (_GPR + 32)), r14);
movq(Address(rsp, (_GPR + 40)), r15);
#ifdef _WIN64
movq(Address(rsp, (_GPR + 48)), rsi);
movq(Address(rsp, (_GPR + 56)), rdi);
#endif
vpblendd(xmm0, xmm0, xmm1, 0xF0, AVX_128bit);
vpblendd(xmm0, xmm0, xmm1, 0xF0, AVX_256bit);
if (multi_block) {
xorq(rax, rax);
bind(compute_block_size);
cmpptr(offset, input_limit); // Assuming that offset is less than limit.
jccb(Assembler::aboveEqual, compute_block_size_end);
addq(offset, 128);
addq(rax, 128);
jmpb(compute_block_size);
bind(compute_block_size_end);
movq(NUM_BLKS, rax);
cmpq(NUM_BLKS, 0);
jcc(Assembler::equal, done_hash);
} else {
xorq(NUM_BLKS, NUM_BLKS); //If single block.
addq(NUM_BLKS, 128);
}
addq(NUM_BLKS, INP); //pointer to end of data
movq(Address(rsp, _INP_END), NUM_BLKS);
//load initial digest
movq(a, Address(CTX, 8 * 0));
movq(b, Address(CTX, 8 * 1));
movq(c, Address(CTX, 8 * 2));
movq(d, Address(CTX, 8 * 3));
movq(e, Address(CTX, 8 * 4));
movq(f, Address(CTX, 8 * 5));
movq(g, Address(CTX, 8 * 6));
movq(h, Address(CTX, 8 * 7));
pshuffle_byte_flip_mask_addr = pshuffle_byte_flip_mask_sha512;
vmovdqu(BYTE_FLIP_MASK, ExternalAddress(pshuffle_byte_flip_mask_addr + 0)); //PSHUFFLE_BYTE_FLIP_MASK wrt rip
bind(loop0);
lea(TBL, ExternalAddress(K512_W));
//byte swap first 16 dwords
vmovdqu(xmm4, Address(INP, 32 * 0));
vpshufb(xmm4, xmm4, BYTE_FLIP_MASK, AVX_256bit);
vmovdqu(xmm5, Address(INP, 32 * 1));
vpshufb(xmm5, xmm5, BYTE_FLIP_MASK, AVX_256bit);
vmovdqu(xmm6, Address(INP, 32 * 2));
vpshufb(xmm6, xmm6, BYTE_FLIP_MASK, AVX_256bit);
vmovdqu(xmm7, Address(INP, 32 * 3));
vpshufb(xmm7, xmm7, BYTE_FLIP_MASK, AVX_256bit);
movq(Address(rsp, _INP), INP);
movslq(Address(rsp, _SRND), 4);
align(16);
//Schedule 64 input dwords, by calling sha512_AVX2_one_round_and_schedule
bind(loop1);
vpaddq(xmm0, xmm4, Address(TBL, 0 * 32), AVX_256bit);
vmovdqu(Address(rsp, _XFER), xmm0);
//four rounds and schedule
sha512_AVX2_one_round_and_schedule(xmm4, xmm5, xmm6, xmm7, a, b, c, d, e, f, g, h, 0);
sha512_AVX2_one_round_and_schedule(xmm4, xmm5, xmm6, xmm7, h, a, b, c, d, e, f, g, 1);
sha512_AVX2_one_round_and_schedule(xmm4, xmm5, xmm6, xmm7, g, h, a, b, c, d, e, f, 2);
sha512_AVX2_one_round_and_schedule(xmm4, xmm5, xmm6, xmm7, f, g, h, a, b, c, d, e, 3);
vpaddq(xmm0, xmm5, Address(TBL, 1 * 32), AVX_256bit);
vmovdqu(Address(rsp, _XFER), xmm0);
//four rounds and schedule
sha512_AVX2_one_round_and_schedule(xmm5, xmm6, xmm7, xmm4, e, f, g, h, a, b, c, d, 0);
sha512_AVX2_one_round_and_schedule(xmm5, xmm6, xmm7, xmm4, d, e, f, g, h, a, b, c, 1);
sha512_AVX2_one_round_and_schedule(xmm5, xmm6, xmm7, xmm4, c, d, e, f, g, h, a, b, 2);
sha512_AVX2_one_round_and_schedule(xmm5, xmm6, xmm7, xmm4, b, c, d, e, f, g, h, a, 3);
vpaddq(xmm0, xmm6, Address(TBL, 2 * 32), AVX_256bit);
vmovdqu(Address(rsp, _XFER), xmm0);
//four rounds and schedule
sha512_AVX2_one_round_and_schedule(xmm6, xmm7, xmm4, xmm5, a, b, c, d, e, f, g, h, 0);
sha512_AVX2_one_round_and_schedule(xmm6, xmm7, xmm4, xmm5, h, a, b, c, d, e, f, g, 1);
sha512_AVX2_one_round_and_schedule(xmm6, xmm7, xmm4, xmm5, g, h, a, b, c, d, e, f, 2);
sha512_AVX2_one_round_and_schedule(xmm6, xmm7, xmm4, xmm5, f, g, h, a, b, c, d, e, 3);
vpaddq(xmm0, xmm7, Address(TBL, 3 * 32), AVX_256bit);
vmovdqu(Address(rsp, _XFER), xmm0);
addq(TBL, 4 * 32);
//four rounds and schedule
sha512_AVX2_one_round_and_schedule(xmm7, xmm4, xmm5, xmm6, e, f, g, h, a, b, c, d, 0);
sha512_AVX2_one_round_and_schedule(xmm7, xmm4, xmm5, xmm6, d, e, f, g, h, a, b, c, 1);
sha512_AVX2_one_round_and_schedule(xmm7, xmm4, xmm5, xmm6, c, d, e, f, g, h, a, b, 2);
sha512_AVX2_one_round_and_schedule(xmm7, xmm4, xmm5, xmm6, b, c, d, e, f, g, h, a, 3);
subq(Address(rsp, _SRND), 1);
jcc(Assembler::notEqual, loop1);
movslq(Address(rsp, _SRND), 2);
bind(loop2);
vpaddq(xmm0, xmm4, Address(TBL, 0 * 32), AVX_256bit);
vmovdqu(Address(rsp, _XFER), xmm0);
//four rounds and compute.
sha512_AVX2_one_round_compute(a, a, b, c, d, e, f, g, h, 0);
sha512_AVX2_one_round_compute(h, h, a, b, c, d, e, f, g, 1);
sha512_AVX2_one_round_compute(g, g, h, a, b, c, d, e, f, 2);
sha512_AVX2_one_round_compute(f, f, g, h, a, b, c, d, e, 3);
vpaddq(xmm0, xmm5, Address(TBL, 1 * 32), AVX_256bit);
vmovdqu(Address(rsp, _XFER), xmm0);
addq(TBL, 2 * 32);
// four rounds and compute.
sha512_AVX2_one_round_compute(e, e, f, g, h, a, b, c, d, 0);
sha512_AVX2_one_round_compute(d, d, e, f, g, h, a, b, c, 1);
sha512_AVX2_one_round_compute(c, c, d, e, f, g, h, a, b, 2);
sha512_AVX2_one_round_compute(b, b, c, d, e, f, g, h, a, 3);
vmovdqu(xmm4, xmm6);
vmovdqu(xmm5, xmm7);
subq(Address(rsp, _SRND), 1);
jcc(Assembler::notEqual, loop2);
addmq(8 * 0, CTX, a);
addmq(8 * 1, CTX, b);
addmq(8 * 2, CTX, c);
addmq(8 * 3, CTX, d);
addmq(8 * 4, CTX, e);
addmq(8 * 5, CTX, f);
addmq(8 * 6, CTX, g);
addmq(8 * 7, CTX, h);
movq(INP, Address(rsp, _INP));
addq(INP, 128);
cmpq(INP, Address(rsp, _INP_END));
jcc(Assembler::notEqual, loop0);
bind(done_hash);
//Restore GPRs
movq(rbp, Address(rsp, (_GPR + 0)));
movq(rbx, Address(rsp, (_GPR + 8)));
movq(r12, Address(rsp, (_GPR + 16)));
movq(r13, Address(rsp, (_GPR + 24)));
movq(r14, Address(rsp, (_GPR + 32)));
movq(r15, Address(rsp, (_GPR + 40)));
#ifdef _WIN64
movq(rsi, Address(rsp, (_GPR + 48)));
movq(rdi, Address(rsp, (_GPR + 56)));
#endif
//Restore Stack Pointer
movq(rsp, Address(rsp, _RSP));
#ifdef _WIN64
pop(r9);
pop(r8);
#else
pop(rcx);
pop(rdx);
#endif
if (multi_block) {
#ifdef _WIN64
const Register& limit_end = r9;
const Register& ofs_end = r8;
#else
const Register& limit_end = rcx;
const Register& ofs_end = rdx;
#endif
movq(rax, ofs_end);
bind(compute_size);
cmpptr(rax, limit_end);
jccb(Assembler::aboveEqual, compute_size_end);
addq(rax, 128);
jmpb(compute_size);
bind(compute_size_end);
}
}
#endif //#ifdef _LP64
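The vectorized rounds above implement the standard FIPS 180-4 SHA-512 math that the inline comments reference: the big sigmas S0/S1, CH, MAJ in the round computation, and the small sigmas s0/s1 in the message schedule. A minimal scalar sketch for reference, with the same rotation amounts the rorxq/vpsrlq sequences use (illustrative helper names, not HotSpot APIs):

#include <cstdint>

static inline uint64_t rotr(uint64_t x, unsigned n) { return (x >> n) | (x << (64 - n)); }

// Big sigmas used per round (the rorxq 14/18/41 and 28/34/39 sequences).
static inline uint64_t Sigma1(uint64_t e) { return rotr(e, 14) ^ rotr(e, 18) ^ rotr(e, 41); }
static inline uint64_t Sigma0(uint64_t a) { return rotr(a, 28) ^ rotr(a, 34) ^ rotr(a, 39); }
// Small sigmas used by the message schedule (ror 1/8 with >>7, ror 19/61 with >>6).
static inline uint64_t sigma0(uint64_t w) { return rotr(w, 1)  ^ rotr(w, 8)  ^ (w >> 7); }
static inline uint64_t sigma1(uint64_t w) { return rotr(w, 19) ^ rotr(w, 61) ^ (w >> 6); }

// One round. The ((f^g)&e)^g and ((a|c)&b)|(a&c) forms in the assembly are
// algebraically the usual Ch and Maj functions.
static void sha512_round(uint64_t s[8], uint64_t k, uint64_t w) {
  uint64_t a = s[0], b = s[1], c = s[2], d = s[3];
  uint64_t e = s[4], f = s[5], g = s[6], h = s[7];
  uint64_t ch  = ((f ^ g) & e) ^ g;
  uint64_t maj = ((a | c) & b) | (a & c);
  uint64_t t1  = h + Sigma1(e) + ch + k + w;    // k + w + h + S1 + CH in the comments
  uint64_t t2  = Sigma0(a) + maj;               // S0 + MAJ
  s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
  s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;
}

// Message schedule: w[t] = w[t-16] + sigma0(w[t-15]) + w[t-7] + sigma1(w[t-2]),
// which is what the vperm2f128/vpalignr extraction and the sigma code compute
// four lanes at a time. Rolling 16-entry window, t >= 16.
static uint64_t next_w(const uint64_t w[16], int t) {
  return w[(t - 16) & 15] + sigma0(w[(t - 15) & 15]) + w[(t - 7) & 15] + sigma1(w[(t - 2) & 15]);
}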

View File

@ -3718,6 +3718,25 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
//Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
address generate_pshuffle_byte_flip_mask_sha512() {
__ align(32);
StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
address start = __ pc();
if (VM_Version::supports_avx2()) {
__ emit_data64(0x0001020304050607, relocInfo::none); // PSHUFFLE_BYTE_FLIP_MASK
__ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
__ emit_data64(0x1011121314151617, relocInfo::none);
__ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none); //MASK_YMM_LO
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
__ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
}
return start;
}
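The first 32 bytes emitted above are the flip mask handed to vpshufb: within each 64-bit lane it reverses the byte order, since SHA-512 consumes big-endian message words while the input buffer is little-endian (the trailing MASK_YMM_LO qwords select the upper lanes in the schedule code). A scalar equivalent of the per-lane byte swap, for illustration only:

#include <cstdint>

// Reverse the byte order of one 64-bit message word.
static inline uint64_t byteswap64(uint64_t x) {
  x = ((x & 0x00FF00FF00FF00FFULL) << 8)  | ((x >> 8)  & 0x00FF00FF00FF00FFULL);
  x = ((x & 0x0000FFFF0000FFFFULL) << 16) | ((x >> 16) & 0x0000FFFF0000FFFFULL);
  return (x << 32) | (x >> 32);
}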
// ofs and limit are used for the multi-block byte array.
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
address generate_sha256_implCompress(bool multi_block, const char *name) {
@ -3761,6 +3780,39 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
address generate_sha512_implCompress(bool multi_block, const char *name) {
assert(VM_Version::supports_avx2(), "");
assert(VM_Version::supports_bmi2(), "");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
Register buf = c_rarg0;
Register state = c_rarg1;
Register ofs = c_rarg2;
Register limit = c_rarg3;
const XMMRegister msg = xmm0;
const XMMRegister state0 = xmm1;
const XMMRegister state1 = xmm2;
const XMMRegister msgtmp0 = xmm3;
const XMMRegister msgtmp1 = xmm4;
const XMMRegister msgtmp2 = xmm5;
const XMMRegister msgtmp3 = xmm6;
const XMMRegister msgtmp4 = xmm7;
const XMMRegister shuf_mask = xmm8;
__ enter();
__ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
buf, state, ofs, limit, rsp, multi_block, shuf_mask);
__ leave();
__ ret(0);
return start;
}
// This is a version of CTR/AES crypt which does 6 blocks in a loop at a time
// to hide instruction latency
//
@ -5081,6 +5133,12 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
}
if (UseSHA512Intrinsics) {
StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512();
StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress");
StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
}
// Generate GHASH intrinsics code
if (UseGHASHIntrinsics) {

View File

@ -48,6 +48,8 @@ address StubRoutines::x86::_shuffle_byte_flip_mask_addr = NULL;
address StubRoutines::x86::_k256_adr = NULL;
#ifdef _LP64
address StubRoutines::x86::_k256_W_adr = NULL;
address StubRoutines::x86::_k512_W_addr = NULL;
address StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = NULL;
#endif
address StubRoutines::x86::_pshuffle_byte_flip_mask_addr = NULL;
@ -297,4 +299,49 @@ ALIGNED_(64) juint StubRoutines::x86::_k256[] =
// used in MacroAssembler::sha256_AVX2
// dynamically built from _k256
ALIGNED_(64) juint StubRoutines::x86::_k256_W[2*sizeof(StubRoutines::x86::_k256)];
// used in MacroAssembler::sha512_AVX2
ALIGNED_(64) julong StubRoutines::x86::_k512_W[] =
{
0x428a2f98d728ae22LL, 0x7137449123ef65cdLL,
0xb5c0fbcfec4d3b2fLL, 0xe9b5dba58189dbbcLL,
0x3956c25bf348b538LL, 0x59f111f1b605d019LL,
0x923f82a4af194f9bLL, 0xab1c5ed5da6d8118LL,
0xd807aa98a3030242LL, 0x12835b0145706fbeLL,
0x243185be4ee4b28cLL, 0x550c7dc3d5ffb4e2LL,
0x72be5d74f27b896fLL, 0x80deb1fe3b1696b1LL,
0x9bdc06a725c71235LL, 0xc19bf174cf692694LL,
0xe49b69c19ef14ad2LL, 0xefbe4786384f25e3LL,
0x0fc19dc68b8cd5b5LL, 0x240ca1cc77ac9c65LL,
0x2de92c6f592b0275LL, 0x4a7484aa6ea6e483LL,
0x5cb0a9dcbd41fbd4LL, 0x76f988da831153b5LL,
0x983e5152ee66dfabLL, 0xa831c66d2db43210LL,
0xb00327c898fb213fLL, 0xbf597fc7beef0ee4LL,
0xc6e00bf33da88fc2LL, 0xd5a79147930aa725LL,
0x06ca6351e003826fLL, 0x142929670a0e6e70LL,
0x27b70a8546d22ffcLL, 0x2e1b21385c26c926LL,
0x4d2c6dfc5ac42aedLL, 0x53380d139d95b3dfLL,
0x650a73548baf63deLL, 0x766a0abb3c77b2a8LL,
0x81c2c92e47edaee6LL, 0x92722c851482353bLL,
0xa2bfe8a14cf10364LL, 0xa81a664bbc423001LL,
0xc24b8b70d0f89791LL, 0xc76c51a30654be30LL,
0xd192e819d6ef5218LL, 0xd69906245565a910LL,
0xf40e35855771202aLL, 0x106aa07032bbd1b8LL,
0x19a4c116b8d2d0c8LL, 0x1e376c085141ab53LL,
0x2748774cdf8eeb99LL, 0x34b0bcb5e19b48a8LL,
0x391c0cb3c5c95a63LL, 0x4ed8aa4ae3418acbLL,
0x5b9cca4f7763e373LL, 0x682e6ff3d6b2b8a3LL,
0x748f82ee5defb2fcLL, 0x78a5636f43172f60LL,
0x84c87814a1f0ab72LL, 0x8cc702081a6439ecLL,
0x90befffa23631e28LL, 0xa4506cebde82bde9LL,
0xbef9a3f7b2c67915LL, 0xc67178f2e372532bLL,
0xca273eceea26619cLL, 0xd186b8c721c0c207LL,
0xeada7dd6cde0eb1eLL, 0xf57d4f7fee6ed178LL,
0x06f067aa72176fbaLL, 0x0a637dc5a2c898a6LL,
0x113f9804bef90daeLL, 0x1b710b35131c471bLL,
0x28db77f523047d84LL, 0x32caab7b40c72493LL,
0x3c9ebe0a15c9bebcLL, 0x431d67c49c100d4cLL,
0x4cc5d4becb3e42b6LL, 0x597f299cfc657e2aLL,
0x5fcb6fab3ad6faecLL, 0x6c44198c4a475817LL,
};
#endif

View File

@ -33,7 +33,7 @@ static bool returns_to_call_stub(address return_pc) { return return_pc == _call_
enum platform_dependent_constants {
code_size1 = 20000 LP64_ONLY(+10000), // simply increase if too small (assembler will crash if too small)
code_size2 = 33800 LP64_ONLY(+1200) // simply increase if too small (assembler will crash if too small)
code_size2 = 33800 LP64_ONLY(+10000) // simply increase if too small (assembler will crash if too small)
};
class x86 {
@ -134,6 +134,10 @@ class x86 {
#ifdef _LP64
static juint _k256_W[];
static address _k256_W_adr;
static julong _k512_W[];
static address _k512_W_addr;
// byte flip mask for sha512
static address _pshuffle_byte_flip_mask_addr_sha512;
#endif
// byte flip mask for sha256
static address _pshuffle_byte_flip_mask_addr;
@ -192,6 +196,8 @@ class x86 {
static address k256_addr() { return _k256_adr; }
#ifdef _LP64
static address k256_W_addr() { return _k256_W_adr; }
static address k512_W_addr() { return _k512_W_addr; }
static address pshuffle_byte_flip_mask_addr_sha512() { return _pshuffle_byte_flip_mask_addr_sha512; }
#endif
static address pshuffle_byte_flip_mask_addr() { return _pshuffle_byte_flip_mask_addr; }
static void generate_CRC32C_table(bool is_pclmulqdq_supported);

View File

@ -254,8 +254,9 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
// Check if we need to take lock at entry of synchronized method.
if (UseJVMCICompiler) {
// Check if we need to take lock at entry of synchronized method. This can
// only occur on method entry so emit it only for vtos with step 0.
if (UseJVMCICompiler && state == vtos && step == 0) {
Label L;
__ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
__ jcc(Assembler::zero, L);
@ -266,6 +267,16 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
// Take lock.
lock_method();
__ bind(L);
} else {
#ifdef ASSERT
if (UseJVMCICompiler) {
Label L;
__ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
__ jccb(Assembler::zero, L);
__ stop("unexpected pending monitor in deopt entry");
__ bind(L);
}
#endif
}
#endif
// handle exceptions

View File

@ -769,7 +769,11 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
}
if (UseSHA512Intrinsics) {
if (UseSHA) {
if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
}
} else if (UseSHA512Intrinsics) {
warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}

View File

@ -8173,13 +8173,13 @@ instruct vcmov4D_reg(vecY dst, vecY src1, vecY src2, immI8 cop, cmpOp_vcmppd cop
match(Set dst (CMoveVD (Binary copnd cop) (Binary src1 src2)));
effect(TEMP dst, USE src1, USE src2);
format %{ "cmppd.$copnd $dst, $src1, $src2 ! vcmovevd, cond=$cop\n\t"
"vpblendd $dst,$src1,$src2,$dst ! vcmovevd\n\t"
"blendvpd $dst,$src1,$src2,$dst ! vcmovevd\n\t"
%}
ins_encode %{
int vector_len = 1;
int cond = (Assembler::Condition)($copnd$$cmpcode);
__ cmppd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, cond, vector_len);
__ vpblendd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, $dst$$XMMRegister, vector_len);
__ blendvpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, $dst$$XMMRegister, vector_len);
%}
ins_pipe( pipe_slow );
%}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include <objc/objc-runtime.h>
#import <Foundation/Foundation.h>
#import <JavaNativeFoundation/JavaNativeFoundation.h>
#import <JavaRuntimeSupport/JavaRuntimeSupport.h>
#include <jni.h>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -420,8 +420,6 @@ extern uintptr_t Ppltdest(struct ps_prochandle *, uintptr_t, int *);
/*
* Stack frame iteration interface.
*/
#ifdef SOLARIS_11_B159_OR_LATER
/* building on Nevada-B159 or later so define the new callback */
typedef int proc_stack_f(
void *, /* the cookie given to Pstack_iter() */
const prgregset_t, /* the frame's registers */
@ -432,10 +430,6 @@ typedef int proc_stack_f(
#define PR_SIGNAL_FRAME 1 /* called by a signal handler */
#define PR_FOUND_SIGNAL 2 /* we found the corresponding signal number */
#else
/* building on Nevada-B158 or earlier so define the old callback */
typedef int proc_stack_f(void *, const prgregset_t, uint_t, const long *);
#endif
extern int Pstack_iter(struct ps_prochandle *,
const prgregset_t, proc_stack_f *, void *);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,6 +46,17 @@
extern "C" {
#endif
/* extended symbol table information */
typedef struct {
const char *prs_object; /* object name */
const char *prs_name; /* symbol name */
Lmid_t prs_lmid; /* link map id */
uint_t prs_id; /* symbol id */
uint_t prs_table; /* symbol table id */
} prsyminfo_t;
typedef struct ps_prochandle ps_prochandle_t;
/*
* 'object_name' is the name of a load object obtained from an
* iteration over the process's address space mappings (Pmapping_iter),
@ -53,8 +64,10 @@ extern "C" {
* or else it is one of the special PR_OBJ_* values above.
*/
extern int Plookup_by_addr(struct ps_prochandle *,
uintptr_t, char *, size_t, GElf_Sym *);
extern int Plookup_by_addr(ps_prochandle_t *, uintptr_t, char *,
size_t, GElf_Sym *, prsyminfo_t *);
extern ps_prochandle_t *proc_arg_grab(const char *, int, int,
int *, const char **);
typedef int proc_map_f(void *, const prmap_t *, const char *);
extern int Pobject_iter(struct ps_prochandle *, proc_map_f *, void *);
@ -88,7 +101,6 @@ extern int Pobject_iter(struct ps_prochandle *, proc_map_f *, void *);
#define G_ELF 13 /* Libelf error, elf_errno() is meaningful */
#define G_NOTE 14 /* Required PT_NOTE Phdr not present in core */
extern struct ps_prochandle *proc_arg_grab(const char *, int, int, int *);
extern const pstatus_t *Pstatus(struct ps_prochandle *);
/* Flags accepted by Prelease (partial) */
@ -101,8 +113,6 @@ extern int Pstop(struct ps_prochandle *, uint_t);
/*
* Stack frame iteration interface.
*/
#ifdef SOLARIS_11_B159_OR_LATER
/* building on Nevada-B159 or later so define the new callback */
typedef int proc_stack_f(
void *, /* the cookie given to Pstack_iter() */
const prgregset_t, /* the frame's registers */
@ -113,10 +123,6 @@ typedef int proc_stack_f(
#define PR_SIGNAL_FRAME 1 /* called by a signal handler */
#define PR_FOUND_SIGNAL 2 /* we found the corresponding signal number */
#else
/* building on Nevada-B158 or earlier so define the old callback */
typedef int proc_stack_f(void *, const prgregset_t, uint_t, const long *);
#endif
extern int Pstack_iter(struct ps_prochandle *,
const prgregset_t, proc_stack_f *, void *);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,9 +24,6 @@
#include "salibproc.h"
#include "sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal.h"
#ifndef SOLARIS_11_B159_OR_LATER
#include <sys/utsname.h>
#endif
#include <thread_db.h>
#include <strings.h>
#include <limits.h>
@ -45,20 +42,6 @@
// debug modes
static int _libsaproc_debug = 0;
#ifndef SOLARIS_11_B159_OR_LATER
static bool _Pstack_iter_debug = false;
static void dprintf_2(const char* format,...) {
if (_Pstack_iter_debug) {
va_list alist;
va_start(alist, format);
fputs("Pstack_iter DEBUG: ", stderr);
vfprintf(stderr, format, alist);
va_end(alist);
}
}
#endif // !SOLARIS_11_B159_OR_LATER
static void print_debug(const char* format,...) {
if (_libsaproc_debug) {
@ -757,7 +740,8 @@ static void attach_internal(JNIEnv* env, jobject this_obj, jstring cmdLine, jboo
#endif
// connect to process/core
struct ps_prochandle* ph = proc_arg_grab(cmdLine_cstr, (isProcess? PR_ARG_PIDS : PR_ARG_CORES), PGRAB_FORCE, &gcode);
ps_prochandle_t* ph = proc_arg_grab(cmdLine_cstr, (isProcess? PR_ARG_PIDS : PR_ARG_CORES), PGRAB_FORCE, &gcode, NULL);
env->ReleaseStringUTFChars(cmdLine, cmdLine_cstr);
if (! ph) {
if (gcode > 0 && gcode < sizeof(proc_arg_grab_errmsgs)/sizeof(const char*)) {
@ -997,11 +981,6 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_fill
TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
}
#ifndef SOLARIS_11_B159_OR_LATER
// building on Nevada-B158 or earlier so more hoops to jump through
static bool has_newer_Pstack_iter = false; // older version by default
#endif
/*
* Class: sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal
* Method: fillCFrameList0
@ -1030,23 +1009,8 @@ JNIEXPORT jobject JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_f
env->ReleaseLongArrayElements(regsArray, ptr, JNI_ABORT);
CHECK_EXCEPTION_(0);
#ifdef SOLARIS_11_B159_OR_LATER
// building on Nevada-B159 or later so use the new callback
Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
wrapper_fill_cframe_list, &dbgo2);
#else
// building on Nevada-B158 or earlier so figure out which callback to use
if (has_newer_Pstack_iter) {
// Since we're building on Nevada-B158 or earlier, we have to
// cast wrapper_fill_cframe_list to make the compiler happy.
Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
(proc_stack_f *)wrapper_fill_cframe_list, &dbgo2);
} else {
Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
fill_cframe_list, &dbgo2);
}
#endif // SOLARIS_11_B159_OR_LATER
return dbgo2.obj;
}
@ -1236,7 +1200,8 @@ JNIEXPORT jobject JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_l
char nameBuf[SYMBOL_BUF_SIZE + 1];
GElf_Sym sym;
int res = Plookup_by_addr((struct ps_prochandle*) p_ps_prochandle, (uintptr_t) address,
nameBuf, sizeof(nameBuf), &sym);
nameBuf, sizeof(nameBuf), &sym, NULL);
if (res != 0) { // failed
return 0;
}
@ -1268,102 +1233,6 @@ JNIEXPORT jstring JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_d
return res;
}
#ifndef SOLARIS_11_B159_OR_LATER
// Determine if the OS we're running on has the newer version
// of libproc's Pstack_iter.
//
// Set env var PSTACK_ITER_DEBUG=true to debug this logic.
// Set env var PSTACK_ITER_DEBUG_RELEASE to simulate a 'release' value.
// Set env var PSTACK_ITER_DEBUG_VERSION to simulate a 'version' value.
//
// frankenputer 'uname -r -v': 5.10 Generic_141445-09
// jurassic 'uname -r -v': 5.11 snv_164
// lonepeak 'uname -r -v': 5.11 snv_127
//
static void set_has_newer_Pstack_iter(JNIEnv *env) {
static bool done_set = false;
if (done_set) {
// already set has_newer_Pstack_iter
return;
}
struct utsname name;
if (uname(&name) == -1) {
THROW_NEW_DEBUGGER_EXCEPTION("uname() failed!");
}
dprintf_2("release='%s' version='%s'\n", name.release, name.version);
if (_Pstack_iter_debug) {
char *override = getenv("PSTACK_ITER_DEBUG_RELEASE");
if (override != NULL) {
strncpy(name.release, override, SYS_NMLN - 1);
name.release[SYS_NMLN - 2] = '\0';
dprintf_2("overriding with release='%s'\n", name.release);
}
override = getenv("PSTACK_ITER_DEBUG_VERSION");
if (override != NULL) {
strncpy(name.version, override, SYS_NMLN - 1);
name.version[SYS_NMLN - 2] = '\0';
dprintf_2("overriding with version='%s'\n", name.version);
}
}
// the major number corresponds to the old SunOS major number
int major = atoi(name.release);
if (major >= 6) {
dprintf_2("release is SunOS 6 or later\n");
has_newer_Pstack_iter = true;
done_set = true;
return;
}
if (major < 5) {
dprintf_2("release is SunOS 4 or earlier\n");
done_set = true;
return;
}
// some SunOS 5.* build so now check for Solaris versions
char *dot = strchr(name.release, '.');
int minor = 0;
if (dot != NULL) {
// release is major.minor format
*dot = NULL;
minor = atoi(dot + 1);
}
if (minor <= 10) {
dprintf_2("release is Solaris 10 or earlier\n");
done_set = true;
return;
} else if (minor >= 12) {
dprintf_2("release is Solaris 12 or later\n");
has_newer_Pstack_iter = true;
done_set = true;
return;
}
// some Solaris 11 build so now check for internal build numbers
if (strncmp(name.version, "snv_", 4) != 0) {
dprintf_2("release is Solaris 11 post-GA or later\n");
has_newer_Pstack_iter = true;
done_set = true;
return;
}
// version begins with "snv_" so a pre-GA build of Solaris 11
int build = atoi(&name.version[4]);
if (build >= 159) {
dprintf_2("release is Nevada-B159 or later\n");
has_newer_Pstack_iter = true;
} else {
dprintf_2("release is Nevada-B158 or earlier\n");
}
done_set = true;
}
#endif // !SOLARIS_11_B159_OR_LATER
/*
* Class: sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal
* Method: initIDs
@ -1383,14 +1252,6 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_init
if (libproc_handle == 0)
THROW_NEW_DEBUGGER_EXCEPTION("can't load libproc.so, if you are using Solaris 5.7 or below, copy libproc.so from 5.8!");
#ifndef SOLARIS_11_B159_OR_LATER
_Pstack_iter_debug = getenv("PSTACK_ITER_DEBUG") != NULL;
set_has_newer_Pstack_iter(env);
CHECK_EXCEPTION;
dprintf_2("has_newer_Pstack_iter=%d\n", has_newer_Pstack_iter);
#endif
p_ps_prochandle_ID = env->GetFieldID(clazz, "p_ps_prochandle", "J");
CHECK_EXCEPTION;

View File

@ -171,6 +171,9 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
return (String) getValue();
}
private static final int PROPERTY_LINE_WIDTH = 80;
private static final int PROPERTY_HELP_INDENT = 10;
/**
* Prints a description of the properties used to configure shared JVMCI code.
*
@ -178,24 +181,26 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
*/
public static void printProperties(PrintStream out) {
out.println("[JVMCI properties]");
int typeWidth = 0;
int nameWidth = 0;
Option[] values = values();
for (Option option : values) {
typeWidth = Math.max(typeWidth, option.type.getSimpleName().length());
nameWidth = Math.max(nameWidth, option.getPropertyName().length());
}
for (Option option : values) {
Object value = option.getValue();
if (value instanceof String) {
value = '"' + String.valueOf(value) + '"';
}
String assign = option.isDefault ? " =" : ":=";
String format = "%" + (typeWidth + 1) + "s %-" + (nameWidth + 1) + "s %s %s%n";
out.printf(format, option.type.getSimpleName(), option.getPropertyName(), assign, value);
String helpFormat = "%" + (typeWidth + 1) + "s %s%n";
String name = option.getPropertyName();
String assign = option.isDefault ? "=" : ":=";
String typeName = option.type.getSimpleName();
String linePrefix = String.format("%s %s %s ", name, assign, value);
int typeStartPos = PROPERTY_LINE_WIDTH - typeName.length();
int linePad = typeStartPos - linePrefix.length();
if (linePad > 0) {
out.printf("%s%-" + linePad + "s[%s]%n", linePrefix, "", typeName);
} else {
out.printf("%s[%s]%n", linePrefix, typeName);
}
for (String line : option.helpLines) {
out.printf(helpFormat, "", line);
out.printf("%" + PROPERTY_HELP_INDENT + "s%s%n", "", line);
}
}
}
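
The reworked printProperties pads each "name = value" line so that the bracketed type name ends at a fixed column (PROPERTY_LINE_WIDTH = 80) and indents help text by PROPERTY_HELP_INDENT = 10 spaces. A minimal standalone sketch of that padding arithmetic, using hypothetical option data rather than the real JVMCI Option enum:

import java.io.PrintStream;

public class PropertyFormatDemo {
    private static final int PROPERTY_LINE_WIDTH = 80;
    private static final int PROPERTY_HELP_INDENT = 10;

    // Mirrors the padding logic: right-align "[Type]" at column 80 when it fits.
    static void printLine(PrintStream out, String name, String assign, Object value,
                          String typeName, String[] helpLines) {
        String linePrefix = String.format("%s %s %s ", name, assign, value);
        int typeStartPos = PROPERTY_LINE_WIDTH - typeName.length();
        int linePad = typeStartPos - linePrefix.length();
        if (linePad > 0) {
            out.printf("%s%-" + linePad + "s[%s]%n", linePrefix, "", typeName);
        } else {
            out.printf("%s[%s]%n", linePrefix, typeName);
        }
        for (String line : helpLines) {
            out.printf("%" + PROPERTY_HELP_INDENT + "s%s%n", "", line);
        }
    }

    public static void main(String[] args) {
        // Example data only; the real values come from the Option enum.
        printLine(System.out, "jvmci.InitTimer", "=", false, "Boolean",
                  new String[] {"Specifies if initialization timing is enabled."});
    }
}

With these example values the property line is padded so "[Boolean]" ends at column 80, and the help sentence is printed indented ten spaces beneath it.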
@ -306,6 +311,7 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
PrintStream out = new PrintStream(getLogStream());
Option.printProperties(out);
compilerFactory.printProperties(out);
System.exit(0);
}
if (Option.PrintConfig.getBoolean()) {

View File

@ -22,7 +22,8 @@
*/
package jdk.vm.ci.services;
import java.lang.reflect.Module;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Formatter;
import java.util.Iterator;
import java.util.ServiceConfigurationError;
@ -36,11 +37,62 @@ public final class Services {
private Services() {
}
private static int getJavaSpecificationVersion() {
String value = System.getProperty("java.specification.version");
if (value.startsWith("1.")) {
value = value.substring(2);
}
return Integer.parseInt(value);
}
/**
* The integer value corresponding to the value of the {@code java.specification.version} system
* property after any leading {@code "1."} has been stripped.
*/
public static final int JAVA_SPECIFICATION_VERSION = getJavaSpecificationVersion();
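
As a worked illustration of the parsing rule above, here is a small hypothetical demo (the real field reads the live java.specification.version property; the inputs below are the two forms that property takes on JDK 8 and JDK 9):

public class SpecVersionDemo {
    // Same rule as getJavaSpecificationVersion(): strip a leading "1." then parse.
    static int parse(String value) {
        if (value.startsWith("1.")) {
            value = value.substring(2);
        }
        return Integer.parseInt(value);
    }

    public static void main(String[] args) {
        System.out.println(parse("1.8")); // 8 on a JDK 8 runtime
        System.out.println(parse("9"));   // 9 on a JDK 9 runtime
    }
}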
// Use reflection so that this compiles on Java 8
private static final Method getModule;
private static final Method getPackages;
private static final Method addUses;
private static final Method isExported;
private static final Method addExports;
static {
if (JAVA_SPECIFICATION_VERSION >= 9) {
try {
getModule = Class.class.getMethod("getModule");
Class<?> moduleClass = getModule.getReturnType();
getPackages = moduleClass.getMethod("getPackages");
addUses = moduleClass.getMethod("addUses", Class.class);
isExported = moduleClass.getMethod("isExported", String.class, moduleClass);
addExports = moduleClass.getMethod("addExports", String.class, moduleClass);
} catch (NoSuchMethodException | SecurityException e) {
throw new InternalError(e);
}
} else {
getModule = null;
getPackages = null;
addUses = null;
isExported = null;
addExports = null;
}
}
@SuppressWarnings("unchecked")
static <T> T invoke(Method method, Object receiver, Object... args) {
try {
return (T) method.invoke(receiver, args);
} catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
throw new InternalError(e);
}
}
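
A minimal sketch of the reflective pattern this enables: source compiled against JDK 8 can still query the module system when it happens to run on JDK 9, by looking the methods up at run time instead of referencing the Module type directly. The class below is illustrative only (it merely reads module information and does not attempt the addExports update); the surrounding code uses the same technique to export every JVMCI package to a requestor's module dynamically, instead of requiring a long list of --add-exports options on the JVM command line.

import java.lang.reflect.Method;

public class ModuleReflectionDemo {
    public static void main(String[] args) throws Exception {
        if (specVersion() < 9) {
            System.out.println("No module system on this runtime");
            return;
        }
        // Resolved reflectively so this source also compiles on JDK 8.
        Method getModule = Class.class.getMethod("getModule");
        Object javaBase = getModule.invoke(String.class);
        System.out.println("String.class lives in: " + javaBase);
        Method isExported = getModule.getReturnType().getMethod("isExported", String.class);
        System.out.println("java.lang exported: " + isExported.invoke(javaBase, "java.lang"));
    }

    private static int specVersion() {
        String value = System.getProperty("java.specification.version");
        if (value.startsWith("1.")) {
            value = value.substring(2);
        }
        return Integer.parseInt(value);
    }
}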
/**
* Performs any required security checks and dynamic reconfiguration to allow the module of a
* given class to access the classes in the JVMCI module.
*
* Note: This API uses {@link Class} instead of {@link Module} to provide backwards
* Note: This API uses {@link Class} instead of {@code Module} to provide backwards
* compatibility for JVMCI clients compiled against a JDK release earlier than 9.
*
* @param requestor a class requesting access to the JVMCI module for its module
@ -52,15 +104,19 @@ public final class Services {
if (sm != null) {
sm.checkPermission(new JVMCIPermission());
}
Module jvmci = Services.class.getModule();
Module requestorModule = requestor.getModule();
if (jvmci != requestorModule) {
for (String pkg : jvmci.getPackages()) {
// Export all JVMCI packages dynamically instead
// of requiring a long list of --add-exports
// options on the JVM command line.
if (!jvmci.isExported(pkg, requestorModule)) {
jvmci.addExports(pkg, requestorModule);
if (JAVA_SPECIFICATION_VERSION >= 9) {
Object jvmci = invoke(getModule, Services.class);
Object requestorModule = invoke(getModule, requestor);
if (jvmci != requestorModule) {
String[] packages = invoke(getPackages, jvmci);
for (String pkg : packages) {
// Export all JVMCI packages dynamically instead
// of requiring a long list of --add-exports
// options on the JVM command line.
boolean exported = invoke(isExported, jvmci, pkg, requestorModule);
if (!exported) {
invoke(addExports, jvmci, pkg, requestorModule);
}
}
}
}

@ -77,8 +133,10 @@ public final class Services {
if (sm != null) {
sm.checkPermission(new JVMCIPermission());
}
Module jvmci = Services.class.getModule();
jvmci.addUses(service);
if (JAVA_SPECIFICATION_VERSION >= 9) {
Object jvmci = invoke(getModule, Services.class);
invoke(addUses, jvmci, service);
}
// Restrict JVMCI clients to be on the class path or module path
return ServiceLoader.load(service, ClassLoader.getSystemClassLoader());
@ -98,8 +156,10 @@ public final class Services {
if (sm != null) {
sm.checkPermission(new JVMCIPermission());
}
Module jvmci = Services.class.getModule();
jvmci.addUses(service);
if (JAVA_SPECIFICATION_VERSION >= 9) {
Object jvmci = invoke(getModule, Services.class);
invoke(addUses, jvmci, service);
}
// Restrict JVMCI clients to be on the class path or module path
Iterable<S> providers = ServiceLoader.load(service, ClassLoader.getSystemClassLoader());
S singleProvider = null;
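
Both load paths deliberately pass the system class loader so that only providers on the class path or module path are considered. A hedged, standalone sketch of the single-provider lookup this last hunk begins (the error messages and the required flag are illustrative, not the exact JVMCI code):

import java.util.ServiceLoader;

public class SingleProviderDemo {
    // Loads exactly one provider of a service; complains if there are several.
    static <S> S loadSingle(Class<S> service, boolean required) {
        Iterable<S> providers = ServiceLoader.load(service, ClassLoader.getSystemClassLoader());
        S singleProvider = null;
        for (S provider : providers) {
            if (singleProvider != null) {
                throw new InternalError("Multiple providers of " + service.getName() + " found");
            }
            singleProvider = provider;
        }
        if (singleProvider == null && required) {
            throw new InternalError("No provider of " + service.getName() + " found");
        }
        return singleProvider;
    }

    public static void main(String[] args) {
        // With no provider registered on the class path, this prints "null".
        System.out.println(loadSingle(Runnable.class, false));
    }
}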

View File

@ -42,7 +42,7 @@
#include "runtime/sharedRuntime.hpp"
Compiler::Compiler() : AbstractCompiler(c1) {
Compiler::Compiler() : AbstractCompiler(compiler_c1) {
}
void Compiler::init_c1_runtime() {
@ -223,6 +223,7 @@ bool Compiler::is_intrinsic_supported(const methodHandle& method) {
case vmIntrinsics::_putCharStringU:
#ifdef TRACE_HAVE_INTRINSICS
case vmIntrinsics::_counterTime:
case vmIntrinsics::_getBufferWriter:
#if defined(_LP64) || !defined(TRACE_ID_CLASS_SHIFT)
case vmIntrinsics::_getClassId:
#endif

View File

@ -3120,6 +3120,22 @@ void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
__ move(id, rlock_result(x));
}
void LIRGenerator::do_getBufferWriter(Intrinsic* x) {
LabelObj* L_end = new LabelObj();
LIR_Address* jobj_addr = new LIR_Address(getThreadPointer(),
in_bytes(TRACE_THREAD_DATA_WRITER_OFFSET),
T_OBJECT);
LIR_Opr result = rlock_result(x);
__ move_wide(jobj_addr, result);
__ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
__ branch(lir_cond_equal, T_OBJECT, L_end->label());
__ move_wide(new LIR_Address(result, T_OBJECT), result);
__ branch_destination(L_end->label());
}
#endif
@ -3151,6 +3167,9 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
case vmIntrinsics::_getClassId:
do_ClassIDIntrinsic(x);
break;
case vmIntrinsics::_getBufferWriter:
do_getBufferWriter(x);
break;
case vmIntrinsics::_counterTime:
do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), x);
break;

View File

@ -441,6 +441,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
#ifdef TRACE_HAVE_INTRINSICS
void do_ClassIDIntrinsic(Intrinsic* x);
void do_getBufferWriter(Intrinsic* x);
#endif
void do_RuntimeCall(address routine, Intrinsic* x);

View File

@ -5775,9 +5775,22 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
// Anonymous classes such as generated LambdaForm classes are also not included.
if (SystemDictionaryShared::is_sharing_possible(_loader_data) &&
_host_klass == NULL) {
oop class_loader = _loader_data->class_loader();
ResourceMark rm(THREAD);
classlist_file->print_cr("%s", _class_name->as_C_string());
classlist_file->flush();
// For the boot and platform class loaders, check if the class is not found in the
// java runtime image. Additional check for the boot class loader is if the class
// is not found in the boot loader's appended entries. This indicates that the class
// is not usable during run time, such as the ones found in the --patch-module entries,
// so it should not be included in the classlist file.
if (((class_loader == NULL && !ClassLoader::contains_append_entry(stream->source())) ||
SystemDictionary::is_platform_class_loader(class_loader)) &&
!ClassLoader::is_jrt(stream->source())) {
tty->print_cr("skip writing class %s from source %s to classlist file",
_class_name->as_C_string(), stream->source());
} else {
classlist_file->print_cr("%s", _class_name->as_C_string());
classlist_file->flush();
}
}
}
#endif
@ -5856,6 +5869,11 @@ void ClassFileParser::post_process_parsed_stream(const ClassFileStream* const st
assert(cp != NULL, "invariant");
assert(_loader_data != NULL, "invariant");
if (_class_name == vmSymbols::java_lang_Object()) {
check_property(_local_interfaces == Universe::the_empty_klass_array(),
"java.lang.Object cannot implement an interface in class file %s",
CHECK);
}
// We check super class after class file is parsed and format is checked
if (_super_class_index > 0 && NULL ==_super_klass) {
Symbol* const super_class_name = cp->klass_name_at(_super_class_index);

View File

@ -945,11 +945,11 @@ ClassPathZipEntry* ClassLoader::create_class_path_zip_entry(const char *path, bo
}
// returns true if entry already on class path
bool ClassLoader::contains_entry(ClassPathEntry *entry) {
bool ClassLoader::contains_append_entry(const char* name) {
ClassPathEntry* e = _first_append_entry;
while (e != NULL) {
// assume zip entries have been canonicalized
if (strcmp(entry->name(), e->name()) == 0) {
if (strcmp(name, e->name()) == 0) {
return true;
}
e = e->next();
@ -991,7 +991,7 @@ bool ClassLoader::update_class_path_entry_list(const char *path,
// Do not reorder the bootclasspath which would break get_system_package().
// Add new entry to linked list
if (!check_for_duplicates || !contains_entry(new_entry)) {
if (!check_for_duplicates || !contains_append_entry(new_entry->name())) {
ClassLoaderExt::add_class_path_entry(path, check_for_duplicates, new_entry);
}
return true;

View File

@ -451,7 +451,7 @@ class ClassLoader: AllStatic {
static void set_first_append_entry(ClassPathEntry* entry);
// indicates if class path already contains an entry (exact match by name)
static bool contains_entry(ClassPathEntry* entry);
static bool contains_append_entry(const char* name);
// adds a class path list
static void add_to_list(ClassPathEntry* new_entry);

View File

@ -74,7 +74,7 @@ instanceKlassHandle KlassFactory::check_shared_class_file_load_hook(
(SharedClassPathEntry*)FileMapInfo::shared_classpath(path_index);
ClassFileStream* stream = new ClassFileStream(ptr,
end_ptr - ptr,
ent->_name,
ent == NULL ? NULL : ent->_name,
ClassFileStream::verify);
ClassFileParser parser(stream,
class_name,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -203,6 +203,7 @@ class stack_map_frame {
inline bool verify(address start, address end) const;
inline void print_on(outputStream* st, int current_offset) const;
inline void print_truncated(outputStream* st, int current_offset) const;
// Create as_xxx and is_xxx methods for the subtypes
#define FRAME_TYPE_DECL(stackmap_frame_type, arg1, arg2) \
@ -263,6 +264,10 @@ class same_frame : public stack_map_frame {
void print_on(outputStream* st, int current_offset = -1) const {
st->print("same_frame(@%d)", offset_delta() + current_offset);
}
void print_truncated(outputStream* st, int current_offset = -1) const {
print_on(st, current_offset);
}
};
class same_frame_extended : public stack_map_frame {
@ -309,6 +314,10 @@ class same_frame_extended : public stack_map_frame {
void print_on(outputStream* st, int current_offset = -1) const {
st->print("same_frame_extended(@%d)", offset_delta() + current_offset);
}
void print_truncated(outputStream* st, int current_offset = -1) const {
print_on(st, current_offset);
}
};
class same_locals_1_stack_item_frame : public stack_map_frame {
@ -381,6 +390,11 @@ class same_locals_1_stack_item_frame : public stack_map_frame {
types()->print_on(st);
st->print(")");
}
void print_truncated(outputStream* st, int current_offset = -1) const {
st->print("same_locals_1_stack_item_frame(@%d), output truncated, Stackmap exceeds table size.",
offset_delta() + current_offset);
}
};
class same_locals_1_stack_item_extended : public stack_map_frame {
@ -446,6 +460,11 @@ class same_locals_1_stack_item_extended : public stack_map_frame {
types()->print_on(st);
st->print(")");
}
void print_truncated(outputStream* st, int current_offset = -1) const {
st->print("same_locals_1_stack_item_extended(@%d), output truncated, Stackmap exceeds table size.",
offset_delta() + current_offset);
}
};
class chop_frame : public stack_map_frame {
@ -511,6 +530,10 @@ class chop_frame : public stack_map_frame {
void print_on(outputStream* st, int current_offset = -1) const {
st->print("chop_frame(@%d,%d)", offset_delta() + current_offset, chops());
}
void print_truncated(outputStream* st, int current_offset = -1) const {
print_on(st, current_offset);
}
};
class append_frame : public stack_map_frame {
@ -619,6 +642,11 @@ class append_frame : public stack_map_frame {
}
st->print(")");
}
void print_truncated(outputStream* st, int current_offset = -1) const {
st->print("append_frame(@%d), output truncated, Stackmap exceeds table size.",
offset_delta() + current_offset);
}
};
class full_frame : public stack_map_frame {
@ -784,6 +812,11 @@ class full_frame : public stack_map_frame {
}
st->print("})");
}
void print_truncated(outputStream* st, int current_offset = -1) const {
st->print("full_frame(@%d), output truncated, Stackmap exceeds table size.",
offset_delta() + current_offset);
}
};
#define VIRTUAL_DISPATCH(stack_frame_type, func_name, args) \
@ -841,6 +874,10 @@ void stack_map_frame::print_on(outputStream* st, int offs = -1) const {
FOR_EACH_STACKMAP_FRAME_TYPE(VOID_VIRTUAL_DISPATCH, print_on, (st, offs));
}
void stack_map_frame::print_truncated(outputStream* st, int offs = -1) const {
FOR_EACH_STACKMAP_FRAME_TYPE(VOID_VIRTUAL_DISPATCH, print_truncated, (st, offs));
}
#undef VIRTUAL_DISPATCH
#undef VOID_VIRTUAL_DISPATCH

View File

@ -437,17 +437,15 @@ void StringTable::dump(outputStream* st, bool verbose) {
st->print("%d: ", length);
} else {
ResourceMark rm(THREAD);
int utf8_length;
int utf8_length = length;
char* utf8_string;
if (!is_latin1) {
jchar* chars = value->char_at_addr(0);
utf8_length = UNICODE::utf8_length(chars, length);
utf8_string = UNICODE::as_utf8(chars, length);
utf8_string = UNICODE::as_utf8(chars, utf8_length);
} else {
jbyte* bytes = value->byte_at_addr(0);
utf8_length = UNICODE::utf8_length(bytes, length);
utf8_string = UNICODE::as_utf8(bytes, length);
utf8_string = UNICODE::as_utf8(bytes, utf8_length);
}
st->print("%d: ", utf8_length);

View File

@ -1234,7 +1234,7 @@ bool SystemDictionary::is_shared_class_visible(Symbol* class_name,
SharedClassPathEntry* ent =
(SharedClassPathEntry*)FileMapInfo::shared_classpath(path_index);
if (!Universe::is_module_initialized()) {
assert(ent->is_jrt(),
assert(ent != NULL && ent->is_jrt(),
"Loading non-bootstrap classes before the module system is initialized");
assert(class_loader.is_null(), "sanity");
return true;
@ -1257,6 +1257,7 @@ bool SystemDictionary::is_shared_class_visible(Symbol* class_name,
}
if (class_loader.is_null()) {
assert(ent != NULL, "Shared class for NULL classloader must have valid SharedClassPathEntry");
// The NULL classloader can load archived class originated from the
// "modules" jimage and the -Xbootclasspath/a. For class from the
// "modules" jimage, the PackageEntry/ModuleEntry must be defined

View File

@ -541,8 +541,19 @@ void ErrorContext::stackmap_details(outputStream* ss, const Method* method) cons
stack_map_frame* sm_frame = sm_table->entries();
streamIndentor si2(ss);
int current_offset = -1;
// Subtract two from StackMapAttribute length because the length includes
// two bytes for number of table entries.
size_t sm_table_space = method->stackmap_data()->length() - 2;
for (u2 i = 0; i < sm_table->number_of_entries(); ++i) {
ss->indent();
size_t sm_frame_size = sm_frame->size();
// If the size of the next stackmap exceeds the length of the entire
// stackmap table then print a truncated message and return.
if (sm_frame_size > sm_table_space) {
sm_frame->print_truncated(ss, current_offset);
return;
}
sm_table_space -= sm_frame_size;
sm_frame->print_on(ss, current_offset);
ss->cr();
current_offset += sm_frame->offset_delta();

View File

@ -45,6 +45,10 @@
#include "c1/c1_Runtime1.hpp"
#endif
const char* CodeBlob::compiler_name() const {
return compilertype2name(_type);
}
unsigned int CodeBlob::align_code_offset(int offset) {
// align the size to CodeEntryAlignment
return
@ -65,7 +69,7 @@ unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
return size;
}
CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
_name(name),
_size(layout.size()),
_header_size(layout.header_size()),
@ -80,7 +84,8 @@ CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, int frame_com
_data_end(layout.data_end()),
_relocation_begin(layout.relocation_begin()),
_relocation_end(layout.relocation_end()),
_content_begin(layout.content_begin())
_content_begin(layout.content_begin()),
_type(type)
{
assert(layout.size() == round_to(layout.size(), oopSize), "unaligned size");
assert(layout.header_size() == round_to(layout.header_size(), oopSize), "unaligned size");
@ -92,7 +97,7 @@ CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, int frame_com
#endif // COMPILER1
}
CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
_name(name),
_size(layout.size()),
_header_size(layout.header_size()),
@ -106,7 +111,8 @@ CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, CodeBuffer* c
_data_end(layout.data_end()),
_relocation_begin(layout.relocation_begin()),
_relocation_end(layout.relocation_end()),
_content_begin(layout.content_begin())
_content_begin(layout.content_begin()),
_type(type)
{
assert(_size == round_to(_size, oopSize), "unaligned size");
assert(_header_size == round_to(_header_size, oopSize), "unaligned size");
@ -123,7 +129,7 @@ CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, CodeBuffer* c
// Creates a simple CodeBlob. Sets up the size of the different regions.
RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
: CodeBlob(name, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, NULL, false /* caller_must_gc_arguments */)
: CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, NULL, false /* caller_must_gc_arguments */)
{
assert(locs_size == round_to(locs_size, oopSize), "unaligned size");
assert(!UseRelocIndex, "no space allocated for reloc index yet");
@ -148,7 +154,7 @@ RuntimeBlob::RuntimeBlob(
int frame_size,
OopMapSet* oop_maps,
bool caller_must_gc_arguments
) : CodeBlob(name, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
cb->copy_code_and_locs_to(this);
}

View File

@ -26,6 +26,7 @@
#define SHARE_VM_CODE_CODEBLOB_HPP
#include "asm/codeBuffer.hpp"
#include "compiler/compilerDefinitions.hpp"
#include "compiler/oopMap.hpp"
#include "runtime/frame.hpp"
#include "runtime/handles.hpp"
@ -71,7 +72,8 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
friend class CodeCacheDumper;
protected:
const char* _name;
const CompilerType _type; // CompilerType
int _size; // total size of CodeBlob in bytes
int _header_size; // size of header (depends on subclass)
int _frame_complete_offset; // instruction offsets in [0.._frame_complete_offset) have
@ -92,9 +94,10 @@ protected:
ImmutableOopMapSet* _oop_maps; // OopMap for this CodeBlob
bool _caller_must_gc_arguments;
CodeStrings _strings;
const char* _name;
CodeBlob(const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
CodeBlob(const char* name, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
public:
// Returns the space needed for CodeBlob
static unsigned int allocation_size(CodeBuffer* cb, int header_size);
@ -115,9 +118,11 @@ public:
virtual bool is_method_handles_adapter_blob() const { return false; }
virtual bool is_compiled() const { return false; }
virtual bool is_compiled_by_c2() const { return false; }
virtual bool is_compiled_by_c1() const { return false; }
virtual bool is_compiled_by_jvmci() const { return false; }
inline bool is_compiled_by_c1() const { return _type == compiler_c1; };
inline bool is_compiled_by_c2() const { return _type == compiler_c2; };
inline bool is_compiled_by_jvmci() const { return _type == compiler_jvmci; };
inline bool is_compiled_by_shark() const { return _type == compiler_shark; };
const char* compiler_name() const;
// Casting
nmethod* as_nmethod_or_null() { return is_nmethod() ? (nmethod*) this : NULL; }

View File

@ -31,14 +31,14 @@
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
CompiledMethod::CompiledMethod(Method* method, const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
: CodeBlob(name, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
: CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
_method(method), _mark_for_deoptimization_status(not_marked) {
init_defaults();
}
CompiledMethod::CompiledMethod(Method* method, const char* name, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
: CodeBlob(name, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
: CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
_method(method), _mark_for_deoptimization_status(not_marked) {
init_defaults();
}

View File

@ -164,8 +164,8 @@ protected:
virtual void flush() = 0;
protected:
CompiledMethod(Method* method, const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
CompiledMethod(Method* method, const char* name, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
public:
virtual bool is_compiled() const { return true; }
@ -191,12 +191,10 @@ public:
// will be transformed to zombie immediately
};
virtual AbstractCompiler* compiler() const = 0;
virtual bool is_in_use() const = 0;
virtual int comp_level() const = 0;
virtual int compile_id() const = 0;
virtual address verified_entry_point() const = 0;
virtual void log_identity(xmlStream* log) const = 0;
virtual void log_state_change() const = 0;

View File

@ -272,79 +272,3 @@ bool DependencyContext::find_stale_entries() {
int nmethodBucket::decrement() {
return Atomic::add(-1, (volatile int *)&_count);
}
/////////////// Unit tests ///////////////
#ifndef PRODUCT
class TestDependencyContext {
public:
nmethod* _nmethods[3];
intptr_t _dependency_context;
DependencyContext dependencies() {
DependencyContext depContext(&_dependency_context);
return depContext;
}
TestDependencyContext() : _dependency_context(DependencyContext::EMPTY) {
CodeCache_lock->lock_without_safepoint_check();
_nmethods[0] = reinterpret_cast<nmethod*>(0x8 * 0);
_nmethods[1] = reinterpret_cast<nmethod*>(0x8 * 1);
_nmethods[2] = reinterpret_cast<nmethod*>(0x8 * 2);
dependencies().add_dependent_nmethod(_nmethods[2]);
dependencies().add_dependent_nmethod(_nmethods[1]);
dependencies().add_dependent_nmethod(_nmethods[0]);
}
~TestDependencyContext() {
dependencies().wipe();
CodeCache_lock->unlock();
}
static void testRemoveDependentNmethod(int id, bool delete_immediately) {
TestDependencyContext c;
DependencyContext depContext = c.dependencies();
assert(!has_stale_entries(depContext), "check");
nmethod* nm = c._nmethods[id];
depContext.remove_dependent_nmethod(nm, delete_immediately);
if (!delete_immediately) {
assert(has_stale_entries(depContext), "check");
assert(depContext.is_dependent_nmethod(nm), "check");
depContext.expunge_stale_entries();
}
assert(!has_stale_entries(depContext), "check");
assert(!depContext.is_dependent_nmethod(nm), "check");
}
static void testRemoveDependentNmethod() {
testRemoveDependentNmethod(0, false);
testRemoveDependentNmethod(1, false);
testRemoveDependentNmethod(2, false);
testRemoveDependentNmethod(0, true);
testRemoveDependentNmethod(1, true);
testRemoveDependentNmethod(2, true);
}
static void test() {
testRemoveDependentNmethod();
}
static bool has_stale_entries(DependencyContext ctx) {
assert(ctx.has_stale_entries() == ctx.find_stale_entries(), "check");
return ctx.has_stale_entries();
}
};
void TestDependencyContext_test() {
TestDependencyContext::test();
}
#endif // PRODUCT

View File

@ -29,6 +29,7 @@
#include "oops/oop.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepoint.hpp"
class nmethod;
class DepChange;

View File

@ -82,32 +82,6 @@
#endif
bool nmethod::is_compiled_by_c1() const {
if (compiler() == NULL) {
return false;
}
return compiler()->is_c1();
}
bool nmethod::is_compiled_by_jvmci() const {
if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing
if (is_native_method()) return false;
return compiler()->is_jvmci();
}
bool nmethod::is_compiled_by_c2() const {
if (compiler() == NULL) {
return false;
}
return compiler()->is_c2();
}
bool nmethod::is_compiled_by_shark() const {
if (compiler() == NULL) {
return false;
}
return compiler()->is_shark();
}
//---------------------------------------------------------------------------------
// NMethod statistics
// They are printed under various flags, including:
@ -440,7 +414,6 @@ void nmethod::init_defaults() {
_scavenge_root_link = NULL;
}
_scavenge_root_state = 0;
_compiler = NULL;
#if INCLUDE_RTM_OPT
_rtm_state = NoRTM;
#endif
@ -468,7 +441,7 @@ nmethod* nmethod::new_native_nmethod(const methodHandle& method,
CodeOffsets offsets;
offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), compiler_none, native_nmethod_size,
compile_id, &offsets,
code_buffer, frame_size,
basic_lock_owner_sp_offset,
@ -518,7 +491,7 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
+ round_to(debug_info->data_size() , oopSize);
nm = new (nmethod_size, comp_level)
nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
nmethod(method(), compiler->type(), nmethod_size, compile_id, entry_bci, offsets,
orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
oop_maps,
handler_table,
@ -569,6 +542,7 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
// For native wrappers
nmethod::nmethod(
Method* method,
CompilerType type,
int nmethod_size,
int compile_id,
CodeOffsets* offsets,
@ -577,7 +551,7 @@ nmethod::nmethod(
ByteSize basic_lock_owner_sp_offset,
ByteSize basic_lock_sp_offset,
OopMapSet* oop_maps )
: CompiledMethod(method, "native nmethod", nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
: CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
_native_receiver_sp_offset(basic_lock_owner_sp_offset),
_native_basic_lock_sp_offset(basic_lock_sp_offset)
{
@ -666,6 +640,7 @@ void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw
nmethod::nmethod(
Method* method,
CompilerType type,
int nmethod_size,
int compile_id,
int entry_bci,
@ -685,7 +660,7 @@ nmethod::nmethod(
Handle speculation_log
#endif
)
: CompiledMethod(method, "nmethod", nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
: CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
_native_receiver_sp_offset(in_ByteSize(-1)),
_native_basic_lock_sp_offset(in_ByteSize(-1))
{
@ -701,7 +676,6 @@ nmethod::nmethod(
_entry_bci = entry_bci;
_compile_id = compile_id;
_comp_level = comp_level;
_compiler = compiler;
_orig_pc_offset = orig_pc_offset;
_hotness_counter = NMethodSweeper::hotness_counter_reset_val();
@ -803,9 +777,7 @@ void nmethod::log_identity(xmlStream* log) const {
log->print(" compile_id='%d'", compile_id());
const char* nm_kind = compile_kind();
if (nm_kind != NULL) log->print(" compile_kind='%s'", nm_kind);
if (compiler() != NULL) {
log->print(" compiler='%s'", compiler()->name());
}
log->print(" compiler='%s'", compiler_name());
if (TieredCompilation) {
log->print(" level='%d'", comp_level());
}

View File

@ -74,8 +74,6 @@ class nmethod : public CompiledMethod {
static nmethod* volatile _oops_do_mark_nmethods;
nmethod* volatile _oops_do_mark_link;
AbstractCompiler* _compiler; // The compiler which compiled this nmethod
// offsets for entry points
address _entry_point; // entry point with class check
address _verified_entry_point; // entry point without class check
@ -166,6 +164,7 @@ class nmethod : public CompiledMethod {
// For native wrappers
nmethod(Method* method,
CompilerType type,
int nmethod_size,
int compile_id,
CodeOffsets* offsets,
@ -177,6 +176,7 @@ class nmethod : public CompiledMethod {
// Creation support
nmethod(Method* method,
CompilerType type,
int nmethod_size,
int compile_id,
int entry_bci,
@ -251,18 +251,10 @@ class nmethod : public CompiledMethod {
ByteSize basic_lock_sp_offset,
OopMapSet* oop_maps);
// accessors
AbstractCompiler* compiler() const { return _compiler; }
// type info
bool is_nmethod() const { return true; }
bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
bool is_compiled_by_c1() const;
bool is_compiled_by_jvmci() const;
bool is_compiled_by_c2() const;
bool is_compiled_by_shark() const;
// boundaries for different parts
address consts_begin () const { return header_begin() + _consts_offset ; }
address consts_end () const { return code_begin() ; }

View File

@ -743,7 +743,9 @@ address virtual_call_Relocation::cached_value() {
}
Method* virtual_call_Relocation::method_value() {
Metadata* m = code()->metadata_at(_method_index);
CompiledMethod* cm = code();
if (cm == NULL) return (Method*)NULL;
Metadata* m = cm->metadata_at(_method_index);
assert(m != NULL || _method_index == 0, "should be non-null for non-zero index");
assert(m == NULL || m->is_method(), "not a method");
return (Method*)m;
@ -769,7 +771,9 @@ void opt_virtual_call_Relocation::unpack_data() {
}
Method* opt_virtual_call_Relocation::method_value() {
Metadata* m = code()->metadata_at(_method_index);
CompiledMethod* cm = code();
if (cm == NULL) return (Method*)NULL;
Metadata* m = cm->metadata_at(_method_index);
assert(m != NULL || _method_index == 0, "should be non-null for non-zero index");
assert(m == NULL || m->is_method(), "not a method");
return (Method*)m;
@ -800,7 +804,9 @@ address opt_virtual_call_Relocation::static_stub() {
}
Method* static_call_Relocation::method_value() {
Metadata* m = code()->metadata_at(_method_index);
CompiledMethod* cm = code();
if (cm == NULL) return (Method*)NULL;
Metadata* m = cm->metadata_at(_method_index);
assert(m != NULL || _method_index == 0, "should be non-null for non-zero index");
assert(m == NULL || m->is_method(), "not a method");
return (Method*)m;
@ -970,7 +976,9 @@ void RelocIterator::print_current() {
// work even during GC or other inconvenient times.
if (WizardMode && oop_value != NULL) {
tty->print("oop_value=" INTPTR_FORMAT ": ", p2i(oop_value));
oop_value->print_value_on(tty);
if (oop_value->is_oop()) {
oop_value->print_value_on(tty);
}
}
break;
}

View File

@ -26,6 +26,7 @@
#define SHARE_VM_COMPILER_ABSTRACTCOMPILER_HPP
#include "ci/compilerInterface.hpp"
#include "compiler/compilerDefinitions.hpp"
#include "compiler/compilerDirectives.hpp"
typedef void (*initializer)(void);
@ -82,24 +83,15 @@ class AbstractCompiler : public CHeapObj<mtCompiler> {
// This thread will initialize the compiler runtime.
bool should_perform_init();
// The (closed set) of concrete compiler classes.
enum Type {
none,
c1,
c2,
jvmci,
shark
};
private:
Type _type;
const CompilerType _type;
#if INCLUDE_JVMCI
CompilerStatistics _stats;
#endif
public:
AbstractCompiler(Type type) : _type(type), _compiler_state(uninitialized), _num_compiler_threads(0) {}
AbstractCompiler(CompilerType type) : _type(type), _compiler_state(uninitialized), _num_compiler_threads(0) {}
// This function determines the compiler thread that will perform the
// shutdown of the corresponding compiler runtime.
@ -157,10 +149,11 @@ class AbstractCompiler : public CHeapObj<mtCompiler> {
}
// Compiler type queries.
bool is_c1() { return _type == c1; }
bool is_c2() { return _type == c2; }
bool is_jvmci() { return _type == jvmci; }
bool is_shark() { return _type == shark; }
const bool is_c1() { return _type == compiler_c1; }
const bool is_c2() { return _type == compiler_c2; }
const bool is_jvmci() { return _type == compiler_jvmci; }
const bool is_shark() { return _type == compiler_shark; }
const CompilerType type() { return _type; }
// Extra tests to identify trivial methods for the tiered compilation policy.
virtual bool is_trivial(Method* method) { return false; }

View File

@ -0,0 +1,34 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "compiler/compilerDefinitions.hpp"
const char* compilertype2name_tab[compiler_number_of_types] = {
"",
"c1",
"c2",
"jvmci",
"shark"
};

View File

@ -0,0 +1,109 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_COMPILER_COMPILERDEFINITIONS_HPP
#define SHARE_VM_COMPILER_COMPILERDEFINITIONS_HPP
#include "utilities/globalDefinitions.hpp"
// The (closed set) of concrete compiler classes.
enum CompilerType {
compiler_none,
compiler_c1,
compiler_c2,
compiler_jvmci,
compiler_shark,
compiler_number_of_types
};
extern const char* compilertype2name_tab[compiler_number_of_types]; // Map CompilerType to its name
inline const char* compilertype2name(CompilerType t) { return (uint)t < compiler_number_of_types ? compilertype2name_tab[t] : NULL; }
// Handy constants for deciding which compiler mode to use.
enum MethodCompilation {
InvocationEntryBci = -1 // i.e., not an on-stack replacement compilation
};
// Enumeration to distinguish tiers of compilation
enum CompLevel {
CompLevel_any = -1,
CompLevel_all = -1,
CompLevel_none = 0, // Interpreter
CompLevel_simple = 1, // C1
CompLevel_limited_profile = 2, // C1, invocation & backedge counters
CompLevel_full_profile = 3, // C1, invocation & backedge counters + mdo
CompLevel_full_optimization = 4, // C2, Shark or JVMCI
#if defined(COMPILER2) || defined(SHARK)
CompLevel_highest_tier = CompLevel_full_optimization, // pure C2 and tiered or JVMCI and tiered
#elif defined(COMPILER1)
CompLevel_highest_tier = CompLevel_simple, // pure C1 or JVMCI
#else
CompLevel_highest_tier = CompLevel_none,
#endif
#if defined(TIERED)
CompLevel_initial_compile = CompLevel_full_profile // tiered
#elif defined(COMPILER1) || INCLUDE_JVMCI
CompLevel_initial_compile = CompLevel_simple // pure C1 or JVMCI
#elif defined(COMPILER2) || defined(SHARK)
CompLevel_initial_compile = CompLevel_full_optimization // pure C2
#else
CompLevel_initial_compile = CompLevel_none
#endif
};
inline bool is_c1_compile(int comp_level) {
return comp_level > CompLevel_none && comp_level < CompLevel_full_optimization;
}
inline bool is_c2_compile(int comp_level) {
return comp_level == CompLevel_full_optimization;
}
inline bool is_highest_tier_compile(int comp_level) {
return comp_level == CompLevel_highest_tier;
}
inline bool is_compile(int comp_level) {
return is_c1_compile(comp_level) || is_c2_compile(comp_level);
}
// States of Restricted Transactional Memory usage.
enum RTMState {
NoRTM = 0x2, // Don't use RTM
UseRTM = 0x1, // Use RTM
ProfileRTM = 0x0 // Use RTM with abort ratio calculation
};
#ifndef INCLUDE_RTM_OPT
#define INCLUDE_RTM_OPT 0
#endif
#if INCLUDE_RTM_OPT
#define RTM_OPT_ONLY(code) code
#else
#define RTM_OPT_ONLY(code)
#endif
#endif // SHARE_VM_COMPILER_COMPILERDEFINITIONS_HPP

View File

@ -922,18 +922,13 @@ size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
return res;
}
} else {
// must read from what 'p' points to in each loop.
Klass* k = ((volatile oopDesc*)p)->klass_or_null();
// Ensure klass read before size.
Klass* k = oop(p)->klass_or_null_acquire();
if (k != NULL) {
assert(k->is_klass(), "Should really be klass oop.");
oop o = (oop)p;
assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
// Bugfix for systems with weak memory model (PPC64/IA64).
// The object o may be an array. Acquire to make sure that the array
// size (third word) is consistent.
OrderAccess::acquire();
size_t res = o->size_given_klass(k);
res = adjustObjectSize(res);
assert(res != 0, "Block size should not be 0");
@ -977,21 +972,13 @@ const {
return res;
}
} else {
// must read from what 'p' points to in each loop.
Klass* k = ((volatile oopDesc*)p)->klass_or_null();
// We trust the size of any object that has a non-NULL
// klass and (for those in the perm gen) is parsable
// -- irrespective of its conc_safe-ty.
// Ensure klass read before size.
Klass* k = oop(p)->klass_or_null_acquire();
if (k != NULL) {
assert(k->is_klass(), "Should really be klass oop.");
oop o = (oop)p;
assert(o->is_oop(), "Should be an oop");
// Bugfix for systems with weak memory model (PPC64/IA64).
// The object o may be an array. Acquire to make sure that the array
// size (third word) is consistent.
OrderAccess::acquire();
size_t res = o->size_given_klass(k);
res = adjustObjectSize(res);
assert(res != 0, "Block size should not be 0");
@ -1028,7 +1015,7 @@ bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
FreeChunk* fc = (FreeChunk*)p;
assert(is_in_reserved(p), "Should be in space");
if (FreeChunk::indicatesFreeChunk(p)) return false;
Klass* k = oop(p)->klass_or_null();
Klass* k = oop(p)->klass_or_null_acquire();
if (k != NULL) {
// Ignore mark word because it may have been used to
// chain together promoted objects (the last one

View File

@ -5630,7 +5630,7 @@ size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
size_t sz = 0;
oop p = (oop)addr;
if (p->klass_or_null() != NULL) {
if (p->klass_or_null_acquire() != NULL) {
sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
} else {
sz = block_size_using_printezis_bits(addr);
@ -6076,7 +6076,7 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
}
if (_bitMap->isMarked(addr)) {
// it's marked; is it potentially uninitialized?
if (p->klass_or_null() != NULL) {
if (p->klass_or_null_acquire() != NULL) {
// an initialized object; ignore mark word in verification below
// since we are running concurrent with mutators
assert(p->is_oop(true), "should be an oop");
@ -6121,7 +6121,7 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
}
} else {
// Either a not yet marked object or an uninitialized object
if (p->klass_or_null() == NULL) {
if (p->klass_or_null_acquire() == NULL) {
// An uninitialized object, skip to the next card, since
// we may not be able to read its P-bits yet.
assert(size == 0, "Initial value");
@ -6320,7 +6320,7 @@ bool MarkFromRootsClosure::do_bit(size_t offset) {
assert(_skipBits == 0, "tautology");
_skipBits = 2; // skip next two marked bits ("Printezis-marks")
oop p = oop(addr);
if (p->klass_or_null() == NULL) {
if (p->klass_or_null_acquire() == NULL) {
DEBUG_ONLY(if (!_verifying) {)
// We re-dirty the cards on which this object lies and increase
// the _threshold so that we'll come back to scan this object
@ -6340,7 +6340,7 @@ bool MarkFromRootsClosure::do_bit(size_t offset) {
if (_threshold < end_card_addr) {
_threshold = end_card_addr;
}
if (p->klass_or_null() != NULL) {
if (p->klass_or_null_acquire() != NULL) {
// Redirty the range of cards...
_mut->mark_range(redirty_range);
} // ...else the setting of klass will dirty the card anyway.
@ -6483,7 +6483,7 @@ bool ParMarkFromRootsClosure::do_bit(size_t offset) {
assert(_skip_bits == 0, "tautology");
_skip_bits = 2; // skip next two marked bits ("Printezis-marks")
oop p = oop(addr);
if (p->klass_or_null() == NULL) {
if (p->klass_or_null_acquire() == NULL) {
// in the case of Clean-on-Enter optimization, redirty card
// and avoid clearing card by increasing the threshold.
return true;
@ -7354,7 +7354,7 @@ size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
"alignment problem");
#ifdef ASSERT
if (oop(addr)->klass_or_null() != NULL) {
if (oop(addr)->klass_or_null_acquire() != NULL) {
// Ignore mark word because we are running concurrent with mutators
assert(oop(addr)->is_oop(true), "live block should be an oop");
assert(size ==
@ -7365,7 +7365,7 @@ size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
} else {
// This should be an initialized object that's alive.
assert(oop(addr)->klass_or_null() != NULL,
assert(oop(addr)->klass_or_null_acquire() != NULL,
"Should be an initialized object");
// Ignore mark word because we are running concurrent with mutators
assert(oop(addr)->is_oop(true), "live block should be an oop");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,99 +53,4 @@ void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_ind
biased_index, bias(), length());
}
class TestMappedArray : public G1BiasedMappedArray<int> {
protected:
virtual int default_value() const { return 0xBAADBABE; }
public:
static void test_biasedarray() {
const size_t REGION_SIZE_IN_WORDS = 512;
const size_t NUM_REGIONS = 20;
HeapWord* fake_heap = (HeapWord*)LP64_ONLY(0xBAAA00000) NOT_LP64(0xBA000000); // Any value that is non-zero
TestMappedArray array;
array.initialize(fake_heap, fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS,
REGION_SIZE_IN_WORDS * HeapWordSize);
// Check address calculation (bounds)
assert(array.bottom_address_mapped() == fake_heap,
"bottom mapped address should be " PTR_FORMAT ", but is " PTR_FORMAT, p2i(fake_heap), p2i(array.bottom_address_mapped()));
assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS), "must be");
int* bottom = array.address_mapped_to(fake_heap);
assert((void*)bottom == (void*) array.base(), "must be");
int* end = array.address_mapped_to(fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS);
assert((void*)end == (void*)(array.base() + array.length()), "must be");
// The entire array should contain default value elements
for (int* current = bottom; current < end; current++) {
assert(*current == array.default_value(), "must be");
}
// Test setting values in the table
HeapWord* region_start_address = fake_heap + REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2);
HeapWord* region_end_address = fake_heap + (REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2) + REGION_SIZE_IN_WORDS - 1);
// Set/get by address tests: invert some value; first retrieve one
int actual_value = array.get_by_index(NUM_REGIONS / 2);
array.set_by_index(NUM_REGIONS / 2, ~actual_value);
// Get the same value by address, should correspond to the start of the "region"
int value = array.get_by_address(region_start_address);
assert(value == ~actual_value, "must be");
// Get the same value by address, at one HeapWord before the start
value = array.get_by_address(region_start_address - 1);
assert(value == array.default_value(), "must be");
// Get the same value by address, at the end of the "region"
value = array.get_by_address(region_end_address);
assert(value == ~actual_value, "must be");
// Make sure the next value maps to another index
value = array.get_by_address(region_end_address + 1);
assert(value == array.default_value(), "must be");
// Reset the value in the array
array.set_by_address(region_start_address + (region_end_address - region_start_address) / 2, actual_value);
// The entire array should have the default value again
for (int* current = bottom; current < end; current++) {
assert(*current == array.default_value(), "must be");
}
// Set/get by index tests: invert some value
idx_t index = NUM_REGIONS / 2;
actual_value = array.get_by_index(index);
array.set_by_index(index, ~actual_value);
value = array.get_by_index(index);
assert(value == ~actual_value, "must be");
value = array.get_by_index(index - 1);
assert(value == array.default_value(), "must be");
value = array.get_by_index(index + 1);
assert(value == array.default_value(), "must be");
array.set_by_index(0, 0);
value = array.get_by_index(0);
assert(value == 0, "must be");
array.set_by_index(array.length() - 1, 0);
value = array.get_by_index(array.length() - 1);
assert(value == 0, "must be");
array.set_by_index(index, 0);
// The array should have three zeros, and default values otherwise
size_t num_zeros = 0;
for (int* current = bottom; current < end; current++) {
assert(*current == array.default_value() || *current == 0, "must be");
if (*current == 0) {
num_zeros++;
}
}
assert(num_zeros == 3, "must be");
}
};
void TestG1BiasedArray_test() {
TestMappedArray::test_biasedarray();
}
#endif

View File

@ -4420,6 +4420,19 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G
NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
// InitialMark needs claim bits to keep track of the marked-through CLDs.
if (collector_state()->during_initial_mark_pause()) {
double start_clear_claimed_marks = os::elapsedTime();
ClassLoaderDataGraph::clear_claimed_marks();
double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
phase_times->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
}
double start_par_time_sec = os::elapsedTime();
double end_par_time_sec;
@ -4427,10 +4440,6 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G
const uint n_workers = workers()->active_workers();
G1RootProcessor root_processor(this, n_workers);
G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
// InitialMark needs claim bits to keep track of the marked-through CLDs.
if (collector_state()->during_initial_mark_pause()) {
ClassLoaderDataGraph::clear_claimed_marks();
}
print_termination_stats_hdr();
@ -4444,8 +4453,6 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G
// reported parallel time.
}
G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
phase_times->record_par_time(par_time_ms);

View File

@ -101,6 +101,7 @@ void G1GCPhaseTimes::note_gc_start() {
_gc_start_counter = os::elapsed_counter();
_cur_expand_heap_time_ms = 0.0;
_external_accounted_time_ms = 0.0;
_recorded_clear_claimed_marks_time_ms = 0.0;
for (int i = 0; i < GCParPhasesSentinel; i++) {
if (_gc_par_phases[i] != NULL) {
@ -306,6 +307,10 @@ void G1GCPhaseTimes::print() {
debug_line("Reference Processing", _cur_ref_proc_time_ms);
debug_line("Reference Enqueuing", _cur_ref_enq_time_ms);
debug_line("Redirty Cards", _recorded_redirty_logged_cards_time_ms);
if (_recorded_clear_claimed_marks_time_ms > 0.0) {
debug_line("Clear Claimed Marks", _recorded_clear_claimed_marks_time_ms);
}
trace_phase(_gc_par_phases[RedirtyCards]);
if (G1EagerReclaimHumongousObjects) {
debug_line("Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);

View File

@ -103,6 +103,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _external_accounted_time_ms;
double _recorded_clear_claimed_marks_time_ms;
double _recorded_young_cset_choice_time_ms;
double _recorded_non_young_cset_choice_time_ms;
@ -257,6 +259,10 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_external_accounted_time_ms += time_ms;
}
void record_clear_claimed_marks_time_ms(double recorded_clear_claimed_marks_time_ms) {
_recorded_clear_claimed_marks_time_ms = recorded_clear_claimed_marks_time_ms;
}
double cur_collection_start_sec() {
return _cur_collection_start_sec;
}

View File

@ -370,50 +370,3 @@ void HumongousRegionSetMtSafeChecker::check() {
"master humongous set MT safety protocol outside a safepoint");
}
}
void FreeRegionList_test() {
FreeRegionList l("test");
const uint num_regions_in_test = 5;
// Create a fake heap. It does not need to be valid, as the HeapRegion constructor
// does not access it.
MemRegion heap(NULL, num_regions_in_test * HeapRegion::GrainWords);
// Allocate a fake BOT because the HeapRegion constructor initializes
// the BOT.
size_t bot_size = G1BlockOffsetTable::compute_size(heap.word_size());
HeapWord* bot_data = NEW_C_HEAP_ARRAY(HeapWord, bot_size, mtGC);
ReservedSpace bot_rs(G1BlockOffsetTable::compute_size(heap.word_size()));
G1RegionToSpaceMapper* bot_storage =
G1RegionToSpaceMapper::create_mapper(bot_rs,
bot_rs.size(),
os::vm_page_size(),
HeapRegion::GrainBytes,
BOTConstants::N_bytes,
mtGC);
G1BlockOffsetTable bot(heap, bot_storage);
bot_storage->commit_regions(0, num_regions_in_test);
// Set up memory regions for the heap regions.
MemRegion mr0(heap.start(), HeapRegion::GrainWords);
MemRegion mr1(mr0.end(), HeapRegion::GrainWords);
MemRegion mr2(mr1.end(), HeapRegion::GrainWords);
MemRegion mr3(mr2.end(), HeapRegion::GrainWords);
MemRegion mr4(mr3.end(), HeapRegion::GrainWords);
HeapRegion hr0(0, &bot, mr0);
HeapRegion hr1(1, &bot, mr1);
HeapRegion hr2(2, &bot, mr2);
HeapRegion hr3(3, &bot, mr3);
HeapRegion hr4(4, &bot, mr4);
l.add_ordered(&hr1);
l.add_ordered(&hr0);
l.add_ordered(&hr3);
l.add_ordered(&hr4);
l.add_ordered(&hr2);
assert(l.length() == num_regions_in_test, "wrong length");
l.verify_list();
bot_storage->uncommit_regions(0, num_regions_in_test);
delete bot_storage;
FREE_C_HEAP_ARRAY(HeapWord, bot_data);
}
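The test above relies on add_ordered keeping the list sorted by region index regardless of insertion order. A minimal standalone sketch of that idea, assuming index-ordered insertion into a singly linked list (illustrative only, not the HotSpot FreeRegionList):
#include <cassert>
#include <cstddef>
// Minimal stand-in for a heap region: only the index matters here.
struct Region {
  unsigned index;
  Region* next;
};
// Insert while keeping the list sorted by ascending region index.
static void add_ordered(Region** head, Region* r) {
  Region** cur = head;
  while (*cur != NULL && (*cur)->index < r->index) {
    cur = &(*cur)->next;
  }
  r->next = *cur;
  *cur = r;
}
int main() {
  Region r0 = {0, NULL}, r1 = {1, NULL}, r2 = {2, NULL}, r3 = {3, NULL}, r4 = {4, NULL};
  Region* head = NULL;
  // Same out-of-order sequence as the test above.
  add_ordered(&head, &r1);
  add_ordered(&head, &r0);
  add_ordered(&head, &r3);
  add_ordered(&head, &r4);
  add_ordered(&head, &r2);
  unsigned expected = 0;
  for (Region* cur = head; cur != NULL; cur = cur->next) {
    assert(cur->index == expected++);
  }
  assert(expected == 5);
  return 0;
}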

View File

@ -79,126 +79,3 @@ void WorkerDataArray<size_t>::WDAPrinter::details(const WorkerDataArray<size_t>*
}
out->cr();
}
#ifndef PRODUCT
#include "memory/resourceArea.hpp"
void WorkerDataArray_test_verify_string(const char* expected_string, const char* actual_string) {
const size_t expected_len = strlen(expected_string);
assert(expected_len == strlen(actual_string),
"Wrong string length, expected " SIZE_FORMAT " but got " SIZE_FORMAT "(Expected '%s' but got: '%s')",
expected_len, strlen(actual_string), expected_string, actual_string);
// Can't use strncmp here because floating point values use different decimal points for different locales.
// Allow strings to differ in "." vs. "," only. This should still catch most errors.
for (size_t i = 0; i < expected_len; i++) {
char e = expected_string[i];
char a = actual_string[i];
if (e != a) {
if ((e == '.' || e == ',') && (a == '.' || a == ',')) {
// Most likely just a difference in locale
} else {
assert(false, "Expected '%s' but got: '%s'", expected_string, actual_string);
}
}
}
}
void WorkerDataArray_test_verify_array(WorkerDataArray<size_t>& array, size_t expected_sum, double expected_avg, const char* expected_summary, const char* expected_details) {
const double epsilon = 0.0001;
assert(array.sum() == expected_sum, "Wrong sum, expected: " SIZE_FORMAT " but got: " SIZE_FORMAT, expected_sum, array.sum());
assert(fabs(array.average() - expected_avg) < epsilon, "Wrong average, expected: %f but got: %f", expected_avg, array.average());
ResourceMark rm;
stringStream out;
array.print_summary_on(&out);
WorkerDataArray_test_verify_string(expected_summary, out.as_string());
out.reset();
array.print_details_on(&out);
WorkerDataArray_test_verify_string(expected_details, out.as_string());
}
void WorkerDataArray_test_verify_array(WorkerDataArray<double>& array, double expected_sum, double expected_avg, const char* expected_summary, const char* expected_details) {
const double epsilon = 0.0001;
assert(fabs(array.sum() - expected_sum) < epsilon, "Wrong sum, expected: %f but got: %f", expected_sum, array.sum());
assert(fabs(array.average() - expected_avg) < epsilon, "Wrong average, expected: %f but got: %f", expected_avg, array.average());
ResourceMark rm;
stringStream out;
array.print_summary_on(&out);
WorkerDataArray_test_verify_string(expected_summary, out.as_string());
out.reset();
array.print_details_on(&out);
WorkerDataArray_test_verify_string(expected_details, out.as_string());
}
void WorkerDataArray_test_basic() {
WorkerDataArray<size_t> array(3, "Test array");
array.set(0, 5);
array.set(1, 3);
array.set(2, 7);
WorkerDataArray_test_verify_array(array, 15, 5.0,
"Test array Min: 3, Avg: 5.0, Max: 7, Diff: 4, Sum: 15, Workers: 3\n",
" 5 3 7\n" );
}
void WorkerDataArray_test_add() {
WorkerDataArray<size_t> array(3, "Test array");
array.set(0, 5);
array.set(1, 3);
array.set(2, 7);
for (uint i = 0; i < 3; i++) {
array.add(i, 1);
}
WorkerDataArray_test_verify_array(array, 18, 6.0,
"Test array Min: 4, Avg: 6.0, Max: 8, Diff: 4, Sum: 18, Workers: 3\n",
" 6 4 8\n" );
}
void WorkerDataArray_test_with_uninitialized() {
WorkerDataArray<size_t> array(3, "Test array");
array.set(0, 5);
array.set(1, WorkerDataArray<size_t>::uninitialized());
array.set(2, 7);
WorkerDataArray_test_verify_array(array, 12, 6,
"Test array Min: 5, Avg: 6.0, Max: 7, Diff: 2, Sum: 12, Workers: 2\n",
" 5 - 7\n" );
}
void WorkerDataArray_test_uninitialized() {
WorkerDataArray<size_t> array(3, "Test array");
array.set(0, WorkerDataArray<size_t>::uninitialized());
array.set(1, WorkerDataArray<size_t>::uninitialized());
array.set(2, WorkerDataArray<size_t>::uninitialized());
WorkerDataArray_test_verify_array(array, 0, 0.0,
"Test array skipped\n",
" - - -\n" );
}
void WorkerDataArray_test_double_with_uninitialized() {
WorkerDataArray<double> array(3, "Test array");
array.set(0, 5.1 / MILLIUNITS);
array.set(1, WorkerDataArray<double>::uninitialized());
array.set(2, 7.2 / MILLIUNITS);
WorkerDataArray_test_verify_array(array, 12.3 / MILLIUNITS, 6.15 / MILLIUNITS,
"Test array Min: 5.1, Avg: 6.1, Max: 7.2, Diff: 2.1, Sum: 12.3, Workers: 2\n",
" 5.1 - 7.2\n" );
}
void WorkerDataArray_test() {
WorkerDataArray_test_basic();
WorkerDataArray_test_add();
WorkerDataArray_test_with_uninitialized();
WorkerDataArray_test_uninitialized();
WorkerDataArray_test_double_with_uninitialized();
}
#endif
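The expected summary strings above encode min, average, max, diff, sum and the worker count, skipping uninitialized slots. A minimal standalone sketch of that aggregation (illustrative names and sentinel; not the WorkerDataArray implementation):
#include <cassert>
#include <cmath>
#include <cstddef>
static const size_t UNINITIALIZED = (size_t)-1;  // hypothetical "never set" sentinel
struct Summary { size_t min; size_t max; size_t sum; double avg; size_t workers; };
// Aggregate worker values, ignoring slots that were never set.
static Summary summarize(const size_t* values, size_t length) {
  Summary s = { (size_t)-1, 0, 0, 0.0, 0 };
  for (size_t i = 0; i < length; i++) {
    if (values[i] == UNINITIALIZED) continue;
    if (values[i] < s.min) s.min = values[i];
    if (values[i] > s.max) s.max = values[i];
    s.sum += values[i];
    s.workers++;
  }
  s.avg = (s.workers == 0) ? 0.0 : (double)s.sum / s.workers;
  return s;
}
int main() {
  size_t with_gap[3] = { 5, UNINITIALIZED, 7 };
  Summary s = summarize(with_gap, 3);
  // Corresponds to "Min: 5, Avg: 6.0, Max: 7, Diff: 2, Sum: 12, Workers: 2" above,
  // where Diff is max - min.
  assert(s.min == 5 && s.max == 7 && s.sum == 12 && s.workers == 2);
  assert(std::fabs(s.avg - 6.0) < 0.0001);
  return 0;
}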

View File

@ -909,184 +909,3 @@ void MarkSweepPolicy::initialize_gc_policy_counters() {
_gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
}
/////////////// Unit tests ///////////////
#ifndef PRODUCT
// Testing that the NewSize flag is handled correctly is hard because it
// depends on so many other configurable variables. This test only tries to
// verify that the policies honor some basic rules for NewSize.
class TestGenCollectorPolicy {
public:
static void test_new_size() {
size_t flag_value;
save_flags();
// If NewSize has been ergonomically set, the collector policy
// should use it for min but calculate the initial young size
// using NewRatio.
flag_value = 20 * M;
set_basic_flag_values();
FLAG_SET_ERGO(size_t, NewSize, flag_value);
verify_young_min(flag_value);
set_basic_flag_values();
FLAG_SET_ERGO(size_t, NewSize, flag_value);
verify_scaled_young_initial(InitialHeapSize);
// If NewSize is set on the command line, it should be used
// for both min and initial young size if less than min heap.
// Note that once a flag has been set with FLAG_SET_CMDLINE it
// will be treated as if it had been set on the command line for
// the rest of the VM lifetime. This is an irreversible change.
flag_value = 20 * M;
set_basic_flag_values();
FLAG_SET_CMDLINE(size_t, NewSize, flag_value);
verify_young_min(flag_value);
set_basic_flag_values();
FLAG_SET_CMDLINE(size_t, NewSize, flag_value);
verify_young_initial(flag_value);
// If NewSize is set on command line, but is larger than the min
// heap size, it should only be used for initial young size.
flag_value = 80 * M;
set_basic_flag_values();
FLAG_SET_CMDLINE(size_t, NewSize, flag_value);
verify_young_initial(flag_value);
restore_flags();
}
static void test_old_size() {
size_t flag_value;
size_t heap_alignment = CollectorPolicy::compute_heap_alignment();
save_flags();
// If OldSize is set on the command line, it should be used
// for both min and initial old size if less than min heap.
flag_value = 20 * M;
set_basic_flag_values();
FLAG_SET_CMDLINE(size_t, OldSize, flag_value);
verify_old_min(flag_value);
set_basic_flag_values();
FLAG_SET_CMDLINE(size_t, OldSize, flag_value);
// Calculate what we expect the flag to be.
size_t expected_old_initial = align_size_up(InitialHeapSize, heap_alignment) - MaxNewSize;
verify_old_initial(expected_old_initial);
// If MaxNewSize is large, the maximum OldSize will be less than
// what's requested on the command line and it should be reset
// ergonomically.
// We intentionally set MaxNewSize + OldSize > MaxHeapSize (see over_size).
flag_value = 30 * M;
set_basic_flag_values();
FLAG_SET_CMDLINE(size_t, OldSize, flag_value);
size_t over_size = 20*M;
size_t new_size_value = align_size_up(MaxHeapSize, heap_alignment) - flag_value + over_size;
FLAG_SET_CMDLINE(size_t, MaxNewSize, new_size_value);
// Calculate what we expect the flag to be.
expected_old_initial = align_size_up(MaxHeapSize, heap_alignment) - MaxNewSize;
verify_old_initial(expected_old_initial);
restore_flags();
}
static void verify_young_min(size_t expected) {
MarkSweepPolicy msp;
msp.initialize_all();
assert(msp.min_young_size() <= expected, "%zu > %zu", msp.min_young_size(), expected);
}
static void verify_young_initial(size_t expected) {
MarkSweepPolicy msp;
msp.initialize_all();
assert(msp.initial_young_size() == expected, "%zu != %zu", msp.initial_young_size(), expected);
}
static void verify_scaled_young_initial(size_t initial_heap_size) {
MarkSweepPolicy msp;
msp.initialize_all();
if (InitialHeapSize > initial_heap_size) {
// InitialHeapSize was adapted by msp.initialize_all, e.g. due to alignment
// caused by 64K page size.
initial_heap_size = InitialHeapSize;
}
size_t expected = msp.scale_by_NewRatio_aligned(initial_heap_size);
assert(msp.initial_young_size() == expected, "%zu != %zu", msp.initial_young_size(), expected);
assert(FLAG_IS_ERGO(NewSize) && NewSize == expected,
"NewSize should have been set ergonomically to %zu, but was %zu", expected, NewSize);
}
static void verify_old_min(size_t expected) {
MarkSweepPolicy msp;
msp.initialize_all();
assert(msp.min_old_size() <= expected, "%zu > %zu", msp.min_old_size(), expected);
}
static void verify_old_initial(size_t expected) {
MarkSweepPolicy msp;
msp.initialize_all();
assert(msp.initial_old_size() == expected, "%zu != %zu", msp.initial_old_size(), expected);
}
private:
static size_t original_InitialHeapSize;
static size_t original_MaxHeapSize;
static size_t original_MaxNewSize;
static size_t original_MinHeapDeltaBytes;
static size_t original_NewSize;
static size_t original_OldSize;
static void set_basic_flag_values() {
FLAG_SET_ERGO(size_t, MaxHeapSize, 180 * M);
FLAG_SET_ERGO(size_t, InitialHeapSize, 100 * M);
FLAG_SET_ERGO(size_t, OldSize, 4 * M);
FLAG_SET_ERGO(size_t, NewSize, 1 * M);
FLAG_SET_ERGO(size_t, MaxNewSize, 80 * M);
Arguments::set_min_heap_size(40 * M);
}
static void save_flags() {
original_InitialHeapSize = InitialHeapSize;
original_MaxHeapSize = MaxHeapSize;
original_MaxNewSize = MaxNewSize;
original_MinHeapDeltaBytes = MinHeapDeltaBytes;
original_NewSize = NewSize;
original_OldSize = OldSize;
}
static void restore_flags() {
InitialHeapSize = original_InitialHeapSize;
MaxHeapSize = original_MaxHeapSize;
MaxNewSize = original_MaxNewSize;
MinHeapDeltaBytes = original_MinHeapDeltaBytes;
NewSize = original_NewSize;
OldSize = original_OldSize;
}
};
size_t TestGenCollectorPolicy::original_InitialHeapSize = 0;
size_t TestGenCollectorPolicy::original_MaxHeapSize = 0;
size_t TestGenCollectorPolicy::original_MaxNewSize = 0;
size_t TestGenCollectorPolicy::original_MinHeapDeltaBytes = 0;
size_t TestGenCollectorPolicy::original_NewSize = 0;
size_t TestGenCollectorPolicy::original_OldSize = 0;
void TestNewSize_test() {
TestGenCollectorPolicy::test_new_size();
}
void TestOldSize_test() {
TestGenCollectorPolicy::test_old_size();
}
#endif
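The old-size expectations above are derived by aligning the heap size up and subtracting the young generation. A minimal standalone sketch of that arithmetic with the same flag values as set_basic_flag_values (the power-of-two align-up helper and the 2M alignment are illustrative assumptions, not HotSpot's align_size_up or its real heap alignment):
#include <cassert>
#include <cstddef>
static const size_t M = 1024 * 1024;
// Round value up to the next multiple of a power-of-two alignment.
static size_t align_up_pow2(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}
int main() {
  const size_t heap_alignment    = 2 * M;    // assumed example alignment
  const size_t initial_heap_size = 100 * M;  // as in set_basic_flag_values above
  const size_t max_new_size      = 80 * M;
  // The initial old size is whatever the aligned heap leaves after the young gen.
  size_t expected_old_initial = align_up_pow2(initial_heap_size, heap_alignment) - max_new_size;
  assert(expected_old_initial == 20 * M);
  return 0;
}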

View File

@ -37,7 +37,7 @@
JVMCICompiler* JVMCICompiler::_instance = NULL;
elapsedTimer JVMCICompiler::_codeInstallTimer;
JVMCICompiler::JVMCICompiler() : AbstractCompiler(jvmci) {
JVMCICompiler::JVMCICompiler() : AbstractCompiler(compiler_jvmci) {
_bootstrapping = false;
_bootstrap_compilation_request_handled = false;
_methods_compiled = 0;

View File

@ -50,7 +50,7 @@
"Use JVMCI as the default compiler") \
\
experimental(bool, JVMCIPrintProperties, false, \
"Prints properties used by the JVMCI compiler") \
"Prints properties used by the JVMCI compiler and exits") \
\
experimental(bool, BootstrapJVMCI, false, \
"Bootstrap JVMCI before running Java main method") \

View File

@ -283,11 +283,15 @@ public:
bool validate_classpath_entry_table();
static SharedClassPathEntry* shared_classpath(int index) {
if (index < 0) {
return NULL;
}
char* p = (char*)_classpath_entry_table;
p += _classpath_entry_size * index;
return (SharedClassPathEntry*)p;
}
static const char* shared_classpath_name(int index) {
assert(index >= 0, "Sanity");
return shared_classpath(index)->_name;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -79,87 +79,3 @@ void GuardedMemory::print_on(outputStream* st) const {
break;
}
}
// test code...
#ifndef PRODUCT
static void guarded_memory_test_check(void* p, size_t sz, void* tag) {
assert(p != NULL, "NULL pointer given to check");
u_char* c = (u_char*) p;
GuardedMemory guarded(c);
assert(guarded.get_tag() == tag, "Tag is not the same as supplied");
assert(guarded.get_user_ptr() == c, "User pointer is not the same as supplied");
assert(guarded.get_user_size() == sz, "User size is not the same as supplied");
assert(guarded.verify_guards(), "Guard broken");
}
void GuardedMemory::test_guarded_memory() {
// Test the basic characteristics...
size_t total_sz = GuardedMemory::get_total_size(1);
assert(total_sz > 1 && total_sz >= (sizeof(GuardHeader) + 1 + sizeof(Guard)), "Unexpected size");
u_char* basep = (u_char*) os::malloc(total_sz, mtInternal);
GuardedMemory guarded(basep, 1, (void*)0xf000f000);
assert(*basep == badResourceValue, "Expected guard in the form of badResourceValue");
u_char* userp = guarded.get_user_ptr();
assert(*userp == uninitBlockPad, "Expected uninitialized data in the form of uninitBlockPad");
guarded_memory_test_check(userp, 1, (void*)0xf000f000);
void* freep = guarded.release_for_freeing();
assert((u_char*)freep == basep, "Expected the same base pointer the guard was wrapped around");
assert(*userp == freeBlockPad, "Expected user data to be free block padded");
assert(!guarded.verify_guards(), "Expected failed");
os::free(freep);
// Test a number of odd sizes...
size_t sz = 0;
do {
void* p = os::malloc(GuardedMemory::get_total_size(sz), mtInternal);
void* up = guarded.wrap_with_guards(p, sz, (void*)1);
memset(up, 0, sz);
guarded_memory_test_check(up, sz, (void*)1);
os::free(guarded.release_for_freeing());
sz = (sz << 4) + 1;
} while (sz < (256 * 1024));
// Test buffer overrun into head...
basep = (u_char*) os::malloc(GuardedMemory::get_total_size(1), mtInternal);
guarded.wrap_with_guards(basep, 1);
*basep = 0;
assert(!guarded.verify_guards(), "Expected failure");
os::free(basep);
// Test buffer overrun into tail with a number of odd sizes...
sz = 1;
do {
void* p = os::malloc(GuardedMemory::get_total_size(sz), mtInternal);
void* up = guarded.wrap_with_guards(p, sz, (void*)1);
memset(up, 0, sz + 1); // Buffer-overwrite (within guard)
assert(!guarded.verify_guards(), "Guard was not broken as expected");
os::free(guarded.release_for_freeing());
sz = (sz << 4) + 1;
} while (sz < (256 * 1024));
// Test wrap_copy/free_copy...
assert(GuardedMemory::free_copy(NULL), "Expected free NULL to be OK");
const char* str = "Check my bounds out";
size_t str_sz = strlen(str) + 1;
char* str_copy = (char*) GuardedMemory::wrap_copy(str, str_sz);
guarded_memory_test_check(str_copy, str_sz, NULL);
assert(strcmp(str, str_copy) == 0, "Not identical copy");
assert(GuardedMemory::free_copy(str_copy), "Free copy failed to verify");
void* no_data = NULL;
void* no_data_copy = GuardedMemory::wrap_copy(no_data, 0);
assert(GuardedMemory::free_copy(no_data_copy), "Expected valid guards even for no data copy");
}
void GuardedMemory_test() {
GuardedMemory::test_guarded_memory();
}
#endif // !PRODUCT
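The removed test checks the guard-band idea: known byte patterns are placed before and after the user block and re-verified to detect overruns. A minimal standalone sketch of that idea (illustrative layout and patterns; not GuardedMemory's real header/footer format):
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>
struct GuardedBlock {
  enum { GUARD = 0xAB, GUARD_BYTES = 8 };  // illustrative pattern and band size
  unsigned char* base;   // start of the whole allocation, including guards
  size_t user_size;
  unsigned char* user() { return base + GUARD_BYTES; }
  static GuardedBlock wrap(size_t sz) {
    GuardedBlock b;
    b.user_size = sz;
    b.base = (unsigned char*)malloc(sz + 2 * GUARD_BYTES);
    memset(b.base, GUARD, GUARD_BYTES);                     // head guard
    memset(b.base + GUARD_BYTES + sz, GUARD, GUARD_BYTES);  // tail guard
    return b;
  }
  bool verify() const {
    for (size_t i = 0; i < (size_t)GUARD_BYTES; i++) {
      if (base[i] != GUARD || base[GUARD_BYTES + user_size + i] != GUARD) {
        return false;
      }
    }
    return true;
  }
};
int main() {
  GuardedBlock b = GuardedBlock::wrap(16);
  assert(b.verify());
  b.user()[16] = 0;   // overrun one byte into the tail guard
  assert(!b.verify());
  free(b.base);
  return 0;
}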

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -82,6 +82,7 @@
*/
class GuardedMemory : StackObj { // Wrapper on stack
friend class GuardedMemoryTest;
// Private inner classes for memory layout...
protected:
@ -317,10 +318,6 @@ protected:
*/
static bool free_copy(void* p);
// Testing...
#ifndef PRODUCT
static void test_guarded_memory(void);
#endif
}; // GuardedMemory
#endif // SHARE_VM_MEMORY_GUARDEDMEMORY_HPP

Some files were not shown because too many files have changed in this diff.