Lana Steuck 2012-12-16 22:05:19 -08:00
commit 7aab781edb
1021 changed files with 36679 additions and 21705 deletions


@ -188,3 +188,5 @@ cdaa6122185f9bf512dcd6600f56bfccc4824e8c jdk8-b61
70fa4b11f26522e69b51fd652215f60ce350bac3 jdk8-b64
a2cf4d4a484378caea2e827ed604b2bbae58bdba jdk8-b65
17820b958ae84f7c1cc6719319c8e2232f7a4f1d jdk8-b66
76cc9bd3ece407d3a15d3bea537b57927973c5e7 jdk8-b67
cb33628d4e8f11e879c371959e5948b66a53376f jdk8-b68


@ -188,3 +188,5 @@ e07f499b9dccb529ecf74172cf6ac11a195ec57a jdk8-b60
1c8370a55b305d35353346202bde042ba9e8a9fd jdk8-b64
b772de306dc24c17f7bd1398531ddeb58723b804 jdk8-b65
13bb8c326e7b7b0b19d78c8088033e3932e3f7ca jdk8-b66
9a6ec97ec45c1a62d5233cefa91e8390e380e13a jdk8-b67
cdb401a60cea6ad5ef3f498725ed1decf8dda1ea jdk8-b68


@ -90,13 +90,25 @@ AC_DEFUN([BASIC_FIXUP_EXECUTABLE],
tmp="$complete EOL"
arguments="${tmp#* }"
new_path=`$WHICH $path 2> /dev/null`
# Cannot rely on the command "which" here since it doesn't always work.
is_absolute_path=`$ECHO "$path" | $GREP ^/`
if test -z "$is_absolute_path"; then
# Path to executable is not absolute. Find it.
IFS_save="$IFS"
IFS=:
for p in $PATH; do
if test -f "$p/$path" && test -x "$p/$path"; then
new_path="$p/$path"
break
fi
done
IFS="$IFS_save"
else
AC_MSG_NOTICE([Resolving $1 (as $path) failed, using $path directly.])
new_path="$path"
fi
if test "x$new_path" = x; then
is_absolute_path=`$ECHO "$path" | $GREP ^/`
if test "x$is_absolute_path" != x; then
AC_MSG_NOTICE([Resolving $1 (as $path) with 'which' failed, using $path directly.])
new_path="$path"
else
AC_MSG_NOTICE([The path of $1, which resolves as "$complete", is not found.])
has_space=`$ECHO "$complete" | $GREP " "`
if test "x$has_space" != x; then
@ -104,20 +116,19 @@ AC_DEFUN([BASIC_FIXUP_EXECUTABLE],
fi
AC_MSG_ERROR([Cannot locate the path of $1])
fi
fi
fi
# Now join together the path and the arguments once again
if test "x$arguments" != xEOL; then
new_complete="$new_path ${arguments% *}"
else
new_complete="$new_path"
fi
# Now join together the path and the arguments once again
if test "x$arguments" != xEOL; then
new_complete="$new_path ${arguments% *}"
else
new_complete="$new_path"
fi
if test "x$complete" != "x$new_complete"; then
$1="$new_complete"
AC_MSG_NOTICE([Rewriting $1 to "$new_complete"])
fi
$1="$new_complete"
AC_MSG_NOTICE([Rewriting $1 to "$new_complete"])
fi
])
AC_DEFUN([BASIC_REMOVE_SYMBOLIC_LINKS],
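
Outside the autoconf wrapping, the fallback this hunk adds boils down to walking $PATH by hand when "which" cannot be trusted. A minimal standalone sketch in plain sh (the tool name is hypothetical, not part of the macro):

# Resolve "mytool" without relying on which(1).
tool=mytool
new_path=""
IFS_save="$IFS"; IFS=:
for p in $PATH; do
  if test -f "$p/$tool" && test -x "$p/$tool"; then
    new_path="$p/$tool"
    break
  fi
done
IFS="$IFS_save"
echo "resolved: ${new_path:-not found}"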

File diff suppressed because it is too large.


@ -97,6 +97,24 @@ EXTRA_LDFLAGS=@LEGACY_EXTRA_LDFLAGS@
USE_PRECOMPILED_HEADER=@USE_PRECOMPILED_HEADER@
# Hotspot expects the variable FULL_DEBUG_SYMBOLS=1/0 to control debug symbol
# creation.
ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
FULL_DEBUG_SYMBOLS=1
# Ensure hotspot uses the objcopy that configure located
ALT_OBJCOPY:=$(OBJCOPY)
else
FULL_DEBUG_SYMBOLS=0
endif
# Hotspot expects the variable ZIP_DEBUGINFO_FILES=1/0 and not true/false.
ifeq ($(ZIP_DEBUGINFO_FILES)$(ENABLE_DEBUG_SYMBOLS), truetrue)
ZIP_DEBUGINFO_FILES:=1
endif
ifeq ($(ZIP_DEBUGINFO_FILES), false)
ZIP_DEBUGINFO_FILES:=0
endif
# Sneak this in via the spec.gmk file, since we don't want to mess around too much with the Hotspot make files.
# This is needed to get the LOG setting to work properly.
include $(SRC_ROOT)/common/makefiles/MakeBase.gmk
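
The "truetrue" comparison above is the common make idiom of concatenating two variables so a single ifeq ANDs two boolean checks. The same trick in plain sh, with illustrative variable names:

zip_debuginfo=true
enable_debug=true
# One concatenated comparison instead of two nested tests.
if [ "${zip_debuginfo}${enable_debug}" = "truetrue" ]; then
  ZIP_DEBUGINFO_FILES=1
else
  ZIP_DEBUGINFO_FILES=0
fi
echo "ZIP_DEBUGINFO_FILES=${ZIP_DEBUGINFO_FILES}"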


@ -432,32 +432,30 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_SYMBOLS],
# ENABLE_DEBUG_SYMBOLS
# This must be done after the toolchain is set up, since we're looking at objcopy.
#
ENABLE_DEBUG_SYMBOLS=default
# default on macosx is no...
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
ENABLE_DEBUG_SYMBOLS=no
fi
AC_ARG_ENABLE([debug-symbols],
[AS_HELP_STRING([--disable-debug-symbols],[disable generation of debug symbols @<:@enabled@:>@])],
[ENABLE_DEBUG_SYMBOLS=${enable_debug_symbols}],
)
[AS_HELP_STRING([--disable-debug-symbols],[disable generation of debug symbols @<:@enabled@:>@])])
AC_MSG_CHECKING([if we should generate debug symbols])
if test "x$ENABLE_DEBUG_SYMBOLS" = "xyes" && test "x$OBJCOPY" = x; then
if test "x$enable_debug_symbols" = "xyes" && test "x$OBJCOPY" = x; then
# explicit enabling of debug symbols, but objcopy cannot be found;
# this is an error
AC_MSG_ERROR([Unable to find objcopy, cannot enable debug-symbols])
fi
if test "x$ENABLE_DEBUG_SYMBOLS" = "xdefault"; then
if test "x$enable_debug_symbols" = "xyes"; then
ENABLE_DEBUG_SYMBOLS=true
elif test "x$enable_debug_symbols" = "xno"; then
ENABLE_DEBUG_SYMBOLS=false
else
# default on macosx is false
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
ENABLE_DEBUG_SYMBOLS=false
# Default is on if objcopy is found, otherwise off
if test "x$OBJCOPY" != x || test "x$OPENJDK_TARGET_OS" = xwindows; then
ENABLE_DEBUG_SYMBOLS=yes
elif test "x$OBJCOPY" != x || test "x$OPENJDK_TARGET_OS" = xwindows; then
ENABLE_DEBUG_SYMBOLS=true
else
ENABLE_DEBUG_SYMBOLS=no
ENABLE_DEBUG_SYMBOLS=false
fi
fi
@ -466,22 +464,16 @@ AC_MSG_RESULT([$ENABLE_DEBUG_SYMBOLS])
#
# ZIP_DEBUGINFO_FILES
#
ZIP_DEBUGINFO_FILES=yes
AC_ARG_ENABLE([zip-debug-info],
[AS_HELP_STRING([--disable-zip-debug-info],[disable zipping of debug-info files @<:@enabled@:>@])],
[ZIP_DEBUGINFO_FILES=${enable_zip_debug_info}],
)
[AS_HELP_STRING([--disable-zip-debug-info],[disable zipping of debug-info files @<:@enabled@:>@])])
AC_MSG_CHECKING([if we should zip debug-info files])
AC_MSG_RESULT([$ZIP_DEBUGINFO_FILES])
AC_MSG_RESULT([${enable_zip_debug_info}])
# Hotspot ultimately wants ZIP_DEBUGINFO_FILES to be 1 for yes, but the 1/0
# conversion now happens in hotspot-spec.gmk, so store true/false here.
if test "x$ZIP_DEBUGINFO_FILES" = "xyes"; then
ZIP_DEBUGINFO_FILES=1
if test "x${enable_zip_debug_info}" = "xno"; then
ZIP_DEBUGINFO_FILES=false
else
ZIP_DEBUGINFO_FILES=0
ZIP_DEBUGINFO_FILES=true
fi
AC_SUBST(ENABLE_DEBUG_SYMBOLS)
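
Stripped of the autoconf plumbing, the decision this hunk arrives at is: an explicit --enable/--disable-debug-symbols wins; otherwise symbols default to on when objcopy was found or the target is Windows (which does not need objcopy). A sketch of that logic in plain sh, assuming the same variable names:

# enable_debug_symbols is "yes", "no", or empty (flag not given).
if [ "x$enable_debug_symbols" = "xyes" ] && [ "x$OBJCOPY" = "x" ]; then
  echo "error: cannot enable debug symbols without objcopy" >&2
  exit 1
fi
case "$enable_debug_symbols" in
  yes) ENABLE_DEBUG_SYMBOLS=true ;;
  no)  ENABLE_DEBUG_SYMBOLS=false ;;
  *)   if [ "x$OBJCOPY" != "x" ] || [ "x$OPENJDK_TARGET_OS" = "xwindows" ]; then
         ENABLE_DEBUG_SYMBOLS=true
       else
         ENABLE_DEBUG_SYMBOLS=false
       fi ;;
esac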


@ -114,13 +114,25 @@ AC_DEFUN([TOOLCHAIN_FIND_COMPILER],
[
COMPILER_NAME=$2
# Do a first attempt at searching the list of compiler names.
$1=
# If TOOLS_DIR is set, check for all compiler names in there first
# before checking the rest of the PATH.
if test -n "$TOOLS_DIR"; then
PATH_save="$PATH"
PATH="$TOOLS_DIR"
AC_PATH_PROGS(TOOLS_DIR_$1, $3)
$1=$TOOLS_DIR_$1
PATH="$PATH_save"
fi
# AC_PATH_PROGS can't be run multiple times with the same variable,
# so create a new name for this run.
AC_PATH_PROGS(POTENTIAL_$1, $3)
$1=$POTENTIAL_$1
if test "x[$]$1" = x; then
AC_PATH_PROGS(POTENTIAL_$1, $3)
$1=$POTENTIAL_$1
fi
if test "x$[$]$1" = x; then
if test "x[$]$1" = x; then
HELP_MSG_MISSING_DEPENDENCY([devkit])
AC_MSG_ERROR([Could not find a $COMPILER_NAME compiler. $HELP_MSG])
fi
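
The pattern here is a two-stage lookup: probe the devkit TOOLS_DIR with a restricted PATH first, and only fall back to the normal PATH when that turns up nothing. A hedged sh sketch of the same search order (the function and names are illustrative, not the macro itself):

find_tool() {
  # $1 is a space-separated list of candidate names, e.g. "gcc cc".
  found=""
  if [ -n "$TOOLS_DIR" ]; then
    PATH_save="$PATH"
    PATH="$TOOLS_DIR"
    for c in $1; do
      p=`command -v "$c" 2>/dev/null` && { found="$p"; break; }
    done
    PATH="$PATH_save"
  fi
  if [ -z "$found" ]; then
    for c in $1; do
      p=`command -v "$c" 2>/dev/null` && { found="$p"; break; }
    done
  fi
  echo "$found"
}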

common/bin/hgforest.sh (new file, 194 lines)

@ -0,0 +1,194 @@
#!/bin/sh
#
# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# Shell script for a fast parallel forest command
command="$1"
pull_extra_base="$2"
# Python always buffers stdout significantly, so we would not see any output from
# hg clone jdk until a lot of time has passed. By passing -u to python, we get
# incremental updates on stdout. Much nicer.
whichhg="`which hg`"
if [ "${whichhg}" = "" ] ; then
echo Cannot find hg!
exit 1
fi
if [ "" = "$command" ] ; then
echo No command to hg supplied!
exit 1
fi
has_hash_bang="`head -n 1 "${whichhg}" | cut -b 1-2`"
python=""
bpython=""
if [ "#!" = "$has_hash_bang" ] ; then
python="`head -n 1 ${whichhg} | cut -b 3-`"
bpython="`basename "$python"`"
fi
if [ "python" = "$bpython" -a -x "$python" ] ; then
hg="${python} -u ${whichhg}"
else
echo Cannot find python from hg launcher. Running plain hg, which probably has buffered stdout.
hg="hg"
fi
# Clean out the temporary directory that stores the pid files.
tmp=/tmp/forest.$$
rm -f -r ${tmp}
mkdir -p ${tmp}
safe_interrupt () {
if [ -d ${tmp} ]; then
if [ "`ls ${tmp}`" != "" ]; then
echo "Waiting for processes ( `cat ${tmp}/* | tr '\n' ' '`) to terminate nicely!"
sleep 1
# Pipe stderr to /dev/null to silence kill, which complains when trying to kill
# a subprocess that has already exited.
kill -TERM `cat ${tmp}/* | tr '\n' ' '` 2> /dev/null
wait
echo Interrupt complete!
fi
fi
rm -f -r ${tmp}
exit 1
}
nice_exit () {
if [ -d ${tmp} ]; then
if [ "`ls ${tmp}`" != "" ]; then
wait
fi
fi
rm -f -r ${tmp}
}
trap 'safe_interrupt' INT QUIT
trap 'nice_exit' EXIT
# Only look in specific locations for possible forests (avoids long searches)
pull_default=""
repos=""
repos_extra=""
if [ "${command}" = "clone" -o "${command}" = "fclone" ] ; then
subrepos="corba jaxp jaxws langtools jdk hotspot"
if [ -f .hg/hgrc ] ; then
pull_default=`hg paths default`
if [ "${pull_default}" = "" ] ; then
echo "ERROR: Need initial clone with 'hg paths default' defined"
exit 1
fi
fi
if [ "${pull_default}" = "" ] ; then
echo "ERROR: Need initial repository to use this script"
exit 1
fi
for i in ${subrepos} ; do
if [ ! -f ${i}/.hg/hgrc ] ; then
repos="${repos} ${i}"
fi
done
if [ "${pull_extra_base}" != "" ] ; then
subrepos_extra="jdk/src/closed jdk/make/closed jdk/test/closed hotspot/make/closed hotspot/src/closed hotspot/test/closed deploy install sponsors pubs"
pull_default_tail=`echo ${pull_default} | sed -e 's@^.*://[^/]*/\(.*\)@\1@'`
pull_extra="${pull_extra_base}/${pull_default_tail}"
for i in ${subrepos_extra} ; do
if [ ! -f ${i}/.hg/hgrc ] ; then
repos_extra="${repos_extra} ${i}"
fi
done
fi
at_a_time=2
# Any repos to deal with?
if [ "${repos}" = "" -a "${repos_extra}" = "" ] ; then
exit
fi
else
hgdirs=`ls -d ./.hg ./*/.hg ./*/*/.hg ./*/*/*/.hg ./*/*/*/*/.hg 2>/dev/null`
# Derive repository names from the .hg directory locations
for i in ${hgdirs} ; do
repos="${repos} `echo ${i} | sed -e 's@/.hg$@@'`"
done
for i in ${repos} ; do
if [ -h ${i}/.hg/store/lock -o -f ${i}/.hg/store/lock ] ; then
locked="${i} ${locked}"
fi
done
at_a_time=8
# Any repos to deal with?
if [ "${repos}" = "" ] ; then
echo "No repositories to process."
exit
fi
if [ "${locked}" != "" ] ; then
echo "These repositories are locked: ${locked}"
exit
fi
fi
# Echo out which repositories we run the command on.
echo "# Repositories: ${repos} ${repos_extra}"
echo
# Run the supplied command on all repos in parallel.
n=0
for i in ${repos} ${repos_extra} ; do
n=`expr ${n} '+' 1`
repopidfile=`echo ${i} | sed -e 's@./@@' -e 's@/@_@g'`
reponame=`echo ${i} | sed -e :a -e 's/^.\{1,20\}$/ &/;ta'`
pull_base="${pull_default}"
for j in $repos_extra ; do
if [ "$i" = "$j" ] ; then
pull_base="${pull_extra}"
fi
done
(
(
if [ "${command}" = "clone" -o "${command}" = "fclone" ] ; then
pull_newrepo="`echo ${pull_base}/${i} | sed -e 's@\([^:]/\)//*@\1@g'`"
echo ${hg} clone ${pull_newrepo} ${i}
${hg} clone ${pull_newrepo} ${i} &
else
echo "cd ${i} && ${hg} $*"
cd ${i} && ${hg} "$@" &
fi
echo $! > ${tmp}/${repopidfile}.pid
) 2>&1 | sed -e "s@^@${reponame}: @") &
if [ `expr ${n} '%' ${at_a_time}` -eq 0 ] ; then
sleep 2
echo Waiting 5 secs before spawning next background command.
sleep 3
fi
done
# Wait for all hg commands to complete
wait
# Terminate with exit 0 all the time (hard to know when to say "failed")
exit 0
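
Typical invocations, matching how get_source.sh below wires this script up: one initial clone, then any hg command fanned out over the forest in parallel. For example:

sh ./common/bin/hgforest.sh clone        # initial clone of all nested repositories
sh ./common/bin/hgforest.sh pull -u      # parallel pull and update everywhere
sh ./common/bin/hgforest.sh status       # any other hg command is passed through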


@ -275,10 +275,12 @@ define SetupZipArchive
# Explicitly excluded files can be given with absolute path. The patsubst solution
# isn't perfect but the likelihood of an absolute path matching something in a src
# dir is very small.
# If zip has nothing to do, it returns 12, which would fail the build. Check for 12
# and only fail on any other non-zero exit code.
$$($1_ZIP) : $$($1_ALL_SRCS) $$($1_EXTRA_DEPS)
$(MKDIR) -p $$(@D)
$(ECHO) Updating $$($1_NAME)
$$(foreach i,$$($1_SRC),(cd $$i && $(ZIP) -qru $$@ . $$($1_ZIP_INCLUDES) $$($1_ZIP_EXCLUDES) -x \*_the.\* $$(addprefix -x$(SPACE),$$(patsubst $$i/%,%,$$($1_EXCLUDE_FILES))))$$(NEWLINE)) true
$$(foreach i,$$($1_SRC),(cd $$i && $(ZIP) -qru $$@ . $$($1_ZIP_INCLUDES) $$($1_ZIP_EXCLUDES) -x \*_the.\* $$(addprefix -x$(SPACE),$$(patsubst $$i/%,%,$$($1_EXCLUDE_FILES))) || test "$$$$?" = "12" )$$(NEWLINE)) true
$(TOUCH) $$@
endef
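
zip(1) exits with status 12 when there was nothing to do, which make would otherwise treat as a failure. The guard added above is the usual idiom; in isolation (archive and directory names hypothetical) it reads:

# Succeed on exit code 0 or 12, fail on anything else.
(cd src && zip -qru ../archive.zip .) || test "$?" = "12"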


@ -302,7 +302,7 @@ define SetupNativeCompilation
endif
ifneq (,$$($1_DEBUG_SYMBOLS))
ifeq ($(ENABLE_DEBUG_SYMBOLS), yes)
ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
# Programs don't get the debug symbols added in the old build. It's not clear if
# this is intentional.
ifeq ($$($1_PROGRAM),)
@ -394,7 +394,7 @@ define SetupNativeCompilation
endif
ifneq (,$$($1_DEBUG_SYMBOLS))
ifeq ($(ENABLE_DEBUG_SYMBOLS), yes)
ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
ifeq ($(OPENJDK_TARGET_OS), windows)
$1_EXTRA_LDFLAGS+="-pdb:$$($1_OBJECT_DIR)/$$($1_LIBRARY).pdb" \
"-map:$$($1_OBJECT_DIR)/$$($1_LIBRARY).map"
@ -429,7 +429,7 @@ define SetupNativeCompilation
endif
# Touch to not retrigger rule on rebuild
$(TOUCH) $$@
ifeq ($(ZIP_DEBUGINFO_FILES), 1)
ifeq ($(ZIP_DEBUGINFO_FILES), true)
$1 += $$($1_OUTPUT_DIR)/$$(LIBRARY_PREFIX)$$($1_LIBRARY).diz
ifeq ($(OPENJDK_TARGET_OS), windows)
@ -472,7 +472,7 @@ define SetupNativeCompilation
ifneq (,$$($1_PROGRAM))
# An executable binary has been specified, set up the target for it.
ifneq (,$$($1_DEBUG_SYMBOLS))
ifeq ($(ENABLE_DEBUG_SYMBOLS), yes)
ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
ifeq ($(OPENJDK_TARGET_OS), windows)
$1_EXTRA_LDFLAGS+="-pdb:$$($1_OBJECT_DIR)/$$($1_PROGRAM).pdb" \
"-map:$$($1_OBJECT_DIR)/$$($1_PROGRAM).map"
@ -507,7 +507,7 @@ define SetupNativeCompilation
endif
$(TOUCH) $$@
ifeq ($(ZIP_DEBUGINFO_FILES), 1)
ifeq ($(ZIP_DEBUGINFO_FILES), true)
$1 += $$($1_OUTPUT_DIR)/$$($1_PROGRAM).diz
ifeq ($(OPENJDK_TARGET_OS), windows)


@ -131,6 +131,7 @@ CORE_PKGS = \
java.util.concurrent \
java.util.concurrent.atomic \
java.util.concurrent.locks \
java.util.function \
java.util.jar \
java.util.logging \
java.util.prefs \


@ -188,3 +188,5 @@ d54dc53e223ed9ce7d5f4d2cd02ad9d5def3c2db jdk8-b59
54d599a5b4aad83c235d590652fc81f41c2824fb jdk8-b64
5132f7900a8f0c30c3ca7f7a32f9433f4fee7745 jdk8-b65
65771ad1ca557ca26e4979d4dc633cf685435cb8 jdk8-b66
394515ad2a55d4d54df990b36065505d3e7a3cbb jdk8-b67
82000531feaa7baef76b6406099e5cd88943d635 jdk8-b68


@ -26,8 +26,8 @@
#
# Get clones of all nested repositories
sh ./make/scripts/hgforest.sh clone $*
sh ./common/bin/hgforest.sh clone "$@"
# Update all existing repositories to the latest sources
sh ./make/scripts/hgforest.sh pull -u
sh ./common/bin/hgforest.sh pull -u


@ -297,3 +297,7 @@ b4ee7b773144a88af8b6b92e4384dea82cb948d8 hs25-b09
cfc5309f03b7bd6c1567618b63cf1fc74c0f2a8f hs25-b10
01684f7fee1b86222be69bc23841ec2a4416696c jdk8-b66
b61d9c88b759d1594b8af1655598e8fa00393672 hs25-b11
25bdce771bb3a7ae9825261a284d292cda700122 jdk8-b67
a35a72dd2e1255239d31f796f9f693e49b36bc9f hs25-b12
121aa71316af6cd877bf455e775fa3fdbcdd4b65 jdk8-b68
b6c9c0109a608eedbb6b868d260952990e3c91fe hs25-b13


@ -69,6 +69,8 @@ public class ConstMethod extends VMObject {
signatureIndex = new CIntField(type.getCIntegerField("_signature_index"), 0);
idnum = new CIntField(type.getCIntegerField("_method_idnum"), 0);
maxStack = new CIntField(type.getCIntegerField("_max_stack"), 0);
maxLocals = new CIntField(type.getCIntegerField("_max_locals"), 0);
sizeOfParameters = new CIntField(type.getCIntegerField("_size_of_parameters"), 0);
// start of byte code
bytecodeOffset = type.getSize();
@ -96,6 +98,8 @@ public class ConstMethod extends VMObject {
private static CIntField signatureIndex;
private static CIntField idnum;
private static CIntField maxStack;
private static CIntField maxLocals;
private static CIntField sizeOfParameters;
// start of bytecode
private static long bytecodeOffset;
@ -151,6 +155,14 @@ public class ConstMethod extends VMObject {
return maxStack.getValue(this);
}
public long getMaxLocals() {
return maxLocals.getValue(this);
}
public long getSizeOfParameters() {
return sizeOfParameters.getValue(this);
}
public Symbol getName() {
return getMethod().getName();
}
@ -247,6 +259,8 @@ public class ConstMethod extends VMObject {
visitor.doCInt(signatureIndex, true);
visitor.doCInt(codeSize, true);
visitor.doCInt(maxStack, true);
visitor.doCInt(maxLocals, true);
visitor.doCInt(sizeOfParameters, true);
}
// Accessors


@ -50,8 +50,6 @@ public class Method extends Metadata {
constMethod = type.getAddressField("_constMethod");
methodData = type.getAddressField("_method_data");
methodSize = new CIntField(type.getCIntegerField("_method_size"), 0);
maxLocals = new CIntField(type.getCIntegerField("_max_locals"), 0);
sizeOfParameters = new CIntField(type.getCIntegerField("_size_of_parameters"), 0);
accessFlags = new CIntField(type.getCIntegerField("_access_flags"), 0);
code = type.getAddressField("_code");
vtableIndex = new CIntField(type.getCIntegerField("_vtable_index"), 0);
@ -83,8 +81,6 @@ public class Method extends Metadata {
private static AddressField constMethod;
private static AddressField methodData;
private static CIntField methodSize;
private static CIntField maxLocals;
private static CIntField sizeOfParameters;
private static CIntField accessFlags;
private static CIntField vtableIndex;
private static CIntField invocationCounter;
@ -134,8 +130,8 @@ public class Method extends Metadata {
/** WARNING: this is in words, not useful in this system; use getObjectSize() instead */
public long getMethodSize() { return methodSize.getValue(this); }
public long getMaxStack() { return getConstMethod().getMaxStack(); }
public long getMaxLocals() { return maxLocals.getValue(this); }
public long getSizeOfParameters() { return sizeOfParameters.getValue(this); }
public long getMaxLocals() { return getConstMethod().getMaxLocals(); }
public long getSizeOfParameters() { return getConstMethod().getSizeOfParameters(); }
public long getNameIndex() { return getConstMethod().getNameIndex(); }
public long getSignatureIndex() { return getConstMethod().getSignatureIndex(); }
public long getGenericSignatureIndex() { return getConstMethod().getGenericSignatureIndex(); }
@ -282,8 +278,6 @@ public class Method extends Metadata {
public void iterateFields(MetadataVisitor visitor) {
visitor.doCInt(methodSize, true);
visitor.doCInt(maxLocals, true);
visitor.doCInt(sizeOfParameters, true);
visitor.doCInt(accessFlags, true);
}


@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2012
HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=11
HS_BUILD_NUMBER=13
JDK_MAJOR_VER=1
JDK_MINOR_VER=8


@ -71,41 +71,36 @@ default:: $(AdditionalTargets) $(JvmtiGeneratedFiles)
!include $(HOTSPOTWORKSPACE)/make/hotspot_version
!if "$(HOTSPOT_RELEASE_VERSION)" != ""
HOTSPOT_RELEASE_VERSION="$(HOTSPOT_RELEASE_VERSION)"
!else
HOTSPOT_RELEASE_VERSION="$(HS_MAJOR_VER).$(HS_MINOR_VER)-b$(HS_BUILD_NUMBER)"
!endif
!if "$(USER_RELEASE_SUFFIX)" != ""
HOTSPOT_BUILD_VERSION$(HOTSPOT_BUILD_VERSION) = internal-$(USER_RELEASE_SUFFIX)
HOTSPOT_BUILD_VERSION = internal-$(USER_RELEASE_SUFFIX)
!else
HOTSPOT_BUILD_VERSION$(HOTSPOT_BUILD_VERSION) = internal
HOTSPOT_BUILD_VERSION = internal
!endif
!if "$(HOTSPOT_BUILD_VERSION)" != ""
HOTSPOT_RELEASE_VERSION="$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION)"
!if "$(HOTSPOT_RELEASE_VERSION)" != ""
HOTSPOT_RELEASE_VERSION="\\\"$(HOTSPOT_RELEASE_VERSION)\\\""
!else
HOTSPOT_RELEASE_VERSION="\\\"$(HS_MAJOR_VER).$(HS_MINOR_VER)-b$(HS_BUILD_NUMBER)-$(HOTSPOT_BUILD_VERSION)\\\""
!endif
!if "$(JRE_RELEASE_VERSION)" != ""
JRE_RELEASE_VERSION="$(JRE_RELEASE_VERSION)"
JRE_RELEASE_VERSION="\\\"$(JRE_RELEASE_VERSION)\\\""
!else
JRE_RELEASE_VERSION="$(JDK_MAJOR_VER).$(JDK_MINOR_VER).$(JDK_MICRO_VER)"
JRE_RELEASE_VERSION="\\\"$(JDK_MAJOR_VER).$(JDK_MINOR_VER).$(JDK_MICRO_VER)\\\""
!endif
# Define HOTSPOT_VM_DISTRO if HOTSPOT_VM_DISTRO is set;
# if it is not, see if we have the src/closed directory.
!if "$(HOTSPOT_VM_DISTRO)" != ""
HOTSPOT_VM_DISTRO="$(HOTSPOT_VM_DISTRO)"
HOTSPOT_VM_DISTRO=$(HOTSPOT_VM_DISTRO)
!else
!if exists($(HOTSPOTWORKSPACE)\src\closed)
HOTSPOT_VM_DISTRO="Java HotSpot(TM)"
HOTSPOT_VM_DISTRO="\\\"Java HotSpot(TM)\\\""
!else
HOTSPOT_VM_DISTRO="OpenJDK"
HOTSPOT_VM_DISTRO="\\\"OpenJDK\\\""
!endif
!endif
ProjectCreatorIDEOptions = $(ProjectCreatorIDEOptions) \
-define HOTSPOT_RELEASE_VERSION=\\\"$(HOTSPOT_RELEASE_VERSION)\\\" \
-define JRE_RELEASE_VERSION=\\\"$(JRE_RELEASE_VERSION)\\\" \
-define HOTSPOT_VM_DISTRO=\\\"$(HOTSPOT_VM_DISTRO)\\\"
ReleaseOptions = -define HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) -define JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) -define HOTSPOT_VM_DISTRO=$(HOTSPOT_VM_DISTRO)
ProjectCreatorIDEOptions = $(ProjectCreatorIDEOptions) $(ReleaseOptions)
$(HOTSPOTBUILDSPACE)/$(ProjectFile): $(HOTSPOTBUILDSPACE)/classes/ProjectCreator.class
@$(RUN_JAVA) -Djava.class.path="$(HOTSPOTBUILDSPACE)/classes" ProjectCreator WinGammaPlatform$(VcVersion) $(ProjectCreatorIDEOptions)

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -25,33 +25,8 @@
#ifndef CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP
#define CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP
#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
#include "runtime/handles.inline.hpp"
#include "asm/assembler.hpp"
inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
jint& stub_inst = *(jint*) branch;
stub_inst = patched_branch(target - branch, stub_inst, 0);
}
#ifndef PRODUCT
inline void MacroAssembler::pd_print_patched_instruction(address branch) {
jint stub_inst = *(jint*) branch;
print_instruction(stub_inst);
::tty->print("%s", " (unresolved)");
}
#endif // PRODUCT
inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }
inline int AddressLiteral::low10() const {
return Assembler::low10(value());
}
// inlines for SPARC assembler -- dmu 5/97
inline void Assembler::check_delay() {
# ifdef CHECK_DELAY
@ -76,9 +51,8 @@ inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
}
inline void Assembler::add(Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); }
inline void Assembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); }
inline void Assembler::add(Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::add(Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); }
@ -111,16 +85,9 @@ inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op
inline void Assembler::jmpl( Register s1, Register s2, Register d ) { cti(); emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
if (s2.is_register()) ldf(w, s1, s2.as_register(), d);
else ldf(w, s1, s2.as_constant(), d);
}
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) { relocate(a.rspec(offset)); ldf( w, a.base(), a.disp() + offset, d); }
inline void Assembler::ldfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
@ -152,98 +119,9 @@ inline void Assembler::ldx( Register s1, int simm13a, Register d) { v9_only();
inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void Assembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void Assembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif
inline void Assembler::ld( const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); }
else { ld( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsb(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); }
else { ldsb(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsh(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); }
else { ldsh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsw(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); }
else { ldsw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldub(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); }
else { ldub(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduh(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); }
else { lduh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduw(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); }
else { lduw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldd( const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); }
else { ldd( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldx( const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); }
else { ldx( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void Assembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void Assembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void Assembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void Assembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void Assembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void Assembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void Assembler::ld( Register s1, RegisterOrConstant s2, Register d) { ld( Address(s1, s2), d); }
inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }
// form effective addresses this way:
inline void Assembler::add(const Address& a, Register d, int offset) {
if (a.has_index()) add(a.base(), a.index(), d);
else { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
if (offset != 0) add(d, offset, d);
}
inline void Assembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) add(s1, s2.as_register(), d);
else { add(s1, s2.as_constant() + offset, d); offset = 0; }
if (offset != 0) add(d, offset, d);
}
inline void Assembler::andn(Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) andn(s1, s2.as_register(), d);
else andn(s1, s2.as_constant(), d);
}
inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::prefetch(Register s1, Register s2, PrefetchFcn f) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::prefetch(Register s1, int simm13a, PrefetchFcn f) { v9_only(); emit_data( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::prefetch(const Address& a, PrefetchFcn f, int offset) { v9_only(); relocate(a.rspec(offset)); prefetch(a.base(), a.disp() + offset, f); }
inline void Assembler::rett( Register s1, Register s2 ) { cti(); emit_long( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti(); emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); }
@ -251,20 +129,9 @@ inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rs
// pp 222
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
if (s2.is_register()) stf(w, d, s1, s2.as_register());
else stf(w, d, s1, s2.as_constant());
}
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
relocate(a.rspec(offset));
if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index() ); }
else { stf(w, d, a.base(), a.disp() + offset); }
}
inline void Assembler::stfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
@ -285,46 +152,6 @@ inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only();
inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::st( Register d, Register s1, Register s2) { stw(d, s1, s2); }
inline void Assembler::st( Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void Assembler::st( Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif
inline void Assembler::stb(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index() ); }
else { stb(d, a.base(), a.disp() + offset); }
}
inline void Assembler::sth(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index() ); }
else { sth(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stw(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index() ); }
else { stw(d, a.base(), a.disp() + offset); }
}
inline void Assembler::st( Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index() ); }
else { st( d, a.base(), a.disp() + offset); }
}
inline void Assembler::std(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index() ); }
else { std(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stx(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index() ); }
else { stx(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void Assembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void Assembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
inline void Assembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void Assembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }
// v8 p 99
inline void Assembler::stc( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); }
@ -336,561 +163,9 @@ inline void Assembler::stcsr( int crd, Register s1, int simm13a) { v8_only();
inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) sub(s1, s2.as_register(), d);
else { sub(s1, s2.as_constant() + offset, d); offset = 0; }
if (offset != 0) sub(d, offset, d);
}
// pp 231
inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::swap( Register s1, int simm13a, Register d) { v9_dep(); emit_data( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::swap( Address& a, Register d, int offset ) {
relocate(a.rspec(offset));
if (a.has_index()) { assert(offset == 0, ""); swap( a.base(), a.index(), d ); }
else { swap( a.base(), a.disp() + offset, d ); }
}
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d);
#else
Assembler::ld( s1, s2, d);
#endif
}
inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, simm13a, d);
#else
Assembler::ld( s1, simm13a, d);
#endif
}
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
ld_ptr(s1, in_bytes(simm13a), d);
}
#endif
inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d);
#else
Assembler::ld( s1, s2, d);
#endif
}
inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
Assembler::ldx(a, d, offset);
#else
Assembler::ld( a, d, offset);
#endif
}
inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2);
#else
Assembler::st( d, s1, s2);
#endif
}
inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
Assembler::stx(d, s1, simm13a);
#else
Assembler::st( d, s1, simm13a);
#endif
}
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
st_ptr(d, s1, in_bytes(simm13a));
}
#endif
inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2);
#else
Assembler::st( d, s1, s2);
#endif
}
inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
Assembler::stx(d, a, offset);
#else
Assembler::st( d, a, offset);
#endif
}
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d);
#else
Assembler::ldd(s1, s2, d);
#endif
}
inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, simm13a, d);
#else
Assembler::ldd(s1, simm13a, d);
#endif
}
inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d);
#else
Assembler::ldd(s1, s2, d);
#endif
}
inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
Assembler::ldx(a, d, offset);
#else
Assembler::ldd(a, d, offset);
#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2);
#else
Assembler::std(d, s1, s2);
#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
Assembler::stx(d, s1, simm13a);
#else
Assembler::std(d, s1, simm13a);
#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2);
#else
Assembler::std(d, s1, s2);
#endif
}
inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
Assembler::stx(d, a, offset);
#else
Assembler::std(d, a, offset);
#endif
}
// Functions for isolating 64 bit shifts for LP64
inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::sllx(s1, s2, d);
#else
Assembler::sll( s1, s2, d);
#endif
}
inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
Assembler::sllx(s1, imm6a, d);
#else
Assembler::sll( s1, imm6a, d);
#endif
}
inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::srlx(s1, s2, d);
#else
Assembler::srl( s1, s2, d);
#endif
}
inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
Assembler::srlx(s1, imm6a, d);
#else
Assembler::srl( s1, imm6a, d);
#endif
}
inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
if (s2.is_register()) sll_ptr(s1, s2.as_register(), d);
else sll_ptr(s1, s2.as_constant(), d);
}
// Use the right branch for the platform
inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
Assembler::bp(c, a, icc, p, d, rt);
else
Assembler::br(c, a, d, rt);
}
inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
br(c, a, p, target(L));
}
// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
Assembler::bp(c, a, xcc, p, d, rt);
#else
MacroAssembler::br(c, a, p, d, rt);
#endif
}
inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
brx(c, a, p, target(L));
}
inline void MacroAssembler::ba( Label& L ) {
br(always, false, pt, L);
}
// Warning: V9 only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
Assembler::bp(c, a, cc, p, d, rt);
}
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
Assembler::bp(c, a, cc, p, L);
}
inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
fbp(c, a, fcc0, p, d, rt);
else
Assembler::fb(c, a, d, rt);
}
inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
fb(c, a, p, target(L));
}
inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
Assembler::fbp(c, a, cc, p, d, rt);
}
inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
Assembler::fbp(c, a, cc, p, L);
}
inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }
inline bool MacroAssembler::is_far_target(address d) {
if (ForceUnreachable) {
// References outside the code cache should be treated as far
return d < CodeCache::low_bound() || d > CodeCache::high_bound();
}
return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
}
// Call with a check to see if we need to deal with the added
// expense of relocation and if we overflow the displacement
// of the quick call instruction.
inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
#ifdef _LP64
intptr_t disp;
// NULL is ok because it will be relocated later.
// Must change NULL to a reachable address in order to
// pass asserts here and in wdisp.
if ( d == NULL )
d = pc();
// Is this address within range of the call instruction?
// If not, use the expensive instruction sequence
if (is_far_target(d)) {
relocate(rt);
AddressLiteral dest(d);
jumpl_to(dest, O7, O7);
} else {
Assembler::call(d, rt);
}
#else
Assembler::call( d, rt );
#endif
}
inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
MacroAssembler::call( target(L), rt);
}
inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }
// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
// clobbers o7 on V8!!
// returns delta from gotten pc to addr after
inline int MacroAssembler::get_pc( Register d ) {
int x = offset();
if (VM_Version::v9_instructions_work())
rdpc(d);
else {
Label lbl;
Assembler::call(lbl, relocInfo::none); // No relocation as this is call to pc+0x8
if (d == O7) delayed()->nop();
else delayed()->mov(O7, d);
bind(lbl);
}
return offset() - x;
}
// Note: All MacroAssembler::set_foo functions are defined out-of-line.
// Loads the current PC of the following instruction as an immediate value in
// 2 instructions. All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
Unimplemented();
#else
Assembler::sethi( thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
Assembler::add(reg,thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
return thepc;
}
inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, d);
} else {
sethi(addrlit, d);
}
ld(d, addrlit.low10() + offset, d);
}
inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, d);
} else {
sethi(addrlit, d);
}
ldub(d, addrlit.low10() + offset, d);
}
inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, d);
} else {
sethi(addrlit, d);
}
ld_ptr(d, addrlit.low10() + offset, d);
}
inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, temp);
} else {
sethi(addrlit, temp);
}
st(s, temp, addrlit.low10() + offset);
}
inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, temp);
} else {
sethi(addrlit, temp);
}
st_ptr(s, temp, addrlit.low10() + offset);
}
// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
assert_not_delayed();
// Force fixed length sethi because NativeJump and NativeFarCall don't handle
// variable length instruction streams.
patchable_sethi(addrlit, temp);
jmpl(temp, addrlit.low10() + offset, d);
}
inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
jumpl_to(addrlit, temp, G0, offset);
}
inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
int ld_offset, int jmp_offset) {
assert_not_delayed();
// sethi(al); // sethi is the caller's responsibility for this one
ld_ptr(a, temp, ld_offset);
jmp(temp, jmp_offset);
}
inline void MacroAssembler::set_metadata(Metadata* obj, Register d) {
set_metadata(allocate_metadata_address(obj), d);
}
inline void MacroAssembler::set_metadata_constant(Metadata* obj, Register d) {
set_metadata(constant_metadata_address(obj), d);
}
inline void MacroAssembler::set_metadata(const AddressLiteral& obj_addr, Register d) {
assert(obj_addr.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
set(obj_addr, d);
}
inline void MacroAssembler::set_oop(jobject obj, Register d) {
set_oop(allocate_oop_address(obj), d);
}
inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
set_oop(constant_oop_address(obj), d);
}
inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
set(obj_addr, d);
}
inline void MacroAssembler::load_argument( Argument& a, Register d ) {
if (a.is_register())
mov(a.as_register(), d);
else
ld (a.as_address(), d);
}
inline void MacroAssembler::store_argument( Register s, Argument& a ) {
if (a.is_register())
mov(s, a.as_register());
else
st_ptr (s, a.as_address()); // ABI says everything is right justified.
}
inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
if (a.is_register())
mov(s, a.as_register());
else
st_ptr (s, a.as_address());
}
#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
if (a.is_float_register())
// V9 ABI uses F1, F3, F5 to pass arguments instead of O0, O1, O2
fmov(FloatRegisterImpl::S, s, a.as_float_register() );
else
// Floats are stored in the high half of the stack entry
// The low half is undefined per the ABI.
stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
}
inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
if (a.is_float_register())
// V9 ABI uses D0, D2, D4 to pass arguments instead of O0, O1, O2
fmov(FloatRegisterImpl::D, s, a.as_double_register() );
else
stf(FloatRegisterImpl::D, s, a.as_address());
}
inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
if (a.is_register())
mov(s, a.as_register());
else
stx(s, a.as_address());
}
#endif
inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
inline void MacroAssembler::clr( Register s1, Register s2) { stw( G0, s1, s2 ); }
inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }
inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
inline void MacroAssembler::clr( Register s1, int simm13a) { stw( G0, s1, simm13a); }
inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }
// Returns whether membar generates anything; obviously this code should mirror
// membar below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
if( !os::is_MP() ) return false; // Not needed on single CPU
if( VM_Version::v9_instructions_work() ) {
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
return (effective_mask != 0);
} else {
return true;
}
}
inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
// Uniprocessors do not need memory barriers
if (!os::is_MP()) return;
// Weakened for current Sparcs and TSO. See the v9 manual, sections 8.4.3,
// 8.4.4.3, a.31 and a.50.
if( VM_Version::v9_instructions_work() ) {
// Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
// of the mmask subfield of const7a that does anything that isn't done
// implicitly is StoreLoad.
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
if ( effective_mask != 0 ) {
Assembler::membar( effective_mask );
}
} else {
// stbar is the closest there is on v8. Equivalent to membar(StoreStore). We
// do not issue the stbar because to my knowledge all v8 machines implement TSO,
// which guarantees that all stores behave as if an stbar were issued just after
// each one of them. On these machines, stbar ought to be a nop. There doesn't
// appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
// it can't be specified by stbar, nor have I come up with a way to simulate it.
//
// Addendum. Dave says that ldstub guarantees a write buffer flush to coherent
// space. Put one here to be on the safe side.
Assembler::ldstub(SP, 0, G0);
}
}
#endif // CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP


@ -31,9 +31,4 @@ private:
public:
void flush_bundle(bool start_new_bundle) {}
// Heuristic for pre-packing the pt/pn bit of a predicted branch.
bool is_backward_branch(Label& L) {
return L.is_bound() && insts_end() <= locator_address(L.loc());
}
#endif // CPU_SPARC_VM_CODEBUFFER_SPARC_HPP


@ -582,7 +582,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// the following temporary registers are used during frame creation
const Register Gtmp1 = G3_scratch ;
const Register Gtmp2 = G1_scratch;
const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
const Register RconstMethod = Gtmp1;
const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
bool inc_counter = UseCompiler || CountCompiledCalls;
@ -618,6 +620,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
}
#endif // ASSERT
__ ld_ptr(constMethod, RconstMethod);
__ lduh(size_of_parameters, Gtmp1);
__ sll(Gtmp1, LogBytesPerWord, Gtmp2); // parameter size in bytes
__ add(Gargs, Gtmp2, Gargs); // points to first local + BytesPerWord
@ -1047,8 +1050,6 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
const Register Gtmp = G3_scratch;
const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
const Address size_of_locals (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
// slop factor is two extra slots on the expression stack so that
// we always have room to store a result when returning from a call without parameters
@ -1066,6 +1067,9 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
// Now compute new frame size
if (native) {
const Register RconstMethod = Gtmp;
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
__ ld_ptr(constMethod, RconstMethod);
__ lduh( size_of_parameters, Gtmp );
__ calc_mem_param_words(Gtmp, Gtmp); // space for native call parameters passed on the stack in words
} else {
@ -1236,9 +1240,13 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
}
if (init_value != noreg) {
Label clear_loop;
const Register RconstMethod = O1;
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address size_of_locals (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
// NOTE: If you change the frame layout, this code will need to
// be updated!
__ ld_ptr( constMethod, RconstMethod );
__ lduh( size_of_locals, O2 );
__ lduh( size_of_parameters, O1 );
__ sll( O2, LogBytesPerWord, O2);
@ -1483,13 +1491,16 @@ void CppInterpreterGenerator::adjust_callers_stack(Register args) {
//
// assert_different_registers(state, prev_state);
const Register Gtmp = G3_scratch;
const Register RconstMethod = G3_scratch;
const Register tmp = O2;
const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
const Address size_of_locals (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address size_of_locals (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
__ ld_ptr(constMethod, RconstMethod);
__ lduh(size_of_parameters, tmp);
__ sll(tmp, LogBytesPerWord, Gtmp); // parameter size in bytes
__ add(args, Gtmp, Gargs); // points to first local + BytesPerWord
__ sll(tmp, LogBytesPerWord, Gargs); // parameter size in bytes
__ add(args, Gargs, Gargs); // points to first local + BytesPerWord
// NEW
__ add(Gargs, -wordSize, Gargs); // points to first local[0]
// determine extra space for non-argument locals & adjust caller's SP
@ -1541,8 +1552,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
const Address size_of_locals (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
address entry_point = __ pc();
__ mov(G0, prevState); // no current activation
@ -1750,7 +1759,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ ld_ptr(STATE(_result._to_call._callee), L4_scratch); // called method
__ ld_ptr(STATE(_stack), L1_scratch); // get top of java expr stack
__ lduh(L4_scratch, in_bytes(Method::size_of_parameters_offset()), L2_scratch); // get parameter size
// get parameter size
__ ld_ptr(L4_scratch, in_bytes(Method::const_offset()), L2_scratch);
__ lduh(L2_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), L2_scratch);
__ sll(L2_scratch, LogBytesPerWord, L2_scratch ); // parameter size in bytes
__ add(L1_scratch, L2_scratch, L1_scratch); // stack destination for result
__ ld(L4_scratch, in_bytes(Method::result_index_offset()), L3_scratch); // called method result type index
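The hunks above show the pattern this commit repeats on every platform and in both interpreters: the u2 parameter and locals counts moved from Method into ConstMethod, so each reader now loads Method::_constMethod first and the count second. A minimal C++ sketch of the implied layout (simplified names, not the actual HotSpot declarations):
  struct ConstMethod {
    uint16_t _size_of_parameters;  // u2, read with lduh / load_unsigned_short
    uint16_t _size_of_locals;
  };
  struct Method {
    ConstMethod* _constMethod;     // reached via Method::const_offset()
    // the counts no longer live here
  };
  // One load becomes two:
  //   before: lduh   [G5_method + Method::size_of_parameters_offset()], Gtmp
  //   after:  ld_ptr [G5_method + Method::const_offset()], RconstMethod
  //           lduh   [RconstMethod + ConstMethod::size_of_parameters_offset()], Gtmp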

View File

@ -204,25 +204,6 @@
intptr_t* out_register_addr(Register reg) const {
return younger_sp_addr_at(reg->after_save()->sp_offset_in_saved_window());
}
intptr_t* memory_param_addr(int param_ix, bool is_in) const {
int offset = callee_register_argument_save_area_sp_offset + param_ix;
if (is_in)
return fp_addr_at(offset);
else
return sp_addr_at(offset);
}
intptr_t* param_addr(int param_ix, bool is_in) const {
if (param_ix >= callee_register_argument_save_area_words)
return memory_param_addr(param_ix, is_in);
else if (is_in)
return register_addr(Argument(param_ix, true).as_register());
else {
// the registers are stored in the next younger frame
// %%% is this really necessary?
ShouldNotReachHere();
return NULL;
}
}
// Interpreter frames
@ -269,12 +250,8 @@
#ifndef CC_INTERP
// where Lmonitors is saved:
BasicObjectLock** interpreter_frame_monitors_addr() const {
return (BasicObjectLock**) sp_addr_at(Lmonitors->sp_offset_in_saved_window());
}
intptr_t** interpreter_frame_esp_addr() const {
return (intptr_t**)sp_addr_at(Lesp->sp_offset_in_saved_window());
}
inline BasicObjectLock** interpreter_frame_monitors_addr() const;
inline intptr_t** interpreter_frame_esp_addr() const;
inline void interpreter_frame_set_tos_address(intptr_t* x);

View File

@ -25,6 +25,8 @@
#ifndef CPU_SPARC_VM_FRAME_SPARC_INLINE_HPP
#define CPU_SPARC_VM_FRAME_SPARC_INLINE_HPP
#include "asm/macroAssembler.hpp"
// Inline functions for SPARC frames:
// Constructors
@ -185,6 +187,13 @@ inline intptr_t* frame::interpreter_frame_tos_address() const {
return *interpreter_frame_esp_addr() + 1;
}
inline BasicObjectLock** frame::interpreter_frame_monitors_addr() const {
return (BasicObjectLock**) sp_addr_at(Lmonitors->sp_offset_in_saved_window());
}
inline intptr_t** frame::interpreter_frame_esp_addr() const {
return (intptr_t**)sp_addr_at(Lesp->sp_offset_in_saved_window());
}
inline void frame::interpreter_frame_set_tos_address( intptr_t* x ) {
*interpreter_frame_esp_addr() = x - 1;
}
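This file picks up the bodies that frame_sparc.hpp now only declares, the other recurring refactoring in this commit: definitions that name interpreter registers move into the *.inline.hpp, behind the new asm/macroAssembler.hpp include where Lmonitors and Lesp are visible. The split, sketched under that assumption:
  // frame_sparc.hpp: declaration only, no register names needed
  inline intptr_t** interpreter_frame_esp_addr() const;

  // frame_sparc.inline.hpp: definition, where Lesp is in scope
  inline intptr_t** frame::interpreter_frame_esp_addr() const {
    return (intptr_t**) sp_addr_at(Lesp->sp_offset_in_saved_window());
  }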

View File

@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "runtime/icache.hpp"
#define __ _masm->

View File

@ -25,7 +25,7 @@
#ifndef CPU_SPARC_VM_INTERP_MASM_SPARC_HPP
#define CPU_SPARC_VM_INTERP_MASM_SPARC_HPP
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/invocationCounter.hpp"
// This file specializes the assembler with interpreter-specific macros

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,765 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
#define CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }
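// simm13 is the signed 13-bit immediate field of SPARC load/store and ALU
// instructions, i.e. the range [-4096, 4095]; the check above validates the
// folded disp()+offset, so callers can add small offsets without re-checking.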
inline int AddressLiteral::low10() const {
return Assembler::low10(value());
}
inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
jint& stub_inst = *(jint*) branch;
stub_inst = patched_branch(target - branch, stub_inst, 0);
}
#ifndef PRODUCT
inline void MacroAssembler::pd_print_patched_instruction(address branch) {
jint stub_inst = *(jint*) branch;
print_instruction(stub_inst);
::tty->print("%s", " (unresolved)");
}
#endif // PRODUCT
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d);
#else
ld( s1, s2, d);
#endif
}
inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, simm13a, d);
#else
ld( s1, simm13a, d);
#endif
}
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
ld_ptr(s1, in_bytes(simm13a), d);
}
#endif
inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
ldx(s1, s2, d);
#else
ld( s1, s2, d);
#endif
}
inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
ldx(a, d, offset);
#else
ld( a, d, offset);
#endif
}
inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2);
#else
st( d, s1, s2);
#endif
}
inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
Assembler::stx(d, s1, simm13a);
#else
st( d, s1, simm13a);
#endif
}
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
st_ptr(d, s1, in_bytes(simm13a));
}
#endif
inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
stx(d, s1, s2);
#else
st( d, s1, s2);
#endif
}
inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
stx(d, a, offset);
#else
st( d, a, offset);
#endif
}
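// Usage sketch for the *_ptr accessors above: a single call such as
//   ld_ptr(G5_method, in_bytes(Method::const_offset()), G3_scratch);
// assembles to one ldx on an LP64 build and to ld (lduw) on a 32-bit build,
// so callers never spell out the pointer width themselves.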
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d);
#else
Assembler::ldd(s1, s2, d);
#endif
}
inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, simm13a, d);
#else
Assembler::ldd(s1, simm13a, d);
#endif
}
inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
ldx(s1, s2, d);
#else
ldd(s1, s2, d);
#endif
}
inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
ldx(a, d, offset);
#else
ldd(a, d, offset);
#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2);
#else
Assembler::std(d, s1, s2);
#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
Assembler::stx(d, s1, simm13a);
#else
Assembler::std(d, s1, simm13a);
#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
stx(d, s1, s2);
#else
std(d, s1, s2);
#endif
}
inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
stx(d, a, offset);
#else
std(d, a, offset);
#endif
}
// Functions for isolating 64 bit shifts for LP64
inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::sllx(s1, s2, d);
#else
Assembler::sll( s1, s2, d);
#endif
}
inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
Assembler::sllx(s1, imm6a, d);
#else
Assembler::sll( s1, imm6a, d);
#endif
}
inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::srlx(s1, s2, d);
#else
Assembler::srl( s1, s2, d);
#endif
}
inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
Assembler::srlx(s1, imm6a, d);
#else
Assembler::srl( s1, imm6a, d);
#endif
}
inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
if (s2.is_register()) sll_ptr(s1, s2.as_register(), d);
else sll_ptr(s1, s2.as_constant(), d);
}
// Use the right branch for the platform
inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
Assembler::bp(c, a, icc, p, d, rt);
else
Assembler::br(c, a, d, rt);
}
inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
br(c, a, p, target(L));
}
// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
Assembler::bp(c, a, xcc, p, d, rt);
#else
MacroAssembler::br(c, a, p, d, rt);
#endif
}
inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
brx(c, a, p, target(L));
}
inline void MacroAssembler::ba( Label& L ) {
br(always, false, pt, L);
}
// Warning: V9 only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
Assembler::bp(c, a, cc, p, d, rt);
}
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
Assembler::bp(c, a, cc, p, L);
}
inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
fbp(c, a, fcc0, p, d, rt);
else
Assembler::fb(c, a, d, rt);
}
inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
fb(c, a, p, target(L));
}
inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
Assembler::fbp(c, a, cc, p, d, rt);
}
inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
Assembler::fbp(c, a, cc, p, L);
}
inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }
inline bool MacroAssembler::is_far_target(address d) {
if (ForceUnreachable) {
// References outside the code cache should be treated as far
return d < CodeCache::low_bound() || d > CodeCache::high_bound();
}
return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
}
// Call with a check to see if we need to deal with the added
// expense of relocation and if we overflow the displacement
// of the quick call instruction.
inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
#ifdef _LP64
intptr_t disp;
// NULL is ok because it will be relocated later.
// Must change NULL to a reachable address in order to
// pass asserts here and in wdisp.
if ( d == NULL )
d = pc();
// Is this address within range of the call instruction?
// If not, use the expensive instruction sequence
if (is_far_target(d)) {
relocate(rt);
AddressLiteral dest(d);
jumpl_to(dest, O7, O7);
} else {
Assembler::call(d, rt);
}
#else
Assembler::call( d, rt );
#endif
}
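// Range note: wdisp30 is a 30-bit word displacement, so a plain call reaches
// roughly +/-2GB from pc. is_far_target() above only answers true when the
// code cache bounds fall outside that window, and then the call quietly
// expands into the patchable sethi+jmpl pair.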
inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
MacroAssembler::call( target(L), rt);
}
inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }
// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
// clobbers o7 on V8!!
// returns delta from gotten pc to addr after
inline int MacroAssembler::get_pc( Register d ) {
int x = offset();
if (VM_Version::v9_instructions_work())
rdpc(d);
else {
Label lbl;
Assembler::call(lbl, relocInfo::none); // No relocation as this is call to pc+0x8
if (d == O7) delayed()->nop();
else delayed()->mov(O7, d);
bind(lbl);
}
return offset() - x;
}
// Note: All MacroAssembler::set_foo functions are defined out-of-line.
// Loads the current PC of the following instruction as an immediate value in
// 2 instructions. All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
Unimplemented();
#else
Assembler::sethi( thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
add(reg, thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
return thepc;
}
inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, d);
} else {
sethi(addrlit, d);
}
ld(d, addrlit.low10() + offset, d);
}
inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, d);
} else {
sethi(addrlit, d);
}
ldub(d, addrlit.low10() + offset, d);
}
inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, d);
} else {
sethi(addrlit, d);
}
ld_ptr(d, addrlit.low10() + offset, d);
}
inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, temp);
} else {
sethi(addrlit, temp);
}
st(s, temp, addrlit.low10() + offset);
}
inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, temp);
} else {
sethi(addrlit, temp);
}
st_ptr(s, temp, addrlit.low10() + offset);
}
// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
assert_not_delayed();
// Force fixed length sethi because NativeJump and NativeFarCall don't handle
// variable length instruction streams.
patchable_sethi(addrlit, temp);
jmpl(temp, addrlit.low10() + offset, d);
}
inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
jumpl_to(addrlit, temp, G0, offset);
}
inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
int ld_offset, int jmp_offset) {
assert_not_delayed();
//sethi(al); // sethi is caller responsibility for this one
ld_ptr(a, temp, ld_offset);
jmp(temp, jmp_offset);
}
inline void MacroAssembler::set_metadata(Metadata* obj, Register d) {
set_metadata(allocate_metadata_address(obj), d);
}
inline void MacroAssembler::set_metadata_constant(Metadata* obj, Register d) {
set_metadata(constant_metadata_address(obj), d);
}
inline void MacroAssembler::set_metadata(const AddressLiteral& obj_addr, Register d) {
assert(obj_addr.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
set(obj_addr, d);
}
inline void MacroAssembler::set_oop(jobject obj, Register d) {
set_oop(allocate_oop_address(obj), d);
}
inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
set_oop(constant_oop_address(obj), d);
}
inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
set(obj_addr, d);
}
inline void MacroAssembler::load_argument( Argument& a, Register d ) {
if (a.is_register())
mov(a.as_register(), d);
else
ld (a.as_address(), d);
}
inline void MacroAssembler::store_argument( Register s, Argument& a ) {
if (a.is_register())
mov(s, a.as_register());
else
st_ptr (s, a.as_address()); // ABI says everything is right justified.
}
inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
if (a.is_register())
mov(s, a.as_register());
else
st_ptr (s, a.as_address());
}
#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
if (a.is_float_register())
// In the V9 ABI, F1, F3, F5 are used to pass float arguments instead of O0, O1, O2
fmov(FloatRegisterImpl::S, s, a.as_float_register() );
else
// Floats are stored in the high half of the stack entry
// The low half is undefined per the ABI.
stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
}
inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
if (a.is_float_register())
// In the V9 ABI, D0, D2, D4 are used to pass double arguments instead of O0, O1, O2
fmov(FloatRegisterImpl::D, s, a.as_double_register() );
else
stf(FloatRegisterImpl::D, s, a.as_address());
}
inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
if (a.is_register())
mov(s, a.as_register());
else
stx(s, a.as_address());
}
#endif
inline void MacroAssembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype) {
relocate(rtype);
add(s1, simm13a, d);
}
inline void MacroAssembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec) {
relocate(rspec);
add(s1, simm13a, d);
}
// form effective addresses this way:
inline void MacroAssembler::add(const Address& a, Register d, int offset) {
if (a.has_index()) add(a.base(), a.index(), d);
else { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
if (offset != 0) add(d, offset, d);
}
inline void MacroAssembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) add(s1, s2.as_register(), d);
else { add(s1, s2.as_constant() + offset, d); offset = 0; }
if (offset != 0) add(d, offset, d);
}
inline void MacroAssembler::andn(Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) andn(s1, s2.as_register(), d);
else andn(s1, s2.as_constant(), d);
}
inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
inline void MacroAssembler::clr( Register s1, Register s2) { stw( G0, s1, s2 ); }
inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }
inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
inline void MacroAssembler::clr( Register s1, int simm13a) { stw( G0, s1, simm13a); }
inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }
#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif
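// Consequence of the signed flavor: on LP64 an ld of 0xFFFFFFFF leaves -1
// (sign-extended) in the 64-bit register, matching what the same code sees
// on a 32-bit build where no upper half exists.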
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif
inline void MacroAssembler::ld( const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); }
else { ld( a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsb(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); }
else { ldsb(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsh(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); }
else { ldsh(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsw(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); }
else { ldsw(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldub(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); }
else { ldub(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::lduh(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); }
else { lduh(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::lduw(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); }
else { lduw(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldd( const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); }
else { ldd( a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldx( const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); }
else { ldx( a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void MacroAssembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void MacroAssembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void MacroAssembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void MacroAssembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void MacroAssembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void MacroAssembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void MacroAssembler::ld( Register s1, RegisterOrConstant s2, Register d) { ld( Address(s1, s2), d); }
inline void MacroAssembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }
inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
if (s2.is_register()) ldf(w, s1, s2.as_register(), d);
else ldf(w, s1, s2.as_constant(), d);
}
inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) {
relocate(a.rspec(offset));
ldf(w, a.base(), a.disp() + offset, d);
}
// returns if membar generates anything, obviously this code should mirror
// membar below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
if( !os::is_MP() ) return false; // Not needed on single CPU
if( VM_Version::v9_instructions_work() ) {
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
return (effective_mask != 0);
} else {
return true;
}
}
inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
// Uniprocessors do not need memory barriers
if (!os::is_MP()) return;
// Weakened for current Sparcs and TSO. See the v9 manual, sections 8.4.3,
// 8.4.4.3, a.31 and a.50.
if( VM_Version::v9_instructions_work() ) {
// Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
// of the mmask subfield of const7a that does anything that isn't done
// implicitly is StoreLoad.
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
if ( effective_mask != 0 ) {
Assembler::membar( effective_mask );
}
} else {
// stbar is the closest there is on v8. Equivalent to membar(StoreStore). We
// do not issue the stbar because to my knowledge all v8 machines implement TSO,
// which guarantees that all stores behave as if an stbar were issued just after
// each one of them. On these machines, stbar ought to be a nop. There doesn't
// appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
// it can't be specified by stbar, nor have I come up with a way to simulate it.
//
// Addendum. Dave says that ldstub guarantees a write buffer flush to coherent
// space. Put one here to be on the safe side.
Assembler::ldstub(SP, 0, G0);
}
}
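// Worked example of the masking above, assuming the Membar_mask_bits bit
// values from assembler_sparc.hpp:
//   membar(Membar_mask_bits(LoadLoad | StoreStore)); // mask leaves 0: no code
//   membar(Membar_mask_bits(StoreLoad));             // survives: one membar
// Under TSO only an explicit StoreLoad ever costs an instruction.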
inline void MacroAssembler::prefetch(const Address& a, PrefetchFcn f, int offset) {
relocate(a.rspec(offset));
assert(!a.has_index(), "");
prefetch(a.base(), a.disp() + offset, f);
}
inline void MacroAssembler::st(Register d, Register s1, Register s2) { stw(d, s1, s2); }
inline void MacroAssembler::st(Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st(Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif
inline void MacroAssembler::st(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index() ); }
else { st( d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stb(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index() ); }
else { stb(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::sth(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index() ); }
else { sth(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stw(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index() ); }
else { stw(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::std(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index() ); }
else { std(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stx(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index() ); }
else { stx(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void MacroAssembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void MacroAssembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
inline void MacroAssembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void MacroAssembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void MacroAssembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }
inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
if (s2.is_register()) stf(w, d, s1, s2.as_register());
else stf(w, d, s1, s2.as_constant());
}
inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
relocate(a.rspec(offset));
if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index() ); }
else { stf(w, d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) sub(s1, s2.as_register(), d);
else { sub(s1, s2.as_constant() + offset, d); offset = 0; }
if (offset != 0) sub(d, offset, d);
}
inline void MacroAssembler::swap(Address& a, Register d, int offset) {
relocate(a.rspec(offset));
if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d ); }
else { swap(a.base(), a.disp() + offset, d); }
}
#endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP

View File

@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"
@ -170,7 +171,8 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
__ load_sized_value(Address(method_temp, Method::size_of_parameters_offset()),
__ ld_ptr(method_temp, in_bytes(Method::const_offset()), temp2);
__ load_sized_value(Address(temp2, ConstMethod::size_of_parameters_offset()),
temp2,
sizeof(u2), /*is_signed*/ false);
// assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
@ -232,7 +234,8 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
__ load_sized_value(Address(G5_method, Method::size_of_parameters_offset()),
__ ld_ptr(G5_method, in_bytes(Method::const_offset()), O4_param_size);
__ load_sized_value(Address(O4_param_size, ConstMethod::size_of_parameters_offset()),
O4_param_size,
sizeof(u2), /*is_signed*/ false);
// assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/oop.inline.hpp"

View File

@ -25,7 +25,7 @@
#ifndef CPU_SPARC_VM_NATIVEINST_SPARC_HPP
#define CPU_SPARC_VM_NATIVEINST_SPARC_HPP
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
@ -194,11 +194,10 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
static int inv_simm( int x, int nbits ) { return Assembler::inv_simm(x, nbits); }
static intptr_t inv_wdisp( int x, int nbits ) { return Assembler::inv_wdisp( x, 0, nbits); }
static intptr_t inv_wdisp16( int x ) { return Assembler::inv_wdisp16(x, 0); }
static int branch_destination_offset(int x) { return Assembler::branch_destination(x, 0); }
static int branch_destination_offset(int x) { return MacroAssembler::branch_destination(x, 0); }
static int patch_branch_destination_offset(int dest_offset, int x) {
return Assembler::patched_branch(dest_offset, x, 0);
return MacroAssembler::patched_branch(dest_offset, x, 0);
}
void set_annul_bit() { set_long_at(0, long_at(0) | Assembler::annul(true)); }
// utility for checking if x is either of 2 small constants
static bool is_either(int x, int k1, int k2) {
@ -889,7 +888,6 @@ class NativeGeneralJump: public NativeInstruction {
int patched_instr = patch_branch_destination_offset(dest - addr_at(0), long_at(0));
set_long_at(0, patched_instr);
}
void set_annul() { set_annul_bit(); }
NativeInstruction *delay_slot_instr() { return nativeInstruction_at(addr_at(4));}
void fill_delay_slot(int instr) { set_long_at(4, instr);}
Assembler::Condition condition() {

View File

@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/assembler.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/oop.inline.hpp"

View File

@ -24,8 +24,7 @@
#include "precompiled.hpp"
#ifdef COMPILER2
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"

View File

@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"

View File

@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/instanceOop.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
@ -494,9 +494,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// (gri - 2/25/2000)
const Address size_of_parameters(G5_method, Method::size_of_parameters_offset());
const Address size_of_locals (G5_method, Method::size_of_locals_offset());
const Address constMethod (G5_method, Method::const_offset());
int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
const int extra_space =
@ -506,11 +503,15 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
(native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);
const Register Glocals_size = G3;
const Register RconstMethod = Glocals_size;
const Register Otmp1 = O3;
const Register Otmp2 = O4;
// Lscratch can't be used as a temporary because the call_stub uses
// it to assert that the stack frame was setup correctly.
const Address constMethod (G5_method, Method::const_offset());
const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
__ ld_ptr( constMethod, RconstMethod );
__ lduh( size_of_parameters, Glocals_size);
// Gargs points to first local + BytesPerWord
@ -530,6 +531,8 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
//
// Compute number of locals in method apart from incoming parameters
//
const Address size_of_locals (Otmp1, ConstMethod::size_of_locals_offset());
__ ld_ptr( constMethod, Otmp1 );
__ lduh( size_of_locals, Otmp1 );
__ sub( Otmp1, Glocals_size, Glocals_size );
__ round_to( Glocals_size, WordsPerLong );
@ -1256,8 +1259,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// make sure registers are different!
assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
const Address size_of_parameters(G5_method, Method::size_of_parameters_offset());
const Address size_of_locals (G5_method, Method::size_of_locals_offset());
const Address constMethod (G5_method, Method::const_offset());
// Seems like G5_method is live at the point this is used. So we could make this look consistent
// and use in the asserts.
const Address access_flags (Lmethod, Method::access_flags_offset());
@ -1307,8 +1309,13 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
init_value = G0;
Label clear_loop;
const Register RconstMethod = O1;
const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
const Address size_of_locals (RconstMethod, ConstMethod::size_of_locals_offset());
// NOTE: If you change the frame layout, this code will need to
// be updated!
__ ld_ptr( constMethod, RconstMethod );
__ lduh( size_of_locals, O2 );
__ lduh( size_of_parameters, O1 );
__ sll( O2, Interpreter::logStackElementSize, O2);
@ -1823,9 +1830,13 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
const Register Gtmp1 = G3_scratch;
const Register Gtmp2 = G1_scratch;
const Register RconstMethod = Gtmp1;
const Address constMethod(Lmethod, Method::const_offset());
const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
// Compute size of arguments for saving when returning to deoptimized caller
__ lduh(Lmethod, in_bytes(Method::size_of_parameters_offset()), Gtmp1);
__ ld_ptr(constMethod, RconstMethod);
__ lduh(size_of_parameters, Gtmp1);
__ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
__ sub(Llocals, Gtmp1, Gtmp2);
__ add(Gtmp2, wordSize, Gtmp2);
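For concreteness, with p the parameter count just loaded through ConstMethod, the hunk computes (a sketch of the arithmetic, not code from the patch):
  Gtmp1 = p << Interpreter::logStackElementSize   // argument block size in bytes
  Gtmp2 = Llocals - Gtmp1 + wordSize              // base of the p slots to preserve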

View File

@ -3040,7 +3040,8 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
Register Rtemp = G4_scratch;
// Load receiver from stack slot
__ lduh(G5_method, in_bytes(Method::size_of_parameters_offset()), G4_scratch);
__ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch);
__ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch);
__ load_receiver(G4_scratch, O0);
// receiver NULL check

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/stubCodeGenerator.hpp"

View File

@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "code/vmreg.hpp"

View File

@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_sparc.hpp"
#include "memory/resourceArea.hpp"

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -28,48 +28,6 @@
#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
#include "runtime/handles.inline.hpp"
inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
unsigned char op = branch[0];
assert(op == 0xE8 /* call */ ||
op == 0xE9 /* jmp */ ||
op == 0xEB /* short jmp */ ||
(op & 0xF0) == 0x70 /* short jcc */ ||
op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
"Invalid opcode at patch point");
if (op == 0xEB || (op & 0xF0) == 0x70) {
// short offset operators (jmp and jcc)
char* disp = (char*) &branch[1];
int imm8 = target - (address) &disp[1];
guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
*disp = imm8;
} else {
int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
int imm32 = target - (address) &disp[1];
*disp = imm32;
}
}
#ifndef PRODUCT
inline void MacroAssembler::pd_print_patched_instruction(address branch) {
const char* s;
unsigned char op = branch[0];
if (op == 0xE8) {
s = "call";
} else if (op == 0xE9 || op == 0xEB) {
s = "jmp";
} else if ((op & 0xF0) == 0x70) {
s = "jcc";
} else if (op == 0x0F) {
s = "jcc";
} else {
s = "????";
}
tty->print("%s (unresolved)", s);
}
#endif // ndef PRODUCT
#ifndef _LP64
inline int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { return reg_enc; }
@ -87,12 +45,6 @@ inline void Assembler::prefixq(Address adr, Register reg) {}
inline void Assembler::prefix(Address adr, XMMRegister reg) {}
inline void Assembler::prefixq(Address adr, XMMRegister reg) {}
#else
inline void Assembler::emit_long64(jlong x) {
*(jlong*) _code_pos = x;
_code_pos += sizeof(jlong);
code_section()->set_end(_code_pos);
}
#endif // _LP64
#endif // CPU_X86_VM_ASSEMBLER_X86_INLINE_HPP
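The deletions above are the x86 half of the Assembler/MacroAssembler split that the new SPARC inline file earlier in this commit shows from the other side: patching helpers now belong to MacroAssembler. The resulting layering, sketched from the includes rather than quoted from the patch:
  asm/assembler.hpp              // Assembler: raw instruction emission only
  asm/macroAssembler.hpp         // MacroAssembler: pd_patch_instruction() etc.
  asm/macroAssembler.inline.hpp  // the inline bodies deleted from this file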

View File

@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/cppInterpreter.hpp"
#include "interpreter/interpreter.hpp"
@ -611,8 +611,6 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// C++ interpreter only
// rsi/r13 - previous interpreter state pointer
const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
// InterpreterRuntime::frequency_counter_overflow takes one argument
// indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
// The call returns the address of the verified entry point for the method or NULL
@ -977,15 +975,16 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// to save/restore.
address entry_point = __ pc();
const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
const Address size_of_locals (rbx, Method::size_of_locals_offset());
const Address constMethod (rbx, Method::const_offset());
const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
const Address access_flags (rbx, Method::access_flags_offset());
const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
// rsi/r13 == state/locals rdi == prevstate
const Register locals = rdi;
// get parameter size (always needed)
__ movptr(rcx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
// rbx: Method*
@ -994,6 +993,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// for natives the size of locals is zero
// compute beginning of parameters /locals
__ lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize));
// initialize fixed part of activation frame
@ -1107,11 +1107,14 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
const Register method = rbx;
const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);
const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); // rcx|rscratch1
const Address constMethod (method, Method::const_offset());
const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset());
// allocate space for parameters
__ movptr(method, STATE(_method));
__ verify_method_ptr(method);
__ load_unsigned_short(t, Address(method, Method::size_of_parameters_offset()));
__ movptr(t, constMethod);
__ load_unsigned_short(t, size_of_parameters);
__ shll(t, 2);
#ifdef _LP64
__ subptr(rsp, t);
@ -1700,15 +1703,17 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// save sender sp
__ push(rcx);
const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
const Address size_of_locals (rbx, Method::size_of_locals_offset());
const Address constMethod (rbx, Method::const_offset());
const Address access_flags (rbx, Method::access_flags_offset());
const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
const Address size_of_locals (rdx, ConstMethod::size_of_locals_offset());
// const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
// const Address monitor_block_bot (rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
// const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
// get parameter size (always needed)
__ movptr(rdx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
// rbx: Method*
@ -1989,7 +1994,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ movptr(rbx, STATE(_result._to_call._callee));
// callee left args on top of expression stack, remove them
__ load_unsigned_short(rcx, Address(rbx, Method::size_of_parameters_offset()));
__ movptr(rcx, constMethod);
__ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
__ lea(rsp, Address(rsp, rcx, Address::times_ptr));
__ movl(rcx, Address(rbx, Method::result_index_offset()));
@ -2159,7 +2166,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// Make it look like call_stub calling conventions
// Get (potential) receiver
__ load_unsigned_short(rcx, size_of_parameters); // get size of parameters in words
// get size of parameters in words
__ movptr(rcx, constMethod);
__ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
ExternalAddress recursive(CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
__ pushptr(recursive.addr()); // make it look good in the debugger

View File

@ -25,6 +25,8 @@
#ifndef CPU_X86_VM_FRAME_X86_INLINE_HPP
#define CPU_X86_VM_FRAME_X86_INLINE_HPP
#include "code/codeCache.hpp"
// Inline functions for Intel frames:
// Constructors:

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "runtime/icache.hpp"
#define __ _masm->

View File

@ -25,8 +25,10 @@
#ifndef CPU_X86_VM_INTERP_MASM_X86_32_HPP
#define CPU_X86_VM_INTERP_MASM_X86_32_HPP
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/invocationCounter.hpp"
#include "runtime/frame.hpp"
// This file specializes the assembler with interpreter-specific macros

View File

@ -25,8 +25,10 @@
#ifndef CPU_X86_VM_INTERP_MASM_X86_64_HPP
#define CPU_X86_VM_INTERP_MASM_X86_64_HPP
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/invocationCounter.hpp"
#include "runtime/frame.hpp"
// This file specializes the assembler with interpreter-specific macros

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:

View File

@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
@ -168,8 +169,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
__ movptr(temp2, Address(method_temp, Method::const_offset()));
__ load_sized_value(temp2,
Address(method_temp, Method::size_of_parameters_offset()),
Address(temp2, ConstMethod::size_of_parameters_offset()),
sizeof(u2), /*is_signed*/ false);
// assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
Label L;
@ -233,8 +235,9 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
__ movptr(rdx_argp, Address(rbx_method, Method::const_offset()));
__ load_sized_value(rdx_argp,
Address(rbx_method, Method::size_of_parameters_offset()),
Address(rdx_argp, ConstMethod::size_of_parameters_offset()),
sizeof(u2), /*is_signed*/ false);
// assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
rdx_first_arg_addr = __ argument_address(rdx_argp, -1);

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"

View File

@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"

View File

@ -24,12 +24,11 @@
#include "precompiled.hpp"
#ifdef COMPILER2
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "opto/runtime.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"

View File

@ -24,12 +24,11 @@
#include "precompiled.hpp"
#ifdef COMPILER2
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "opto/runtime.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
@ -424,8 +424,6 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// C++ interpreter only
// rsi - previous interpreter state pointer
const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
// InterpreterRuntime::frequency_counter_overflow takes one argument
// indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
// The call returns the address of the verified entry point for the method or NULL
@ -868,12 +866,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// rsi: previous interpreter state (C++ interpreter) must preserve
address entry_point = __ pc();
const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
const Address constMethod (rbx, Method::const_offset());
const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
const Address access_flags (rbx, Method::access_flags_offset());
const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
// get parameter size (always needed)
__ movptr(rcx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
// native calls don't need the stack size check since they have no expression stack
@ -988,7 +987,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// allocate space for parameters
__ get_method(method);
__ load_unsigned_short(t, Address(method, Method::size_of_parameters_offset()));
__ movptr(t, Address(method, Method::const_offset()));
__ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
__ shlptr(t, Interpreter::logStackElementSize);
__ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
__ subptr(rsp, t);
@ -1297,13 +1298,14 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// rsi: sender sp
address entry_point = __ pc();
const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
const Address size_of_locals (rbx, Method::size_of_locals_offset());
const Address constMethod (rbx, Method::const_offset());
const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
const Address access_flags (rbx, Method::access_flags_offset());
const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
const Address size_of_locals (rdx, ConstMethod::size_of_locals_offset());
// get parameter size (always needed)
__ movptr(rdx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
// rbx,: Method*
@ -1734,7 +1736,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// Compute size of arguments for saving when returning to deoptimized caller
__ get_method(rax);
__ load_unsigned_short(rax, Address(rax, in_bytes(Method::size_of_parameters_offset())));
__ movptr(rax, Address(rax, Method::const_offset()));
__ load_unsigned_short(rax, Address(rax, ConstMethod::size_of_parameters_offset()));
__ shlptr(rax, Interpreter::logStackElementSize);
__ restore_locals();
__ subptr(rdi, rax);
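The x86 deopt path mirrors the SPARC arithmetic above: rax becomes the argument block size in bytes, and subtracting it from the restored locals pointer finds the block to preserve. As a sketch:
  rax = size_of_parameters << Interpreter::logStackElementSize
  rdi = rdi - rax   // base of the caller's argument slots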

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
@ -369,9 +369,6 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// Everything as it was on entry
// rdx is not restored. Doesn't appear to really be set.
const Address size_of_parameters(rbx,
Method::size_of_parameters_offset());
// InterpreterRuntime::frequency_counter_overflow takes two
// arguments, the first (thread) is passed by call_VM, the second
// indicates if the counter overflow occurs at a backwards branch
@ -844,14 +841,17 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
address entry_point = __ pc();
const Address size_of_parameters(rbx, Method::
size_of_parameters_offset());
const Address constMethod (rbx, Method::const_offset());
const Address invocation_counter(rbx, Method::
invocation_counter_offset() +
InvocationCounter::counter_offset());
const Address access_flags (rbx, Method::access_flags_offset());
const Address size_of_parameters(rcx, ConstMethod::
size_of_parameters_offset());
// get parameter size (always needed)
__ movptr(rcx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
// native calls don't need the stack size check since they have no
@@ -967,9 +967,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// allocate space for parameters
__ get_method(method);
__ load_unsigned_short(t,
Address(method,
Method::size_of_parameters_offset()));
__ movptr(t, Address(method, Method::const_offset()));
__ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
__ shll(t, Interpreter::logStackElementSize);
__ subptr(rsp, t);
@@ -1302,15 +1301,18 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// r13: sender sp
address entry_point = __ pc();
const Address size_of_parameters(rbx,
Method::size_of_parameters_offset());
const Address size_of_locals(rbx, Method::size_of_locals_offset());
const Address constMethod(rbx, Method::const_offset());
const Address invocation_counter(rbx,
Method::invocation_counter_offset() +
InvocationCounter::counter_offset());
const Address access_flags(rbx, Method::access_flags_offset());
const Address size_of_parameters(rdx,
ConstMethod::size_of_parameters_offset());
const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
// get parameter size (always needed)
__ movptr(rdx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
// rbx: Method*
@@ -1752,7 +1754,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// Compute size of arguments for saving when returning to
// deoptimized caller
__ get_method(rax);
__ load_unsigned_short(rax, Address(rax, in_bytes(Method::
__ movptr(rax, Address(rax, Method::const_offset()));
__ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
size_of_parameters_offset())));
__ shll(rax, Interpreter::logStackElementSize);
__ restore_locals(); // XXX do we need this?


@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"


@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"


@@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/stubCodeGenerator.hpp"


@@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86_32.hpp"
#include "memory/resourceArea.hpp"


@@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86_64.hpp"
#include "memory/resourceArea.hpp"


@@ -46,6 +46,12 @@ int AbstractAssembler::code_fill_byte() {
return 0;
}
#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() {
ShouldNotCallThis();
}
#endif
void Assembler::pd_patch_instruction(address branch, address target) {
ShouldNotCallThis();
}
@@ -80,6 +86,11 @@ void MacroAssembler::store_oop(jobject obj) {
emit_address((address) obj);
}
void MacroAssembler::store_Metadata(Metadata* md) {
code_section()->relocate(pc(), metadata_Relocation::spec_for_immediate());
emit_address((address) md);
}
static void should_not_call() {
report_should_not_call(__FILE__, __LINE__);
}


@@ -55,14 +55,9 @@ class MacroAssembler : public Assembler {
public:
void advance(int bytes);
void store_oop(jobject obj);
void store_Metadata(Metadata* obj);
};
#ifdef ASSERT
inline bool AbstractAssembler::pd_check_instruction_mark() {
ShouldNotCallThis();
}
#endif
address ShouldNotCallThisStub();
address ShouldNotCallThisEntry();


@@ -1015,11 +1015,7 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
// Helper for figuring out if frames are interpreter frames
bool CppInterpreter::contains(address pc) {
#ifdef PRODUCT
ShouldNotCallThis();
#else
return false; // make frame::print_value_on work
#endif // !PRODUCT
}
// Result handlers and convertors


@@ -52,11 +52,7 @@ define_pd_global(intx, StackShadowPages, 5 LP64_ONLY(+1) DEBUG_ONLY(+3));
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
#ifdef _ALLBSD_SOURCE
define_pd_global(bool, UseMembar, true);
#else
define_pd_global(bool, UseMembar, false);
#endif
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread


@@ -23,29 +23,10 @@
*/
// no precompiled headers
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
#endif
#include <signal.h>
void OSThread::pd_initialize() {
assert(this != NULL, "check");


@@ -29,6 +29,7 @@
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_bsd.h"
#include "memory/allocation.inline.hpp"
@@ -62,26 +63,6 @@
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.inline.hpp"
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.inline.hpp"
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
# include "nativeInst_ppc.hpp"
#endif
// put OS-includes here
# include <sys/types.h>


@@ -26,13 +26,13 @@
#define OS_BSD_VM_OS_BSD_INLINE_HPP
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_OS_ARCH_bsd_x86
# include "atomic_bsd_x86.inline.hpp"
# include "orderAccess_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "atomic_bsd_zero.inline.hpp"
# include "orderAccess_bsd_zero.inline.hpp"
#endif


@@ -23,29 +23,10 @@
*/
// no precompiled headers
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/mutex.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
#endif
#include <signal.h>
void OSThread::pd_initialize() {
assert(this != NULL, "check");


@@ -29,6 +29,7 @@
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
@@ -62,26 +63,6 @@
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.inline.hpp"
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.inline.hpp"
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
# include "nativeInst_ppc.hpp"
#endif
// put OS-includes here
# include <sys/types.h>


@@ -26,25 +26,22 @@
#define OS_LINUX_VM_OS_LINUX_INLINE_HPP
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_OS_ARCH_linux_x86
# include "atomic_linux_x86.inline.hpp"
# include "orderAccess_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "atomic_linux_sparc.inline.hpp"
# include "orderAccess_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "atomic_linux_zero.inline.hpp"
# include "orderAccess_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "atomic_linux_arm.inline.hpp"
# include "orderAccess_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "atomic_linux_ppc.inline.hpp"
# include "orderAccess_linux_ppc.inline.hpp"
#endif


@@ -30,14 +30,8 @@
#include "runtime/osThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
# include <signal.h>
#include <signal.h>
// ***************************************************************
// Platform dependent initialization and cleanup


@@ -29,6 +29,7 @@
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_solaris.h"
#include "memory/allocation.inline.hpp"
@@ -63,14 +64,6 @@
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
# include "nativeInst_sparc.hpp"
#endif
// put OS-includes here
# include <dlfcn.h>


@@ -26,13 +26,13 @@
#define OS_SOLARIS_VM_OS_SOLARIS_INLINE_HPP
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_OS_ARCH_solaris_x86
# include "atomic_solaris_x86.inline.hpp"
# include "orderAccess_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "atomic_solaris_sparc.inline.hpp"
# include "orderAccess_solaris_sparc.inline.hpp"
#endif


@@ -30,9 +30,6 @@
#include "runtime/osThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
void OSThread::pd_initialize() {
set_thread_handle(NULL);


@@ -32,6 +32,7 @@
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_windows.h"
#include "memory/allocation.inline.hpp"
@@ -65,10 +66,6 @@
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
# include "nativeInst_x86.hpp"
#endif
#ifdef _DEBUG
#include <crtdbg.h>


@@ -26,9 +26,10 @@
#define OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_OS_ARCH_windows_x86
# include "atomic_windows_x86.inline.hpp"
# include "orderAccess_windows_x86.inline.hpp"
#endif


@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"


@@ -23,7 +23,7 @@
*/
// no precompiled headers
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
@@ -33,7 +33,6 @@
#include "jvm_bsd.h"
#include "memory/allocation.inline.hpp"
#include "mutex_bsd.inline.hpp"
#include "nativeInst_x86.hpp"
#include "os_share_bsd.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"


@@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"


@@ -23,7 +23,7 @@
*/
// no precompiled headers
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"

Some files were not shown because too many files have changed in this diff.