Merge
commit a6b48817ff
@@ -189,3 +189,4 @@ e07f499b9dccb529ecf74172cf6ac11a195ec57a jdk8-b60
 b772de306dc24c17f7bd1398531ddeb58723b804 jdk8-b65
 13bb8c326e7b7b0b19d78c8088033e3932e3f7ca jdk8-b66
 9a6ec97ec45c1a62d5233cefa91e8390e380e13a jdk8-b67
+cdb401a60cea6ad5ef3f498725ed1decf8dda1ea jdk8-b68
@@ -90,13 +90,25 @@ AC_DEFUN([BASIC_FIXUP_EXECUTABLE],
     tmp="$complete EOL"
     arguments="${tmp#* }"
 
-    new_path=`$WHICH $path 2> /dev/null`
-    if test "x$new_path" = x; then
+    # Cannot rely on the command "which" here since it doesn't always work.
     is_absolute_path=`$ECHO "$path" | $GREP ^/`
-    if test "x$is_absolute_path" != x; then
-      AC_MSG_NOTICE([Resolving $1 (as $path) with 'which' failed, using $path directly.])
-      new_path="$path"
+    if test -z "$is_absolute_path"; then
+      # Path to executable is not absolute. Find it.
+      IFS_save="$IFS"
+      IFS=:
+      for p in $PATH; do
+        if test -f "$p/$path" && test -x "$p/$path"; then
+          new_path="$p/$path"
+          break
+        fi
+      done
+      IFS="$IFS_save"
     else
+      AC_MSG_NOTICE([Resolving $1 (as $path) failed, using $path directly.])
+      new_path="$path"
+    fi
+
+    if test "x$new_path" = x; then
       AC_MSG_NOTICE([The path of $1, which resolves as "$complete", is not found.])
       has_space=`$ECHO "$complete" | $GREP " "`
       if test "x$has_space" != x; then
@@ -105,7 +117,6 @@ AC_DEFUN([BASIC_FIXUP_EXECUTABLE],
       AC_MSG_ERROR([Cannot locate the the path of $1])
     fi
   fi
-  fi
 
   # Now join together the path and the arguments once again
   if test "x$arguments" != xEOL; then
|
File diff suppressed because it is too large
Load Diff
194
common/bin/hgforest.sh
Normal file
194
common/bin/hgforest.sh
Normal file
@@ -0,0 +1,194 @@
+#!/bin/sh
+
+#
+# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+# Shell script for a fast parallel forest command
+command="$1"
+pull_extra_base="$2"
+
+# Python always buffers stdout significantly, thus we will not see any output from hg clone jdk,
+# until a lot of time has passed! By passing -u to python, we get incremental updates
+# on stdout. Much nicer.
+whichhg="`which hg`"
+
+if [ "${whichhg}" = "" ] ; then
+  echo Cannot find hg!
+  exit 1
+fi
+
+if [ "" = "$command" ] ; then
+  echo No command to hg supplied!
+  exit 1
+fi
+
+has_hash_bang="`head -n 1 "${whichhg}" | cut -b 1-2`"
+python=""
+bpython=""
+
+if [ "#!" = "$has_hash_bang" ] ; then
+  python="`head -n 1 ${whichhg} | cut -b 3-`"
+  bpython="`basename "$python"`"
+fi
+
+if [ "python" = "$bpython" -a -x "$python" ] ; then
+  hg="${python} -u ${whichhg}"
+else
+  echo Cannot find python from hg launcher. Running plain hg, which probably has buffered stdout.
+  hg="hg"
+fi
+
+# Clean out the temporary directory that stores the pid files.
+tmp=/tmp/forest.$$
+rm -f -r ${tmp}
+mkdir -p ${tmp}
+
+safe_interrupt () {
+  if [ -d ${tmp} ]; then
+    if [ "`ls ${tmp}`" != "" ]; then
+      echo "Waiting for processes ( `cat ${tmp}/* | tr '\n' ' '`) to terminate nicely!"
+      sleep 1
+      # Pipe stderr to dev/null to silence kill, that complains when trying to kill
+      # a subprocess that has already exited.
+      kill -TERM `cat ${tmp}/* | tr '\n' ' '` 2> /dev/null
+      wait
+      echo Interrupt complete!
+    fi
+  fi
+  rm -f -r ${tmp}
+  exit 1
+}
+
+nice_exit () {
+  if [ -d ${tmp} ]; then
+    if [ "`ls ${tmp}`" != "" ]; then
+      wait
+    fi
+  fi
+  rm -f -r ${tmp}
+}
+
+trap 'safe_interrupt' INT QUIT
+trap 'nice_exit' EXIT
+
+# Only look in specific locations for possible forests (avoids long searches)
+pull_default=""
+repos=""
+repos_extra=""
+if [ "${command}" = "clone" -o "${command}" = "fclone" ] ; then
+  subrepos="corba jaxp jaxws langtools jdk hotspot"
+  if [ -f .hg/hgrc ] ; then
+    pull_default=`hg paths default`
+    if [ "${pull_default}" = "" ] ; then
+      echo "ERROR: Need initial clone with 'hg paths default' defined"
+      exit 1
+    fi
+  fi
+  if [ "${pull_default}" = "" ] ; then
+    echo "ERROR: Need initial repository to use this script"
+    exit 1
+  fi
+  for i in ${subrepos} ; do
+    if [ ! -f ${i}/.hg/hgrc ] ; then
+      repos="${repos} ${i}"
+    fi
+  done
+  if [ "${pull_extra_base}" != "" ] ; then
+    subrepos_extra="jdk/src/closed jdk/make/closed jdk/test/closed hotspot/make/closed hotspot/src/closed hotspot/test/closed deploy install sponsors pubs"
+    pull_default_tail=`echo ${pull_default} | sed -e 's@^.*://[^/]*/\(.*\)@\1@'`
+    pull_extra="${pull_extra_base}/${pull_default_tail}"
+    for i in ${subrepos_extra} ; do
+      if [ ! -f ${i}/.hg/hgrc ] ; then
+        repos_extra="${repos_extra} ${i}"
+      fi
+    done
+  fi
+  at_a_time=2
+  # Any repos to deal with?
+  if [ "${repos}" = "" -a "${repos_extra}" = "" ] ; then
+    exit
+  fi
+else
+  hgdirs=`ls -d ./.hg ./*/.hg ./*/*/.hg ./*/*/*/.hg ./*/*/*/*/.hg 2>/dev/null`
+  # Derive repository names from the .hg directory locations
+  for i in ${hgdirs} ; do
+    repos="${repos} `echo ${i} | sed -e 's@/.hg$@@'`"
+  done
+  for i in ${repos} ; do
+    if [ -h ${i}/.hg/store/lock -o -f ${i}/.hg/store/lock ] ; then
+      locked="${i} ${locked}"
+    fi
+  done
+  at_a_time=8
+  # Any repos to deal with?
+  if [ "${repos}" = "" ] ; then
+    echo "No repositories to process."
+    exit
+  fi
+  if [ "${locked}" != "" ] ; then
+    echo "These repositories are locked: ${locked}"
+    exit
+  fi
+fi
+
+# Echo out what repositories we do a command on.
+echo "# Repositories: ${repos} ${repos_extra}"
+echo
+
+# Run the supplied command on all repos in parallel.
+n=0
+for i in ${repos} ${repos_extra} ; do
+  n=`expr ${n} '+' 1`
+  repopidfile=`echo ${i} | sed -e 's@./@@' -e 's@/@_@g'`
+  reponame=`echo ${i} | sed -e :a -e 's/^.\{1,20\}$/ &/;ta'`
+  pull_base="${pull_default}"
+  for j in $repos_extra ; do
+    if [ "$i" = "$j" ] ; then
+      pull_base="${pull_extra}"
+    fi
+  done
+  (
+    (
+      if [ "${command}" = "clone" -o "${command}" = "fclone" ] ; then
+        pull_newrepo="`echo ${pull_base}/${i} | sed -e 's@\([^:]/\)//*@\1@g'`"
+        echo ${hg} clone ${pull_newrepo} ${i}
+        ${hg} clone ${pull_newrepo} ${i} &
+      else
+        echo "cd ${i} && ${hg} $*"
+        cd ${i} && ${hg} "$@" &
+      fi
+      echo $! > ${tmp}/${repopidfile}.pid
+    ) 2>&1 | sed -e "s@^@${reponame}: @") &
+
+  if [ `expr ${n} '%' ${at_a_time}` -eq 0 ] ; then
+    sleep 2
+    echo Waiting 5 secs before spawning next background command.
+    sleep 3
+  fi
+done
+# Wait for all hg commands to complete
+wait
+
+# Terminate with exit 0 all the time (hard to know when to say "failed")
+exit 0
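A usage sketch for the new script (not part of the commit; the closed-repo URL below is a made-up placeholder). It assumes the current directory is the root of an already-cloned top-level repository where 'hg paths default' is defined, which is exactly what the script checks before cloning:

  # Clone any missing open subrepos (corba jaxp jaxws langtools jdk hotspot)
  # from the URL reported by 'hg paths default':
  sh common/bin/hgforest.sh clone

  # Same, but also clone the closed/extra repos from an alternate base URL
  # (picked up as pull_extra_base, the second positional argument):
  sh common/bin/hgforest.sh clone ssh://closed.example.com/jdk8

  # Any other hg command is run in every repository of the forest in parallel:
  sh common/bin/hgforest.sh pull -u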
@@ -131,6 +131,7 @@ CORE_PKGS = \
     java.util.concurrent \
     java.util.concurrent.atomic \
     java.util.concurrent.locks \
+    java.util.function \
     java.util.jar \
     java.util.logging \
     java.util.prefs \
@@ -26,8 +26,8 @@
 #
 
 # Get clones of all nested repositories
-sh ./make/scripts/hgforest.sh clone $*
+sh ./common/bin/hgforest.sh clone "$@"
 
 # Update all existing repositories to the latest sources
-sh ./make/scripts/hgforest.sh pull -u
+sh ./common/bin/hgforest.sh pull -u
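One detail in the hunk above, besides the new script location: the clone line now forwards its arguments with "$@" instead of an unquoted $*. With "$@" each original argument stays a single word even if it contains spaces, while an unquoted $* re-splits them. A minimal illustration (the function names are only for the example):

  #!/bin/sh
  # Compare how $* and "$@" forward arguments.
  show_star() { for a in $*;   do echo "arg: $a"; done; }
  show_at()   { for a in "$@"; do echo "arg: $a"; done; }

  show_star "one two" three   # three words: one / two / three
  show_at   "one two" three   # two words: "one two" / three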
@@ -299,3 +299,5 @@ cfc5309f03b7bd6c1567618b63cf1fc74c0f2a8f hs25-b10
 b61d9c88b759d1594b8af1655598e8fa00393672 hs25-b11
 25bdce771bb3a7ae9825261a284d292cda700122 jdk8-b67
 a35a72dd2e1255239d31f796f9f693e49b36bc9f hs25-b12
+121aa71316af6cd877bf455e775fa3fdbcdd4b65 jdk8-b68
+b6c9c0109a608eedbb6b868d260952990e3c91fe hs25-b13
@@ -69,6 +69,8 @@ public class ConstMethod extends VMObject {
     signatureIndex = new CIntField(type.getCIntegerField("_signature_index"), 0);
     idnum = new CIntField(type.getCIntegerField("_method_idnum"), 0);
     maxStack = new CIntField(type.getCIntegerField("_max_stack"), 0);
+    maxLocals = new CIntField(type.getCIntegerField("_max_locals"), 0);
+    sizeOfParameters = new CIntField(type.getCIntegerField("_size_of_parameters"), 0);
 
     // start of byte code
     bytecodeOffset = type.getSize();
@@ -96,6 +98,8 @@ public class ConstMethod extends VMObject {
   private static CIntField signatureIndex;
   private static CIntField idnum;
   private static CIntField maxStack;
+  private static CIntField maxLocals;
+  private static CIntField sizeOfParameters;
 
   // start of bytecode
   private static long bytecodeOffset;
@@ -151,6 +155,14 @@ public class ConstMethod extends VMObject {
     return maxStack.getValue(this);
   }
 
+  public long getMaxLocals() {
+    return maxLocals.getValue(this);
+  }
+
+  public long getSizeOfParameters() {
+    return sizeOfParameters.getValue(this);
+  }
+
   public Symbol getName() {
     return getMethod().getName();
   }
@@ -247,6 +259,8 @@ public class ConstMethod extends VMObject {
     visitor.doCInt(signatureIndex, true);
     visitor.doCInt(codeSize, true);
     visitor.doCInt(maxStack, true);
+    visitor.doCInt(maxLocals, true);
+    visitor.doCInt(sizeOfParameters, true);
   }
 
   // Accessors
@@ -50,8 +50,6 @@ public class Method extends Metadata {
     constMethod = type.getAddressField("_constMethod");
     methodData = type.getAddressField("_method_data");
     methodSize = new CIntField(type.getCIntegerField("_method_size"), 0);
-    maxLocals = new CIntField(type.getCIntegerField("_max_locals"), 0);
-    sizeOfParameters = new CIntField(type.getCIntegerField("_size_of_parameters"), 0);
     accessFlags = new CIntField(type.getCIntegerField("_access_flags"), 0);
     code = type.getAddressField("_code");
     vtableIndex = new CIntField(type.getCIntegerField("_vtable_index"), 0);
@@ -83,8 +81,6 @@ public class Method extends Metadata {
   private static AddressField constMethod;
   private static AddressField methodData;
   private static CIntField methodSize;
-  private static CIntField maxLocals;
-  private static CIntField sizeOfParameters;
   private static CIntField accessFlags;
   private static CIntField vtableIndex;
   private static CIntField invocationCounter;
@@ -134,8 +130,8 @@ public class Method extends Metadata {
   /** WARNING: this is in words, not useful in this system; use getObjectSize() instead */
   public long getMethodSize() { return methodSize.getValue(this); }
   public long getMaxStack() { return getConstMethod().getMaxStack(); }
-  public long getMaxLocals() { return maxLocals.getValue(this); }
-  public long getSizeOfParameters() { return sizeOfParameters.getValue(this); }
+  public long getMaxLocals() { return getConstMethod().getMaxLocals(); }
+  public long getSizeOfParameters() { return getConstMethod().getSizeOfParameters(); }
   public long getNameIndex() { return getConstMethod().getNameIndex(); }
   public long getSignatureIndex() { return getConstMethod().getSignatureIndex(); }
   public long getGenericSignatureIndex() { return getConstMethod().getGenericSignatureIndex(); }
@@ -282,8 +278,6 @@ public class Method extends Metadata {
 
   public void iterateFields(MetadataVisitor visitor) {
     visitor.doCInt(methodSize, true);
-    visitor.doCInt(maxLocals, true);
-    visitor.doCInt(sizeOfParameters, true);
     visitor.doCInt(accessFlags, true);
   }
 
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2012
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=12
+HS_BUILD_NUMBER=13
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
@@ -582,7 +582,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
   // the following temporary registers are used during frame creation
   const Register Gtmp1 = G3_scratch ;
   const Register Gtmp2 = G1_scratch;
-  const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
+  const Register RconstMethod = Gtmp1;
+  const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
+  const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
 
   bool inc_counter = UseCompiler || CountCompiledCalls;
 
@@ -618,6 +620,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
   }
 #endif // ASSERT
 
+  __ ld_ptr(constMethod, RconstMethod);
   __ lduh(size_of_parameters, Gtmp1);
   __ sll(Gtmp1, LogBytesPerWord, Gtmp2); // parameter size in bytes
   __ add(Gargs, Gtmp2, Gargs); // points to first local + BytesPerWord
@@ -1047,8 +1050,6 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
   const Register Gtmp = G3_scratch;
   const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
   const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
-  const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
-  const Address size_of_locals (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
 
   // slop factor is two extra slots on the expression stack so that
   // we always have room to store a result when returning from a call without parameters
@@ -1066,6 +1067,9 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
   // Now compute new frame size
 
   if (native) {
+    const Register RconstMethod = Gtmp;
+    const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
+    __ ld_ptr(constMethod, RconstMethod);
     __ lduh( size_of_parameters, Gtmp );
     __ calc_mem_param_words(Gtmp, Gtmp); // space for native call parameters passed on the stack in words
   } else {
@@ -1236,9 +1240,13 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
   }
   if (init_value != noreg) {
     Label clear_loop;
+    const Register RconstMethod = O1;
+    const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
+    const Address size_of_locals (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
 
     // NOTE: If you change the frame layout, this code will need to
     // be updated!
+    __ ld_ptr( constMethod, RconstMethod );
     __ lduh( size_of_locals, O2 );
     __ lduh( size_of_parameters, O1 );
     __ sll( O2, LogBytesPerWord, O2);
@@ -1483,13 +1491,16 @@ void CppInterpreterGenerator::adjust_callers_stack(Register args) {
   //
   // assert_different_registers(state, prev_state);
   const Register Gtmp = G3_scratch;
+  const RconstMethod = G3_scratch;
   const Register tmp = O2;
-  const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
-  const Address size_of_locals (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
+  const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
+  const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
+  const Address size_of_locals (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
 
+  __ ld_ptr(constMethod, RconstMethod);
   __ lduh(size_of_parameters, tmp);
-  __ sll(tmp, LogBytesPerWord, Gtmp); // parameter size in bytes
-  __ add(args, Gtmp, Gargs); // points to first local + BytesPerWord
+  __ sll(tmp, LogBytesPerWord, Gargs); // parameter size in bytes
+  __ add(args, Gargs, Gargs); // points to first local + BytesPerWord
   // NEW
   __ add(Gargs, -wordSize, Gargs); // points to first local[0]
   // determine extra space for non-argument locals & adjust caller's SP
@@ -1541,8 +1552,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
 
   const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
   const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
-  const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
-  const Address size_of_locals (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
 
   address entry_point = __ pc();
   __ mov(G0, prevState); // no current activation
@@ -1750,7 +1759,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
 
   __ ld_ptr(STATE(_result._to_call._callee), L4_scratch); // called method
   __ ld_ptr(STATE(_stack), L1_scratch); // get top of java expr stack
-  __ lduh(L4_scratch, in_bytes(Method::size_of_parameters_offset()), L2_scratch); // get parameter size
+  // get parameter size
+  __ ld_ptr(L4_scratch, in_bytes(Method::const_offset()), L2_scratch);
+  __ lduh(L2_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), L2_scratch);
   __ sll(L2_scratch, LogBytesPerWord, L2_scratch ); // parameter size in bytes
   __ add(L1_scratch, L2_scratch, L1_scratch); // stack destination for result
   __ ld(L4_scratch, in_bytes(Method::result_index_offset()), L3_scratch); // called method result type index
@@ -171,7 +171,8 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
 
   if (VerifyMethodHandles && !for_compiler_entry) {
     // make sure recv is already on stack
-    __ load_sized_value(Address(method_temp, Method::size_of_parameters_offset()),
+    __ ld_ptr(method_temp, in_bytes(Method::const_offset()), temp2);
+    __ load_sized_value(Address(temp2, ConstMethod::size_of_parameters_offset()),
                         temp2,
                         sizeof(u2), /*is_signed*/ false);
     // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
@@ -233,7 +234,8 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
   int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
   assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
   if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
-    __ load_sized_value(Address(G5_method, Method::size_of_parameters_offset()),
+    __ ld_ptr(G5_method, in_bytes(Method::const_offset()), O4_param_size);
+    __ load_sized_value(Address(O4_param_size, ConstMethod::size_of_parameters_offset()),
                         O4_param_size,
                         sizeof(u2), /*is_signed*/ false);
     // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
@@ -494,9 +494,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
   // (gri - 2/25/2000)
 
 
-  const Address size_of_parameters(G5_method, Method::size_of_parameters_offset());
-  const Address size_of_locals (G5_method, Method::size_of_locals_offset());
-  const Address constMethod (G5_method, Method::const_offset());
   int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
 
   const int extra_space =
@@ -506,11 +503,15 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
     (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);
 
   const Register Glocals_size = G3;
+  const Register RconstMethod = Glocals_size;
   const Register Otmp1 = O3;
   const Register Otmp2 = O4;
   // Lscratch can't be used as a temporary because the call_stub uses
   // it to assert that the stack frame was setup correctly.
+  const Address constMethod (G5_method, Method::const_offset());
+  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
 
+  __ ld_ptr( constMethod, RconstMethod );
   __ lduh( size_of_parameters, Glocals_size);
 
   // Gargs points to first local + BytesPerWord
@@ -530,6 +531,8 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
   //
   // Compute number of locals in method apart from incoming parameters
   //
+  const Address size_of_locals (Otmp1, ConstMethod::size_of_locals_offset());
+  __ ld_ptr( constMethod, Otmp1 );
   __ lduh( size_of_locals, Otmp1 );
   __ sub( Otmp1, Glocals_size, Glocals_size );
   __ round_to( Glocals_size, WordsPerLong );
@@ -1256,8 +1259,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
   // make sure registers are different!
   assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
 
-  const Address size_of_parameters(G5_method, Method::size_of_parameters_offset());
-  const Address size_of_locals (G5_method, Method::size_of_locals_offset());
+  const Address constMethod (G5_method, Method::const_offset());
   // Seems like G5_method is live at the point this is used. So we could make this look consistent
   // and use in the asserts.
   const Address access_flags (Lmethod, Method::access_flags_offset());
@@ -1307,8 +1309,13 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
   init_value = G0;
   Label clear_loop;
+
+  const Register RconstMethod = O1;
+  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
+  const Address size_of_locals (RconstMethod, ConstMethod::size_of_locals_offset());
 
   // NOTE: If you change the frame layout, this code will need to
   // be updated!
+  __ ld_ptr( constMethod, RconstMethod );
   __ lduh( size_of_locals, O2 );
   __ lduh( size_of_parameters, O1 );
   __ sll( O2, Interpreter::logStackElementSize, O2);
@@ -1823,9 +1830,13 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
 
   const Register Gtmp1 = G3_scratch;
   const Register Gtmp2 = G1_scratch;
+  const Register RconstMethod = Gtmp1;
+  const Address constMethod(Lmethod, Method::const_offset());
+  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
 
   // Compute size of arguments for saving when returning to deoptimized caller
-  __ lduh(Lmethod, in_bytes(Method::size_of_parameters_offset()), Gtmp1);
+  __ ld_ptr(constMethod, RconstMethod);
+  __ lduh(size_of_parameters, Gtmp1);
   __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
   __ sub(Llocals, Gtmp1, Gtmp2);
   __ add(Gtmp2, wordSize, Gtmp2);
@@ -3040,7 +3040,8 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
   Register Rtemp = G4_scratch;
 
   // Load receiver from stack slot
-  __ lduh(G5_method, in_bytes(Method::size_of_parameters_offset()), G4_scratch);
+  __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch);
+  __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch);
   __ load_receiver(G4_scratch, O0);
 
   // receiver NULL check
@@ -611,8 +611,6 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
   // C++ interpreter only
   // rsi/r13 - previous interpreter state pointer
 
-  const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
-
   // InterpreterRuntime::frequency_counter_overflow takes one argument
   // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
   // The call returns the address of the verified entry point for the method or NULL
@@ -977,15 +975,16 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
   // to save/restore.
   address entry_point = __ pc();
 
-  const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
-  const Address size_of_locals (rbx, Method::size_of_locals_offset());
+  const Address constMethod (rbx, Method::const_offset());
   const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
   const Address access_flags (rbx, Method::access_flags_offset());
+  const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
 
   // rsi/r13 == state/locals rdi == prevstate
   const Register locals = rdi;
 
   // get parameter size (always needed)
+  __ movptr(rcx, constMethod);
   __ load_unsigned_short(rcx, size_of_parameters);
 
   // rbx: Method*
@@ -994,6 +993,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
   // for natives the size of locals is zero
 
   // compute beginning of parameters /locals
+
   __ lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize));
 
   // initialize fixed part of activation frame
@@ -1107,11 +1107,14 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
   const Register method = rbx;
   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);
   const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); // rcx|rscratch1
+  const Address constMethod (method, Method::const_offset());
+  const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset());
 
   // allocate space for parameters
   __ movptr(method, STATE(_method));
   __ verify_method_ptr(method);
-  __ load_unsigned_short(t, Address(method, Method::size_of_parameters_offset()));
+  __ movptr(t, constMethod);
+  __ load_unsigned_short(t, size_of_parameters);
   __ shll(t, 2);
 #ifdef _LP64
   __ subptr(rsp, t);
@@ -1700,15 +1703,17 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
   // save sender sp
   __ push(rcx);
 
-  const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
-  const Address size_of_locals (rbx, Method::size_of_locals_offset());
+  const Address constMethod (rbx, Method::const_offset());
   const Address access_flags (rbx, Method::access_flags_offset());
+  const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
+  const Address size_of_locals (rdx, ConstMethod::size_of_locals_offset());
 
   // const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
   // const Address monitor_block_bot (rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
   // const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
 
   // get parameter size (always needed)
+  __ movptr(rdx, constMethod);
   __ load_unsigned_short(rcx, size_of_parameters);
 
   // rbx: Method*
@@ -1989,7 +1994,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
   __ movptr(rbx, STATE(_result._to_call._callee));
 
   // callee left args on top of expression stack, remove them
-  __ load_unsigned_short(rcx, Address(rbx, Method::size_of_parameters_offset()));
+  __ movptr(rcx, constMethod);
+  __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
+
   __ lea(rsp, Address(rsp, rcx, Address::times_ptr));
 
   __ movl(rcx, Address(rbx, Method::result_index_offset()));
@@ -2159,7 +2166,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
   // Make it look like call_stub calling conventions
 
   // Get (potential) receiver
-  __ load_unsigned_short(rcx, size_of_parameters); // get size of parameters in words
+  // get size of parameters in words
+  __ movptr(rcx, constMethod);
+  __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
 
   ExternalAddress recursive(CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
   __ pushptr(recursive.addr()); // make it look good in the debugger
@@ -169,8 +169,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
 
   if (VerifyMethodHandles && !for_compiler_entry) {
     // make sure recv is already on stack
+    __ movptr(temp2, Address(method_temp, Method::const_offset()));
     __ load_sized_value(temp2,
-                        Address(method_temp, Method::size_of_parameters_offset()),
+                        Address(temp2, ConstMethod::size_of_parameters_offset()),
                         sizeof(u2), /*is_signed*/ false);
     // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
     Label L;
@@ -234,8 +235,9 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
   int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
   assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
   if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
+    __ movptr(rdx_argp, Address(rbx_method, Method::const_offset()));
     __ load_sized_value(rdx_argp,
-                        Address(rbx_method, Method::size_of_parameters_offset()),
+                        Address(rdx_argp, ConstMethod::size_of_parameters_offset()),
                         sizeof(u2), /*is_signed*/ false);
     // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
     rdx_first_arg_addr = __ argument_address(rdx_argp, -1);
@@ -424,8 +424,6 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
   // C++ interpreter only
   // rsi - previous interpreter state pointer
 
-  const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
-
   // InterpreterRuntime::frequency_counter_overflow takes one argument
   // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
   // The call returns the address of the verified entry point for the method or NULL
@@ -868,12 +866,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
   // rsi: previous interpreter state (C++ interpreter) must preserve
   address entry_point = __ pc();
 
-  const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
+  const Address constMethod (rbx, Method::const_offset());
   const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
   const Address access_flags (rbx, Method::access_flags_offset());
+  const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
 
   // get parameter size (always needed)
+  __ movptr(rcx, constMethod);
   __ load_unsigned_short(rcx, size_of_parameters);
 
   // native calls don't need the stack size check since they have no expression stack
@@ -988,7 +987,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
 
   // allocate space for parameters
   __ get_method(method);
-  __ load_unsigned_short(t, Address(method, Method::size_of_parameters_offset()));
+  __ movptr(t, Address(method, Method::const_offset()));
+  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
+
   __ shlptr(t, Interpreter::logStackElementSize);
   __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
   __ subptr(rsp, t);
@@ -1297,13 +1298,14 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
   // rsi: sender sp
   address entry_point = __ pc();
 
-  const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
-  const Address size_of_locals (rbx, Method::size_of_locals_offset());
+  const Address constMethod (rbx, Method::const_offset());
   const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
   const Address access_flags (rbx, Method::access_flags_offset());
+  const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
+  const Address size_of_locals (rdx, ConstMethod::size_of_locals_offset());
 
   // get parameter size (always needed)
+  __ movptr(rdx, constMethod);
   __ load_unsigned_short(rcx, size_of_parameters);
 
   // rbx,: Method*
@@ -1734,7 +1736,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
 
   // Compute size of arguments for saving when returning to deoptimized caller
   __ get_method(rax);
-  __ load_unsigned_short(rax, Address(rax, in_bytes(Method::size_of_parameters_offset())));
+  __ movptr(rax, Address(rax, Method::const_offset()));
+  __ load_unsigned_short(rax, Address(rax, ConstMethod::size_of_parameters_offset()));
   __ shlptr(rax, Interpreter::logStackElementSize);
   __ restore_locals();
   __ subptr(rdi, rax);
@@ -369,9 +369,6 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
   // Everything as it was on entry
   // rdx is not restored. Doesn't appear to really be set.
 
-  const Address size_of_parameters(rbx,
-                                   Method::size_of_parameters_offset());
-
   // InterpreterRuntime::frequency_counter_overflow takes two
   // arguments, the first (thread) is passed by call_VM, the second
   // indicates if the counter overflow occurs at a backwards branch
@@ -844,14 +841,17 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
 
   address entry_point = __ pc();
 
-  const Address size_of_parameters(rbx, Method::
-                                   size_of_parameters_offset());
+  const Address constMethod (rbx, Method::const_offset());
   const Address invocation_counter(rbx, Method::
                                    invocation_counter_offset() +
                                    InvocationCounter::counter_offset());
   const Address access_flags (rbx, Method::access_flags_offset());
+  const Address size_of_parameters(rcx, ConstMethod::
+                                   size_of_parameters_offset());
+
 
   // get parameter size (always needed)
+  __ movptr(rcx, constMethod);
   __ load_unsigned_short(rcx, size_of_parameters);
 
   // native calls don't need the stack size check since they have no
@@ -967,9 +967,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
 
   // allocate space for parameters
   __ get_method(method);
-  __ load_unsigned_short(t,
-                         Address(method,
-                                 Method::size_of_parameters_offset()));
+  __ movptr(t, Address(method, Method::const_offset()));
+  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
   __ shll(t, Interpreter::logStackElementSize);
 
   __ subptr(rsp, t);
@@ -1302,15 +1301,18 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
   // r13: sender sp
   address entry_point = __ pc();
 
-  const Address size_of_parameters(rbx,
-                                   Method::size_of_parameters_offset());
-  const Address size_of_locals(rbx, Method::size_of_locals_offset());
+  const Address constMethod(rbx, Method::const_offset());
   const Address invocation_counter(rbx,
                                    Method::invocation_counter_offset() +
                                    InvocationCounter::counter_offset());
   const Address access_flags(rbx, Method::access_flags_offset());
+  const Address size_of_parameters(rdx,
+                                   ConstMethod::size_of_parameters_offset());
+  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
 
+
   // get parameter size (always needed)
+  __ movptr(rdx, constMethod);
   __ load_unsigned_short(rcx, size_of_parameters);
 
   // rbx: Method*
@@ -1752,7 +1754,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
   // Compute size of arguments for saving when returning to
   // deoptimized caller
   __ get_method(rax);
-  __ load_unsigned_short(rax, Address(rax, in_bytes(Method::
+  __ movptr(rax, Address(rax, Method::const_offset()));
+  __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                 size_of_parameters_offset())));
   __ shll(rax, Interpreter::logStackElementSize);
   __ restore_locals(); // XXX do we need this?
@@ -327,14 +327,14 @@ jchar* java_lang_String::as_unicode_string(oop java_string, int& length) {
   return result;
 }
 
-unsigned int java_lang_String::to_hash(oop java_string) {
+unsigned int java_lang_String::hash_code(oop java_string) {
   int length = java_lang_String::length(java_string);
-  // Zero length string will hash to zero with String.toHash() function.
+  // Zero length string will hash to zero with String.hashCode() function.
   if (length == 0) return 0;
 
   typeArrayOop value = java_lang_String::value(java_string);
   int offset = java_lang_String::offset(java_string);
-  return java_lang_String::to_hash(value->char_at_addr(offset), length);
+  return java_lang_String::hash_code(value->char_at_addr(offset), length);
 }
 
 char* java_lang_String::as_quoted_ascii(oop java_string) {
@@ -166,8 +166,8 @@ class java_lang_String : AllStatic {
   // objects in the shared archive file.
   // hash P(31) from Kernighan & Ritchie
   //
-  // For this reason, THIS ALGORITHM MUST MATCH String.toHash().
-  template <typename T> static unsigned int to_hash(T* s, int len) {
+  // For this reason, THIS ALGORITHM MUST MATCH String.hashCode().
+  template <typename T> static unsigned int hash_code(T* s, int len) {
     unsigned int h = 0;
     while (len-- > 0) {
       h = 31*h + (unsigned int) *s;
@@ -175,10 +175,10 @@ class java_lang_String : AllStatic {
     }
     return h;
   }
-  static unsigned int to_hash(oop java_string);
+  static unsigned int hash_code(oop java_string);
 
   // This is the string hash code used by the StringTable, which may be
-  // the same as String.toHash or an alternate hash code.
+  // the same as String.hashCode or an alternate hash code.
   static unsigned int hash_string(oop java_string);
 
   static bool equals(oop java_string, jchar* chars, int len);
@ -179,7 +179,7 @@ Symbol* SymbolTable::lookup(int index, const char* name,
|
|||||||
unsigned int SymbolTable::hash_symbol(const char* s, int len) {
|
unsigned int SymbolTable::hash_symbol(const char* s, int len) {
|
||||||
return use_alternate_hashcode() ?
|
return use_alternate_hashcode() ?
|
||||||
AltHashing::murmur3_32(seed(), (const jbyte*)s, len) :
|
AltHashing::murmur3_32(seed(), (const jbyte*)s, len) :
|
||||||
java_lang_String::to_hash(s, len);
|
java_lang_String::hash_code(s, len);
|
||||||
}
|
}
|
||||||
|
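
Both SymbolTable::hash_symbol() and StringTable::hash_string() above keep the same shape: once use_alternate_hashcode() is set after a rehash request they switch to a seeded hash, otherwise they fall back to the default String.hashCode-style hash. A minimal standalone sketch of that dispatch; the seeded hash below is a simple stand-in, not HotSpot's AltHashing::murmur3_32():

    #include <cstdio>

    // Default hash: the String.hashCode()-style polynomial hash.
    static unsigned int default_hash(const char* s, int len) {
      unsigned int h = 0;
      while (len-- > 0) h = 31 * h + (unsigned int)(unsigned char)*s++;
      return h;
    }

    // Stand-in for a seeded alternative hash (HotSpot uses murmur3_32 here).
    static unsigned int seeded_hash(unsigned int seed, const char* s, int len) {
      unsigned int h = seed;
      while (len-- > 0) h = (h ^ (unsigned char)*s++) * 16777619u;  // FNV-1a style mix
      return h;
    }

    struct Table {
      bool use_alternate_hashcode;  // set when the table decides to rehash
      unsigned int seed;

      unsigned int hash(const char* s, int len) const {
        return use_alternate_hashcode ? seeded_hash(seed, s, len)
                                      : default_hash(s, len);
      }
    };

    int main() {
      Table t = { false, 0 };
      printf("default:   %u\n", t.hash("Java", 4));
      t.use_alternate_hashcode = true;
      t.seed = 0x9747b28cu;
      printf("alternate: %u\n", t.hash("Java", 4));
      return 0;
    }
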
@@ -46,27 +46,11 @@

 // Concurrent marking bit map wrapper

-CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter) :
-_bm((uintptr_t*)NULL,0),
+CMBitMapRO::CMBitMapRO(int shifter) :
+_bm(),
 _shifter(shifter) {
-_bmStartWord = (HeapWord*)(rs.base());
-_bmWordSize = rs.size()/HeapWordSize; // rs.size() is in bytes
-ReservedSpace brs(ReservedSpace::allocation_align_size_up(
-(_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
-
-MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
-
-guarantee(brs.is_reserved(), "couldn't allocate concurrent marking bit map");
-// For now we'll just commit all of the bit map up fromt.
-// Later on we'll try to be more parsimonious with swap.
-guarantee(_virtual_space.initialize(brs, brs.size()),
-"couldn't reseve backing store for concurrent marking bit map");
-assert(_virtual_space.committed_size() == brs.size(),
-"didn't reserve backing store for all of concurrent marking bit map?");
-_bm.set_map((uintptr_t*)_virtual_space.low());
-assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
-_bmWordSize, "inconsistency in bit map sizing");
-_bm.set_size(_bmWordSize >> _shifter);
+_bmStartWord = 0;
+_bmWordSize = 0;
 }

 HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,

@@ -108,15 +92,40 @@ int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
 }

 #ifndef PRODUCT
-bool CMBitMapRO::covers(ReservedSpace rs) const {
+bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
 assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
 "size inconsistency");
-return _bmStartWord == (HeapWord*)(rs.base()) &&
-_bmWordSize == rs.size()>>LogHeapWordSize;
+return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
+_bmWordSize == heap_rs.size()>>LogHeapWordSize;
 }
 #endif

+bool CMBitMap::allocate(ReservedSpace heap_rs) {
+_bmStartWord = (HeapWord*)(heap_rs.base());
+_bmWordSize = heap_rs.size()/HeapWordSize; // heap_rs.size() is in bytes
+ReservedSpace brs(ReservedSpace::allocation_align_size_up(
+(_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
+if (!brs.is_reserved()) {
+warning("ConcurrentMark marking bit map allocation failure");
+return false;
+}
+MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
+// For now we'll just commit all of the bit map up front.
+// Later on we'll try to be more parsimonious with swap.
+if (!_virtual_space.initialize(brs, brs.size())) {
+warning("ConcurrentMark marking bit map backing store failure");
+return false;
+}
+assert(_virtual_space.committed_size() == brs.size(),
+"didn't reserve backing store for all of concurrent marking bit map?");
+_bm.set_map((uintptr_t*)_virtual_space.low());
+assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
+_bmWordSize, "inconsistency in bit map sizing");
+_bm.set_size(_bmWordSize >> _shifter);
+return true;
+}
+
 void CMBitMap::clearAll() {
 _bm.clear();
 return;
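
The new CMBitMap::allocate() above replaces guarantee() calls, which would abort the VM, with warning() plus a false return so the caller can back out of initialization. A standalone sketch of that allocate-and-report-failure pattern (plain C++; it does not use HotSpot's ReservedSpace/VirtualSpace API):

    #include <cstdio>
    #include <cstdlib>

    // Sketch of setup with soft failure: size the backing store, try to get it,
    // and return false instead of aborting so the caller decides what to do.
    struct BitMapStore {
      unsigned char* base;
      size_t size_in_bytes;

      bool allocate(size_t bits) {
        size_in_bytes = (bits + 7) / 8;
        base = static_cast<unsigned char*>(calloc(size_in_bytes, 1));  // "reserve + commit"
        if (base == NULL) {
          fprintf(stderr, "warning: bit map allocation failure\n");
          return false;            // propagate the failure upward
        }
        return true;
      }
      void release() { free(base); base = NULL; }
    };

    int main() {
      BitMapStore bm;
      if (!bm.allocate(1 << 20)) {  // 1M bits
        return 1;                   // fail gracefully instead of aborting
      }
      printf("committed %zu bytes\n", bm.size_in_bytes);
      bm.release();
      return 0;
    }
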
@@ -163,20 +172,79 @@ CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
 #endif
 {}

-void CMMarkStack::allocate(size_t size) {
-_base = NEW_C_HEAP_ARRAY(oop, size, mtGC);
-if (_base == NULL) {
-vm_exit_during_initialization("Failed to allocate CM region mark stack");
+bool CMMarkStack::allocate(size_t capacity) {
+// allocate a stack of the requisite depth
+ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
+if (!rs.is_reserved()) {
+warning("ConcurrentMark MarkStack allocation failure");
+return false;
 }
-_index = 0;
-_capacity = (jint) size;
+MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
+if (!_virtual_space.initialize(rs, rs.size())) {
+warning("ConcurrentMark MarkStack backing store failure");
+// Release the virtual memory reserved for the marking stack
+rs.release();
+return false;
+}
+assert(_virtual_space.committed_size() == rs.size(),
+"Didn't reserve backing store for all of ConcurrentMark stack?");
+_base = (oop*) _virtual_space.low();
+setEmpty();
+_capacity = (jint) capacity;
 _saved_index = -1;
 NOT_PRODUCT(_max_depth = 0);
+return true;
+}
+
+void CMMarkStack::expand() {
+// Called, during remark, if we've overflown the marking stack during marking.
+assert(isEmpty(), "stack should been emptied while handling overflow");
+assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
+// Clear expansion flag
+_should_expand = false;
+if (_capacity == (jint) MarkStackSizeMax) {
+if (PrintGCDetails && Verbose) {
+gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
+}
+return;
+}
+// Double capacity if possible
+jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
+// Do not give up existing stack until we have managed to
+// get the double capacity that we desired.
+ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
+sizeof(oop)));
+if (rs.is_reserved()) {
+// Release the backing store associated with old stack
+_virtual_space.release();
+// Reinitialize virtual space for new stack
+if (!_virtual_space.initialize(rs, rs.size())) {
+fatal("Not enough swap for expanded marking stack capacity");
+}
+_base = (oop*)(_virtual_space.low());
+_index = 0;
+_capacity = new_capacity;
+} else {
+if (PrintGCDetails && Verbose) {
+// Failed to double capacity, continue;
+gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
+SIZE_FORMAT"K to " SIZE_FORMAT"K",
+_capacity / K, new_capacity / K);
+}
+}
+}
+
+void CMMarkStack::set_should_expand() {
+// If we're resetting the marking state because of an
+// marking stack overflow, record that we should, if
+// possible, expand the stack.
+_should_expand = _cm->has_overflown();
 }

 CMMarkStack::~CMMarkStack() {
 if (_base != NULL) {
-FREE_C_HEAP_ARRAY(oop, _base, mtGC);
+_base = NULL;
+_virtual_space.release();
 }
 }

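
CMMarkStack::expand() above doubles the stack capacity up to MarkStackSizeMax and keeps the old backing store until the new reservation has succeeded, treating a failed expansion as benign. A standalone sketch of that policy (illustrative types, not the HotSpot classes):

    #include <cstdio>
    #include <cstdlib>

    // Sketch of the expansion policy in the hunk above: double the capacity,
    // capped at a maximum, and keep the old storage until the new one is in hand.
    struct MarkStack {
      void** base;
      size_t index;
      size_t capacity;
      size_t capacity_max;

      bool allocate(size_t cap, size_t cap_max) {
        base = static_cast<void**>(malloc(cap * sizeof(void*)));
        index = 0; capacity = cap; capacity_max = cap_max;
        return base != NULL;
      }

      void expand() {
        if (capacity == capacity_max) {
          return;  // benign: already at the permitted limit
        }
        size_t new_capacity = capacity * 2;
        if (new_capacity > capacity_max) new_capacity = capacity_max;
        void** new_base = static_cast<void**>(malloc(new_capacity * sizeof(void*)));
        if (new_base == NULL) {
          return;  // benign: keep the existing stack and carry on
        }
        free(base);              // only now give up the old backing store
        base = new_base;
        index = 0;               // the stack is only expanded while empty
        capacity = new_capacity;
      }
    };

    int main() {
      MarkStack s;
      if (!s.allocate(128, 1024)) return 1;
      for (int i = 0; i < 4; i++) {
        s.expand();
        printf("capacity now %zu\n", s.capacity);
      }
      free(s.base);
      return 0;
    }
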
@@ -228,7 +296,6 @@ void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
 }
 }

-
 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 jint start = _index;

@@ -244,9 +311,9 @@ void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
 assert(ind < _capacity, "By overflow test above.");
 _base[ind] = ptr_arr[i];
 }
+NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
 }

-
 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 jint index = _index;

@@ -404,9 +471,10 @@ uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
 return MAX2((n_par_threads + 2) / 4, 1U);
 }

-ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
-_markBitMap1(rs, MinObjAlignment - 1),
-_markBitMap2(rs, MinObjAlignment - 1),
+ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
+_g1h(g1h),
+_markBitMap1(MinObjAlignment - 1),
+_markBitMap2(MinObjAlignment - 1),

 _parallel_marking_threads(0),
 _max_parallel_marking_threads(0),

@@ -415,8 +483,8 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
 _cleanup_sleep_factor(0.0),
 _cleanup_task_overhead(1.0),
 _cleanup_list("Cleanup List"),
-_region_bm((BitMap::idx_t) max_regions, false /* in_resource_area*/),
-_card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
+_region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
+_card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
 CardTableModRefBS::card_shift,
 false /* in_resource_area*/),

@@ -449,7 +517,8 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
 _parallel_workers(NULL),

 _count_card_bitmaps(NULL),
-_count_marked_bytes(NULL) {
+_count_marked_bytes(NULL),
+_completed_initialization(false) {
 CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
 if (verbose_level < no_verbose) {
 verbose_level = no_verbose;

@@ -464,61 +533,34 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
 "heap end = "PTR_FORMAT, _heap_start, _heap_end);
 }

-_markStack.allocate(MarkStackSize);
+if (!_markBitMap1.allocate(heap_rs)) {
+warning("Failed to allocate first CM bit map");
+return;
+}
+if (!_markBitMap2.allocate(heap_rs)) {
+warning("Failed to allocate second CM bit map");
+return;
+}

 // Create & start a ConcurrentMark thread.
 _cmThread = new ConcurrentMarkThread(this);
 assert(cmThread() != NULL, "CM Thread should have been created");
 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");

-_g1h = G1CollectedHeap::heap();
 assert(CGC_lock != NULL, "Where's the CGC_lock?");
-assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency");
-assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");
+assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
+assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");

 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
 satb_qs.set_buffer_size(G1SATBBufferSize);

 _root_regions.init(_g1h, this);

-_tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
-_accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
-
-_count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
-_count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
-
-BitMap::idx_t card_bm_size = _card_bm.size();
-
-// so that the assertion in MarkingTaskQueue::task_queue doesn't fail
-_active_tasks = _max_worker_id;
-for (uint i = 0; i < _max_worker_id; ++i) {
-CMTaskQueue* task_queue = new CMTaskQueue();
-task_queue->initialize();
-_task_queues->register_queue(i, task_queue);
-
-_count_card_bitmaps[i] = BitMap(card_bm_size, false);
-_count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions, mtGC);
-
-_tasks[i] = new CMTask(i, this,
-_count_marked_bytes[i],
-&_count_card_bitmaps[i],
-task_queue, _task_queues);
-
-_accum_task_vtime[i] = 0.0;
-}
-
-// Calculate the card number for the bottom of the heap. Used
-// in biasing indexes into the accounting card bitmaps.
-_heap_bottom_card_num =
-intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
-CardTableModRefBS::card_shift);
-
-// Clear all the liveness counting data
-clear_all_count_data();
-
 if (ConcGCThreads > ParallelGCThreads) {
-vm_exit_during_initialization("Can't have more ConcGCThreads "
-"than ParallelGCThreads.");
+warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") "
+"than ParallelGCThreads (" UINT32_FORMAT ").",
+ConcGCThreads, ParallelGCThreads);
+return;
 }
 if (ParallelGCThreads == 0) {
 // if we are not running with any parallel GC threads we will not

@@ -590,9 +632,86 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
 }
 }

+if (FLAG_IS_DEFAULT(MarkStackSize)) {
+uintx mark_stack_size =
+MIN2(MarkStackSizeMax,
+MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
+// Verify that the calculated value for MarkStackSize is in range.
+// It would be nice to use the private utility routine from Arguments.
+if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
+warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
+"must be between " UINTX_FORMAT " and " UINTX_FORMAT,
+mark_stack_size, 1, MarkStackSizeMax);
+return;
+}
+FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
+} else {
+// Verify MarkStackSize is in range.
+if (FLAG_IS_CMDLINE(MarkStackSize)) {
+if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
+if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
+warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
+"must be between " UINTX_FORMAT " and " UINTX_FORMAT,
+MarkStackSize, 1, MarkStackSizeMax);
+return;
+}
+} else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
+if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
+warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
+" or for MarkStackSizeMax (" UINTX_FORMAT ")",
+MarkStackSize, MarkStackSizeMax);
+return;
+}
+}
+}
+}
+
+if (!_markStack.allocate(MarkStackSize)) {
+warning("Failed to allocate CM marking stack");
+return;
+}
+
+_tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
+_accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
+
+_count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
+_count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
+
+BitMap::idx_t card_bm_size = _card_bm.size();
+
+// so that the assertion in MarkingTaskQueue::task_queue doesn't fail
+_active_tasks = _max_worker_id;
+
+size_t max_regions = (size_t) _g1h->max_regions();
+for (uint i = 0; i < _max_worker_id; ++i) {
+CMTaskQueue* task_queue = new CMTaskQueue();
+task_queue->initialize();
+_task_queues->register_queue(i, task_queue);
+
+_count_card_bitmaps[i] = BitMap(card_bm_size, false);
+_count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
+
+_tasks[i] = new CMTask(i, this,
+_count_marked_bytes[i],
+&_count_card_bitmaps[i],
+task_queue, _task_queues);
+
+_accum_task_vtime[i] = 0.0;
+}
+
+// Calculate the card number for the bottom of the heap. Used
+// in biasing indexes into the accounting card bitmaps.
+_heap_bottom_card_num =
+intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
+CardTableModRefBS::card_shift);
+
+// Clear all the liveness counting data
+clear_all_count_data();
+
 // so that the call below can read a sensible value
-_heap_start = (HeapWord*) rs.base();
+_heap_start = (HeapWord*) heap_rs.base();
 set_non_marking_state();
+_completed_initialization = true;
 }

 void ConcurrentMark::update_g1_committed(bool force) {
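
The ergonomics block added to the ConcurrentMark constructor above sizes MarkStackSize, when the user has not set it, by clamping parallel_marking_threads() * TASKQUEUE_SIZE between the existing default and MarkStackSizeMax, and then validates the result with a warning instead of exiting. The sizing rule itself, as a standalone sketch with illustrative constants (not the real flag values):

    #include <cstdio>

    // Sketch of the sizing rule from the hunk above: start from a per-thread
    // estimate, never go below the default, never exceed the configured maximum.
    static unsigned long pick_mark_stack_size(unsigned long default_size,
                                              unsigned long max_size,
                                              unsigned long marking_threads,
                                              unsigned long task_queue_size) {
      unsigned long wanted = marking_threads * task_queue_size;
      if (wanted < default_size) wanted = default_size;  // MAX2(default, threads * queue)
      if (wanted > max_size)     wanted = max_size;      // MIN2(max, ...)
      return wanted;
    }

    int main() {
      const unsigned long task_queue_size = 128 * 1024;      // illustrative TASKQUEUE_SIZE
      const unsigned long default_size    = 4UL * 1024 * 1024;
      const unsigned long max_size        = 512UL * 1024 * 1024;
      for (unsigned long threads = 1; threads <= 8; threads *= 2) {
        printf("%lu threads -> %lu entries\n", threads,
               pick_mark_stack_size(default_size, max_size, threads, task_queue_size));
      }
      return 0;
    }
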
@@ -1165,6 +1284,11 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
 assert(!restart_for_overflow(), "sanity");
 }

+// Expand the marking stack, if we have to and if we can.
+if (_markStack.should_expand()) {
+_markStack.expand();
+}
+
 // Reset the marking state if marking completed
 if (!restart_for_overflow()) {
 set_non_marking_state();

@@ -2785,7 +2909,7 @@ void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
 // Verify entries on the task queues
 for (uint i = 0; i < _max_worker_id; i += 1) {
 cl.set_phase(VerifyNoCSetOopsQueues, i);
-OopTaskQueue* queue = _task_queues->queue(i);
+CMTaskQueue* queue = _task_queues->queue(i);
 queue->oops_do(&cl);
 }
 }

@@ -2840,8 +2964,8 @@ void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
 #endif // PRODUCT

 void ConcurrentMark::clear_marking_state(bool clear_overflow) {
-_markStack.setEmpty();
-_markStack.clear_overflow();
+_markStack.set_should_expand();
+_markStack.setEmpty(); // Also clears the _markStack overflow flag
 if (clear_overflow) {
 clear_has_overflown();
 } else {

@@ -2850,7 +2974,7 @@ void ConcurrentMark::clear_marking_state(bool clear_overflow) {
 _finger = _heap_start;

 for (uint i = 0; i < _max_worker_id; ++i) {
-OopTaskQueue* queue = _task_queues->queue(i);
+CMTaskQueue* queue = _task_queues->queue(i);
 queue->set_empty();
 }
 }

@@ -63,7 +63,7 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {

 public:
 // constructor
-CMBitMapRO(ReservedSpace rs, int shifter);
+CMBitMapRO(int shifter);

 enum { do_yield = true };

@@ -117,8 +117,11 @@ class CMBitMap : public CMBitMapRO {

 public:
 // constructor
-CMBitMap(ReservedSpace rs, int shifter) :
-CMBitMapRO(rs, shifter) {}
+CMBitMap(int shifter) :
+CMBitMapRO(shifter) {}
+
+// Allocates the back store for the marking bitmap
+bool allocate(ReservedSpace heap_rs);

 // write marks
 void mark(HeapWord* addr) {

@@ -155,9 +158,9 @@ class CMBitMap : public CMBitMapRO {
 MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
 };

-// Represents a marking stack used by the CM collector.
-// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
+// Represents a marking stack used by ConcurrentMarking in the G1 collector.
 class CMMarkStack VALUE_OBJ_CLASS_SPEC {
+VirtualSpace _virtual_space; // Underlying backing store for actual stack
 ConcurrentMark* _cm;
 oop* _base; // bottom of stack
 jint _index; // one more than last occupied index

@@ -166,6 +169,7 @@ class CMMarkStack VALUE_OBJ_CLASS_SPEC {
 NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run

 bool _overflow;
+bool _should_expand;
 DEBUG_ONLY(bool _drain_in_progress;)
 DEBUG_ONLY(bool _drain_in_progress_yields;)

@@ -173,7 +177,13 @@ class CMMarkStack VALUE_OBJ_CLASS_SPEC {
 CMMarkStack(ConcurrentMark* cm);
 ~CMMarkStack();

-void allocate(size_t size);
+#ifndef PRODUCT
+jint max_depth() const {
+return _max_depth;
+}
+#endif
+
+bool allocate(size_t capacity);

 oop pop() {
 if (!isEmpty()) {

@@ -236,6 +246,12 @@ class CMMarkStack VALUE_OBJ_CLASS_SPEC {
 bool overflow() { return _overflow; }
 void clear_overflow() { _overflow = false; }

+bool should_expand() const { return _should_expand; }
+void set_should_expand();
+
+// Expand the stack, typically in response to an overflow condition
+void expand();
+
 int size() { return _index; }

 void setEmpty() { _index = 0; clear_overflow(); }

@@ -344,6 +360,7 @@ public:
 class ConcurrentMarkThread;

 class ConcurrentMark: public CHeapObj<mtGC> {
+friend class CMMarkStack;
 friend class ConcurrentMarkThread;
 friend class CMTask;
 friend class CMBitMapClosure;

@@ -577,6 +594,9 @@ protected:
 // the card bitmaps.
 intptr_t _heap_bottom_card_num;

+// Set to true when initialization is complete
+bool _completed_initialization;
+
 public:
 // Manipulation of the global mark stack.
 // Notice that the first mark_stack_push is CAS-based, whereas the

@@ -636,7 +656,7 @@ public:
 return _task_queues->steal(worker_id, hash_seed, obj);
 }

-ConcurrentMark(ReservedSpace rs, uint max_regions);
+ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
 ~ConcurrentMark();

 ConcurrentMarkThread* cmThread() { return _cmThread; }

@@ -907,6 +927,11 @@ public:
 // Should *not* be called from parallel code.
 inline bool mark_and_count(oop obj);

+// Returns true if initialization was successfully completed.
+bool completed_initialization() const {
+return _completed_initialization;
+}
+
 protected:
 // Clear all the per-task bitmaps and arrays used to store the
 // counting data.

@@ -2079,7 +2079,11 @@ jint G1CollectedHeap::initialize() {

 // Create the ConcurrentMark data structure and thread.
 // (Must do this late, so that "max_regions" is defined.)
-_cm = new ConcurrentMark(heap_rs, max_regions());
+_cm = new ConcurrentMark(this, heap_rs);
+if (_cm == NULL || !_cm->completed_initialization()) {
+vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
+return JNI_ENOMEM;
+}
 _cmThread = _cm->cmThread();

 // Initialize the from_card cache structure of HeapRegionRemSet.

@@ -2087,7 +2091,7 @@ jint G1CollectedHeap::initialize() {

 // Now expand into the initial heap size.
 if (!expand(init_byte_size)) {
-vm_exit_during_initialization("Failed to allocate initial heap.");
+vm_shutdown_during_initialization("Failed to allocate initial heap.");
 return JNI_ENOMEM;
 }

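
Because the ConcurrentMark constructor above can now return early instead of calling vm_exit_during_initialization(), G1CollectedHeap::initialize() asks completed_initialization() before using the object and converts a failure into JNI_ENOMEM. A standalone sketch of that construct-then-check pattern (illustrative names, not the HotSpot API):

    #include <cstdio>
    #include <cstdlib>

    #define ERR_NOMEM (-4)   // stand-in for JNI_ENOMEM

    // A component whose constructor may fail part-way through; it records
    // success in a flag instead of aborting the process.
    class Marker {
     public:
      Marker(size_t stack_bytes) : _completed_initialization(false), _stack(NULL) {
        _stack = static_cast<char*>(malloc(stack_bytes));
        if (_stack == NULL) {
          fprintf(stderr, "warning: failed to allocate marking stack\n");
          return;                       // leave _completed_initialization false
        }
        _completed_initialization = true;
      }
      ~Marker() { free(_stack); }
      bool completed_initialization() const { return _completed_initialization; }

     private:
      bool  _completed_initialization;
      char* _stack;
    };

    // Caller mirrors the pattern in the hunk above: check the flag and
    // translate failure into an error code instead of exiting.
    static int initialize_heap(size_t stack_bytes) {
      Marker* cm = new Marker(stack_bytes);
      if (cm == NULL || !cm->completed_initialization()) {
        delete cm;
        return ERR_NOMEM;
      }
      // ... continue initialization ...
      delete cm;
      return 0;
    }

    int main() {
      printf("initialize_heap -> %d\n", initialize_heap(64 * 1024));
      return 0;
    }
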
@@ -24,6 +24,7 @@

 #include "precompiled.hpp"
 #include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
+#include "memory/allocation.inline.hpp"
 #include "runtime/java.hpp"

 AdjoiningVirtualSpaces::AdjoiningVirtualSpaces(ReservedSpace rs,

@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc_implementation/shared/gcStats.hpp"
 #include "gc_implementation/shared/gcUtil.hpp"
+#include "memory/allocation.inline.hpp"

 GCStats::GCStats() {
 _avg_promoted = new AdaptivePaddedNoZeroDevAverage(

@@ -46,6 +46,7 @@
 // | interp_kind | flags | code_size |
 // | name index | signature index |
 // | method_idnum | max_stack |
+// | max_locals | size_of_parameters |
 // |------------------------------------------------------|
 // | |
 // | byte codes |

@@ -150,7 +151,8 @@ private:
 // initially corresponds to the index into the methods array.
 // but this may change with redefinition
 u2 _max_stack; // Maximum number of entries on the expression stack
+u2 _max_locals; // Number of local variables used by this method
+u2 _size_of_parameters; // size of the parameter block (receiver + arguments) in words

 // Constructor
 ConstMethod(int byte_code_size,

@@ -338,6 +340,11 @@ public:

 static ByteSize max_stack_offset()
 { return byte_offset_of(ConstMethod, _max_stack); }
+static ByteSize size_of_locals_offset()
+{ return byte_offset_of(ConstMethod, _max_locals); }
+static ByteSize size_of_parameters_offset()
+{ return byte_offset_of(ConstMethod, _size_of_parameters); }
+

 // Unique id for the method
 static const u2 MAX_IDNUM;

@@ -349,6 +356,14 @@ public:
 int max_stack() const { return _max_stack; }
 void set_max_stack(int size) { _max_stack = size; }

+// max locals
+int max_locals() const { return _max_locals; }
+void set_max_locals(int size) { _max_locals = size; }
+
+// size of parameters
+int size_of_parameters() const { return _size_of_parameters; }
+void set_size_of_parameters(int size) { _size_of_parameters = size; }
+
 // Deallocation for RedefineClasses
 void deallocate_contents(ClassLoaderData* loader_data);
 bool is_klass() const { return false; }

@@ -73,8 +73,7 @@
 // |------------------------------------------------------|
 // | result_index (C++ interpreter only) |
 // |------------------------------------------------------|
-// | method_size | max_locals |
-// | size_of_parameters | intrinsic_id| flags |
+// | method_size | intrinsic_id| flags |
 // |------------------------------------------------------|
 // | throwout_count | num_breakpoints |
 // |------------------------------------------------------|

@@ -116,8 +115,6 @@ class Method : public Metadata {
 int _result_index; // C++ interpreter needs for converting results to/from stack
 #endif
 u2 _method_size; // size of this object
-u2 _max_locals; // Number of local variables used by this method
-u2 _size_of_parameters; // size of the parameter block (receiver + arguments) in words
 u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none)
 u1 _jfr_towrite : 1, // Flags
 _force_inline : 1,

@@ -292,8 +289,8 @@ class Method : public Metadata {
 void set_max_stack(int size) { constMethod()->set_max_stack(size); }

 // max locals
-int max_locals() const { return _max_locals; }
-void set_max_locals(int size) { _max_locals = size; }
+int max_locals() const { return constMethod()->max_locals(); }
+void set_max_locals(int size) { constMethod()->set_max_locals(size); }

 int highest_comp_level() const;
 void set_highest_comp_level(int level);

@@ -311,7 +308,8 @@ class Method : public Metadata {
 void set_interpreter_throwout_count(int count) { _interpreter_throwout_count = count; }

 // size of parameters
-int size_of_parameters() const { return _size_of_parameters; }
+int size_of_parameters() const { return constMethod()->size_of_parameters(); }
+void set_size_of_parameters(int size) { constMethod()->set_size_of_parameters(size); }

 bool has_stackmap_table() const {
 return constMethod()->has_stackmap_table();

@@ -588,8 +586,6 @@ class Method : public Metadata {
 #ifdef CC_INTERP
 static ByteSize result_index_offset() { return byte_offset_of(Method, _result_index ); }
 #endif /* CC_INTERP */
-static ByteSize size_of_locals_offset() { return byte_offset_of(Method, _max_locals ); }
-static ByteSize size_of_parameters_offset() { return byte_offset_of(Method, _size_of_parameters); }
 static ByteSize from_compiled_offset() { return byte_offset_of(Method, _from_compiled_entry); }
 static ByteSize code_offset() { return byte_offset_of(Method, _code); }
 static ByteSize invocation_counter_offset() { return byte_offset_of(Method, _invocation_counter); }

@@ -796,9 +792,6 @@ class Method : public Metadata {
 Array<AnnotationArray*>* methods_default_annotations,
 bool idempotent = false);

-// size of parameters
-void set_size_of_parameters(int size) { _size_of_parameters = size; }
-
 // Deallocation function for redefine classes or if an error occurs
 void deallocate_contents(ClassLoaderData* loader_data);

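
The ConstMethod and Method hunks above move _max_locals and _size_of_parameters from Method into ConstMethod and leave forwarding accessors behind, so existing callers keep compiling unchanged. The shape of that refactoring in a standalone sketch (illustrative classes, not the real ones):

    #include <cstdio>

    // Read-only, shareable part of a method: now owns the sizing fields.
    class ConstMethodLike {
     public:
      int  max_locals() const              { return _max_locals; }
      void set_max_locals(int size)        { _max_locals = size; }
      int  size_of_parameters() const      { return _size_of_parameters; }
      void set_size_of_parameters(int s)   { _size_of_parameters = s; }
     private:
      unsigned short _max_locals;
      unsigned short _size_of_parameters;
    };

    // Mutable part: keeps the same accessor surface but delegates,
    // so callers do not have to change.
    class MethodLike {
     public:
      MethodLike(ConstMethodLike* cm) : _const_method(cm) {}
      int  max_locals() const            { return _const_method->max_locals(); }
      void set_max_locals(int size)      { _const_method->set_max_locals(size); }
      int  size_of_parameters() const    { return _const_method->size_of_parameters(); }
      void set_size_of_parameters(int s) { _const_method->set_size_of_parameters(s); }
     private:
      ConstMethodLike* _const_method;
    };

    int main() {
      ConstMethodLike cm;
      MethodLike m(&cm);
      m.set_max_locals(3);
      m.set_size_of_parameters(2);
      printf("max_locals=%d size_of_parameters=%d\n",
             m.max_locals(), m.size_of_parameters());
      return 0;
    }
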
@@ -1499,13 +1499,12 @@ void Arguments::set_g1_gc_flags() {
 Abstract_VM_Version::parallel_worker_threads());
 }

-if (FLAG_IS_DEFAULT(MarkStackSize)) {
-FLAG_SET_DEFAULT(MarkStackSize, 128 * TASKQUEUE_SIZE);
-}
-if (PrintGCDetails && Verbose) {
-tty->print_cr("MarkStackSize: %uk MarkStackSizeMax: %uk",
-MarkStackSize / K, MarkStackSizeMax / K);
-tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
+// MarkStackSize will be set (if it hasn't been set by the user)
+// when concurrent marking is initialized.
+// Its value will be based upon the number of parallel marking threads.
+// But we do set the maximum mark stack size here.
+if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
+FLAG_SET_DEFAULT(MarkStackSizeMax, 128 * TASKQUEUE_SIZE);
 }

 if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {

@@ -1517,6 +1516,12 @@ void Arguments::set_g1_gc_flags() {
 // is allocation). We might consider increase it further.
 FLAG_SET_DEFAULT(GCTimeRatio, 9);
 }

+if (PrintGCDetails && Verbose) {
+tty->print_cr("MarkStackSize: %uk MarkStackSizeMax: %uk",
+MarkStackSize / K, MarkStackSizeMax / K);
+tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
+}
+
 }

 void Arguments::set_heap_size() {

@@ -1980,6 +1985,9 @@ bool Arguments::check_vm_args_consistency() {
 status = status && verify_min_value(ClassMetaspaceSize, 1*M,
 "ClassMetaspaceSize");

+status = status && verify_interval(MarkStackSizeMax,
+1, (max_jint - 1), "MarkStackSizeMax");
+
 #ifdef SPARC
 if (UseConcMarkSweepGC || UseG1GC) {
 // Issue a stern warning if the user has explicitly set

@@ -355,8 +355,6 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
 nonstatic_field(Method, _access_flags, AccessFlags) \
 nonstatic_field(Method, _vtable_index, int) \
 nonstatic_field(Method, _method_size, u2) \
-nonstatic_field(Method, _max_locals, u2) \
-nonstatic_field(Method, _size_of_parameters, u2) \
 nonstatic_field(Method, _interpreter_throwout_count, u2) \
 nonstatic_field(Method, _number_of_breakpoints, u2) \
 nonstatic_field(Method, _invocation_counter, InvocationCounter) \

@@ -378,6 +376,8 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
 nonstatic_field(ConstMethod, _signature_index, u2) \
 nonstatic_field(ConstMethod, _method_idnum, u2) \
 nonstatic_field(ConstMethod, _max_stack, u2) \
+nonstatic_field(ConstMethod, _max_locals, u2) \
+nonstatic_field(ConstMethod, _size_of_parameters, u2) \
 nonstatic_field(ObjArrayKlass, _element_klass, Klass*) \
 nonstatic_field(ObjArrayKlass, _bottom_klass, Klass*) \
 volatile_nonstatic_field(Symbol, _refcount, int) \