J. Duke 2017-07-05 20:50:16 +02:00
commit aa27e550fa
857 changed files with 16564 additions and 9948 deletions

View File

@ -324,3 +324,4 @@ d82072b699b880a1f647a5e2d7c0f86cec958941 jdk9-b76
ba08a9f79b9849716bae1f39f71333d47f604012 jdk9-b79 ba08a9f79b9849716bae1f39f71333d47f604012 jdk9-b79
f7c5ae2933c0b8510a420d1713a955e4ffc7ad0b jdk9-b80 f7c5ae2933c0b8510a420d1713a955e4ffc7ad0b jdk9-b80
b8afcf91331d78626a583ec1b63164468d6f4181 jdk9-b81 b8afcf91331d78626a583ec1b63164468d6f4181 jdk9-b81
42b56d1f418523ecb61a49d7493302c80c8009cc jdk9-b82

View File

@ -841,7 +841,6 @@ JDK_MINOR_VERSION
JDK_MAJOR_VERSION JDK_MAJOR_VERSION
USER_RELEASE_SUFFIX USER_RELEASE_SUFFIX
COMPRESS_JARS COMPRESS_JARS
RMICONNECTOR_IIOP
UNLIMITED_CRYPTO UNLIMITED_CRYPTO
CACERTS_FILE CACERTS_FILE
TEST_IN_BUILD TEST_IN_BUILD
@ -1060,7 +1059,6 @@ enable_headful
enable_hotspot_test_in_build enable_hotspot_test_in_build
with_cacerts_file with_cacerts_file
enable_unlimited_crypto enable_unlimited_crypto
enable_rmiconnector_iiop
with_milestone with_milestone
with_update_version with_update_version
with_user_release_suffix with_user_release_suffix
@ -1848,9 +1846,6 @@ Optional Features:
run the Queens test after Hotspot build [disabled] run the Queens test after Hotspot build [disabled]
--enable-unlimited-crypto --enable-unlimited-crypto
Enable unlimited crypto policy [disabled] Enable unlimited crypto policy [disabled]
--enable-rmiconnector-iiop
enable the JMX RMIConnector iiop transport
[disabled]
--disable-warnings-as-errors --disable-warnings-as-errors
do not consider native warnings to be an error do not consider native warnings to be an error
[enabled] [enabled]
@ -3951,7 +3946,7 @@ pkgadd_help() {
# #
# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -20098,22 +20093,6 @@ fi
# #
###############################################################################
#
# --enable-rmiconnector-iiop
#
# Check whether --enable-rmiconnector-iiop was given.
if test "${enable_rmiconnector_iiop+set}" = set; then :
enableval=$enable_rmiconnector_iiop;
fi
if test "x$enable_rmiconnector_iiop" = "xyes"; then
RMICONNECTOR_IIOP=true
else
RMICONNECTOR_IIOP=false
fi
############################################################################### ###############################################################################
# #
# Compress jars # Compress jars

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -453,19 +453,6 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
AC_SUBST(ENABLE_INTREE_EC) AC_SUBST(ENABLE_INTREE_EC)
]) ])
###############################################################################
#
# --enable-rmiconnector-iiop
#
AC_ARG_ENABLE(rmiconnector-iiop, [AS_HELP_STRING([--enable-rmiconnector-iiop],
[enable the JMX RMIConnector iiop transport @<:@disabled@:>@])])
if test "x$enable_rmiconnector_iiop" = "xyes"; then
RMICONNECTOR_IIOP=true
else
RMICONNECTOR_IIOP=false
fi
AC_SUBST(RMICONNECTOR_IIOP)
############################################################################### ###############################################################################
# #
# Compress jars # Compress jars

View File

@ -262,9 +262,6 @@ CACERTS_FILE=@CACERTS_FILE@
# Enable unlimited crypto policy # Enable unlimited crypto policy
UNLIMITED_CRYPTO=@UNLIMITED_CRYPTO@ UNLIMITED_CRYPTO=@UNLIMITED_CRYPTO@
# Enable RMIConnector IIOP transport
RMICONNECTOR_IIOP=@RMICONNECTOR_IIOP@
GCOV_ENABLED=@GCOV_ENABLED@ GCOV_ENABLED=@GCOV_ENABLED@
# Necessary additional compiler flags to compile X11 # Necessary additional compiler flags to compile X11

View File

@ -0,0 +1,60 @@
#!/bin/bash
#
# Copyright 2015 Google, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
usage() {
(
echo "$0 DIR ..."
echo "Modifies in place all the java source files found"
echo "in the given directories so that all java language modifiers"
echo "are in the canonical order given by Modifier#toString()."
echo "Tries to get it right even within javadoc comments,"
echo "and even if the list of modifiers spans 2 lines."
echo
echo "See:"
echo "https://docs.oracle.com/javase/8/docs/api/java/lang/reflect/Modifier.html#toString-int-"
echo
echo "Example:"
echo "$0 jdk/src/java.base jdk/test/java/{util,io,lang}"
) >&2
exit 1
}
set -eu
declare -ar dirs=("$@")
[[ "${#dirs[@]}" > 0 ]] || usage
for dir in "${dirs[@]}"; do [[ -d "$dir" ]] || usage; done
declare -ar modifiers=(
public protected private
abstract static final transient
volatile synchronized native strictfp
)
declare -r SAVE_IFS="$IFS"
for ((i = 3; i < "${#modifiers[@]}"; i++)); do
IFS='|'; x="${modifiers[*]:0:i}" y="${modifiers[*]:i}"; IFS="$SAVE_IFS"
if [[ -n "$x" && -n "$y" ]]; then
find "${dirs[@]}" -name '*.java' -type f -print0 | \
xargs -0 perl -0777 -p -i -e \
"do {} while s/^([A-Za-z@* ]*)\b($y)(\s|(?:\s|\n\s+\*)*\s)($x)\b/\1\4\3\2/mg"
fi
done
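# For illustration, given a hypothetical declaration (not taken from this
# change) written as
#
#     final static public synchronized void run() { ... }
#
# the script rewrites it in place to the canonical Modifier#toString() order:
#
#     public static final synchronized void run() { ... }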

View File

@ -484,3 +484,4 @@ e66c3813789debfc06f206afde1bf7a84cb08451 jdk9-b77
e9e63d93bbfe2c6c23447e2c1f5cc71c98671cba jdk9-b79 e9e63d93bbfe2c6c23447e2c1f5cc71c98671cba jdk9-b79
8e8377739c06b99b9011c003c77e0bef84c91e09 jdk9-b80 8e8377739c06b99b9011c003c77e0bef84c91e09 jdk9-b80
4142c190cd5ca4fb70ec367b4f97ef936272d8ef jdk9-b81 4142c190cd5ca4fb70ec367b4f97ef936272d8ef jdk9-b81
1c453a12be3036d482abef1dd470f8aff536b6b9 jdk9-b82

View File

@ -633,9 +633,9 @@ create_jdk: copy_jdk update_jdk
update_jdk: export_product_jdk export_fastdebug_jdk test_jdk update_jdk: export_product_jdk export_fastdebug_jdk test_jdk
copy_jdk: $(JDK_IMAGE_DIR)/jre/lib/rt.jar copy_jdk: $(JDK_IMAGE_DIR)/bin/java
$(JDK_IMAGE_DIR)/jre/lib/rt.jar: $(JDK_IMAGE_DIR)/bin/java:
$(RM) -r $(JDK_IMAGE_DIR) $(RM) -r $(JDK_IMAGE_DIR)
$(MKDIR) -p $(JDK_IMAGE_DIR) $(MKDIR) -p $(JDK_IMAGE_DIR)
($(CD) $(JDK_IMPORT_PATH) && \ ($(CD) $(JDK_IMPORT_PATH) && \

View File

@ -141,18 +141,6 @@ SUNWprivate_1.1 {
JVM_Halt; JVM_Halt;
JVM_HoldsLock; JVM_HoldsLock;
JVM_IHashCode; JVM_IHashCode;
JVM_ImageAttributeOffsets;
JVM_ImageAttributeOffsetsLength;
JVM_ImageClose;
JVM_ImageFindAttributes;
JVM_ImageGetAttributes;
JVM_ImageGetAttributesCount;
JVM_ImageGetDataAddress;
JVM_ImageGetIndexAddress;
JVM_ImageGetStringBytes;
JVM_ImageOpen;
JVM_ImageRead;
JVM_ImageReadCompressed;
JVM_InitAgentProperties; JVM_InitAgentProperties;
JVM_InitProperties; JVM_InitProperties;
JVM_InternString; JVM_InternString;

View File

@ -139,18 +139,6 @@ SUNWprivate_1.1 {
JVM_Halt; JVM_Halt;
JVM_HoldsLock; JVM_HoldsLock;
JVM_IHashCode; JVM_IHashCode;
JVM_ImageAttributeOffsets;
JVM_ImageAttributeOffsetsLength;
JVM_ImageClose;
JVM_ImageFindAttributes;
JVM_ImageGetAttributes;
JVM_ImageGetAttributesCount;
JVM_ImageGetDataAddress;
JVM_ImageGetIndexAddress;
JVM_ImageGetStringBytes;
JVM_ImageOpen;
JVM_ImageRead;
JVM_ImageReadCompressed;
JVM_InitAgentProperties; JVM_InitAgentProperties;
JVM_InitProperties; JVM_InitProperties;
JVM_InternString; JVM_InternString;

View File

@ -139,18 +139,6 @@
_JVM_Halt _JVM_Halt
_JVM_HoldsLock _JVM_HoldsLock
_JVM_IHashCode _JVM_IHashCode
_JVM_ImageAttributeOffsets
_JVM_ImageAttributeOffsetsLength
_JVM_ImageClose
_JVM_ImageFindAttributes
_JVM_ImageGetAttributes
_JVM_ImageGetAttributesCount
_JVM_ImageGetDataAddress
_JVM_ImageGetIndexAddress
_JVM_ImageGetStringBytes
_JVM_ImageOpen
_JVM_ImageRead
_JVM_ImageReadCompressed
_JVM_InitAgentProperties _JVM_InitAgentProperties
_JVM_InitProperties _JVM_InitProperties
_JVM_InternString _JVM_InternString

View File

@ -139,18 +139,6 @@
_JVM_Halt _JVM_Halt
_JVM_HoldsLock _JVM_HoldsLock
_JVM_IHashCode _JVM_IHashCode
_JVM_ImageAttributeOffsets
_JVM_ImageAttributeOffsetsLength
_JVM_ImageClose
_JVM_ImageFindAttributes
_JVM_ImageGetAttributes
_JVM_ImageGetAttributesCount
_JVM_ImageGetDataAddress
_JVM_ImageGetIndexAddress
_JVM_ImageGetStringBytes
_JVM_ImageOpen
_JVM_ImageRead
_JVM_ImageReadCompressed
_JVM_InitAgentProperties _JVM_InitAgentProperties
_JVM_InitProperties _JVM_InitProperties
_JVM_InternString _JVM_InternString

View File

@ -141,18 +141,6 @@ SUNWprivate_1.1 {
JVM_Halt; JVM_Halt;
JVM_HoldsLock; JVM_HoldsLock;
JVM_IHashCode; JVM_IHashCode;
JVM_ImageAttributeOffsets;
JVM_ImageAttributeOffsetsLength;
JVM_ImageClose;
JVM_ImageFindAttributes;
JVM_ImageGetAttributes;
JVM_ImageGetAttributesCount;
JVM_ImageGetDataAddress;
JVM_ImageGetIndexAddress;
JVM_ImageGetStringBytes;
JVM_ImageOpen;
JVM_ImageRead;
JVM_ImageReadCompressed;
JVM_InitAgentProperties; JVM_InitAgentProperties;
JVM_InitProperties; JVM_InitProperties;
JVM_InternString; JVM_InternString;

View File

@ -141,18 +141,6 @@ SUNWprivate_1.1 {
JVM_Halt; JVM_Halt;
JVM_HoldsLock; JVM_HoldsLock;
JVM_IHashCode; JVM_IHashCode;
JVM_ImageAttributeOffsets;
JVM_ImageAttributeOffsetsLength;
JVM_ImageClose;
JVM_ImageFindAttributes;
JVM_ImageGetAttributes;
JVM_ImageGetAttributesCount;
JVM_ImageGetDataAddress;
JVM_ImageGetIndexAddress;
JVM_ImageGetStringBytes;
JVM_ImageOpen;
JVM_ImageRead;
JVM_ImageReadCompressed;
JVM_InitAgentProperties; JVM_InitAgentProperties;
JVM_InitProperties; JVM_InitProperties;
JVM_InternString; JVM_InternString;

View File

@ -131,7 +131,7 @@ endif
# By default, link the *.o into the library, not the executable. # By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM LINK_INTO$(LINK_INTO) = LIBJVM
JDK_LIBDIR = $(JAVA_HOME)/jre/lib/$(LIBARCH) JDK_LIBDIR = $(JAVA_HOME)/lib/$(LIBARCH)
#---------------------------------------------------------------------- #----------------------------------------------------------------------
# jvm_db & dtrace # jvm_db & dtrace

View File

@ -49,7 +49,7 @@ fi
# Just in case: # Just in case:
JAVA_HOME=`( cd $JAVA_HOME; pwd )` JAVA_HOME=`( cd $JAVA_HOME; pwd )`
if [ "${ALT_BOOTDIR-}" = "" -o ! -d "${ALT_BOOTDIR-}" -o ! -d ${ALT_BOOTDIR-}/jre/lib/ ]; then if [ "${ALT_BOOTDIR-}" = "" -o ! -d "${ALT_BOOTDIR-}" -o ! -d ${ALT_BOOTDIR-}/lib/ ]; then
ALT_BOOTDIR=${JAVA_HOME} ALT_BOOTDIR=${JAVA_HOME}
fi fi

View File

@ -127,7 +127,7 @@ fi
# o $JRE/lib/$ARCH # o $JRE/lib/$ARCH
# followed by the user's previous effective LD_LIBRARY_PATH, if # followed by the user's previous effective LD_LIBRARY_PATH, if
# any. # any.
JRE=$JDK/jre JRE=$JDK
JAVA_HOME=$JDK JAVA_HOME=$JDK
export JAVA_HOME export JAVA_HOME

View File

@ -141,18 +141,6 @@ SUNWprivate_1.1 {
JVM_Halt; JVM_Halt;
JVM_HoldsLock; JVM_HoldsLock;
JVM_IHashCode; JVM_IHashCode;
JVM_ImageAttributeOffsets;
JVM_ImageAttributeOffsetsLength;
JVM_ImageClose;
JVM_ImageFindAttributes;
JVM_ImageGetAttributes;
JVM_ImageGetAttributesCount;
JVM_ImageGetDataAddress;
JVM_ImageGetIndexAddress;
JVM_ImageGetStringBytes;
JVM_ImageOpen;
JVM_ImageRead;
JVM_ImageReadCompressed;
JVM_InitAgentProperties; JVM_InitAgentProperties;
JVM_InitProperties; JVM_InitProperties;
JVM_InternString; JVM_InternString;

View File

@ -141,18 +141,6 @@ SUNWprivate_1.1 {
JVM_Halt; JVM_Halt;
JVM_HoldsLock; JVM_HoldsLock;
JVM_IHashCode; JVM_IHashCode;
JVM_ImageAttributeOffsets;
JVM_ImageAttributeOffsetsLength;
JVM_ImageClose;
JVM_ImageFindAttributes;
JVM_ImageGetAttributes;
JVM_ImageGetAttributesCount;
JVM_ImageGetDataAddress;
JVM_ImageGetIndexAddress;
JVM_ImageGetStringBytes;
JVM_ImageOpen;
JVM_ImageRead;
JVM_ImageReadCompressed;
JVM_InitAgentProperties; JVM_InitAgentProperties;
JVM_InitProperties; JVM_InitProperties;
JVM_InternString; JVM_InternString;

View File

@ -76,6 +76,11 @@ endif
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1) ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
CFLAGS_WARN = +w -errwarn CFLAGS_WARN = +w -errwarn
endif endif
# When using compiler version 5.13 (Solaris Studio 12.4), calls to explicitly
# instantiated template functions trigger this warning when +w is active.
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 513), 1)
CFLAGS_WARN += -erroff=notemsource
endif
CFLAGS += $(CFLAGS_WARN) CFLAGS += $(CFLAGS_WARN)
ifeq ("${Platform_compiler}", "sparcWorks") ifeq ("${Platform_compiler}", "sparcWorks")

View File

@ -270,6 +270,7 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
echo "CP ?= cp"; \ echo "CP ?= cp"; \
echo "MV ?= mv"; \ echo "MV ?= mv"; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \
echo "include \$$(GAMMADIR)/make/excludeSrc.make"; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(COMPILER).make"; \ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(COMPILER).make"; \
) > $@ ) > $@

View File

@ -141,18 +141,6 @@ SUNWprivate_1.1 {
JVM_Halt; JVM_Halt;
JVM_HoldsLock; JVM_HoldsLock;
JVM_IHashCode; JVM_IHashCode;
JVM_ImageAttributeOffsets;
JVM_ImageAttributeOffsetsLength;
JVM_ImageClose;
JVM_ImageFindAttributes;
JVM_ImageGetAttributes;
JVM_ImageGetAttributesCount;
JVM_ImageGetDataAddress;
JVM_ImageGetIndexAddress;
JVM_ImageGetStringBytes;
JVM_ImageOpen;
JVM_ImageRead;
JVM_ImageReadCompressed;
JVM_InitAgentProperties; JVM_InitAgentProperties;
JVM_InitProperties; JVM_InitProperties;
JVM_InternString; JVM_InternString;

View File

@ -197,7 +197,7 @@ Src_Dirs/COMPILER1 := $(CORE_PATHS) $(COMPILER1_PATHS)
Src_Dirs/COMPILER2 := $(CORE_PATHS) $(COMPILER2_PATHS) Src_Dirs/COMPILER2 := $(CORE_PATHS) $(COMPILER2_PATHS)
Src_Dirs/TIERED := $(CORE_PATHS) $(COMPILER1_PATHS) $(COMPILER2_PATHS) Src_Dirs/TIERED := $(CORE_PATHS) $(COMPILER1_PATHS) $(COMPILER2_PATHS)
Src_Dirs/ZERO := $(CORE_PATHS) Src_Dirs/ZERO := $(CORE_PATHS)
Src_Dirs/SHARK := $(CORE_PATHS) Src_Dirs/SHARK := $(CORE_PATHS) $(SHARK_PATHS)
Src_Dirs := $(Src_Dirs/$(TYPE)) Src_Dirs := $(Src_Dirs/$(TYPE))
COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\* COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\*
@ -206,7 +206,7 @@ SHARK_SPECIFIC_FILES := shark
ZERO_SPECIFIC_FILES := zero ZERO_SPECIFIC_FILES := zero
# Always exclude these. # Always exclude these.
Src_Files_EXCLUDE := dtrace jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp Src_Files_EXCLUDE += dtrace jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
# Exclude per type. # Exclude per type.
Src_Files_EXCLUDE/CORE := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp Src_Files_EXCLUDE/CORE := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp

View File

@ -4373,12 +4373,12 @@ encode %{
return; return;
} }
if (UseBiasedLocking) { if (UseBiasedLocking && !UseOptoBiasInlining) {
__ biased_locking_enter(disp_hdr, oop, box, tmp, true, cont); __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
} }
// Handle existing monitor // Handle existing monitor
if (EmitSync & 0x02) { if ((EmitSync & 0x02) == 0) {
// we can use AArch64's bit test and branch here but // we can use AArch64's bit test and branch here but
// markoopDesc does not define a bit index just the bit value // markoopDesc does not define a bit index just the bit value
// so assert in case the bit pos changes // so assert in case the bit pos changes
@ -4518,7 +4518,7 @@ encode %{
return; return;
} }
if (UseBiasedLocking) { if (UseBiasedLocking && !UseOptoBiasInlining) {
__ biased_locking_exit(oop, tmp, cont); __ biased_locking_exit(oop, tmp, cont);
} }

View File

@ -1210,7 +1210,7 @@ public:
INSN(ldrs, 0b00, 1); INSN(ldrs, 0b00, 1);
INSN(ldrd, 0b01, 1); INSN(ldrd, 0b01, 1);
INSN(ldrq, 0x10, 1); INSN(ldrq, 0b10, 1);
#undef INSN #undef INSN
@ -2285,13 +2285,13 @@ public:
#undef INSN #undef INSN
// Table vector lookup // Table vector lookup
#define INSN(NAME, op) \ #define INSN(NAME, op) \
void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, unsigned registers, FloatRegister Vm) { \ void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, unsigned registers, FloatRegister Vm) { \
starti; \ starti; \
assert(T == T8B || T == T16B, "invalid arrangement"); \ assert(T == T8B || T == T16B, "invalid arrangement"); \
assert(0 < registers && registers <= 4, "invalid number of registers"); \ assert(0 < registers && registers <= 4, "invalid number of registers"); \
f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21), rf(Vm, 16), f(0, 15); \ f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21), rf(Vm, 16), f(0, 15); \
f(registers - 1, 14, 13), f(op, 12),f(0b00, 11, 10), rf(Vn, 5), rf(Vd, 0); \ f(registers - 1, 14, 13), f(op, 12),f(0b00, 11, 10), rf(Vn, 5), rf(Vd, 0); \
} }
INSN(tbl, 0); INSN(tbl, 0);
@ -2299,6 +2299,7 @@ public:
#undef INSN #undef INSN
// AdvSIMD two-reg misc
#define INSN(NAME, U, opcode) \ #define INSN(NAME, U, opcode) \
void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \ void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \
starti; \ starti; \
@ -2316,10 +2317,19 @@ public:
#define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H) #define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H)
INSN(rev32, 1, 0b00000); INSN(rev32, 1, 0b00000);
private:
INSN(_rbit, 1, 0b00101);
public:
#undef ASSERTION #undef ASSERTION
#define ASSERTION (T == T8B || T == T16B) #define ASSERTION (T == T8B || T == T16B)
INSN(rev16, 0, 0b00001); INSN(rev16, 0, 0b00001);
// RBIT only allows T8B and T16B but encodes them oddly. Argh...
void rbit(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
assert((ASSERTION), MSG);
_rbit(Vd, SIMD_Arrangement(T & 1 | 0b010), Vn);
}
#undef ASSERTION #undef ASSERTION
#undef MSG #undef MSG

View File

@ -3043,7 +3043,9 @@ void MacroAssembler::store_check(Register obj) {
// register obj is destroyed afterwards. // register obj is destroyed afterwards.
BarrierSet* bs = Universe::heap()->barrier_set(); BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); assert(bs->kind() == BarrierSet::CardTableForRS ||
bs->kind() == BarrierSet::CardTableExtension,
"Wrong barrier set kind");
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs); CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

View File

@ -691,7 +691,7 @@ class StubGenerator: public StubCodeGenerator {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2); __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
__ pop(RegSet::range(r0, r29), sp); // integer registers except lr & sp __ pop(RegSet::range(r0, r29), sp); // integer registers except lr & sp
} }
break; break;
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
case BarrierSet::ModRef: case BarrierSet::ModRef:
break; break;
@ -731,7 +731,7 @@ class StubGenerator: public StubCodeGenerator {
__ pop(RegSet::range(r0, r29), sp); // integer registers except lr & sp __ pop(RegSet::range(r0, r29), sp); // integer registers except lr & sp
} }
} }
break; break;
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
{ {
CardTableModRefBS* ct = (CardTableModRefBS*)bs; CardTableModRefBS* ct = (CardTableModRefBS*)bs;
@ -2364,7 +2364,7 @@ class StubGenerator: public StubCodeGenerator {
* c_rarg3 - int* table * c_rarg3 - int* table
* *
* Output: * Output:
* rax - int crc result * r0 - int crc result
*/ */
address generate_updateBytesCRC32C() { address generate_updateBytesCRC32C() {
assert(UseCRC32CIntrinsics, "what are we doing here?"); assert(UseCRC32CIntrinsics, "what are we doing here?");
@ -2435,6 +2435,69 @@ class StubGenerator: public StubCodeGenerator {
return start; return start;
} }
void ghash_multiply(FloatRegister result_lo, FloatRegister result_hi,
FloatRegister a, FloatRegister b, FloatRegister a1_xor_a0,
FloatRegister tmp1, FloatRegister tmp2, FloatRegister tmp3, FloatRegister tmp4) {
// Karatsuba multiplication performs a 128*128 -> 256-bit
// multiplication in three 128-bit multiplications and a few
// additions.
//
// (C1:C0) = A1*B1, (D1:D0) = A0*B0, (E1:E0) = (A0+A1)(B0+B1)
// (A1:A0)(B1:B0) = C1:(C0+C1+D1+E1):(D1+C0+D0+E0):D0
//
// Inputs:
//
// A0 in a.d[0] (subkey)
// A1 in a.d[1]
// (A1+A0) in a1_xor_a0.d[0]
//
// B0 in b.d[0] (state)
// B1 in b.d[1]
__ ext(tmp1, __ T16B, b, b, 0x08);
__ pmull2(result_hi, __ T1Q, b, a, __ T2D); // A1*B1
__ eor(tmp1, __ T16B, tmp1, b); // (B1+B0)
__ pmull(result_lo, __ T1Q, b, a, __ T1D); // A0*B0
__ pmull(tmp2, __ T1Q, tmp1, a1_xor_a0, __ T1D); // (A1+A0)(B1+B0)
__ ext(tmp4, __ T16B, result_lo, result_hi, 0x08);
__ eor(tmp3, __ T16B, result_hi, result_lo); // A1*B1+A0*B0
__ eor(tmp2, __ T16B, tmp2, tmp4);
__ eor(tmp2, __ T16B, tmp2, tmp3);
// Register pair <result_hi:result_lo> holds the result of carry-less multiplication
__ ins(result_hi, __ D, tmp2, 0, 1);
__ ins(result_lo, __ D, tmp2, 1, 0);
}
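// A minimal portable sketch of the decomposition above (hypothetical
// helpers, not the generated stub): clmul64 stands in for the PMULL
// carry-less multiply, and karatsuba128 mirrors the XOR arithmetic.
#include <cstdint>
static void clmul64(uint64_t a, uint64_t b, uint64_t &lo, uint64_t &hi) {
  lo = hi = 0;
  for (int i = 0; i < 64; i++) {
    if ((b >> i) & 1) {
      lo ^= a << i;
      if (i != 0) hi ^= a >> (64 - i); // avoid undefined shift by 64
    }
  }
}
static void karatsuba128(uint64_t a0, uint64_t a1,  // A = A1:A0 (subkey)
                         uint64_t b0, uint64_t b1,  // B = B1:B0 (state)
                         uint64_t r[4]) {           // 256-bit product
  uint64_t c0, c1, d0, d1, e0, e1;
  clmul64(a1, b1, c0, c1);           // (C1:C0) = A1*B1
  clmul64(a0, b0, d0, d1);           // (D1:D0) = A0*B0
  clmul64(a0 ^ a1, b0 ^ b1, e0, e1); // (E1:E0) = (A0+A1)(B0+B1)
  r[0] = d0;                         // D0
  r[1] = d1 ^ e0 ^ d0 ^ c0;          // D1+C0+D0+E0
  r[2] = c0 ^ e1 ^ d1 ^ c1;          // C0+C1+D1+E1
  r[3] = c1;                         // C1
}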
void ghash_reduce(FloatRegister result, FloatRegister lo, FloatRegister hi,
FloatRegister p, FloatRegister z, FloatRegister t1) {
const FloatRegister t0 = result;
// The GCM field polynomial f is z^128 + p(z), where p =
// z^7+z^2+z+1.
//
// z^128 === -p(z) (mod (z^128 + p(z)))
//
// so, given that the product we're reducing is
// a == lo + hi * z^128
// substituting,
// === lo - hi * p(z) (mod (z^128 + p(z)))
//
// we reduce by multiplying hi by p(z) and subtracting the result
// from (i.e. XORing it with) lo. Because p has no nonzero high
// bits we can do this with two 64-bit multiplications, lo*p and
// hi*p.
__ pmull2(t0, __ T1Q, hi, p, __ T2D);
__ ext(t1, __ T16B, t0, z, 8);
__ eor(hi, __ T16B, hi, t1);
__ ext(t1, __ T16B, z, t0, 8);
__ eor(lo, __ T16B, lo, t1);
__ pmull(t0, __ T1Q, hi, p, __ T1D);
__ eor(result, __ T16B, lo, t0);
}
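// Equivalently, writing the 256-bit product as a = hi*z^128 + lo and
// using z^128 === p(z) (mod z^128 + p(z)), since minus equals plus in
// characteristic 2:
//
//   hi*z^128 + lo === hi*p(z) + lo (mod z^128 + p(z))
//
// Because deg(p) == 7, hi*p(z) fits in 135 bits, so the pmull2/pmull
// pair above computes it in two 64-bit halves.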
/** /**
* Arguments: * Arguments:
* *
@ -2448,10 +2511,27 @@ class StubGenerator: public StubCodeGenerator {
* Updated state at c_rarg0 * Updated state at c_rarg0
*/ */
address generate_ghash_processBlocks() { address generate_ghash_processBlocks() {
__ align(CodeEntryAlignment); // Bafflingly, GCM uses little-endian for the byte order, but
Label L_ghash_loop, L_exit; // big-endian for the bit order. For example, the polynomial 1 is
// represented as the 16-byte string 80 00 00 00 | 12 bytes of 00.
//
// So, we must either reverse the bytes in each word and do
// everything big-endian or reverse the bits in each byte and do
// it little-endian. On AArch64 it's more idiomatic to reverse
// the bits in each byte (we have an instruction, RBIT, to do
// that) and keep the data in little-endian bit order throughout the
// calculation, bit-reversing the inputs and outputs.
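// For example, RBIT maps the byte 0x01 to 0x80 and vice versa, so the
// GCM encoding of the polynomial 1 (80 00 ... 00) bit-reverses to an
// ordinary little-endian 1 for the computation.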
StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
__ align(wordSize * 2);
address p = __ pc();
__ emit_int64(0x87); // The low-order bits of the field
// polynomial (i.e. p = z^7+z^2+z+1)
// repeated in the low and high parts of a
// 128-bit vector
__ emit_int64(0x87);
__ align(CodeEntryAlignment);
address start = __ pc(); address start = __ pc();
Register state = c_rarg0; Register state = c_rarg0;
@ -2462,104 +2542,43 @@ class StubGenerator: public StubCodeGenerator {
FloatRegister vzr = v30; FloatRegister vzr = v30;
__ eor(vzr, __ T16B, vzr, vzr); // zero register __ eor(vzr, __ T16B, vzr, vzr); // zero register
__ mov(v26, __ T16B, 1); __ ldrq(v0, Address(state));
__ mov(v27, __ T16B, 63); __ ldrq(v1, Address(subkeyH));
__ mov(v28, __ T16B, 62);
__ mov(v29, __ T16B, 57);
__ ldrq(v6, Address(state)); __ rev64(v0, __ T16B, v0); // Bit-reverse words in state and subkeyH
__ ldrq(v16, Address(subkeyH)); __ rbit(v0, __ T16B, v0);
__ rev64(v1, __ T16B, v1);
__ rbit(v1, __ T16B, v1);
__ ext(v0, __ T16B, v6, v6, 0x08); __ ldrq(v26, p);
__ ext(v1, __ T16B, v16, v16, 0x08);
__ eor(v16, __ T16B, v16, v1);
__ bind(L_ghash_loop); __ ext(v16, __ T16B, v1, v1, 0x08); // long-swap subkeyH into v1
__ eor(v16, __ T16B, v16, v1); // xor subkeyH into subkeyL (Karatsuba: (A1+A0))
__ ldrq(v2, Address(__ post(data, 0x10))); {
__ rev64(v2, __ T16B, v2); // swap data Label L_ghash_loop;
__ bind(L_ghash_loop);
__ ext(v6, __ T16B, v0, v0, 0x08); __ ldrq(v2, Address(__ post(data, 0x10))); // Load the data, bit
__ eor(v6, __ T16B, v6, v2); // reversing each byte
__ ext(v2, __ T16B, v6, v6, 0x08); __ rbit(v2, __ T16B, v2);
__ eor(v2, __ T16B, v0, v2); // bit-swapped data ^ bit-swapped state
__ pmull2(v7, __ T1Q, v2, v1, __ T2D); // A1*B1 // Multiply state in v2 by subkey in v1
__ eor(v6, __ T16B, v6, v2); ghash_multiply(/*result_lo*/v5, /*result_hi*/v7,
__ pmull(v5, __ T1Q, v2, v1, __ T1D); // A0*B0 /*a*/v1, /*b*/v2, /*a1_xor_a0*/v16,
__ pmull(v20, __ T1Q, v6, v16, __ T1D); // (A1 + A0)(B1 + B0) /*temps*/v6, v20, v18, v21);
// Reduce v7:v5 by the field polynomial
ghash_reduce(v0, v5, v7, v26, vzr, v20);
__ ext(v21, __ T16B, v5, v7, 0x08); __ sub(blocks, blocks, 1);
__ eor(v18, __ T16B, v7, v5); // A1*B1 xor A0*B0 __ cbnz(blocks, L_ghash_loop);
__ eor(v20, __ T16B, v20, v21); }
__ eor(v20, __ T16B, v20, v18);
// Registers pair <v7:v5> holds the result of carry-less multiplication // The bit-reversed result is at this point in v0
__ ins(v7, __ D, v20, 0, 1); __ rev64(v1, __ T16B, v0);
__ ins(v5, __ D, v20, 1, 0); __ rbit(v1, __ T16B, v1);
// Result of the multiplication is shifted by one bit position
// [X3:X2:X1:X0] = [X3:X2:X1:X0] << 1
__ ushr(v18, __ T2D, v5, -63 & 63);
__ ins(v25, __ D, v18, 1, 0);
__ ins(v25, __ D, vzr, 0, 0);
__ ushl(v5, __ T2D, v5, v26);
__ orr(v5, __ T16B, v5, v25);
__ ushr(v19, __ T2D, v7, -63 & 63);
__ ins(v19, __ D, v19, 1, 0);
__ ins(v19, __ D, v18, 0, 1);
__ ushl(v7, __ T2D, v7, v26);
__ orr(v6, __ T16B, v7, v19);
__ ins(v24, __ D, v5, 0, 1);
// A = X0 << 63
__ ushl(v21, __ T2D, v5, v27);
// A = X0 << 62
__ ushl(v22, __ T2D, v5, v28);
// A = X0 << 57
__ ushl(v23, __ T2D, v5, v29);
// D = X1^A^B^C
__ eor(v21, __ T16B, v21, v22);
__ eor(v21, __ T16B, v21, v23);
__ eor(v21, __ T16B, v21, v24);
__ ins(v5, __ D, v21, 1, 0);
// [E1:E0] = [D:X0] >> 1
__ ushr(v20, __ T2D, v5, -1 & 63);
__ ushl(v18, __ T2D, v5, v27);
__ ext(v25, __ T16B, v18, vzr, 0x08);
__ orr(v19, __ T16B, v20, v25);
__ eor(v7, __ T16B, v5, v19);
// [F1:F0] = [D:X0] >> 2
__ ushr(v20, __ T2D, v5, -2 & 63);
__ ushl(v18, __ T2D, v5, v28);
__ ins(v25, __ D, v18, 0, 1);
__ orr(v19, __ T16B, v20, v25);
__ eor(v7, __ T16B, v7, v19);
// [G1:G0] = [D:X0] >> 7
__ ushr(v20, __ T2D, v5, -7 & 63);
__ ushl(v18, __ T2D, v5, v29);
__ ins(v25, __ D, v18, 0, 1);
__ orr(v19, __ T16B, v20, v25);
// [H1:H0] = [D^E1^F1^G1:X0^E0^F0^G0]
__ eor(v7, __ T16B, v7, v19);
// Result = [H1:H0]^[X3:X2]
__ eor(v0, __ T16B, v7, v6);
__ subs(blocks, blocks, 1);
__ cbnz(blocks, L_ghash_loop);
__ ext(v1, __ T16B, v0, v0, 0x08);
__ st1(v1, __ T16B, state); __ st1(v1, __ T16B, state);
__ ret(lr); __ ret(lr);

View File

@ -186,7 +186,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
} }
break; break;
#endif // INCLUDE_ALL_GCS #endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
{ {
if (val == noreg) { if (val == noreg) {

View File

@ -177,6 +177,12 @@ void VM_Version::get_processor_features() {
if (UseCRC32 && (auxv & HWCAP_CRC32) == 0) { if (UseCRC32 && (auxv & HWCAP_CRC32) == 0) {
warning("UseCRC32 specified, but not supported on this CPU"); warning("UseCRC32 specified, but not supported on this CPU");
} }
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
if (auxv & HWCAP_AES) { if (auxv & HWCAP_AES) {
UseAES = UseAES || FLAG_IS_DEFAULT(UseAES); UseAES = UseAES || FLAG_IS_DEFAULT(UseAES);
UseAESIntrinsics = UseAESIntrinsics =

View File

@ -2614,7 +2614,7 @@ void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register t
void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) { void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
CardTableModRefBS* bs = CardTableModRefBS* bs =
barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
assert(bs->kind() == BarrierSet::CardTableModRef || assert(bs->kind() == BarrierSet::CardTableForRS ||
bs->kind() == BarrierSet::CardTableExtension, "wrong barrier"); bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
#ifdef ASSERT #ifdef ASSERT
cmpdi(CCR0, Rnew_val, 0); cmpdi(CCR0, Rnew_val, 0);

View File

@ -656,7 +656,7 @@ class StubGenerator: public StubCodeGenerator {
__ bind(filtered); __ bind(filtered);
} }
break; break;
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
case BarrierSet::ModRef: case BarrierSet::ModRef:
break; break;
@ -697,7 +697,7 @@ class StubGenerator: public StubCodeGenerator {
} }
} }
break; break;
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
{ {
Label Lskip_loop, Lstore_loop; Label Lskip_loop, Lstore_loop;

View File

@ -105,7 +105,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
} }
break; break;
#endif // INCLUDE_ALL_GCS #endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
{ {
Label Lnull, Ldone; Label Lnull, Ldone;

View File

@ -200,6 +200,11 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
} }
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) { if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
UseMultiplyToLenIntrinsic = true; UseMultiplyToLenIntrinsic = true;
} }

View File

@ -3958,7 +3958,7 @@ void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_v
if (new_val == G0) return; if (new_val == G0) return;
CardTableModRefBS* bs = CardTableModRefBS* bs =
barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
assert(bs->kind() == BarrierSet::CardTableModRef || assert(bs->kind() == BarrierSet::CardTableForRS ||
bs->kind() == BarrierSet::CardTableExtension, "wrong barrier"); bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
card_table_write(bs->byte_map_base, tmp, store_addr); card_table_write(bs->byte_map_base, tmp, store_addr);
} }

View File

@ -0,0 +1,159 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
// An implementation of memset, for use when there may be concurrent
// readers of the region being stored into.
//
// We can't use the standard library memset if it is implemented using
// block initializing stores. Doing so can result in concurrent readers
// seeing spurious zeros.
//
// We can't use the obvious C/C++ for-loop, because the compiler may
// recognize the idiomatic loop and optimize it into a call to the
// standard library memset; we've seen exactly this happen with, for
// example, Solaris Studio 12.3. Hence the use of inline assembly
// code, hiding loops from the compiler's optimizer.
//
// We don't attempt to use the standard library memset when it is safe
// to do so. We could conservatively do so by detecting the presence
// of block initializing stores (VM_Version::has_blk_init()), but the
// implementation provided here should be sufficient.
inline void fill_subword(void* start, void* end, int value) {
STATIC_ASSERT(BytesPerWord == 8);
assert(pointer_delta(end, start, 1) < BytesPerWord, "precondition");
// Dispatch on (end - start).
void* pc;
__asm__ volatile(
// offset := (7 - (end - start)) + 3
// 3 instructions from rdpc to DISPATCH
" sub %[offset], %[end], %[offset]\n\t" // offset := start - end
" sllx %[offset], 2, %[offset]\n\t" // scale offset for instruction size of 4
" add %[offset], 40, %[offset]\n\t" // offset += 10 * instruction size
" rd %pc, %[pc]\n\t" // dispatch on scaled offset
" jmpl %[pc]+%[offset], %g0\n\t"
" nop\n\t"
// DISPATCH: no direct reference, but without it the store block may be elided.
"1:\n\t"
" stb %[value], [%[end]-7]\n\t" // end[-7] = value
" stb %[value], [%[end]-6]\n\t"
" stb %[value], [%[end]-5]\n\t"
" stb %[value], [%[end]-4]\n\t"
" stb %[value], [%[end]-3]\n\t"
" stb %[value], [%[end]-2]\n\t"
" stb %[value], [%[end]-1]\n\t" // end[-1] = value
: /* no outputs */
[pc] "&=r" (pc) // temp
: [offset] "&+r" (start),
[end] "r" (end),
[value] "r" (value)
: "memory");
}
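// Conceptually, the computed jump above behaves like this portable
// sketch (hypothetical helper, not used by HotSpot): branch into a run
// of byte stores so that exactly n = end - start of them execute.
static void fill_tail_sketch(char* end, int n, char value) {
  switch (n) { // 0 <= n < 8; case n runs the last n stores
    case 7: end[-7] = value; // fall through
    case 6: end[-6] = value; // fall through
    case 5: end[-5] = value; // fall through
    case 4: end[-4] = value; // fall through
    case 3: end[-3] = value; // fall through
    case 2: end[-2] = value; // fall through
    case 1: end[-1] = value; // fall through
    case 0: break;
  }
}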
void memset_with_concurrent_readers(void* to, int value, size_t size) {
Prefetch::write(to, 0);
void* end = static_cast<char*>(to) + size;
if (size >= BytesPerWord) {
// Fill any partial word prefix.
uintx* aligned_to = static_cast<uintx*>(align_ptr_up(to, BytesPerWord));
fill_subword(to, aligned_to, value);
// Compute fill word.
STATIC_ASSERT(BitsPerByte == 8);
STATIC_ASSERT(BitsPerWord == 64);
uintx xvalue = value & 0xff;
xvalue |= (xvalue << 8);
xvalue |= (xvalue << 16);
xvalue |= (xvalue << 32);
uintx* aligned_end = static_cast<uintx*>(align_ptr_down(end, BytesPerWord));
assert(aligned_to <= aligned_end, "invariant");
// for ( ; aligned_to < aligned_end; ++aligned_to) {
// *aligned_to = xvalue;
// }
uintptr_t temp;
__asm__ volatile(
// Unroll loop x8.
" sub %[aend], %[ato], %[temp]\n\t"
" cmp %[temp], 56\n\t" // cc := (aligned_end - aligned_to) > 7 words
" ba %xcc, 2f\n\t" // goto TEST always
" sub %[aend], 56, %[temp]\n\t" // limit := aligned_end - 7 words
// LOOP:
"1:\n\t" // unrolled x8 store loop top
" cmp %[temp], %[ato]\n\t" // cc := limit > (next) aligned_to
" stx %[xvalue], [%[ato]-64]\n\t" // store 8 words, aligned_to pre-incremented
" stx %[xvalue], [%[ato]-56]\n\t"
" stx %[xvalue], [%[ato]-48]\n\t"
" stx %[xvalue], [%[ato]-40]\n\t"
" stx %[xvalue], [%[ato]-32]\n\t"
" stx %[xvalue], [%[ato]-24]\n\t"
" stx %[xvalue], [%[ato]-16]\n\t"
" stx %[xvalue], [%[ato]-8]\n\t"
// TEST:
"2:\n\t"
" bgu,a %xcc, 1b\n\t" // goto LOOP if more than 7 words remaining
" add %[ato], 64, %[ato]\n\t" // aligned_to += 8, for next iteration
// Fill remaining < 8 full words.
// Dispatch on (aligned_end - aligned_to).
// offset := (7 - (aligned_end - aligned_to)) + 3
// 3 instructions from rdpc to DISPATCH
" sub %[ato], %[aend], %[ato]\n\t" // offset := aligned_to - aligned_end
" srax %[ato], 1, %[ato]\n\t" // scale offset for instruction size of 4
" add %[ato], 40, %[ato]\n\t" // offset += 10 * instruction size
" rd %pc, %[temp]\n\t" // dispatch on scaled offset
" jmpl %[temp]+%[ato], %g0\n\t"
" nop\n\t"
// DISPATCH: no direct reference, but without it the store block may be elided.
"3:\n\t"
" stx %[xvalue], [%[aend]-56]\n\t" // aligned_end[-7] = xvalue
" stx %[xvalue], [%[aend]-48]\n\t"
" stx %[xvalue], [%[aend]-40]\n\t"
" stx %[xvalue], [%[aend]-32]\n\t"
" stx %[xvalue], [%[aend]-24]\n\t"
" stx %[xvalue], [%[aend]-16]\n\t"
" stx %[xvalue], [%[aend]-8]\n\t" // aligned_end[-1] = xvalue
: /* no outputs */
[temp] "&=r" (temp)
: [ato] "&+r" (aligned_to),
[aend] "r" (aligned_end),
[xvalue] "r" (xvalue)
: "cc", "memory");
to = aligned_end; // setup for suffix
}
// Fill any partial word suffix. Also the prefix if size < BytesPerWord.
fill_subword(to, end, value);
}
#endif // INCLUDE_ALL_GCS

View File

@ -981,7 +981,7 @@ class StubGenerator: public StubCodeGenerator {
__ restore(); __ restore();
} }
break; break;
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
case BarrierSet::ModRef: case BarrierSet::ModRef:
break; break;
@ -1014,7 +1014,7 @@ class StubGenerator: public StubCodeGenerator {
__ restore(); __ restore();
} }
break; break;
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
{ {
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs); CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
@ -5110,6 +5110,188 @@ class StubGenerator: public StubCodeGenerator {
return start; return start;
} }
#define ADLER32_NUM_TEMPS 16
/**
* Arguments:
*
* Inputs:
* O0 - int adler
* O1 - byte* buff
* O2 - int len
*
* Output:
* O0 - int adler result
*/
address generate_updateBytesAdler32() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "updateBytesAdler32");
address start = __ pc();
Label L_cleanup_loop, L_cleanup_loop_check;
Label L_main_loop_check, L_main_loop, L_inner_loop, L_inner_loop_check;
Label L_nmax_check_done;
// Aliases
Register s1 = O0;
Register s2 = O3;
Register buff = O1;
Register len = O2;
Register temp[ADLER32_NUM_TEMPS] = {L0, L1, L2, L3, L4, L5, L6, L7, I0, I1, I2, I3, I4, I5, G3, I7};
// Max number of bytes we can process before having to take the mod
// 0x15B0 is 5552 in decimal, the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
unsigned long NMAX = 0x15B0;
// Zero-out the upper bits of len
__ clruwu(len);
// Create the mask 0xFFFF
__ set64(0x00FFFF, O4, O5); // O5 is the temp register
// s1 is initialized to the lower 16 bits of adler
// s2 is initialized to the upper 16 bits of adler
__ srlx(O0, 16, O5); // adler >> 16
__ and3(O0, O4, s1); // s1 = (adler & 0xFFFF)
__ and3(O5, O4, s2); // s2 = ((adler >> 16) & 0xFFFF)
// The pipelined loop needs at least 16 elements for 1 iteration
// It does check this, but it is more effective to skip to the cleanup loop
// Setup the constant for cutoff checking
__ mov(15, O4);
// Check if we are above the cutoff, if not go to the cleanup loop immediately
__ cmp_and_br_short(len, O4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_loop_check);
// Free up some registers for our use
for (int i = 0; i < ADLER32_NUM_TEMPS; i++) {
__ movxtod(temp[i], as_FloatRegister(2*i));
}
// Loop maintenance stuff is done at the end of the loop, so skip to there
__ ba_short(L_main_loop_check);
__ BIND(L_main_loop);
// Prologue for inner loop
__ ldub(buff, 0, L0);
__ dec(O5);
for (int i = 1; i < 8; i++) {
__ ldub(buff, i, temp[i]);
}
__ inc(buff, 8);
// Inner loop processes 16 elements at a time, might never execute if only 16 elements
// to be processed by the outer loop
__ ba_short(L_inner_loop_check);
__ BIND(L_inner_loop);
for (int i = 0; i < 8; i++) {
__ ldub(buff, (2*i), temp[(8+(2*i)) % ADLER32_NUM_TEMPS]);
__ add(s1, temp[i], s1);
__ ldub(buff, (2*i)+1, temp[(8+(2*i)+1) % ADLER32_NUM_TEMPS]);
__ add(s2, s1, s2);
}
// Original temp 0-7 used and new loads to temp 0-7 issued
// temp 8-15 ready to be consumed
__ add(s1, I0, s1);
__ dec(O5);
__ add(s2, s1, s2);
__ add(s1, I1, s1);
__ inc(buff, 16);
__ add(s2, s1, s2);
for (int i = 0; i < 6; i++) {
__ add(s1, temp[10+i], s1);
__ add(s2, s1, s2);
}
__ BIND(L_inner_loop_check);
__ nop();
__ cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_inner_loop);
// Epilogue
for (int i = 0; i < 4; i++) {
__ ldub(buff, (2*i), temp[8+(2*i)]);
__ add(s1, temp[i], s1);
__ ldub(buff, (2*i)+1, temp[8+(2*i)+1]);
__ add(s2, s1, s2);
}
__ add(s1, temp[4], s1);
__ inc(buff, 8);
for (int i = 0; i < 11; i++) {
__ add(s2, s1, s2);
__ add(s1, temp[5+i], s1);
}
__ add(s2, s1, s2);
// Take the mod for s1 and s2
__ set64(0xFFF1, L0, L1);
__ udivx(s1, L0, L1);
__ udivx(s2, L0, L2);
__ mulx(L0, L1, L1);
__ mulx(L0, L2, L2);
__ sub(s1, L1, s1);
__ sub(s2, L2, s2);
// Make sure there is something left to process
__ BIND(L_main_loop_check);
__ set64(NMAX, L0, L1);
// k = len < NMAX ? len : NMAX
__ cmp_and_br_short(len, L0, Assembler::greaterEqualUnsigned, Assembler::pt, L_nmax_check_done);
__ andn(len, 0x0F, L0); // only loop a multiple of 16 times
__ BIND(L_nmax_check_done);
__ mov(L0, O5);
__ sub(len, L0, len); // len -= k
__ srlx(O5, 4, O5); // multiples of 16
__ cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_main_loop);
// Restore anything we used, take the mod one last time, combine and return
// Restore any registers we saved
for (int i = 0; i < ADLER32_NUM_TEMPS; i++) {
__ movdtox(as_FloatRegister(2*i), temp[i]);
}
// There might be nothing left to process
__ ba_short(L_cleanup_loop_check);
__ BIND(L_cleanup_loop);
__ ldub(buff, 0, O4); // load single byte from buffer
__ inc(buff); // buff++
__ add(s1, O4, s1); // s1 += *buff++;
__ dec(len); // len--
__ add(s1, s2, s2); // s2 += s1;
__ BIND(L_cleanup_loop_check);
__ nop();
__ cmp_and_br_short(len, 0, Assembler::notEqual, Assembler::pt, L_cleanup_loop);
// Take the mod one last time
__ set64(0xFFF1, O1, O2);
__ udivx(s1, O1, O2);
__ udivx(s2, O1, O5);
__ mulx(O1, O2, O2);
__ mulx(O1, O5, O5);
__ sub(s1, O2, s1);
__ sub(s2, O5, s2);
// Combine lower bits and higher bits
__ sllx(s2, 16, s2); // s2 = s2 << 16
__ or3(s1, s2, s1); // adler = s2 | s1
// Final return value is in O0
__ retl();
__ delayed()->nop();
return start;
}
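// For reference, a minimal C++ sketch of the recurrence this stub
// computes (hypothetical helper, not the generated code). BASE = 65521
// (0xFFF1) is the largest prime below 2^16, and NMAX = 5552 bounds how
// many bytes can be summed before s2 could overflow 32 bits.
#include <cstdint>
#include <cstddef>
static uint32_t adler32_sketch(uint32_t adler, const unsigned char* buf,
                               size_t len) {
  uint32_t s1 = adler & 0xFFFF;         // low half
  uint32_t s2 = (adler >> 16) & 0xFFFF; // high half
  while (len > 0) {
    size_t k = len < 5552 ? len : 5552; // k = min(len, NMAX)
    len -= k;
    while (k--) {
      s1 += *buf++; // s1 = 1 + b0 + b1 + ...
      s2 += s1;     // s2 = running sum of the s1 values
    }
    s1 %= 65521;    // take the mod only once per chunk
    s2 %= 65521;
  }
  return (s2 << 16) | s1; // recombine the halves
}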
void generate_initial() { void generate_initial() {
// Generates all stubs and initializes the entry points // Generates all stubs and initializes the entry points
@ -5206,6 +5388,11 @@ class StubGenerator: public StubCodeGenerator {
if (UseCRC32CIntrinsics) { if (UseCRC32CIntrinsics) {
StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(); StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
} }
// generate Adler32 intrinsics code
if (UseAdler32Intrinsics) {
StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32();
}
} }

View File

@ -41,7 +41,7 @@ static bool returns_to_call_stub(address return_pc) {
enum /* platform_dependent_constants */ { enum /* platform_dependent_constants */ {
// %%%%%%%% May be able to shrink this a lot // %%%%%%%% May be able to shrink this a lot
code_size1 = 20000, // simply increase if too small (assembler will crash if too small) code_size1 = 20000, // simply increase if too small (assembler will crash if too small)
code_size2 = 24000 // simply increase if too small (assembler will crash if too small) code_size2 = 27000 // simply increase if too small (assembler will crash if too small)
}; };
class Sparc { class Sparc {

View File

@ -91,7 +91,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
} }
break; break;
#endif // INCLUDE_ALL_GCS #endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
{ {
if (index == noreg ) { if (index == noreg ) {

View File

@ -85,27 +85,6 @@ void VM_Version::initialize() {
_supports_cx8 = has_v9(); _supports_cx8 = has_v9();
_supports_atomic_getset4 = true; // swap instruction _supports_atomic_getset4 = true; // swap instruction
// There are Fujitsu Sparc64 CPUs which support blk_init as well so
// we have to take this check out of the 'is_niagara()' block below.
if (has_blk_init()) {
// When using CMS or G1, we cannot use memset() in BOT updates
// because the sun4v/CMT version in libc_psr uses BIS which
// exposes "phantom zeros" to concurrent readers. See 6948537.
if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) {
FLAG_SET_DEFAULT(UseMemSetInBOT, false);
}
// Issue a stern warning if the user has explicitly set
// UseMemSetInBOT (it is known to cause issues), but allow
// use for experimentation and debugging.
if (UseConcMarkSweepGC || UseG1GC) {
if (UseMemSetInBOT) {
assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
" on sun4v; please understand that you are using at your own risk!");
}
}
}
if (is_niagara()) { if (is_niagara()) {
// Indirect branch is the same cost as direct // Indirect branch is the same cost as direct
if (FLAG_IS_DEFAULT(UseInlineCaches)) { if (FLAG_IS_DEFAULT(UseInlineCaches)) {
@ -377,6 +356,15 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false); FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
} }
if (UseVIS > 2) {
if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
}
} else if (UseAdler32Intrinsics) {
warning("SPARC Adler32 intrinsics require VIS3 instruction support. Intrinsics will be disabled.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
if (FLAG_IS_DEFAULT(ContendedPaddingWidth) && if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
(cache_line_size > ContendedPaddingWidth)) (cache_line_size > ContendedPaddingWidth))
ContendedPaddingWidth = cache_line_size; ContendedPaddingWidth = cache_line_size;

View File

@ -4320,7 +4320,9 @@ void MacroAssembler::store_check(Register obj) {
// register obj is destroyed afterwards. // register obj is destroyed afterwards.
BarrierSet* bs = Universe::heap()->barrier_set(); BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); assert(bs->kind() == BarrierSet::CardTableForRS ||
bs->kind() == BarrierSet::CardTableExtension,
"Wrong barrier set kind");
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs); CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

View File

@ -722,7 +722,7 @@ class StubGenerator: public StubCodeGenerator {
__ popa(); __ popa();
} }
break; break;
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
case BarrierSet::ModRef: case BarrierSet::ModRef:
break; break;
@ -754,7 +754,7 @@ class StubGenerator: public StubCodeGenerator {
} }
break; break;
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
{ {
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs); CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);

View File

@ -367,16 +367,20 @@ class StubGenerator: public StubCodeGenerator {
#ifdef ASSERT #ifdef ASSERT
// verify that threads correspond // verify that threads correspond
{ {
Label L, S; Label L1, L2, L3;
__ cmpptr(r15_thread, thread); __ cmpptr(r15_thread, thread);
__ jcc(Assembler::notEqual, S); __ jcc(Assembler::equal, L1);
__ stop("StubRoutines::call_stub: r15_thread is corrupted");
__ bind(L1);
__ get_thread(rbx); __ get_thread(rbx);
__ cmpptr(r15_thread, thread);
__ jcc(Assembler::equal, L2);
__ stop("StubRoutines::call_stub: r15_thread is modified by call");
__ bind(L2);
__ cmpptr(r15_thread, rbx); __ cmpptr(r15_thread, rbx);
__ jcc(Assembler::equal, L); __ jcc(Assembler::equal, L3);
__ bind(S);
__ jcc(Assembler::equal, L);
__ stop("StubRoutines::call_stub: threads must correspond"); __ stop("StubRoutines::call_stub: threads must correspond");
__ bind(L); __ bind(L3);
} }
#endif #endif
@ -450,15 +454,20 @@ class StubGenerator: public StubCodeGenerator {
#ifdef ASSERT #ifdef ASSERT
// verify that threads correspond // verify that threads correspond
{ {
Label L, S; Label L1, L2, L3;
__ cmpptr(r15_thread, thread); __ cmpptr(r15_thread, thread);
__ jcc(Assembler::notEqual, S); __ jcc(Assembler::equal, L1);
__ stop("StubRoutines::catch_exception: r15_thread is corrupted");
__ bind(L1);
__ get_thread(rbx); __ get_thread(rbx);
__ cmpptr(r15_thread, thread);
__ jcc(Assembler::equal, L2);
__ stop("StubRoutines::catch_exception: r15_thread is modified by call");
__ bind(L2);
__ cmpptr(r15_thread, rbx); __ cmpptr(r15_thread, rbx);
__ jcc(Assembler::equal, L); __ jcc(Assembler::equal, L3);
__ bind(S);
__ stop("StubRoutines::catch_exception: threads must correspond"); __ stop("StubRoutines::catch_exception: threads must correspond");
__ bind(L); __ bind(L3);
} }
#endif #endif
@ -1244,7 +1253,7 @@ class StubGenerator: public StubCodeGenerator {
__ popa(); __ popa();
} }
break; break;
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
case BarrierSet::ModRef: case BarrierSet::ModRef:
break; break;
@ -1284,7 +1293,7 @@ class StubGenerator: public StubCodeGenerator {
__ popa(); __ popa();
} }
break; break;
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
{ {
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs); CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);

View File

@ -200,7 +200,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
} }
break; break;
#endif // INCLUDE_ALL_GCS #endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
{ {
if (val == noreg) { if (val == noreg) {

View File

@ -714,6 +714,11 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false); FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
} }
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
// Adjust RTM (Restricted Transactional Memory) flags // Adjust RTM (Restricted Transactional Memory) flags
if (!supports_rtm() && UseRTMLocking) { if (!supports_rtm() && UseRTMLocking) {
// Can't continue because UseRTMLocking affects UseBiasedLocking flag // Can't continue because UseRTMLocking affects UseBiasedLocking flag

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -53,6 +53,10 @@ static bool detect_niagara() {
return cpuinfo_field_contains("cpu", "Niagara"); return cpuinfo_field_contains("cpu", "Niagara");
} }
static bool detect_M_family() {
return cpuinfo_field_contains("cpu", "SPARC-M");
}
static bool detect_blkinit() { static bool detect_blkinit() {
return cpuinfo_field_contains("cpucaps", "blkinit"); return cpuinfo_field_contains("cpucaps", "blkinit");
} }
@ -66,6 +70,11 @@ int VM_Version::platform_features(int features) {
features = niagara1_m | T_family_m; features = niagara1_m | T_family_m;
} }
if (detect_M_family()) {
NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on M family");)
features = sun4v_m | generic_v9_m | M_family_m | T_family_m;
}
if (detect_blkinit()) { if (detect_blkinit()) {
features |= blk_init_instructions_m; features |= blk_init_instructions_m;
} }

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 1997, 1998, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -33,7 +33,7 @@ HotSpot Architecture Description Language. This language is used to describe
the architecture of a processor, and is the input to the ADL Compiler. The the architecture of a processor, and is the input to the ADL Compiler. The
ADL Compiler compiles an ADL file into code which is incorporated into the ADL Compiler compiles an ADL file into code which is incorporated into the
Optimizing Just In Time Compiler (OJIT) to generate efficient and correct code Optimizing Just In Time Compiler (OJIT) to generate efficient and correct code
for the target architecture. The ADL describes three bassic different types for the target architecture. The ADL describes three basic different types
of architectural features. It describes the instruction set (and associated of architectural features. It describes the instruction set (and associated
operands) of the target architecture. It describes the register set of the operands) of the target architecture. It describes the register set of the
target architecture along with relevant information for the register allocator. target architecture along with relevant information for the register allocator.

View File

@ -32,7 +32,6 @@
#include "c1/c1_Runtime1.hpp" #include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueType.hpp" #include "c1/c1_ValueType.hpp"
#include "compiler/compileBroker.hpp" #include "compiler/compileBroker.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/linkResolver.hpp" #include "interpreter/linkResolver.hpp"
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp" #include "memory/allocation.inline.hpp"

View File

@ -4212,7 +4212,7 @@ void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool succes
if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) { if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
return; return;
} }
CompileTask::print_inlining(callee, scope()->level(), bci(), msg); CompileTask::print_inlining_tty(callee, scope()->level(), bci(), msg);
if (success && CIPrintMethodCodes) { if (success && CIPrintMethodCodes) {
callee->print_codes(); callee->print_codes();
} }

View File

@ -1425,7 +1425,7 @@ void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info); G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
break; break;
#endif // INCLUDE_ALL_GCS #endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
// No pre barriers // No pre barriers
break; break;
@ -1445,7 +1445,7 @@ void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
G1SATBCardTableModRef_post_barrier(addr, new_val); G1SATBCardTableModRef_post_barrier(addr, new_val);
break; break;
#endif // INCLUDE_ALL_GCS #endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableModRef: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
CardTableModRef_post_barrier(addr, new_val); CardTableModRef_post_barrier(addr, new_val);
break; break;

View File

@ -1447,7 +1447,6 @@ BCEscapeAnalyzer::BCEscapeAnalyzer(ciMethod* method, BCEscapeAnalyzer* parent)
if (methodData() == NULL) if (methodData() == NULL)
return; return;
bool printit = _method->should_print_assembly();
if (methodData()->has_escape_info()) { if (methodData()->has_escape_info()) {
TRACE_BCEA(2, tty->print_cr("[EA] Reading previous results for %s.%s", TRACE_BCEA(2, tty->print_cr("[EA] Reading previous results for %s.%s",
method->holder()->name()->as_utf8(), method->holder()->name()->as_utf8(),

View File

@ -28,8 +28,8 @@
#include "classfile/classLoader.hpp" #include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.inline.hpp" #include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderExt.hpp" #include "classfile/classLoaderExt.hpp"
#include "classfile/imageFile.hpp"
#include "classfile/javaClasses.hpp" #include "classfile/javaClasses.hpp"
#include "classfile/jimage.hpp"
#include "classfile/systemDictionary.hpp" #include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp" #include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp" #include "compiler/compileBroker.hpp"
@ -58,6 +58,7 @@
#include "runtime/os.hpp" #include "runtime/os.hpp"
#include "runtime/threadCritical.hpp" #include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp" #include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/management.hpp" #include "services/management.hpp"
#include "services/threadService.hpp" #include "services/threadService.hpp"
#include "utilities/events.hpp" #include "utilities/events.hpp"
@ -68,7 +69,7 @@
#include "classfile/sharedPathsMiscInfo.hpp" #include "classfile/sharedPathsMiscInfo.hpp"
#endif #endif
// Entry points in zip.dll for loading zip/jar file entries and image file entries // Entry points in zip.dll for loading zip/jar file entries
typedef void * * (JNICALL *ZipOpen_t)(const char *name, char **pmsg); typedef void * * (JNICALL *ZipOpen_t)(const char *name, char **pmsg);
typedef void (JNICALL *ZipClose_t)(jzfile *zip); typedef void (JNICALL *ZipClose_t)(jzfile *zip);
@ -89,6 +90,15 @@ static canonicalize_fn_t CanonicalizeEntry = NULL;
static ZipInflateFully_t ZipInflateFully = NULL; static ZipInflateFully_t ZipInflateFully = NULL;
static Crc32_t Crc32 = NULL; static Crc32_t Crc32 = NULL;
// Entry points for jimage.dll for loading jimage file entries
static JImageOpen_t JImageOpen = NULL;
static JImageClose_t JImageClose = NULL;
static JImagePackageToModule_t JImagePackageToModule = NULL;
static JImageFindResource_t JImageFindResource = NULL;
static JImageGetResource_t JImageGetResource = NULL;
static JImageResourceIterator_t JImageResourceIterator = NULL;
// Globals // Globals
PerfCounter* ClassLoader::_perf_accumulated_time = NULL; PerfCounter* ClassLoader::_perf_accumulated_time = NULL;
@ -141,6 +151,15 @@ bool string_starts_with(const char* str, const char* str_to_find) {
return (strncmp(str, str_to_find, str_to_find_len) == 0); return (strncmp(str, str_to_find, str_to_find_len) == 0);
} }
static const char* get_jimage_version_string() {
static char version_string[10] = "";
if (version_string[0] == '\0') {
jio_snprintf(version_string, sizeof(version_string), "%d.%d",
Abstract_VM_Version::vm_minor_version(), Abstract_VM_Version::vm_micro_version());
}
return (const char*)version_string;
}
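// With 1.9.0-style version numbering, minor.micro is "9.0" -- the version
// string JImageFindResource expects (an assumption based on the jimage.hpp
// comment; the numbering scheme itself is not shown here).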
bool string_ends_with(const char* str, const char* str_to_find) { bool string_ends_with(const char* str, const char* str_to_find) {
size_t str_len = strlen(str); size_t str_len = strlen(str);
size_t str_to_find_len = strlen(str_to_find); size_t str_to_find_len = strlen(str_to_find);
@ -272,97 +291,113 @@ void ClassPathZipEntry::contents_do(void f(const char* name, void* context), voi
} }
} }
ClassPathImageEntry::ClassPathImageEntry(ImageFileReader* image) : ClassPathImageEntry::ClassPathImageEntry(JImageFile* jimage, const char* name) :
ClassPathEntry(), ClassPathEntry(),
_image(image), _jimage(jimage) {
_module_data(NULL) { guarantee(jimage != NULL, "jimage file is null");
guarantee(image != NULL, "image file is null"); guarantee(name != NULL, "jimage file name is null");
size_t len = strlen(name) + 1;
char module_data_name[JVM_MAXPATHLEN]; _name = NEW_C_HEAP_ARRAY(const char, len, mtClass);
ImageModuleData::module_data_name(module_data_name, _image->name()); strncpy((char *)_name, name, len);
_module_data = new ImageModuleData(_image, module_data_name);
} }
ClassPathImageEntry::~ClassPathImageEntry() { ClassPathImageEntry::~ClassPathImageEntry() {
if (_module_data != NULL) { if (_name != NULL) {
delete _module_data; FREE_C_HEAP_ARRAY(const char, _name);
_module_data = NULL; _name = NULL;
} }
if (_jimage != NULL) {
if (_image != NULL) { (*JImageClose)(_jimage);
ImageFileReader::close(_image); _jimage = NULL;
_image = NULL;
} }
} }
const char* ClassPathImageEntry::name() { void ClassPathImageEntry::name_to_package(const char* name, char* buffer, int length) {
return _image ? _image->name() : ""; const char *pslash = strrchr(name, '/');
if (pslash == NULL) {
buffer[0] = '\0';
return;
}
int len = pslash - name;
#if INCLUDE_CDS
if (len <= 0 && DumpSharedSpaces) {
buffer[0] = '\0';
return;
}
#endif
assert(len > 0, "Bad length for package name");
if (len >= length) {
buffer[0] = '\0';
return;
}
// drop name after last slash (including slash)
// Ex., "java/lang/String.class" => "java/lang"
strncpy(buffer, name, len);
// ensure string termination (strncpy does not guarantee)
buffer[len] = '\0';
} }
// For a class in a named module, look it up in the jimage file using this syntax:
// /<module-name>/<package-name>/<base-class>
//
// Assumptions:
// 1. There are no unnamed modules in the jimage file.
// 2. A package is in at most one module in the jimage file.
//
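// Ex. (assuming package "java/lang" maps to module "java.base"):
//   "java/lang/String.class" is looked up as "/java.base/java/lang/String.class"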
ClassFileStream* ClassPathImageEntry::open_stream(const char* name, TRAPS) { ClassFileStream* ClassPathImageEntry::open_stream(const char* name, TRAPS) {
ImageLocation location; jlong size;
bool found = _image->find_location(name, location); JImageLocationRef location = (*JImageFindResource)(_jimage, "", get_jimage_version_string(), name, &size);
if (!found) { if (location == 0) {
const char *pslash = strrchr(name, '/'); char package[JIMAGE_MAX_PATH];
int len = pslash - name; name_to_package(name, package, JIMAGE_MAX_PATH);
if (package[0] != '\0') {
// NOTE: IMAGE_MAX_PATH is used here since this path is internal to the jimage const char* module = (*JImagePackageToModule)(_jimage, package);
// (effectively unlimited.) There are several JCK tests that use paths over if (module == NULL) {
// 1024 characters long, the limit on Windows systems. module = "java.base";
if (pslash && 0 < len && len < IMAGE_MAX_PATH) { }
location = (*JImageFindResource)(_jimage, module, get_jimage_version_string(), name, &size);
char path[IMAGE_MAX_PATH];
strncpy(path, name, len);
path[len] = '\0';
const char* moduleName = _module_data->package_to_module(path);
if (moduleName != NULL && (len + strlen(moduleName) + 2) < IMAGE_MAX_PATH) {
jio_snprintf(path, IMAGE_MAX_PATH - 1, "/%s/%s", moduleName, name);
location.clear_data();
found = _image->find_location(path, location);
}
} }
} }
if (found) { if (location != 0) {
u8 size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
if (UsePerfData) { if (UsePerfData) {
ClassLoader::perf_sys_classfile_bytes_read()->inc(size); ClassLoader::perf_sys_classfile_bytes_read()->inc(size);
} }
u1* data = NEW_RESOURCE_ARRAY(u1, size); char* data = NEW_RESOURCE_ARRAY(char, size);
_image->get_resource(location, data); (*JImageGetResource)(_jimage, location, data, size);
return new ClassFileStream(data, (int)size, _image->name()); // Resource allocated return new ClassFileStream((u1*)data, (int)size, _name); // Resource allocated
} }
return NULL; return NULL;
} }
#ifndef PRODUCT #ifndef PRODUCT
bool ctw_visitor(JImageFile* jimage,
const char* module_name, const char* version, const char* package,
const char* name, const char* extension, void* arg) {
if (strcmp(extension, "class") == 0) {
Thread* THREAD = Thread::current();
char path[JIMAGE_MAX_PATH];
jio_snprintf(path, JIMAGE_MAX_PATH - 1, "%s/%s.class", package, name);
ClassLoader::compile_the_world_in(path, *(Handle*)arg, THREAD);
return !HAS_PENDING_EXCEPTION;
}
return true;
}
void ClassPathImageEntry::compile_the_world(Handle loader, TRAPS) { void ClassPathImageEntry::compile_the_world(Handle loader, TRAPS) {
tty->print_cr("CompileTheWorld : Compiling all classes in %s", name()); tty->print_cr("CompileTheWorld : Compiling all classes in %s", name());
tty->cr(); tty->cr();
const ImageStrings strings = _image->get_strings(); (*JImageResourceIterator)(_jimage, (JImageResourceVisitor_t)ctw_visitor, (void *)&loader);
// Retrieve each path component string.
u4 length = _image->table_length();
for (u4 i = 0; i < length; i++) {
u1* location_data = _image->get_location_data(i);
if (location_data != NULL) {
ImageLocation location(location_data);
char path[IMAGE_MAX_PATH];
_image->location_path(location, path, IMAGE_MAX_PATH);
ClassLoader::compile_the_world_in(path, loader, CHECK);
}
}
if (HAS_PENDING_EXCEPTION) { if (HAS_PENDING_EXCEPTION) {
if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) { if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
CLEAR_PENDING_EXCEPTION; CLEAR_PENDING_EXCEPTION;
tty->print_cr("\nCompileTheWorld : Ran out of memory\n"); tty->print_cr("\nCompileTheWorld : Ran out of memory\n");
tty->print_cr("Increase class metadata storage if a limit was set"); tty->print_cr("Increase class metadata storage if a limit was set");
} else { } else {
tty->print_cr("\nCompileTheWorld : Unexpected exception occurred\n"); tty->print_cr("\nCompileTheWorld : Unexpected exception occurred\n");
} }
} }
} }
@ -490,7 +525,7 @@ ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const str
JavaThread* thread = JavaThread::current(); JavaThread* thread = JavaThread::current();
ClassPathEntry* new_entry = NULL; ClassPathEntry* new_entry = NULL;
if ((st->st_mode & S_IFREG) == S_IFREG) { if ((st->st_mode & S_IFREG) == S_IFREG) {
// Regular file, should be a zip or image file // Regular file, should be a zip or jimage file
// Canonicalized filename // Canonicalized filename
char canonical_path[JVM_MAXPATHLEN]; char canonical_path[JVM_MAXPATHLEN];
if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) { if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
@ -501,9 +536,10 @@ ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const str
return NULL; return NULL;
} }
} }
ImageFileReader* image = ImageFileReader::open(canonical_path); jint error;
if (image != NULL) { JImageFile* jimage =(*JImageOpen)(canonical_path, &error);
new_entry = new ClassPathImageEntry(image); if (jimage != NULL) {
new_entry = new ClassPathImageEntry(jimage, canonical_path);
} else { } else {
char* error_msg = NULL; char* error_msg = NULL;
jzfile* zip; jzfile* zip;
@ -682,6 +718,35 @@ void ClassLoader::load_zip_library() {
// This lookup only works on 1.3. Do not check for non-null here // This lookup only works on 1.3. Do not check for non-null here
} }
void ClassLoader::load_jimage_library() {
// First make sure native library is loaded
os::native_java_library();
// Load jimage library
char path[JVM_MAXPATHLEN];
char ebuf[1024];
void* handle = NULL;
if (os::dll_build_name(path, sizeof(path), Arguments::get_dll_dir(), "jimage")) {
handle = os::dll_load(path, ebuf, sizeof ebuf);
}
if (handle == NULL) {
vm_exit_during_initialization("Unable to load jimage library", path);
}
// Lookup jimage entry points
JImageOpen = CAST_TO_FN_PTR(JImageOpen_t, os::dll_lookup(handle, "JIMAGE_Open"));
guarantee(JImageOpen != NULL, "function JIMAGE_Open not found");
JImageClose = CAST_TO_FN_PTR(JImageClose_t, os::dll_lookup(handle, "JIMAGE_Close"));
guarantee(JImageClose != NULL, "function JIMAGE_Close not found");
JImagePackageToModule = CAST_TO_FN_PTR(JImagePackageToModule_t, os::dll_lookup(handle, "JIMAGE_PackageToModule"));
guarantee(JImagePackageToModule != NULL, "function JIMAGE_PackageToModule not found");
JImageFindResource = CAST_TO_FN_PTR(JImageFindResource_t, os::dll_lookup(handle, "JIMAGE_FindResource"));
guarantee(JImageFindResource != NULL, "function JIMAGE_FindResource not found");
JImageGetResource = CAST_TO_FN_PTR(JImageGetResource_t, os::dll_lookup(handle, "JIMAGE_GetResource"));
guarantee(JImageGetResource != NULL, "function JIMAGE_GetResource not found");
JImageResourceIterator = CAST_TO_FN_PTR(JImageResourceIterator_t, os::dll_lookup(handle, "JIMAGE_ResourceIterator"));
guarantee(JImageResourceIterator != NULL, "function JIMAGE_ResourceIterator not found");
}
jboolean ClassLoader::decompress(void *in, u8 inSize, void *out, u8 outSize, char **pmsg) { jboolean ClassLoader::decompress(void *in, u8 inSize, void *out, u8 outSize, char **pmsg) {
return (*ZipInflateFully)(in, inSize, out, outSize, pmsg); return (*ZipInflateFully)(in, inSize, out, outSize, pmsg);
} }
@ -1086,6 +1151,8 @@ void ClassLoader::initialize() {
// lookup zip library entry points // lookup zip library entry points
load_zip_library(); load_zip_library();
// lookup jimage library entry points
load_jimage_library();
#if INCLUDE_CDS #if INCLUDE_CDS
// initialize search path // initialize search path
if (DumpSharedSpaces) { if (DumpSharedSpaces) {

View File

@ -37,8 +37,7 @@
// Class path entry (directory or zip file) // Class path entry (directory or zip file)
class ImageFileReader; class JImageFile;
class ImageModuleData;
class ClassPathEntry: public CHeapObj<mtClass> { class ClassPathEntry: public CHeapObj<mtClass> {
private: private:
@ -52,7 +51,7 @@ class ClassPathEntry: public CHeapObj<mtClass> {
} }
virtual bool is_jar_file() = 0; virtual bool is_jar_file() = 0;
virtual const char* name() = 0; virtual const char* name() = 0;
virtual ImageFileReader* image() = 0; virtual JImageFile* jimage() = 0;
// Constructor // Constructor
ClassPathEntry(); ClassPathEntry();
// Attempt to locate file_name through this class path entry. // Attempt to locate file_name through this class path entry.
@ -70,7 +69,7 @@ class ClassPathDirEntry: public ClassPathEntry {
public: public:
bool is_jar_file() { return false; } bool is_jar_file() { return false; }
const char* name() { return _dir; } const char* name() { return _dir; }
ImageFileReader* image() { return NULL; } JImageFile* jimage() { return NULL; }
ClassPathDirEntry(const char* dir); ClassPathDirEntry(const char* dir);
ClassFileStream* open_stream(const char* name, TRAPS); ClassFileStream* open_stream(const char* name, TRAPS);
// Debugging // Debugging
@ -100,7 +99,7 @@ class ClassPathZipEntry: public ClassPathEntry {
public: public:
bool is_jar_file() { return true; } bool is_jar_file() { return true; }
const char* name() { return _zip_name; } const char* name() { return _zip_name; }
ImageFileReader* image() { return NULL; } JImageFile* jimage() { return NULL; }
ClassPathZipEntry(jzfile* zip, const char* zip_name); ClassPathZipEntry(jzfile* zip, const char* zip_name);
~ClassPathZipEntry(); ~ClassPathZipEntry();
u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS); u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
@ -115,16 +114,16 @@ class ClassPathZipEntry: public ClassPathEntry {
// For java image files // For java image files
class ClassPathImageEntry: public ClassPathEntry { class ClassPathImageEntry: public ClassPathEntry {
private: private:
ImageFileReader* _image; JImageFile* _jimage;
ImageModuleData* _module_data; const char* _name;
public: public:
bool is_jar_file() { return false; } bool is_jar_file() { return false; }
bool is_open() { return _image != NULL; } bool is_open() { return _jimage != NULL; }
const char* name(); const char* name() { return _name == NULL ? "" : _name; }
ImageFileReader* image() { return _image; } JImageFile* jimage() { return _jimage; }
ImageModuleData* module_data() { return _module_data; } ClassPathImageEntry(JImageFile* jimage, const char* name);
ClassPathImageEntry(ImageFileReader* image);
~ClassPathImageEntry(); ~ClassPathImageEntry();
static void name_to_package(const char* name, char* buffer, int length);
ClassFileStream* open_stream(const char* name, TRAPS); ClassFileStream* open_stream(const char* name, TRAPS);
// Debugging // Debugging
@ -206,6 +205,7 @@ class ClassLoader: AllStatic {
static void setup_search_path(const char *class_path); static void setup_search_path(const char *class_path);
static void load_zip_library(); static void load_zip_library();
static void load_jimage_library();
static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st, static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st,
bool throw_exception, TRAPS); bool throw_exception, TRAPS);

View File

@ -1,121 +0,0 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/thread.inline.hpp"
#include "classfile/imageDecompressor.hpp"
#include "runtime/thread.hpp"
#include "utilities/bytes.hpp"
/*
* Allocate in C Heap not in resource area, otherwise JVM crashes.
* This array life time is the VM life time. Array is never freed and
* is not expected to contain more than few references.
*/
GrowableArray<ImageDecompressor*>* ImageDecompressor::_decompressors =
new(ResourceObj::C_HEAP, mtInternal) GrowableArray<ImageDecompressor*>(2, true);
static Symbol* createSymbol(const char* str) {
Thread* THREAD = Thread::current();
Symbol* sym = SymbolTable::lookup(str, (int) strlen(str), THREAD);
if (HAS_PENDING_EXCEPTION) {
warning("can't create symbol\n");
CLEAR_PENDING_EXCEPTION;
return NULL;
}
return sym;
}
/*
* Initialize the array of decompressors.
*/
bool image_decompressor_init() {
Symbol* zipSymbol = createSymbol("zip");
if (zipSymbol == NULL) {
return false;
}
ImageDecompressor::add_decompressor(new ZipDecompressor(zipSymbol));
return true;
}
/*
* Decompression entry point. Called from ImageFileReader::get_resource.
*/
void ImageDecompressor::decompress_resource(u1* compressed, u1* uncompressed,
u4 uncompressed_size, const ImageStrings* strings, bool is_C_heap) {
bool has_header = false;
u1* decompressed_resource = compressed;
u1* compressed_resource = compressed;
// Resource could have been transformed by a stack of decompressors.
// Iterate and decompress resources until there is no more header.
do {
ResourceHeader _header;
memcpy(&_header, compressed_resource, sizeof (ResourceHeader));
has_header = _header._magic == ResourceHeader::resource_header_magic;
if (has_header) {
// decompressed_resource array contains the result of decompression
// when a resource content is terminal, it means that it is an actual resource,
// not an intermediate not fully uncompressed content. In this case
// the resource is allocated as an mtClass, otherwise as an mtOther
decompressed_resource = is_C_heap && _header._is_terminal ?
NEW_C_HEAP_ARRAY(u1, _header._uncompressed_size, mtClass) :
NEW_C_HEAP_ARRAY(u1, _header._uncompressed_size, mtOther);
// Retrieve the decompressor name
const char* decompressor_name = strings->get(_header._decompressor_name_offset);
if (decompressor_name == NULL) warning("image decompressor not found\n");
guarantee(decompressor_name, "image decompressor not found");
// Retrieve the decompressor instance
ImageDecompressor* decompressor = get_decompressor(decompressor_name);
if (decompressor == NULL) {
warning("image decompressor %s not found\n", decompressor_name);
}
guarantee(decompressor, "image decompressor not found");
u1* compressed_resource_base = compressed_resource;
compressed_resource += ResourceHeader::resource_header_length;
// Ask the decompressor to decompress the compressed content
decompressor->decompress_resource(compressed_resource, decompressed_resource,
&_header, strings);
if (compressed_resource_base != compressed) {
FREE_C_HEAP_ARRAY(char, compressed_resource_base);
}
compressed_resource = decompressed_resource;
}
} while (has_header);
memcpy(uncompressed, decompressed_resource, uncompressed_size);
}
// Zip decompressor
void ZipDecompressor::decompress_resource(u1* data, u1* uncompressed,
ResourceHeader* header, const ImageStrings* strings) {
char* msg = NULL;
jboolean res = ClassLoader::decompress(data, header->_size, uncompressed,
header->_uncompressed_size, &msg);
if (!res) warning("decompression failed due to %s\n", msg);
guarantee(res, "decompression failed");
}
// END Zip Decompressor

View File

@ -29,7 +29,6 @@
#include "classfile/vmSymbols.hpp" #include "classfile/vmSymbols.hpp"
#include "code/debugInfo.hpp" #include "code/debugInfo.hpp"
#include "code/pcDesc.hpp" #include "code/pcDesc.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/interpreter.hpp" #include "interpreter/interpreter.hpp"
#include "memory/oopFactory.hpp" #include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp" #include "memory/resourceArea.hpp"

View File

@ -0,0 +1,176 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "jni.h"
// Opaque reference to a JImage file.
class JImageFile;
// Opaque reference to an image file resource location.
typedef jlong JImageLocationRef;
// Max path length limit independent of platform. Windows max path is 1024,
// other platforms use 4096. The JCK fails several tests when 1024 is used.
#define JIMAGE_MAX_PATH 4096
// JImage Error Codes
// The image file is not prefixed with 0xCAFEDADA
#define JIMAGE_BAD_MAGIC (-1)
// The image file does not have a compatible (translatable) version
#define JIMAGE_BAD_VERSION (-2)
// The image file content is malformed
#define JIMAGE_CORRUPTED (-3)
/*
* JImageOpen - Given the supplied full path file name, open an image file. This
* function will also initialize tables and retrieve meta-data necessary to
* satisfy other functions in the API. If the image file has been previously
* open, a new open request will share memory and resources used by the previous
* open. A call to JImageOpen should be balanced by a call to JImageClose, to
* release memory and resources used. If the image file is not found or cannot
* be open, then NULL is returned and error will contain a reason for the
* failure; a positive value for a system error number, negative for a jimage
* specific error (see JImage Error Codes.)
*
* Ex.
* jint error;
* JImageFile* jimage = (*JImageOpen)(JAVA_HOME "lib/modules/bootmodules.jimage", &error);
* if (jimage == NULL) {
* tty->print_cr("JImage failed to open: %d", error);
* ...
* }
* ...
*/
extern "C" JImageFile* JIMAGE_Open(const char *name, jint* error);
typedef JImageFile* (*JImageOpen_t)(const char *name, jint* error);
/*
* JImageClose - Given the supplied open image file (see JImageOpen), release
* memory and resources used by the open file and close the file. If the image
* file is shared by other uses, the release and close are deferred until the
* last use is also closed.
*
* Ex.
* (*JImageClose)(image);
*/
extern "C" void JIMAGE_Close(JImageFile* jimage);
typedef void (*JImageClose_t)(JImageFile* jimage);
/*
* JImagePackageToModule - Given an open image file (see JImageOpen) and the name
* of a package, return the name of module where the package resides. If the
* package does not exist in the image file, the function returns NULL.
* The resulting string does not need to be released. All strings are
* utf-8, zero byte terminated.
*
* Ex.
* const char* package = (*JImagePackageToModule)(image, "java/lang");
* tty->print_cr(package);
* > java.base
*/
extern "C" const char * JIMAGE_PackageToModule(JImageFile* jimage, const char* package_name);
typedef const char* (*JImagePackageToModule_t)(JImageFile* jimage, const char* package_name);
/*
* JImageFindResource - Given an open image file (see JImageOpen), a module
* name, a version string and the name of a class/resource, return location
* information describing the resource and its size. If no resource is found, the
* function returns JIMAGE_NOT_FOUND and the value of size is undefined.
* The version number should be "9.0" and is not used in locating the resource.
* The resulting location does not need to be released.
* All strings are utf-8, zero byte terminated.
*
* Ex.
* jlong size;
* JImageLocationRef location = (*JImageFindResource)(image, "java.base", "9.0", "java/lang/String.class", &size);
*/
extern "C" JImageLocationRef JIMAGE_FindResource(JImageFile* jimage,
const char* module_name, const char* version, const char* name,
jlong* size);
typedef JImageLocationRef(*JImageFindResource_t)(JImageFile* jimage,
const char* module_name, const char* version, const char* name,
jlong* size);
/*
* JImageGetResource - Given an open image file (see JImageOpen), a resource's
* location information (see JImageFindResource), a buffer of appropriate
* size and the buffer size, retrieve the bytes associated with the
* resource. If the size is less than the resource size then the read is truncated.
* If the size is greater than the resource size then the remainder of the buffer
* is zero filled. The function will return the actual size of the resource.
*
* Ex.
* jlong size;
* JImageLocationRef location = (*JImageFindResource)(image, "java.base", "9.0", "java/lang/String.class", &size);
* char* buffer = new char[size];
* (*JImageGetResource)(image, location, buffer, size);
*/
extern "C" jlong JIMAGE_GetResource(JImageFile* jimage, JImageLocationRef location,
char* buffer, jlong size);
typedef jlong(*JImageGetResource_t)(JImageFile* jimage, JImageLocationRef location,
char* buffer, jlong size);
/*
* JImageResourceIterator - Given an open image file (see JImageOpen), a visitor
* function and a visitor argument, iterate through each of the image's resources.
* The visitor function is called with the image file, the module name, the
* package name, the base name, the extension and the visitor argument. The return
* value of the visitor function should be true, unless an early iteration exit is
* required. All strings are utf-8, zero byte terminated.
*
* Ex.
* bool ctw_visitor(JImageFile* jimage, const char* module_name, const char* version, const char* package, const char* name, const char* extension, void* arg) {
* if (strcmp(extension, "class") == 0) {
* char path[JIMAGE_MAX_PATH];
* Thread* THREAD = Thread::current();
* jio_snprintf(path, JIMAGE_MAX_PATH - 1, "%s/%s.class", package, name);
* ClassLoader::compile_the_world_in(path, *(Handle*)arg, THREAD);
* return !HAS_PENDING_EXCEPTION;
* }
* return true;
* }
* (*JImageResourceIterator)(image, ctw_visitor, (void*)&loader);
*/
typedef bool (*JImageResourceVisitor_t)(JImageFile* jimage,
const char* module_name, const char* version, const char* package,
const char* name, const char* extension, void* arg);
extern "C" void JIMAGE_ResourceIterator(JImageFile* jimage,
JImageResourceVisitor_t visitor, void *arg);
typedef void (*JImageResourceIterator_t)(JImageFile* jimage,
JImageResourceVisitor_t visitor, void* arg);
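Taken together, these entry points support a simple open / find / read / close
sequence. A minimal sketch, assuming the function pointers have been resolved as
in ClassLoader::load_jimage_library and that a modules image exists at the path
shown:
  jint error;
  JImageFile* jimage = (*JImageOpen)("lib/modules/bootmodules.jimage", &error);
  if (jimage == NULL) {
    tty->print_cr("JImage failed to open: %d", error);
    // ... handle the failure ...
  } else {
    jlong size;
    JImageLocationRef location =
        (*JImageFindResource)(jimage, "java.base", "9.0", "java/lang/String.class", &size);
    if (location != 0) {
      char* data = NEW_RESOURCE_ARRAY(char, size);        // resource-area allocation, as in open_stream
      (*JImageGetResource)(jimage, location, data, size); // truncates or zero-fills per the contract above
      // ... use the class bytes in data ...
    }
    (*JImageClose)(jimage);
  }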

View File

@ -625,6 +625,10 @@ bool vmIntrinsics::is_disabled_by_flags(methodHandle method, methodHandle compil
case vmIntrinsics::_updateDirectByteBufferCRC32C: case vmIntrinsics::_updateDirectByteBufferCRC32C:
if (!UseCRC32CIntrinsics) return true; if (!UseCRC32CIntrinsics) return true;
break; break;
case vmIntrinsics::_updateBytesAdler32:
case vmIntrinsics::_updateByteBufferAdler32:
if (!UseAdler32Intrinsics) return true;
break;
case vmIntrinsics::_copyMemory: case vmIntrinsics::_copyMemory:
if (!InlineArrayCopy || !InlineUnsafeOps) return true; if (!InlineArrayCopy || !InlineUnsafeOps) return true;
break; break;

View File

@ -927,6 +927,12 @@
do_intrinsic(_updateDirectByteBufferCRC32C, java_util_zip_CRC32C, updateDirectByteBuffer_C_name, updateByteBuffer_signature, F_S) \ do_intrinsic(_updateDirectByteBufferCRC32C, java_util_zip_CRC32C, updateDirectByteBuffer_C_name, updateByteBuffer_signature, F_S) \
do_name( updateDirectByteBuffer_C_name, "updateDirectByteBuffer") \ do_name( updateDirectByteBuffer_C_name, "updateDirectByteBuffer") \
\ \
/* support for java.util.zip.Adler32 */ \
do_class(java_util_zip_Adler32, "java/util/zip/Adler32") \
do_intrinsic(_updateBytesAdler32, java_util_zip_Adler32, updateBytes_C_name, updateBytes_signature, F_SN) \
do_intrinsic(_updateByteBufferAdler32, java_util_zip_Adler32, updateByteBuffer_A_name, updateByteBuffer_signature, F_SN) \
do_name( updateByteBuffer_A_name, "updateByteBuffer") \
\
/* support for sun.misc.Unsafe */ \ /* support for sun.misc.Unsafe */ \
do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \ do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \
\ \

View File

@ -848,10 +848,10 @@ void nmethod::print_on(outputStream* st, const char* msg) const {
if (st != NULL) { if (st != NULL) {
ttyLocker ttyl; ttyLocker ttyl;
if (WizardMode) { if (WizardMode) {
CompileTask::print_compilation(st, this, msg, /*short_form:*/ true); CompileTask::print(st, this, msg, /*short_form:*/ true);
st->print_cr(" (" INTPTR_FORMAT ")", this); st->print_cr(" (" INTPTR_FORMAT ")", this);
} else { } else {
CompileTask::print_compilation(st, this, msg, /*short_form:*/ false); CompileTask::print(st, this, msg, /*short_form:*/ false);
} }
} }
} }

View File

@ -157,7 +157,6 @@ long CompileBroker::_peak_compilation_time = 0;
CompileQueue* CompileBroker::_c2_compile_queue = NULL; CompileQueue* CompileBroker::_c2_compile_queue = NULL;
CompileQueue* CompileBroker::_c1_compile_queue = NULL; CompileQueue* CompileBroker::_c1_compile_queue = NULL;
class CompilationLog : public StringEventLog { class CompilationLog : public StringEventLog {
public: public:
CompilationLog() : StringEventLog("Compilation events") { CompilationLog() : StringEventLog("Compilation events") {
@ -167,7 +166,7 @@ class CompilationLog : public StringEventLog {
StringLogMessage lm; StringLogMessage lm;
stringStream sstr = lm.stream(); stringStream sstr = lm.stream();
// msg.time_stamp().update_to(tty->time_stamp().ticks()); // msg.time_stamp().update_to(tty->time_stamp().ticks());
task->print_compilation(&sstr, NULL, true, false); task->print(&sstr, NULL, true, false);
log(thread, "%s", (const char*)lm); log(thread, "%s", (const char*)lm);
} }
@ -233,371 +232,6 @@ CompileTaskWrapper::~CompileTaskWrapper() {
} }
} }
CompileTask* CompileTask::_task_free_list = NULL;
#ifdef ASSERT
int CompileTask::_num_allocated_tasks = 0;
#endif
/**
* Allocate a CompileTask, from the free list if possible.
*/
CompileTask* CompileTask::allocate() {
MutexLocker locker(CompileTaskAlloc_lock);
CompileTask* task = NULL;
if (_task_free_list != NULL) {
task = _task_free_list;
_task_free_list = task->next();
task->set_next(NULL);
} else {
task = new CompileTask();
DEBUG_ONLY(_num_allocated_tasks++;)
assert (WhiteBoxAPI || _num_allocated_tasks < 10000, "Leaking compilation tasks?");
task->set_next(NULL);
task->set_is_free(true);
}
assert(task->is_free(), "Task must be free.");
task->set_is_free(false);
return task;
}
/**
* Add a task to the free list.
*/
void CompileTask::free(CompileTask* task) {
MutexLocker locker(CompileTaskAlloc_lock);
if (!task->is_free()) {
task->set_code(NULL);
assert(!task->lock()->is_locked(), "Should not be locked when freed");
JNIHandles::destroy_global(task->_method_holder);
JNIHandles::destroy_global(task->_hot_method_holder);
task->set_is_free(true);
task->set_next(_task_free_list);
_task_free_list = task;
}
}
void CompileTask::initialize(int compile_id,
methodHandle method,
int osr_bci,
int comp_level,
methodHandle hot_method,
int hot_count,
const char* comment,
bool is_blocking) {
assert(!_lock->is_locked(), "bad locking");
_compile_id = compile_id;
_method = method();
_method_holder = JNIHandles::make_global(method->method_holder()->klass_holder());
_osr_bci = osr_bci;
_is_blocking = is_blocking;
_comp_level = comp_level;
_num_inlined_bytecodes = 0;
_is_complete = false;
_is_success = false;
_code_handle = NULL;
_hot_method = NULL;
_hot_method_holder = NULL;
_hot_count = hot_count;
_time_queued = 0; // tidy
_comment = comment;
_failure_reason = NULL;
if (LogCompilation) {
_time_queued = os::elapsed_counter();
if (hot_method.not_null()) {
if (hot_method == method) {
_hot_method = _method;
} else {
_hot_method = hot_method();
// only add loader or mirror if different from _method_holder
_hot_method_holder = JNIHandles::make_global(hot_method->method_holder()->klass_holder());
}
}
}
_next = NULL;
}
// ------------------------------------------------------------------
// CompileTask::code/set_code
nmethod* CompileTask::code() const {
if (_code_handle == NULL) return NULL;
return _code_handle->code();
}
void CompileTask::set_code(nmethod* nm) {
if (_code_handle == NULL && nm == NULL) return;
guarantee(_code_handle != NULL, "");
_code_handle->set_code(nm);
if (nm == NULL) _code_handle = NULL; // drop the handle also
}
void CompileTask::mark_on_stack() {
// Mark these methods as something redefine classes cannot remove.
_method->set_on_stack(true);
if (_hot_method != NULL) {
_hot_method->set_on_stack(true);
}
}
// RedefineClasses support
void CompileTask::metadata_do(void f(Metadata*)) {
f(method());
if (hot_method() != NULL && hot_method() != method()) {
f(hot_method());
}
}
// ------------------------------------------------------------------
// CompileTask::print_line_on_error
//
// This function is called by fatal error handler when the thread
// causing troubles is a compiler thread.
//
// Do not grab any lock, do not allocate memory.
//
// Otherwise it's the same as CompileTask::print_line()
//
void CompileTask::print_line_on_error(outputStream* st, char* buf, int buflen) {
// print compiler name
st->print("%s:", CompileBroker::compiler_name(comp_level()));
print_compilation(st);
}
// ------------------------------------------------------------------
// CompileTask::print_line
void CompileTask::print_tty() {
ttyLocker ttyl; // keep the following output all in one block
// print compiler name if requested
if (CIPrintCompilerName) tty->print("%s:", CompileBroker::compiler_name(comp_level()));
print_compilation(tty);
}
// ------------------------------------------------------------------
// CompileTask::print_compilation_impl
void CompileTask::print_compilation_impl(outputStream* st, Method* method, int compile_id, int comp_level,
bool is_osr_method, int osr_bci, bool is_blocking,
const char* msg, bool short_form, bool cr) {
if (!short_form) {
st->print("%7d ", (int) st->time_stamp().milliseconds()); // print timestamp
}
st->print("%4d ", compile_id); // print compilation number
// For unloaded methods the transition to zombie occurs after the
// method is cleared so it's impossible to report accurate
// information for that case.
bool is_synchronized = false;
bool has_exception_handler = false;
bool is_native = false;
if (method != NULL) {
is_synchronized = method->is_synchronized();
has_exception_handler = method->has_exception_handler();
is_native = method->is_native();
}
// method attributes
const char compile_type = is_osr_method ? '%' : ' ';
const char sync_char = is_synchronized ? 's' : ' ';
const char exception_char = has_exception_handler ? '!' : ' ';
const char blocking_char = is_blocking ? 'b' : ' ';
const char native_char = is_native ? 'n' : ' ';
// print method attributes
st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
if (TieredCompilation) {
if (comp_level != -1) st->print("%d ", comp_level);
else st->print("- ");
}
st->print(" "); // more indent
if (method == NULL) {
st->print("(method)");
} else {
method->print_short_name(st);
if (is_osr_method) {
st->print(" @ %d", osr_bci);
}
if (method->is_native())
st->print(" (native)");
else
st->print(" (%d bytes)", method->code_size());
}
if (msg != NULL) {
st->print(" %s", msg);
}
if (cr) {
st->cr();
}
}
// ------------------------------------------------------------------
// CompileTask::print_inlining
void CompileTask::print_inlining(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg) {
// 1234567
st->print(" "); // print timestamp
// 1234
st->print(" "); // print compilation number
// method attributes
if (method->is_loaded()) {
const char sync_char = method->is_synchronized() ? 's' : ' ';
const char exception_char = method->has_exception_handlers() ? '!' : ' ';
const char monitors_char = method->has_monitor_bytecodes() ? 'm' : ' ';
// print method attributes
st->print(" %c%c%c ", sync_char, exception_char, monitors_char);
} else {
// %s!bn
st->print(" "); // print method attributes
}
if (TieredCompilation) {
st->print(" ");
}
st->print(" "); // more indent
st->print(" "); // initial inlining indent
for (int i = 0; i < inline_level; i++) st->print(" ");
st->print("@ %d ", bci); // print bci
method->print_short_name(st);
if (method->is_loaded())
st->print(" (%d bytes)", method->code_size());
else
st->print(" (not loaded)");
if (msg != NULL) {
st->print(" %s", msg);
}
st->cr();
}
// ------------------------------------------------------------------
// CompileTask::print_inline_indent
void CompileTask::print_inline_indent(int inline_level, outputStream* st) {
// 1234567
st->print(" "); // print timestamp
// 1234
st->print(" "); // print compilation number
// %s!bn
st->print(" "); // print method attributes
if (TieredCompilation) {
st->print(" ");
}
st->print(" "); // more indent
st->print(" "); // initial inlining indent
for (int i = 0; i < inline_level; i++) st->print(" ");
}
// ------------------------------------------------------------------
// CompileTask::print_compilation
void CompileTask::print_compilation(outputStream* st, const char* msg, bool short_form, bool cr) {
bool is_osr_method = osr_bci() != InvocationEntryBci;
print_compilation_impl(st, method(), compile_id(), comp_level(), is_osr_method, osr_bci(), is_blocking(), msg, short_form, cr);
}
// ------------------------------------------------------------------
// CompileTask::log_task
void CompileTask::log_task(xmlStream* log) {
Thread* thread = Thread::current();
methodHandle method(thread, this->method());
ResourceMark rm(thread);
// <task compiler='Cx' id='9' method='M' osr_bci='X' level='1' blocking='1' stamp='1.234'>
log->print(" compiler='%s' compile_id='%d'", _comp_level <= CompLevel_full_profile ? "C1" : "C2", _compile_id);
if (_osr_bci != CompileBroker::standard_entry_bci) {
log->print(" compile_kind='osr'"); // same as nmethod::compile_kind
} // else compile_kind='c2c'
if (!method.is_null()) log->method(method);
if (_osr_bci != CompileBroker::standard_entry_bci) {
log->print(" osr_bci='%d'", _osr_bci);
}
if (_comp_level != CompLevel_highest_tier) {
log->print(" level='%d'", _comp_level);
}
if (_is_blocking) {
log->print(" blocking='1'");
}
log->stamp();
}
// ------------------------------------------------------------------
// CompileTask::log_task_queued
void CompileTask::log_task_queued() {
Thread* thread = Thread::current();
ttyLocker ttyl;
ResourceMark rm(thread);
xtty->begin_elem("task_queued");
log_task(xtty);
if (_comment != NULL) {
xtty->print(" comment='%s'", _comment);
}
if (_hot_method != NULL) {
methodHandle hot(thread, _hot_method);
methodHandle method(thread, _method);
if (hot() != method()) {
xtty->method(hot);
}
}
if (_hot_count != 0) {
xtty->print(" hot_count='%d'", _hot_count);
}
xtty->end_elem();
}
// ------------------------------------------------------------------
// CompileTask::log_task_start
void CompileTask::log_task_start(CompileLog* log) {
log->begin_head("task");
log_task(log);
log->end_head();
}
// ------------------------------------------------------------------
// CompileTask::log_task_done
void CompileTask::log_task_done(CompileLog* log) {
Thread* thread = Thread::current();
methodHandle method(thread, this->method());
ResourceMark rm(thread);
if (!_is_success) {
const char* reason = _failure_reason != NULL ? _failure_reason : "unknown";
log->elem("failure reason='%s'", reason);
}
// <task_done ... stamp='1.234'> </task>
nmethod* nm = code();
log->begin_elem("task_done success='%d' nmsize='%d' count='%d'",
_is_success, nm == NULL ? 0 : nm->content_size(),
method->invocation_count());
int bec = method->backedge_count();
if (bec != 0) log->print(" backedge_count='%d'", bec);
// Note: "_is_complete" is about to be set, but is not.
if (_num_inlined_bytecodes != 0) {
log->print(" inlined_bytes='%d'", _num_inlined_bytecodes);
}
log->stamp();
log->end_elem();
log->tail("task");
log->clear_identities(); // next task will have different CI
if (log->unflushed_count() > 2000) {
log->flush();
}
log->mark_file_end();
}
/** /**
* Add a CompileTask to a CompileQueue. * Add a CompileTask to a CompileQueue.
*/ */
@ -807,7 +441,7 @@ void CompileQueue::print(outputStream* st) {
st->print_cr("Empty"); st->print_cr("Empty");
} else { } else {
while (task != NULL) { while (task != NULL) {
task->print_compilation(st, NULL, true, true); task->print(st, NULL, true, true);
task = task->next(); task = task->next();
} }
} }
@ -1349,7 +983,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
#ifndef TIERED #ifndef TIERED
// seems like an assert of dubious value // seems like an assert of dubious value
assert(comp_level == CompLevel_highest_tier, assert(comp_level == CompLevel_highest_tier,
"all OSR compiles are assumed to be at a single compilation lavel"); "all OSR compiles are assumed to be at a single compilation level");
#endif // TIERED #endif // TIERED
// We accept a higher level osr method // We accept a higher level osr method
nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false); nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
@ -2037,7 +1671,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
FormatBufferResource msg = retry_message != NULL ? FormatBufferResource msg = retry_message != NULL ?
err_msg_res("COMPILE SKIPPED: %s (%s)", ci_env.failure_reason(), retry_message) : err_msg_res("COMPILE SKIPPED: %s (%s)", ci_env.failure_reason(), retry_message) :
err_msg_res("COMPILE SKIPPED: %s", ci_env.failure_reason()); err_msg_res("COMPILE SKIPPED: %s", ci_env.failure_reason());
task->print_compilation(tty, msg); task->print(tty, msg);
} }
} else { } else {
task->mark_success(); task->mark_success();

View File

@ -27,127 +27,12 @@
#include "ci/compilerInterface.hpp" #include "ci/compilerInterface.hpp"
#include "compiler/abstractCompiler.hpp" #include "compiler/abstractCompiler.hpp"
#include "compiler/compileTask.hpp"
#include "runtime/perfData.hpp" #include "runtime/perfData.hpp"
class nmethod; class nmethod;
class nmethodLocker; class nmethodLocker;
// CompileTask
//
// An entry in the compile queue. It represents a pending or current
// compilation.
class CompileTask : public CHeapObj<mtCompiler> {
friend class VMStructs;
private:
static CompileTask* _task_free_list;
#ifdef ASSERT
static int _num_allocated_tasks;
#endif
Monitor* _lock;
uint _compile_id;
Method* _method;
jobject _method_holder;
int _osr_bci;
bool _is_complete;
bool _is_success;
bool _is_blocking;
int _comp_level;
int _num_inlined_bytecodes;
nmethodLocker* _code_handle; // holder of eventual result
CompileTask* _next, *_prev;
bool _is_free;
// Fields used for logging why the compilation was initiated:
jlong _time_queued; // in units of os::elapsed_counter()
Method* _hot_method; // which method actually triggered this task
jobject _hot_method_holder;
int _hot_count; // information about its invocation counter
const char* _comment; // more info about the task
const char* _failure_reason;
public:
CompileTask() {
_lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock");
}
void initialize(int compile_id, methodHandle method, int osr_bci, int comp_level,
methodHandle hot_method, int hot_count, const char* comment,
bool is_blocking);
static CompileTask* allocate();
static void free(CompileTask* task);
int compile_id() const { return _compile_id; }
Method* method() const { return _method; }
Method* hot_method() const { return _hot_method; }
int osr_bci() const { return _osr_bci; }
bool is_complete() const { return _is_complete; }
bool is_blocking() const { return _is_blocking; }
bool is_success() const { return _is_success; }
nmethodLocker* code_handle() const { return _code_handle; }
void set_code_handle(nmethodLocker* l) { _code_handle = l; }
nmethod* code() const; // _code_handle->code()
void set_code(nmethod* nm); // _code_handle->set_code(nm)
Monitor* lock() const { return _lock; }
void mark_complete() { _is_complete = true; }
void mark_success() { _is_success = true; }
int comp_level() { return _comp_level;}
void set_comp_level(int comp_level) { _comp_level = comp_level;}
int num_inlined_bytecodes() const { return _num_inlined_bytecodes; }
void set_num_inlined_bytecodes(int n) { _num_inlined_bytecodes = n; }
CompileTask* next() const { return _next; }
void set_next(CompileTask* next) { _next = next; }
CompileTask* prev() const { return _prev; }
void set_prev(CompileTask* prev) { _prev = prev; }
bool is_free() const { return _is_free; }
void set_is_free(bool val) { _is_free = val; }
// RedefineClasses support
void metadata_do(void f(Metadata*));
private:
static void print_compilation_impl(outputStream* st, Method* method, int compile_id, int comp_level,
bool is_osr_method = false, int osr_bci = -1, bool is_blocking = false,
const char* msg = NULL, bool short_form = false, bool cr = true);
public:
void print_compilation(outputStream* st = tty, const char* msg = NULL, bool short_form = false, bool cr = true);
static void print_compilation(outputStream* st, const nmethod* nm, const char* msg = NULL, bool short_form = false, bool cr = true) {
print_compilation_impl(st, nm->method(), nm->compile_id(), nm->comp_level(),
nm->is_osr_method(), nm->is_osr_method() ? nm->osr_entry_bci() : -1, /*is_blocking*/ false,
msg, short_form, cr);
}
static void print_inlining(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg = NULL);
static void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
print_inlining(tty, method, inline_level, bci, msg);
}
// Redefine Classes support
void mark_on_stack();
static void print_inline_indent(int inline_level, outputStream* st = tty);
void print_tty();
void print_line_on_error(outputStream* st, char* buf, int buflen);
void log_task(xmlStream* log);
void log_task_queued();
void log_task_start(CompileLog* log);
void log_task_done(CompileLog* log);
void set_failure_reason(const char* reason) {
_failure_reason = reason;
}
};
// CompilerCounters // CompilerCounters
// //
// Per Compiler Performance Counters. // Per Compiler Performance Counters.

View File

@ -0,0 +1,391 @@
/*
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compileBroker.hpp"
CompileTask* CompileTask::_task_free_list = NULL;
#ifdef ASSERT
int CompileTask::_num_allocated_tasks = 0;
#endif
/**
* Allocate a CompileTask, from the free list if possible.
*/
CompileTask* CompileTask::allocate() {
MutexLocker locker(CompileTaskAlloc_lock);
CompileTask* task = NULL;
if (_task_free_list != NULL) {
task = _task_free_list;
_task_free_list = task->next();
task->set_next(NULL);
} else {
task = new CompileTask();
DEBUG_ONLY(_num_allocated_tasks++;)
assert (WhiteBoxAPI || _num_allocated_tasks < 10000, "Leaking compilation tasks?");
task->set_next(NULL);
task->set_is_free(true);
}
assert(task->is_free(), "Task must be free.");
task->set_is_free(false);
return task;
}
/**
* Add a task to the free list.
*/
void CompileTask::free(CompileTask* task) {
MutexLocker locker(CompileTaskAlloc_lock);
if (!task->is_free()) {
task->set_code(NULL);
assert(!task->lock()->is_locked(), "Should not be locked when freed");
JNIHandles::destroy_global(task->_method_holder);
JNIHandles::destroy_global(task->_hot_method_holder);
task->set_is_free(true);
task->set_next(_task_free_list);
_task_free_list = task;
}
}
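// A minimal lifecycle sketch (the queueing between initialize() and free()
// happens in CompileBroker and is assumed here):
//   CompileTask* task = CompileTask::allocate();  // reuses a node from _task_free_list if possible
//   task->initialize(id, method, osr_bci, level, hot_method, hot_count, comment, /*is_blocking*/ false);
//   // ... enqueue, compile, mark_complete() ...
//   CompileTask::free(task);                      // recycles the node onto the free list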
void CompileTask::initialize(int compile_id,
methodHandle method,
int osr_bci,
int comp_level,
methodHandle hot_method,
int hot_count,
const char* comment,
bool is_blocking) {
assert(!_lock->is_locked(), "bad locking");
_compile_id = compile_id;
_method = method();
_method_holder = JNIHandles::make_global(method->method_holder()->klass_holder());
_osr_bci = osr_bci;
_is_blocking = is_blocking;
_comp_level = comp_level;
_num_inlined_bytecodes = 0;
_is_complete = false;
_is_success = false;
_code_handle = NULL;
_hot_method = NULL;
_hot_method_holder = NULL;
_hot_count = hot_count;
_time_queued = 0; // tidy
_comment = comment;
_failure_reason = NULL;
if (LogCompilation) {
_time_queued = os::elapsed_counter();
if (hot_method.not_null()) {
if (hot_method == method) {
_hot_method = _method;
} else {
_hot_method = hot_method();
// only add loader or mirror if different from _method_holder
_hot_method_holder = JNIHandles::make_global(hot_method->method_holder()->klass_holder());
}
}
}
_next = NULL;
}
// ------------------------------------------------------------------
// CompileTask::code/set_code
//
nmethod* CompileTask::code() const {
if (_code_handle == NULL) return NULL;
return _code_handle->code();
}
void CompileTask::set_code(nmethod* nm) {
if (_code_handle == NULL && nm == NULL) return;
guarantee(_code_handle != NULL, "");
_code_handle->set_code(nm);
if (nm == NULL) _code_handle = NULL; // drop the handle also
}
void CompileTask::mark_on_stack() {
// Mark these methods as something redefine classes cannot remove.
_method->set_on_stack(true);
if (_hot_method != NULL) {
_hot_method->set_on_stack(true);
}
}
// RedefineClasses support
void CompileTask::metadata_do(void f(Metadata*)) {
f(method());
if (hot_method() != NULL && hot_method() != method()) {
f(hot_method());
}
}
// ------------------------------------------------------------------
// CompileTask::print_line_on_error
//
// This function is called by fatal error handler when the thread
// causing troubles is a compiler thread.
//
// Do not grab any lock, do not allocate memory.
//
// Otherwise it's the same as CompileTask::print_line()
//
void CompileTask::print_line_on_error(outputStream* st, char* buf, int buflen) {
// print compiler name
st->print("%s:", CompileBroker::compiler_name(comp_level()));
print(st);
}
// ------------------------------------------------------------------
// CompileTask::print_tty
void CompileTask::print_tty() {
ttyLocker ttyl; // keep the following output all in one block
// print compiler name if requested
if (CIPrintCompilerName) tty->print("%s:", CompileBroker::compiler_name(comp_level()));
print(tty);
}
// ------------------------------------------------------------------
// CompileTask::print_impl
void CompileTask::print_impl(outputStream* st, Method* method, int compile_id, int comp_level,
bool is_osr_method, int osr_bci, bool is_blocking,
const char* msg, bool short_form, bool cr) {
if (!short_form) {
st->print("%7d ", (int) st->time_stamp().milliseconds()); // print timestamp
}
st->print("%4d ", compile_id); // print compilation number
// For unloaded methods the transition to zombie occurs after the
// method is cleared so it's impossible to report accurate
// information for that case.
bool is_synchronized = false;
bool has_exception_handler = false;
bool is_native = false;
if (method != NULL) {
is_synchronized = method->is_synchronized();
has_exception_handler = method->has_exception_handler();
is_native = method->is_native();
}
// method attributes
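// ('%' = OSR, 's' = synchronized, '!' = has exception handlers,
//  'b' = blocking, 'n' = native; a space means the attribute is absent)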
const char compile_type = is_osr_method ? '%' : ' ';
const char sync_char = is_synchronized ? 's' : ' ';
const char exception_char = has_exception_handler ? '!' : ' ';
const char blocking_char = is_blocking ? 'b' : ' ';
const char native_char = is_native ? 'n' : ' ';
// print method attributes
st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
if (TieredCompilation) {
if (comp_level != -1) st->print("%d ", comp_level);
else st->print("- ");
}
st->print(" "); // more indent
if (method == NULL) {
st->print("(method)");
} else {
method->print_short_name(st);
if (is_osr_method) {
st->print(" @ %d", osr_bci);
}
if (method->is_native())
st->print(" (native)");
else
st->print(" (%d bytes)", method->code_size());
}
if (msg != NULL) {
st->print(" %s", msg);
}
if (cr) {
st->cr();
}
}
void CompileTask::print_inline_indent(int inline_level, outputStream* st) {
// 1234567
st->print(" "); // print timestamp
// 1234
st->print(" "); // print compilation number
// %s!bn
st->print(" "); // print method attributes
if (TieredCompilation) {
st->print(" ");
}
st->print(" "); // more indent
st->print(" "); // initial inlining indent
for (int i = 0; i < inline_level; i++) st->print(" ");
}
// ------------------------------------------------------------------
// CompileTask::print_compilation
void CompileTask::print(outputStream* st, const char* msg, bool short_form, bool cr) {
bool is_osr_method = osr_bci() != InvocationEntryBci;
print_impl(st, method(), compile_id(), comp_level(), is_osr_method, osr_bci(), is_blocking(), msg, short_form, cr);
}
// ------------------------------------------------------------------
// CompileTask::log_task
void CompileTask::log_task(xmlStream* log) {
Thread* thread = Thread::current();
methodHandle method(thread, this->method());
ResourceMark rm(thread);
// <task id='9' method='M' osr_bci='X' level='1' blocking='1' stamp='1.234'>
log->print(" compile_id='%d'", _compile_id);
if (_osr_bci != CompileBroker::standard_entry_bci) {
log->print(" compile_kind='osr'"); // same as nmethod::compile_kind
} // else compile_kind='c2c'
if (!method.is_null()) log->method(method);
if (_osr_bci != CompileBroker::standard_entry_bci) {
log->print(" osr_bci='%d'", _osr_bci);
}
if (_comp_level != CompLevel_highest_tier) {
log->print(" level='%d'", _comp_level);
}
if (_is_blocking) {
log->print(" blocking='1'");
}
log->stamp();
}
// ------------------------------------------------------------------
// CompileTask::log_task_queued
void CompileTask::log_task_queued() {
Thread* thread = Thread::current();
ttyLocker ttyl;
ResourceMark rm(thread);
xtty->begin_elem("task_queued");
log_task(xtty);
if (_comment != NULL) {
xtty->print(" comment='%s'", _comment);
}
if (_hot_method != NULL) {
methodHandle hot(thread, _hot_method);
methodHandle method(thread, _method);
if (hot() != method()) {
xtty->method(hot);
}
}
if (_hot_count != 0) {
xtty->print(" hot_count='%d'", _hot_count);
}
xtty->end_elem();
}
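// The element assembled above comes out roughly as follows (illustrative
// values; the exact attribute set depends on the task):
//
//   <task_queued compile_id='42' method='java/lang/String hashCode ()I'
//                level='3' comment='count' hot_count='512' stamp='1.234'/>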
// ------------------------------------------------------------------
// CompileTask::log_task_start
void CompileTask::log_task_start(CompileLog* log) {
log->begin_head("task");
log_task(log);
log->end_head();
}
// ------------------------------------------------------------------
// CompileTask::log_task_done
void CompileTask::log_task_done(CompileLog* log) {
Thread* thread = Thread::current();
methodHandle method(thread, this->method());
ResourceMark rm(thread);
if (!_is_success) {
const char* reason = _failure_reason != NULL ? _failure_reason : "unknown";
log->elem("failure reason='%s'", reason);
}
// <task_done ... stamp='1.234'> </task>
nmethod* nm = code();
log->begin_elem("task_done success='%d' nmsize='%d' count='%d'",
_is_success, nm == NULL ? 0 : nm->content_size(),
method->invocation_count());
int bec = method->backedge_count();
if (bec != 0) log->print(" backedge_count='%d'", bec);
// Note: "_is_complete" is about to be set, but is not.
if (_num_inlined_bytecodes != 0) {
log->print(" inlined_bytes='%d'", _num_inlined_bytecodes);
}
log->stamp();
log->end_elem();
log->clear_identities(); // next task will have different CI
log->tail("task");
if (log->unflushed_count() > 2000) {
log->flush();
}
log->mark_file_end();
}
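// The fragment written above looks roughly like this (made-up values):
//
//   <task_done success='1' nmsize='1376' count='5000' backedge_count='7500'
//              inlined_bytes='120' stamp='2.345'/>
//   </task>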
// ------------------------------------------------------------------
// CompileTask::print_inlining
void CompileTask::print_inlining_inner(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg) {
// 1234567
st->print(" "); // print timestamp
// 1234
st->print(" "); // print compilation number
// method attributes
if (method->is_loaded()) {
const char sync_char = method->is_synchronized() ? 's' : ' ';
const char exception_char = method->has_exception_handlers() ? '!' : ' ';
const char monitors_char = method->has_monitor_bytecodes() ? 'm' : ' ';
// print method attributes
st->print(" %c%c%c ", sync_char, exception_char, monitors_char);
} else {
// %s!bn
st->print(" "); // print method attributes
}
if (TieredCompilation) {
st->print(" ");
}
st->print(" "); // more indent
st->print(" "); // initial inlining indent
for (int i = 0; i < inline_level; i++) st->print(" ");
st->print("@ %d ", bci); // print bci
method->print_short_name(st);
if (method->is_loaded())
st->print(" (%d bytes)", method->code_size());
else
st->print(" (not loaded)");
if (msg != NULL) {
st->print(" %s", msg);
}
st->cr();
}
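// A sample line as printed above (illustrative only; the trailing message,
// if any, is supplied by the caller):
//
//                           @ 10   java.util.Arrays::copyOf (19 bytes)   inline (hot)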

View File

@@ -0,0 +1,151 @@
/*
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_COMPILER_COMPILETASK_HPP
#define SHARE_VM_COMPILER_COMPILETASK_HPP
#include "code/nmethod.hpp"
#include "ci/ciMethod.hpp"
#include "compiler/compileLog.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/xmlstream.hpp"
// CompileTask
//
// An entry in the compile queue. It represents a pending or current
// compilation.
class CompileTask : public CHeapObj<mtCompiler> {
friend class VMStructs;
private:
static CompileTask* _task_free_list;
#ifdef ASSERT
static int _num_allocated_tasks;
#endif
Monitor* _lock;
uint _compile_id;
Method* _method;
jobject _method_holder;
int _osr_bci;
bool _is_complete;
bool _is_success;
bool _is_blocking;
int _comp_level;
int _num_inlined_bytecodes;
nmethodLocker* _code_handle; // holder of eventual result
CompileTask* _next, *_prev;
bool _is_free;
// Fields used for logging why the compilation was initiated:
jlong _time_queued; // in units of os::elapsed_counter()
Method* _hot_method; // which method actually triggered this task
jobject _hot_method_holder;
int _hot_count; // information about its invocation counter
const char* _comment; // more info about the task
const char* _failure_reason;
public:
CompileTask() {
_lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock");
}
void initialize(int compile_id, methodHandle method, int osr_bci, int comp_level,
methodHandle hot_method, int hot_count, const char* comment,
bool is_blocking);
static CompileTask* allocate();
static void free(CompileTask* task);
int compile_id() const { return _compile_id; }
Method* method() const { return _method; }
Method* hot_method() const { return _hot_method; }
int osr_bci() const { return _osr_bci; }
bool is_complete() const { return _is_complete; }
bool is_blocking() const { return _is_blocking; }
bool is_success() const { return _is_success; }
nmethodLocker* code_handle() const { return _code_handle; }
void set_code_handle(nmethodLocker* l) { _code_handle = l; }
nmethod* code() const; // _code_handle->code()
void set_code(nmethod* nm); // _code_handle->set_code(nm)
Monitor* lock() const { return _lock; }
void mark_complete() { _is_complete = true; }
void mark_success() { _is_success = true; }
int comp_level() { return _comp_level;}
void set_comp_level(int comp_level) { _comp_level = comp_level;}
int num_inlined_bytecodes() const { return _num_inlined_bytecodes; }
void set_num_inlined_bytecodes(int n) { _num_inlined_bytecodes = n; }
CompileTask* next() const { return _next; }
void set_next(CompileTask* next) { _next = next; }
CompileTask* prev() const { return _prev; }
void set_prev(CompileTask* prev) { _prev = prev; }
bool is_free() const { return _is_free; }
void set_is_free(bool val) { _is_free = val; }
// RedefineClasses support
void metadata_do(void f(Metadata*));
void mark_on_stack();
private:
static void print_impl(outputStream* st, Method* method, int compile_id, int comp_level,
bool is_osr_method = false, int osr_bci = -1, bool is_blocking = false,
const char* msg = NULL, bool short_form = false, bool cr = true);
public:
void print(outputStream* st = tty, const char* msg = NULL, bool short_form = false, bool cr = true);
static void print(outputStream* st, const nmethod* nm, const char* msg = NULL, bool short_form = false, bool cr = true) {
print_impl(st, nm->method(), nm->compile_id(), nm->comp_level(),
nm->is_osr_method(), nm->is_osr_method() ? nm->osr_entry_bci() : -1, /*is_blocking*/ false,
msg, short_form, cr);
}
static void print_inline_indent(int inline_level, outputStream* st = tty);
void print_tty();
void print_line_on_error(outputStream* st, char* buf, int buflen);
void log_task(xmlStream* log);
void log_task_queued();
void log_task_start(CompileLog* log);
void log_task_done(CompileLog* log);
void set_failure_reason(const char* reason) {
_failure_reason = reason;
}
bool check_break_at_flags();
static void print_inlining_inner(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg = NULL);
static void print_inlining_tty(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
print_inlining_inner(tty, method, inline_level, bci, msg);
}
};
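// Typical usage from compiler-control code, sketched under the assumption
// that 'task' points to a live, fully initialized CompileTask:
//
//   task->print_tty();                  // one-line progress record on tty
//   if (xtty != NULL) {
//     task->log_task_queued();          // structured <task_queued .../> event
//   }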
#endif // SHARE_VM_COMPILER_COMPILETASK_HPP

View File

@@ -66,7 +66,8 @@ class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
   virtual void do_klass(Klass* k);
   void do_klass_nv(Klass* k);

-  virtual void do_class_loader_data(ClassLoaderData* cld);
+  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
+  void do_cld_nv(ClassLoaderData* cld);
 };

 class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {

View File

@@ -50,11 +50,11 @@ inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) {
 inline void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) {
   ClassLoaderData* cld = k->class_loader_data();
-  do_class_loader_data(cld);
+  do_cld_nv(cld);
 }
 inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); }

-inline void MetadataAwareOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) {
+inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) {
   assert(_klass_closure._oop_closure == this, "Must be");

   bool claim = true;  // Must claim the class loader data before processing.

View File

@@ -702,7 +702,7 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,
            !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(  \
                     oop(bottom)) &&                                           \
            !_collector->CMSCollector::is_dead_obj(oop(bottom))) {             \
-    size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
+    size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
     bottom += _cfls->adjustObjectSize(word_sz);                               \
   } else {                                                                    \
     bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
@@ -729,7 +729,7 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,
            !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(  \
                     oop(bottom)) &&                                           \
            !_collector->CMSCollector::is_dead_obj(oop(bottom))) {             \
-    size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
+    size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
     bottom += _cfls->adjustObjectSize(word_sz);                               \
   } else {                                                                    \
     bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
@@ -2989,7 +2989,7 @@ initialize_sequential_subtasks_for_marking(int n_threads,
   assert(task_size > CardTableModRefBS::card_size_in_words &&
          (task_size % CardTableModRefBS::card_size_in_words == 0),
          "Otherwise arithmetic below would be incorrect");
-  MemRegion span = _gen->reserved();
+  MemRegion span = _old_gen->reserved();
   if (low != NULL) {
     if (span.contains(low)) {
       // Align low down to a card boundary so that

View File

@@ -99,7 +99,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   BlockOffsetArrayNonContigSpace _bt;

   CMSCollector* _collector;
-  ConcurrentMarkSweepGeneration* _gen;
+  ConcurrentMarkSweepGeneration* _old_gen;

   // Data structures for free blocks (used during allocation/sweeping)

View File

@@ -212,7 +212,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
                                     use_adaptive_freelists,
                                     dictionaryChoice);
   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
-  _cmsSpace->_gen = this;
+  _cmsSpace->_old_gen = this;

   _gc_stats = new CMSGCStats();
@@ -359,13 +359,13 @@ double CMSStats::time_until_cms_gen_full() const {
     (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
   if (cms_free > expected_promotion) {
     // Start a cms collection if there isn't enough space to promote
-    // for the next minor collection.  Use the padded average as
+    // for the next young collection.  Use the padded average as
     // a safety factor.
     cms_free -= expected_promotion;

     // Adjust by the safety factor.
     double cms_free_dbl = (double)cms_free;
-    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
+    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
     // Apply a further correction factor which tries to adjust
     // for recent occurance of concurrent mode failures.
     cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
@@ -531,7 +531,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
   if (CMSConcurrentMTEnabled) {
     if (FLAG_IS_DEFAULT(ConcGCThreads)) {
       // just for now
-      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
+      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
     }
     if (ConcGCThreads > 1) {
       _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
@@ -592,7 +592,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
   _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

   // Clip CMSBootstrapOccupancy between 0 and 100.
-  _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
+  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;

   // Now tell CMS generations the identity of their collector
   ConcurrentMarkSweepGeneration::set_collector(this);
@@ -613,7 +613,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
     _end_addr = gch->end_addr();
     assert(_young_gen != NULL, "no _young_gen");
     _eden_chunk_index = 0;
-    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
+    _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
   }
@@ -795,29 +795,22 @@ void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
     gclog_or_tty->print_cr("\nFrom compute_new_size: ");
     gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
-    gclog_or_tty->print_cr("  Desired free fraction %f",
-      desired_free_percentage);
-    gclog_or_tty->print_cr("  Maximum free fraction %f",
-      maximum_free_percentage);
-    gclog_or_tty->print_cr("  Capacity " SIZE_FORMAT, capacity()/1000);
-    gclog_or_tty->print_cr("  Desired capacity " SIZE_FORMAT,
-      desired_capacity/1000);
+    gclog_or_tty->print_cr("  Desired free fraction %f", desired_free_percentage);
+    gclog_or_tty->print_cr("  Maximum free fraction %f", maximum_free_percentage);
+    gclog_or_tty->print_cr("  Capacity " SIZE_FORMAT, capacity() / 1000);
+    gclog_or_tty->print_cr("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
     GenCollectedHeap* gch = GenCollectedHeap::heap();
     assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
     size_t young_size = gch->young_gen()->capacity();
     gclog_or_tty->print_cr("  Young gen size " SIZE_FORMAT, young_size / 1000);
-    gclog_or_tty->print_cr("  unsafe_max_alloc_nogc " SIZE_FORMAT,
-      unsafe_max_alloc_nogc()/1000);
-    gclog_or_tty->print_cr("  contiguous available " SIZE_FORMAT,
-      contiguous_available()/1000);
-    gclog_or_tty->print_cr("  Expand by " SIZE_FORMAT " (bytes)",
-      expand_bytes);
+    gclog_or_tty->print_cr("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
+    gclog_or_tty->print_cr("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
+    gclog_or_tty->print_cr("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
   }
   // safe if expansion fails
   expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
   if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print_cr("  Expanded free fraction %f",
-      ((double) free()) / capacity());
+    gclog_or_tty->print_cr("  Expanded free fraction %f", ((double) free()) / capacity());
   }
 } else {
   size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
@@ -834,16 +827,14 @@ Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
   return cmsSpace()->freelistLock();
 }

-HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
-                                                  bool tlab) {
+HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
   CMSSynchronousYieldRequest yr;
-  MutexLockerEx x(freelistLock(),
-                  Mutex::_no_safepoint_check_flag);
+  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
   return have_lock_and_allocate(size, tlab);
 }

 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                                 bool tlab /* ignored */) {
   assert_lock_strong(freelistLock());
   size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
   HeapWord* res = cmsSpace()->allocate(adjustedSize);
@@ -2426,7 +2417,7 @@ void CMSCollector::verify_after_remark_work_1() {
   gch->gen_process_roots(&srs,
                          GenCollectedHeap::OldGen,
-                         true,   // younger gens are roots
+                         true,   // young gen as roots
                          GenCollectedHeap::ScanningOption(roots_scanning_options()),
                          should_unload_classes(),
                          &notOlder,
@@ -2498,7 +2489,7 @@ void CMSCollector::verify_after_remark_work_2() {
   gch->gen_process_roots(&srs,
                          GenCollectedHeap::OldGen,
-                         true,   // younger gens are roots
+                         true,   // young gen as roots
                          GenCollectedHeap::ScanningOption(roots_scanning_options()),
                          should_unload_classes(),
                          &notOlder,
@@ -2952,12 +2943,7 @@ void CMSCollector::checkpointRootsInitialWork() {
   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
   assert(_collectorState == InitialMarking, "just checking");

-  // If there has not been a GC[n-1] since last GC[n] cycle completed,
-  // precede our marking with a collection of all
-  // younger generations to keep floating garbage to a minimum.
-  // XXX: we won't do this for now -- it's an optimization to be done later.
-
-  // already have locks
+  // Already have locks.
   assert_lock_strong(bitMapLock());

   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
@@ -3027,7 +3013,7 @@ void CMSCollector::checkpointRootsInitialWork() {
     gch->gen_process_roots(&srs,
                            GenCollectedHeap::OldGen,
-                           true,   // younger gens are roots
+                           true,   // young gen as roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
                            &notOlder,
@@ -3037,7 +3023,7 @@ void CMSCollector::checkpointRootsInitialWork() {
   }

   // Clear mod-union table; it will be dirtied in the prologue of
-  // CMS generation per each younger generation collection.
+  // CMS generation per each young generation collection.

   assert(_modUnionTable.isAllClear(),
          "Was cleared in most recent final checkpoint phase"
@@ -3057,7 +3043,7 @@ bool CMSCollector::markFromRoots() {
   // assert(!SafepointSynchronize::is_at_safepoint(),
   //        "inconsistent argument?");
   // However that wouldn't be right, because it's possible that
-  // a safepoint is indeed in progress as a younger generation
+  // a safepoint is indeed in progress as a young generation
   // stop-the-world GC happens even as we mark in this generation.
   assert(_collectorState == Marking, "inconsistent state?");
   check_correct_thread_executing();
@@ -3065,7 +3051,7 @@ bool CMSCollector::markFromRoots() {
   // Weak ref discovery note: We may be discovering weak
   // refs in this generation concurrent (but interleaved) with
-  // weak ref discovery by a younger generation collector.
+  // weak ref discovery by the young generation collector.

   CMSTokenSyncWithLocks ts(true, bitMapLock());
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
@@ -3095,7 +3081,7 @@ bool CMSCollector::markFromRootsWork() {
   // Note that when we do a marking step we need to hold the
   // bit map lock -- recall that direct allocation (by mutators)
-  // and promotion (by younger generation collectors) is also
+  // and promotion (by the young generation collector) is also
   // marking the bit map. [the so-called allocate live policy.]
   // Because the implementation of bit map marking is not
   // robust wrt simultaneous marking of bits in the same word,
@@ -4049,7 +4035,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
 // one of these methods, please check the other method too.

 size_t CMSCollector::preclean_mod_union_table(
-  ConcurrentMarkSweepGeneration* gen,
+  ConcurrentMarkSweepGeneration* old_gen,
   ScanMarkedObjectsAgainCarefullyClosure* cl) {
   verify_work_stacks_empty();
   verify_overflow_empty();
@@ -4064,10 +4050,10 @@ size_t CMSCollector::preclean_mod_union_table(
   // generation, but we might potentially miss cards when the
   // generation is rapidly expanding while we are in the midst
   // of precleaning.
-  HeapWord* startAddr = gen->reserved().start();
-  HeapWord* endAddr   = gen->reserved().end();
+  HeapWord* startAddr = old_gen->reserved().start();
+  HeapWord* endAddr   = old_gen->reserved().end();

-  cl->setFreelistLock(gen->freelistLock());   // needed for yielding
+  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding

   size_t numDirtyCards, cumNumDirtyCards;
   HeapWord *nextAddr, *lastAddr;
@@ -4109,7 +4095,7 @@ size_t CMSCollector::preclean_mod_union_table(
       HeapWord* stop_point = NULL;
       stopTimer();
       // Potential yield point
-      CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
+      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
                                bitMapLock());
       startTimer();
       {
@@ -4117,7 +4103,7 @@ size_t CMSCollector::preclean_mod_union_table(
         verify_overflow_empty();
         sample_eden();
         stop_point =
-          gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
+          old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       }
       if (stop_point != NULL) {
         // The careful iteration stopped early either because it found an
@@ -4152,15 +4138,15 @@ size_t CMSCollector::preclean_mod_union_table(
 // below are largely identical; if you need to modify
 // one of these methods, please check the other method too.

-size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
+size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
   ScanMarkedObjectsAgainCarefullyClosure* cl) {
   // strategy: it's similar to precleamModUnionTable above, in that
   // we accumulate contiguous ranges of dirty cards, mark these cards
   // precleaned, then scan the region covered by these cards.
-  HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
-  HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
+  HeapWord* endAddr   = (HeapWord*)(old_gen->_virtual_space.high());
+  HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());

-  cl->setFreelistLock(gen->freelistLock());   // needed for yielding
+  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding

   size_t numDirtyCards, cumNumDirtyCards;
   HeapWord *lastAddr, *nextAddr;
@@ -4197,13 +4183,13 @@ size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
     if (!dirtyRegion.is_empty()) {
       stopTimer();
-      CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
+      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
       startTimer();
       sample_eden();
       verify_work_stacks_empty();
       verify_overflow_empty();
       HeapWord* stop_point =
-        gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
+        old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       if (stop_point != NULL) {
         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
                "Should only be AbortablePreclean.");
@@ -4623,7 +4609,7 @@ void CMSParRemarkTask::work(uint worker_id) {
     ResourceMark rm;
     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
     for (int i = 0; i < array->length(); i++) {
-      par_mrias_cl.do_class_loader_data(array->at(i));
+      par_mrias_cl.do_cld_nv(array->at(i));
     }

     // We don't need to keep track of new CLDs anymore.
@@ -5086,7 +5072,7 @@ void CMSCollector::do_remark_parallel() {
   // preclean phase did of eden, plus the [two] tasks of
   // scanning the [two] survivor spaces. Further fine-grain
   // parallelization of the scanning of the survivor spaces
-  // themselves, and of precleaning of the younger gen itself
+  // themselves, and of precleaning of the young gen itself
   // is deferred to the future.
   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
@@ -5177,7 +5163,7 @@ void CMSCollector::do_remark_non_parallel() {
     gch->gen_process_roots(&srs,
                            GenCollectedHeap::OldGen,
-                           true,   // younger gens as roots
+                           true,   // young gen as roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
                            &mrias_cl,
@@ -5199,7 +5185,7 @@ void CMSCollector::do_remark_non_parallel() {
     ResourceMark rm;
     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
     for (int i = 0; i < array->length(); i++) {
-      mrias_cl.do_class_loader_data(array->at(i));
+      mrias_cl.do_cld_nv(array->at(i));
     }

     // We don't need to keep track of new CLDs anymore.
@@ -5661,7 +5647,7 @@ void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
   }
 }

-void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
+void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
   // We iterate over the space(s) underlying this generation,
   // checking the mark bit map to see if the bits corresponding
   // to specific blocks are marked or not. Blocks that are
@@ -5690,26 +5676,26 @@ void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
   // check that we hold the requisite locks
   assert(have_cms_token(), "Should hold cms token");
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
-  assert_lock_strong(gen->freelistLock());
+  assert_lock_strong(old_gen->freelistLock());
   assert_lock_strong(bitMapLock());

   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
   assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
-  gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
-                                      _inter_sweep_estimate.padded_average(),
-                                      _intra_sweep_estimate.padded_average());
-  gen->setNearLargestChunk();
+  old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
+                                          _inter_sweep_estimate.padded_average(),
+                                          _intra_sweep_estimate.padded_average());
+  old_gen->setNearLargestChunk();

   {
-    SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
-    gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
+    SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
+    old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
     // We need to free-up/coalesce garbage/blocks from a
     // co-terminal free run. This is done in the SweepClosure
     // destructor; so, do not remove this scope, else the
     // end-of-sweep-census below will be off by a little bit.
   }
-  gen->cmsSpace()->sweep_completed();
-  gen->cmsSpace()->endSweepFLCensus(sweep_count());
+  old_gen->cmsSpace()->sweep_completed();
+  old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
   if (should_unload_classes()) {                // unloaded classes this cycle,
     _concurrent_cycles_since_last_unload = 0;   // ... reset count
   } else {                                      // did not unload classes,
@@ -6324,12 +6310,12 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
       // objArrays are precisely marked; restrict scanning
       // to dirty cards only.
       size = CompactibleFreeListSpace::adjustObjectSize(
-               p->oop_iterate(_scanningClosure, mr));
+               p->oop_iterate_size(_scanningClosure, mr));
     } else {
       // A non-array may have been imprecisely marked; we need
       // to scan object in its entirety.
       size = CompactibleFreeListSpace::adjustObjectSize(
-               p->oop_iterate(_scanningClosure));
+               p->oop_iterate_size(_scanningClosure));
     }
     #ifdef ASSERT
       size_t direct_size =
@@ -6417,7 +6403,7 @@ size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
   // Note that we do not yield while we iterate over
   // the interior oops of p, pushing the relevant ones
   // on our marking stack.
-  size_t size = p->oop_iterate(_scanning_closure);
+  size_t size = p->oop_iterate_size(_scanning_closure);
   do_yield_check();
   // Observe that below, we do not abandon the preclean
   // phase as soon as we should; rather we empty the

View File

@@ -723,7 +723,7 @@ class CMSCollector: public CHeapObj<mtGC> {
  private:
   // Support for parallelizing young gen rescan in CMS remark phase
-  ParNewGeneration* _young_gen;  // the younger gen
+  ParNewGeneration* _young_gen;

   HeapWord** _top_addr;    // ... Top of Eden
   HeapWord** _end_addr;    // ... End of Eden
@@ -772,9 +772,9 @@ class CMSCollector: public CHeapObj<mtGC> {
  private:
   // Concurrent precleaning work
-  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
+  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* old_gen,
                                   ScanMarkedObjectsAgainCarefullyClosure* cl);
-  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
+  size_t preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
                              ScanMarkedObjectsAgainCarefullyClosure* cl);
   // Does precleaning work, returning a quantity indicative of
   // the amount of "useful work" done.
@@ -797,7 +797,7 @@ class CMSCollector: public CHeapObj<mtGC> {
   void refProcessingWork();

   // Concurrent sweeping work
-  void sweepWork(ConcurrentMarkSweepGeneration* gen);
+  void sweepWork(ConcurrentMarkSweepGeneration* old_gen);

   // (Concurrent) resetting of support data structures
   void reset(bool concurrent);
@@ -1120,10 +1120,8 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   MemRegion used_region_at_save_marks() const;

   // Does a "full" (forced) collection invoked on this generation collect
-  // all younger generations as well? Note that the second conjunct is a
-  // hack to allow the collection of the younger gen first if the flag is
-  // set.
-  virtual bool full_collects_younger_generations() const {
+  // the young generation as well?
+  virtual bool full_collects_young_generation() const {
     return !ScavengeBeforeFullGC;
   }
@@ -1153,9 +1151,8 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;

-  // Inform this (non-young) generation that a promotion failure was
-  // encountered during a collection of a younger generation that
-  // promotes into this generation.
+  // Inform this (old) generation that a promotion failure was
+  // encountered during a collection of the young generation.
   virtual void promotion_failure_occurred();

   bool should_collect(bool full, size_t size, bool tlab);

View File

@@ -295,7 +295,7 @@ inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
     promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
   }

-  // If the younger gen collections were skipped, then the
+  // If the young gen collection was skipped, then the
   // number of promoted bytes will be 0 and adding it to the
   // average will incorrectly lessen the average. It is, however,
   // also possible that no promotion was needed.

View File

@@ -39,23 +39,17 @@
 // ======= Concurrent Mark Sweep Thread ========

-// The CMS thread is created when Concurrent Mark Sweep is used in the
-// older of two generations in a generational memory system.
-
-ConcurrentMarkSweepThread*
-     ConcurrentMarkSweepThread::_cmst = NULL;
-CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
-bool ConcurrentMarkSweepThread::_should_terminate = false;
-int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
-
-volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
-
-SurrogateLockerThread*
-     ConcurrentMarkSweepThread::_slt = NULL;
+ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::_cmst = NULL;
+CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
+bool ConcurrentMarkSweepThread::_should_terminate = false;
+int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
+
+volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
+
+SurrogateLockerThread* ConcurrentMarkSweepThread::_slt = NULL;

 SurrogateLockerThread::SLT_msg_type
      ConcurrentMarkSweepThread::_sltBuffer = SurrogateLockerThread::empty;
-Monitor*
-     ConcurrentMarkSweepThread::_sltMonitor = NULL;
+Monitor* ConcurrentMarkSweepThread::_sltMonitor = NULL;

 ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
      : ConcurrentGCThread() {

View File

@ -69,20 +69,28 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
Stack<oop, mtGC>* overflow_stacks_, Stack<oop, mtGC>* overflow_stacks_,
size_t desired_plab_sz_, size_t desired_plab_sz_,
ParallelTaskTerminator& term_) : ParallelTaskTerminator& term_) :
_to_space(to_space_), _old_gen(old_gen_), _young_gen(young_gen_), _thread_num(thread_num_), _to_space(to_space_),
_work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false), _old_gen(old_gen_),
_young_gen(young_gen_),
_thread_num(thread_num_),
_work_queue(work_queue_set_->queue(thread_num_)),
_to_space_full(false),
_overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL), _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
_ageTable(false), // false ==> not the global age table, no perf data. _ageTable(false), // false ==> not the global age table, no perf data.
_to_space_alloc_buffer(desired_plab_sz_), _to_space_alloc_buffer(desired_plab_sz_),
_to_space_closure(young_gen_, this), _old_gen_closure(young_gen_, this), _to_space_closure(young_gen_, this),
_to_space_root_closure(young_gen_, this), _old_gen_root_closure(young_gen_, this), _old_gen_closure(young_gen_, this),
_to_space_root_closure(young_gen_, this),
_old_gen_root_closure(young_gen_, this),
_older_gen_closure(young_gen_, this), _older_gen_closure(young_gen_, this),
_evacuate_followers(this, &_to_space_closure, &_old_gen_closure, _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
&_to_space_root_closure, young_gen_, &_old_gen_root_closure, &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
work_queue_set_, &term_), work_queue_set_, &term_),
_is_alive_closure(young_gen_), _scan_weak_ref_closure(young_gen_, this), _is_alive_closure(young_gen_),
_scan_weak_ref_closure(young_gen_, this),
_keep_alive_closure(&_scan_weak_ref_closure), _keep_alive_closure(&_scan_weak_ref_closure),
_strong_roots_time(0.0), _term_time(0.0) _strong_roots_time(0.0),
_term_time(0.0)
{ {
#if TASKQUEUE_STATS #if TASKQUEUE_STATS
_term_attempts = 0; _term_attempts = 0;
@ -90,8 +98,7 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
_overflow_refill_objs = 0; _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS #endif // TASKQUEUE_STATS
_survivor_chunk_array = _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
(ChunkArray*) old_gen()->get_data_recorder(thread_num());
_hash_seed = 17; // Might want to take time-based random value. _hash_seed = 17; // Might want to take time-based random value.
_start = os::elapsedTime(); _start = os::elapsedTime();
_old_gen_closure.set_generation(old_gen_); _old_gen_closure.set_generation(old_gen_);
@ -154,7 +161,6 @@ void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
} }
} }
void ParScanThreadState::trim_queues(int max_size) { void ParScanThreadState::trim_queues(int max_size) {
ObjToScanQueue* queue = work_queue(); ObjToScanQueue* queue = work_queue();
do { do {
@ -222,15 +228,12 @@ void ParScanThreadState::push_on_overflow_stack(oop p) {
} }
HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) { HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
// If the object is small enough, try to reallocate the buffer.
// Otherwise, if the object is small enough, try to reallocate the
// buffer.
HeapWord* obj = NULL; HeapWord* obj = NULL;
if (!_to_space_full) { if (!_to_space_full) {
PLAB* const plab = to_space_alloc_buffer(); PLAB* const plab = to_space_alloc_buffer();
Space* const sp = to_space(); Space* const sp = to_space();
if (word_sz * 100 < if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
ParallelGCBufferWastePct * plab->word_sz()) {
// Is small enough; abandon this buffer and start a new one. // Is small enough; abandon this buffer and start a new one.
plab->retire(); plab->retire();
size_t buf_size = plab->word_sz(); size_t buf_size = plab->word_sz();
@ -241,8 +244,7 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
size_t free_bytes = sp->free(); size_t free_bytes = sp->free();
while(buf_space == NULL && free_bytes >= min_bytes) { while(buf_space == NULL && free_bytes >= min_bytes) {
buf_size = free_bytes >> LogHeapWordSize; buf_size = free_bytes >> LogHeapWordSize;
assert(buf_size == (size_t)align_object_size(buf_size), assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
"Invariant");
buf_space = sp->par_allocate(buf_size); buf_space = sp->par_allocate(buf_size);
free_bytes = sp->free(); free_bytes = sp->free();
} }
@ -262,7 +264,6 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
// We're used up. // We're used up.
_to_space_full = true; _to_space_full = true;
} }
} else { } else {
// Too large; allocate the object individually. // Too large; allocate the object individually.
obj = sp->par_allocate(word_sz); obj = sp->par_allocate(word_sz);
@ -271,7 +272,6 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
return obj; return obj;
} }
void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) { void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
to_space_alloc_buffer()->undo_allocation(obj, word_sz); to_space_alloc_buffer()->undo_allocation(obj, word_sz);
} }
@ -288,7 +288,7 @@ public:
// Initializes states for the specified number of threads; // Initializes states for the specified number of threads;
ParScanThreadStateSet(int num_threads, ParScanThreadStateSet(int num_threads,
Space& to_space, Space& to_space,
ParNewGeneration& gen, ParNewGeneration& young_gen,
Generation& old_gen, Generation& old_gen,
ObjToScanQueueSet& queue_set, ObjToScanQueueSet& queue_set,
Stack<oop, mtGC>* overflow_stacks_, Stack<oop, mtGC>* overflow_stacks_,
@ -315,21 +315,25 @@ public:
private: private:
ParallelTaskTerminator& _term; ParallelTaskTerminator& _term;
ParNewGeneration& _gen; ParNewGeneration& _young_gen;
Generation& _old_gen; Generation& _old_gen;
public: public:
bool is_valid(int id) const { return id < length(); } bool is_valid(int id) const { return id < length(); }
ParallelTaskTerminator* terminator() { return &_term; } ParallelTaskTerminator* terminator() { return &_term; }
}; };
ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
ParScanThreadStateSet::ParScanThreadStateSet( Space& to_space,
int num_threads, Space& to_space, ParNewGeneration& gen, ParNewGeneration& young_gen,
Generation& old_gen, ObjToScanQueueSet& queue_set, Generation& old_gen,
Stack<oop, mtGC>* overflow_stacks, ObjToScanQueueSet& queue_set,
size_t desired_plab_sz, ParallelTaskTerminator& term) Stack<oop, mtGC>* overflow_stacks,
size_t desired_plab_sz,
ParallelTaskTerminator& term)
: ResourceArray(sizeof(ParScanThreadState), num_threads), : ResourceArray(sizeof(ParScanThreadState), num_threads),
_gen(gen), _old_gen(old_gen), _term(term) _young_gen(young_gen),
_old_gen(old_gen),
_term(term)
{ {
assert(num_threads > 0, "sanity check!"); assert(num_threads > 0, "sanity check!");
assert(ParGCUseLocalOverflow == (overflow_stacks != NULL), assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
@ -337,13 +341,12 @@ ParScanThreadStateSet::ParScanThreadStateSet(
// Initialize states. // Initialize states.
for (int i = 0; i < num_threads; ++i) { for (int i = 0; i < num_threads; ++i) {
new ((ParScanThreadState*)_data + i) new ((ParScanThreadState*)_data + i)
ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set, ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
overflow_stacks, desired_plab_sz, term); overflow_stacks, desired_plab_sz, term);
} }
} }
inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
{
assert(i >= 0 && i < length(), "sanity check!"); assert(i >= 0 && i < length(), "sanity check!");
return ((ParScanThreadState*)_data)[i]; return ((ParScanThreadState*)_data)[i];
} }
@ -357,8 +360,7 @@ void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_trace
} }
} }
void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
{
_term.reset_for_reuse(active_threads); _term.reset_for_reuse(active_threads);
if (promotion_failed) { if (promotion_failed) {
for (int i = 0; i < length(); ++i) { for (int i = 0; i < length(); ++i) {
@ -368,36 +370,27 @@ void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed)
} }
#if TASKQUEUE_STATS #if TASKQUEUE_STATS
void void ParScanThreadState::reset_stats() {
ParScanThreadState::reset_stats()
{
taskqueue_stats().reset(); taskqueue_stats().reset();
_term_attempts = 0; _term_attempts = 0;
_overflow_refills = 0; _overflow_refills = 0;
_overflow_refill_objs = 0; _overflow_refill_objs = 0;
} }
void ParScanThreadStateSet::reset_stats() void ParScanThreadStateSet::reset_stats() {
{
for (int i = 0; i < length(); ++i) { for (int i = 0; i < length(); ++i) {
thread_state(i).reset_stats(); thread_state(i).reset_stats();
} }
} }
void void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
st->print_raw_cr("GC Termination Stats"); st->print_raw_cr("GC Termination Stats");
st->print_raw_cr(" elapsed --strong roots-- " st->print_raw_cr(" elapsed --strong roots-- -------termination-------");
"-------termination-------"); st->print_raw_cr("thr ms ms % ms % attempts");
st->print_raw_cr("thr ms ms % " st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
" ms % attempts");
st->print_raw_cr("--- --------- --------- ------ "
"--------- ------ --------");
} }
void ParScanThreadStateSet::print_termination_stats(outputStream* const st) void ParScanThreadStateSet::print_termination_stats(outputStream* const st) {
{
print_termination_stats_hdr(st); print_termination_stats_hdr(st);
for (int i = 0; i < length(); ++i) { for (int i = 0; i < length(); ++i) {
@ -405,23 +398,20 @@ void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
const double elapsed_ms = pss.elapsed_time() * 1000.0; const double elapsed_ms = pss.elapsed_time() * 1000.0;
const double s_roots_ms = pss.strong_roots_time() * 1000.0; const double s_roots_ms = pss.strong_roots_time() * 1000.0;
const double term_ms = pss.term_time() * 1000.0; const double term_ms = pss.term_time() * 1000.0;
st->print_cr("%3d %9.2f %9.2f %6.2f " st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
"%9.2f %6.2f " SIZE_FORMAT_W(8),
i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts()); term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
} }
} }
// Print stats related to work queue activity. // Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
{
st->print_raw_cr("GC Task Stats"); st->print_raw_cr("GC Task Stats");
st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
} }
void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) {
{
print_taskqueue_stats_hdr(st); print_taskqueue_stats_hdr(st);
TaskQueueStats totals; TaskQueueStats totals;
@ -443,8 +433,7 @@ void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
} }
#endif // TASKQUEUE_STATS #endif // TASKQUEUE_STATS
void ParScanThreadStateSet::flush() void ParScanThreadStateSet::flush() {
{
// Work in this loop should be kept as lightweight as // Work in this loop should be kept as lightweight as
// possible since this might otherwise become a bottleneck // possible since this might otherwise become a bottleneck
// to scaling. Should we add heavy-weight work into this // to scaling. Should we add heavy-weight work into this
@ -454,12 +443,12 @@ void ParScanThreadStateSet::flush()
// Flush stats related to To-space PLAB activity and // Flush stats related to To-space PLAB activity and
// retire the last buffer. // retire the last buffer.
par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_gen.plab_stats()); par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());
// Every thread has its own age table. We need to merge // Every thread has its own age table. We need to merge
// them all into one. // them all into one.
ageTable *local_table = par_scan_state.age_table(); ageTable *local_table = par_scan_state.age_table();
_gen.age_table()->merge(local_table); _young_gen.age_table()->merge(local_table);
// Inform old gen that we're done. // Inform old gen that we're done.
_old_gen.par_promote_alloc_done(i); _old_gen.par_promote_alloc_done(i);
@ -478,8 +467,7 @@ void ParScanThreadStateSet::flush()
ParScanClosure::ParScanClosure(ParNewGeneration* g, ParScanClosure::ParScanClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) : ParScanThreadState* par_scan_state) :
OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
{
_boundary = _g->reserved().end(); _boundary = _g->reserved().end();
} }
@ -531,24 +519,23 @@ void ParEvacuateFollowersClosure::do_void() {
ObjToScanQueue* work_q = par_scan_state()->work_queue(); ObjToScanQueue* work_q = par_scan_state()->work_queue();
while (true) { while (true) {
// Scan to-space and old-gen objs until we run out of both. // Scan to-space and old-gen objs until we run out of both.
oop obj_to_scan; oop obj_to_scan;
par_scan_state()->trim_queues(0); par_scan_state()->trim_queues(0);
// We have no local work, attempt to steal from other threads. // We have no local work, attempt to steal from other threads.
// attempt to steal work from promoted. // Attempt to steal work from promoted.
if (task_queues()->steal(par_scan_state()->thread_num(), if (task_queues()->steal(par_scan_state()->thread_num(),
par_scan_state()->hash_seed(), par_scan_state()->hash_seed(),
obj_to_scan)) { obj_to_scan)) {
bool res = work_q->push(obj_to_scan); bool res = work_q->push(obj_to_scan);
assert(res, "Empty queue should have room for a push."); assert(res, "Empty queue should have room for a push.");
// if successful, goto Start. // If successful, goto Start.
continue; continue;
// try global overflow list. // Try global overflow list.
} else if (par_gen()->take_from_overflow_list(par_scan_state())) { } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
continue; continue;
} }
@ -564,15 +551,17 @@ void ParEvacuateFollowersClosure::do_void() {
par_scan_state()->end_term_time(); par_scan_state()->end_term_time();
} }
ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, Generation* old_gen, ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
HeapWord* young_old_boundary, ParScanThreadStateSet* state_set, Generation* old_gen,
HeapWord* young_old_boundary,
ParScanThreadStateSet* state_set,
StrongRootsScope* strong_roots_scope) : StrongRootsScope* strong_roots_scope) :
AbstractGangTask("ParNewGeneration collection"), AbstractGangTask("ParNewGeneration collection"),
_young_gen(young_gen), _old_gen(old_gen), _young_gen(young_gen), _old_gen(old_gen),
_young_old_boundary(young_old_boundary), _young_old_boundary(young_old_boundary),
_state_set(state_set), _state_set(state_set),
_strong_roots_scope(strong_roots_scope) _strong_roots_scope(strong_roots_scope)
{} {}
void ParNewGenTask::work(uint worker_id) { void ParNewGenTask::work(uint worker_id) {
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
@ -595,8 +584,7 @@ void ParNewGenTask::work(uint worker_id) {
par_scan_state.start_strong_roots(); par_scan_state.start_strong_roots();
gch->gen_process_roots(_strong_roots_scope, gch->gen_process_roots(_strong_roots_scope,
GenCollectedHeap::YoungGen, GenCollectedHeap::YoungGen,
true, // Process younger gens, if any,
// as strong roots.
true, // Process younger gens, if any, as strong roots.
GenCollectedHeap::SO_ScavengeCodeCache, GenCollectedHeap::SO_ScavengeCodeCache,
GenCollectedHeap::StrongAndWeakRoots, GenCollectedHeap::StrongAndWeakRoots,
&par_scan_state.to_space_root_closure(), &par_scan_state.to_space_root_closure(),
@ -613,8 +601,7 @@ void ParNewGenTask::work(uint worker_id) {
#pragma warning( push ) #pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif #endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
: DefNewGeneration(rs, initial_byte_size, "PCopy"), : DefNewGeneration(rs, initial_byte_size, "PCopy"),
_overflow_list(NULL), _overflow_list(NULL),
_is_alive_closure(this), _is_alive_closure(this),
@ -625,20 +612,19 @@ ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
_task_queues = new ObjToScanQueueSet(ParallelGCThreads); _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
guarantee(_task_queues != NULL, "task_queues allocation failure."); guarantee(_task_queues != NULL, "task_queues allocation failure.");
for (uint i1 = 0; i1 < ParallelGCThreads; i1++) { for (uint i = 0; i < ParallelGCThreads; i++) {
ObjToScanQueue *q = new ObjToScanQueue(); ObjToScanQueue *q = new ObjToScanQueue();
guarantee(q != NULL, "work_queue Allocation failure."); guarantee(q != NULL, "work_queue Allocation failure.");
_task_queues->register_queue(i1, q); _task_queues->register_queue(i, q);
} }
for (uint i2 = 0; i2 < ParallelGCThreads; i2++) for (uint i = 0; i < ParallelGCThreads; i++) {
_task_queues->queue(i2)->initialize(); _task_queues->queue(i)->initialize();
}
_overflow_stacks = NULL; _overflow_stacks = NULL;
if (ParGCUseLocalOverflow) { if (ParGCUseLocalOverflow) {
// typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
// with ','
// typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal with ','
typedef Stack<oop, mtGC> GCOopStack; typedef Stack<oop, mtGC> GCOopStack;
_overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC); _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
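The typedef above works around a plain preprocessor limitation: macro arguments are split on every top-level comma, so a template-id such as Stack&lt;oop, mtGC&gt; arrives at NEW_C_HEAP_ARRAY as two arguments. A compilable sketch of the problem and the fix, with ALLOC_ARRAY standing in for the real macro and oop_t/mtGC as stand-in names:

template <typename T, int Tag> struct Stack { T* data; };
enum { mtGC = 1 };
typedef int oop_t;                              // stand-in for hotspot's oop

#define ALLOC_ARRAY(type, count) (new type[count])  // hypothetical stand-in

int main() {
  // ALLOC_ARRAY(Stack<oop_t, mtGC>, 8);        // error: the macro sees 3 arguments
  typedef Stack<oop_t, mtGC> GCOopStack;        // hide the comma behind one name
  GCOopStack* stacks = ALLOC_ARRAY(GCOopStack, 8);
  delete[] stacks;
  return 0;
}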
@ -742,7 +728,7 @@ class ParNewRefProcTaskProxy: public AbstractGangTask {
typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public: public:
ParNewRefProcTaskProxy(ProcessTask& task, ParNewRefProcTaskProxy(ProcessTask& task,
ParNewGeneration& gen, ParNewGeneration& young_gen,
Generation& old_gen, Generation& old_gen,
HeapWord* young_old_boundary, HeapWord* young_old_boundary,
ParScanThreadStateSet& state_set); ParScanThreadStateSet& state_set);
@ -768,11 +754,9 @@ ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
_old_gen(old_gen), _old_gen(old_gen),
_young_old_boundary(young_old_boundary), _young_old_boundary(young_old_boundary),
_state_set(state_set) _state_set(state_set)
{ { }
}
void ParNewRefProcTaskProxy::work(uint worker_id) void ParNewRefProcTaskProxy::work(uint worker_id) {
{
ResourceMark rm; ResourceMark rm;
HandleMark hm; HandleMark hm;
ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id); ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
@ -792,15 +776,12 @@ public:
_task(task) _task(task)
{ } { }
virtual void work(uint worker_id) virtual void work(uint worker_id) {
{
_task.work(worker_id); _task.work(worker_id);
} }
}; };
void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
WorkGang* workers = gch->workers(); WorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads."); assert(workers != NULL, "Need parallel worker threads.");
@ -812,8 +793,7 @@ void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
_young_gen.promotion_failed()); _young_gen.promotion_failed());
} }
void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
{
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
WorkGang* workers = gch->workers(); WorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads."); assert(workers != NULL, "Need parallel worker threads.");
@ -821,8 +801,7 @@ void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
workers->run_task(&enq_task); workers->run_task(&enq_task);
} }
void ParNewRefProcTaskExecutor::set_single_threaded_mode() void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
{
_state_set.flush(); _state_set.flush();
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
gch->save_marks(); gch->save_marks();
@ -830,7 +809,8 @@ void ParNewRefProcTaskExecutor::set_single_threaded_mode()
ScanClosureWithParBarrier:: ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) : ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
ScanClosure(g, gc_barrier) {} ScanClosure(g, gc_barrier)
{ }
EvacuateFollowersClosureGeneral:: EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
@ -838,7 +818,7 @@ EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
OopsInGenClosure* older) : OopsInGenClosure* older) :
_gch(gch), _gch(gch),
_scan_cur_or_nonheap(cur), _scan_older(older) _scan_cur_or_nonheap(cur), _scan_older(older)
{} { }
void EvacuateFollowersClosureGeneral::do_void() { void EvacuateFollowersClosureGeneral::do_void() {
do { do {
@ -850,7 +830,6 @@ void EvacuateFollowersClosureGeneral::do_void() {
} while (!_gch->no_allocs_since_save_marks()); } while (!_gch->no_allocs_since_save_marks());
} }
// A Generation that does parallel young-gen collection. // A Generation that does parallel young-gen collection.
void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) { void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
@ -996,9 +975,9 @@ void ParNewGeneration::collect(bool full,
if (ZapUnusedHeapArea) { if (ZapUnusedHeapArea) {
// This is now done here because of the piece-meal mangling which // This is now done here because of the piece-meal mangling which
// can check for valid mangling at intermediate points in the // can check for valid mangling at intermediate points in the
// collection(s). When a minor collection fails to collect // collection(s). When a young collection fails to collect
// sufficient space resizing of the young generation can occur // sufficient space resizing of the young generation can occur
// an redistribute the spaces in the young generation. Mangle // and redistribute the spaces in the young generation. Mangle
// here so that unzapped regions don't get distributed to // here so that unzapped regions don't get distributed to
// other spaces. // other spaces.
to()->mangle_unused_area(); to()->mangle_unused_area();
@ -1113,8 +1092,10 @@ void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
// thus avoiding the need to undo the copy as in // thus avoiding the need to undo the copy as in
// copy_to_survivor_space_avoiding_with_undo. // copy_to_survivor_space_avoiding_with_undo.
oop ParNewGeneration::copy_to_survivor_space(
ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
oop old,
size_t sz,
markOop m) {
// In the sequential version, this assert also says that the object is // In the sequential version, this assert also says that the object is
// not forwarded. That might not be the case here. It is the case that // not forwarded. That might not be the case here. It is the case that
// the caller observed it to be not forwarded at some time in the past. // the caller observed it to be not forwarded at some time in the past.
@ -1141,8 +1122,7 @@ oop ParNewGeneration::copy_to_survivor_space(
} }
if (new_obj == NULL) { if (new_obj == NULL) {
// Either to-space is full or we decided to promote
// try allocating obj tenured
// Either to-space is full or we decided to promote try allocating obj tenured
// Attempt to install a null forwarding pointer (atomically), // Attempt to install a null forwarding pointer (atomically),
// to claim the right to install the real forwarding pointer. // to claim the right to install the real forwarding pointer.
@ -71,11 +71,7 @@ class ParScanThreadState {
ParScanWithoutBarrierClosure _to_space_closure; // scan_without_gc_barrier ParScanWithoutBarrierClosure _to_space_closure; // scan_without_gc_barrier
ParScanWithBarrierClosure _old_gen_closure; // scan_with_gc_barrier ParScanWithBarrierClosure _old_gen_closure; // scan_with_gc_barrier
ParRootScanWithoutBarrierClosure _to_space_root_closure; // scan_root_without_gc_barrier ParRootScanWithoutBarrierClosure _to_space_root_closure; // scan_root_without_gc_barrier
// One of these two will be passed to process_roots, which will // Will be passed to process_roots to set its generation.
// set its generation. The first is for two-gen configs where the
// old gen collects the perm gen; the second is for arbitrary configs.
// The second isn't used right now (it used to be used for the train, an
// incremental collector) but the declaration has been left as a reminder.
ParRootScanWithBarrierTwoGensClosure _older_gen_closure; ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
// This closure will always be bound to the old gen; it will be used // This closure will always be bound to the old gen; it will be used
// in evacuate_followers. // in evacuate_followers.
@ -85,7 +81,6 @@ class ParScanThreadState {
ParScanWeakRefClosure _scan_weak_ref_closure; ParScanWeakRefClosure _scan_weak_ref_closure;
ParKeepAliveClosure _keep_alive_closure; ParKeepAliveClosure _keep_alive_closure;
Space* _to_space; Space* _to_space;
Space* to_space() { return _to_space; } Space* to_space() { return _to_space; }
@ -1143,7 +1143,7 @@ void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
while (curr < end) { while (curr < end) {
Prefetch::read(curr, interval); Prefetch::read(curr, interval);
oop obj = oop(curr); oop obj = oop(curr);
int size = obj->oop_iterate(&cl); int size = obj->oop_iterate_size(&cl);
assert(size == obj->size(), "sanity"); assert(size == obj->size(), "sanity");
curr += size; curr += size;
} }
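The oop_iterate to oop_iterate_size renames in this change appear to make the return-value contract explicit at call sites that use the returned size. A hedged stub sketch of the assumed split, not the hotspot declarations:

struct ExtendedOopClosure {};

struct OopStub {
  int _size_in_words;
  void oop_iterate(ExtendedOopClosure* cl) { (void)cl; }  // apply the closure only
  int oop_iterate_size(ExtendedOopClosure* cl) {          // apply it and return word size
    oop_iterate(cl);
    return _size_in_words;
  }
};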
@ -367,7 +367,7 @@ bool G1ArchiveAllocator::alloc_new_region() {
_max = _bottom + HeapRegion::min_region_size_in_words(); _max = _bottom + HeapRegion::min_region_size_in_words();
// Tell mark-sweep that objects in this region are not to be marked. // Tell mark-sweep that objects in this region are not to be marked.
G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords)); G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);
// Since we've modified the old set, call update_sizes. // Since we've modified the old set, call update_sizes.
_g1h->g1mm()->update_sizes(); _g1h->g1mm()->update_sizes();
@ -27,6 +27,7 @@
#include "gc/g1/g1BlockOffsetTable.hpp" #include "gc/g1/g1BlockOffsetTable.hpp"
#include "gc/g1/heapRegion.hpp" #include "gc/g1/heapRegion.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "gc/shared/space.hpp" #include "gc/shared/space.hpp"
inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) { inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
@ -68,15 +69,7 @@ void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_cha
check_index(right, "right index out of range"); check_index(right, "right index out of range");
assert(left <= right, "indexes out of order"); assert(left <= right, "indexes out of order");
size_t num_cards = right - left + 1; size_t num_cards = right - left + 1;
if (UseMemSetInBOT) {
memset(&_offset_array[left], offset, num_cards);
} else {
size_t i = left;
const size_t end = i + num_cards;
for (; i < end; i++) {
_offset_array[i] = offset;
}
}
memset_with_concurrent_readers(&_offset_array[left], offset, num_cards);
} }
// Variant of index_for that does not check the index for validity. // Variant of index_for that does not check the index for validity.
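The explicit loop removed above existed because, per the comment elsewhere in this change, memset() can expose transient (phantom) zeros to concurrent readers on some platforms. Presumably the new shared helper centralizes that platform choice; a sketch under that assumption, with USE_PLAIN_MEMSET a hypothetical stand-in for the real platform check:

#include <stddef.h>
#include <string.h>

#ifndef USE_PLAIN_MEMSET
#define USE_PLAIN_MEMSET 0                 // hypothetical platform switch
#endif

inline void memset_with_concurrent_readers_sketch(void* to, int value, size_t size) {
#if USE_PLAIN_MEMSET
  memset(to, value, size);                 // safe where memset stores each byte once
#else
  unsigned char* p = (unsigned char*)to;
  for (size_t i = 0; i < size; i++) {
    p[i] = (unsigned char)value;           // plain byte stores, no widening tricks
  }
#endif
}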
@ -0,0 +1,63 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "oops/oop.inline.hpp"
template <typename T>
void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
_work->do_oop(p);
T oop_or_narrowoop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(oop_or_narrowoop)) {
oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
HeapRegion* hr = _g1h->heap_region_containing_raw(o);
assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in collection set then evacuation failed and nm must already be in the remset");
hr->add_strong_code_root(_nm);
}
}
void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop(oop* o) {
do_oop_work(o);
}
void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop(narrowOop* o) {
do_oop_work(o);
}
void G1CodeBlobClosure::do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
if (!nm->test_set_oops_do_mark()) {
_oc.set_nm(nm);
nm->oops_do(&_oc);
nm->fix_oop_relocations();
}
}
}
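do_code_blob above claims each nmethod at most once per GC via test_set_oops_do_mark() before walking its oops. A standalone sketch of that claim-once idiom (FakeNMethod is a stand-in type, not hotspot's nmethod):

#include <atomic>

struct FakeNMethod {
  std::atomic<bool> marked;
  FakeNMethod() : marked(false) {}
  bool test_set_oops_do_mark() {           // true if some worker already claimed it
    return marked.exchange(true);
  }
};

void process(FakeNMethod* nm) {
  if (!nm->test_set_oops_do_mark()) {
    // first claimant only: walk oops and fix relocations (elided in this sketch)
  }
}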
@ -0,0 +1,55 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "gc/g1/g1CollectedHeap.hpp"
#include "memory/iterator.hpp"
class nmethod;
class G1CodeBlobClosure : public CodeBlobClosure {
class HeapRegionGatheringOopClosure : public OopClosure {
G1CollectedHeap* _g1h;
OopClosure* _work;
nmethod* _nm;
template <typename T>
void do_oop_work(T* p);
public:
HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
void do_oop(oop* o);
void do_oop(narrowOop* o);
void set_nm(nmethod* nm) {
_nm = nm;
}
};
HeapRegionGatheringOopClosure _oc;
public:
G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
void do_code_blob(CodeBlob* cb);
};
@ -65,6 +65,7 @@
#include "memory/iterator.hpp" #include "memory/iterator.hpp"
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp" #include "runtime/atomic.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.inline.hpp" #include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp" #include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp" #include "utilities/globalDefinitions.hpp"
@ -949,6 +950,7 @@ bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
} }
bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) { bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL"); assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided"); assert(count != 0, "No MemRegions provided");
MutexLockerEx x(Heap_lock); MutexLockerEx x(Heap_lock);
@ -1037,12 +1039,13 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
} }
// Notify mark-sweep of the archive range. // Notify mark-sweep of the archive range.
G1MarkSweep::mark_range_archive(curr_range); G1MarkSweep::set_range_archive(curr_range, true);
} }
return true; return true;
} }
void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) { void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL"); assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided"); assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved(); MemRegion reserved = _hrm.reserved();
@ -1125,6 +1128,81 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
return result; return result;
} }
void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
HeapWord* prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL;
size_t size_used = 0;
size_t uncommitted_regions = 0;
// For each MemRegion, free the G1 regions that constitute it, and
// notify mark-sweep that the range is no longer to be considered 'archive.'
MutexLockerEx x(Heap_lock);
for (size_t i = 0; i < count; i++) {
HeapWord* start_address = ranges[i].start();
HeapWord* last_address = ranges[i].last();
assert(reserved.contains(start_address) && reserved.contains(last_address),
err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address)));
assert(start_address > prev_last_addr,
err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr)));
size_used += ranges[i].byte_size();
prev_last_addr = last_address;
HeapRegion* start_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);
// Check for ranges that start in the same G1 region in which the previous
// range ended, and adjust the start address so we don't try to free
// the same region again. If the current range is entirely within that
// region, skip it.
if (start_region == prev_last_region) {
start_address = start_region->end();
if (start_address > last_address) {
continue;
}
start_region = _hrm.addr_to_region(start_address);
}
prev_last_region = last_region;
// After verifying that each region was marked as an archive region by
// alloc_archive_regions, set it free and empty and uncommit it.
HeapRegion* curr_region = start_region;
while (curr_region != NULL) {
guarantee(curr_region->is_archive(),
err_msg("Expected archive region at index %u", curr_region->hrm_index()));
uint curr_index = curr_region->hrm_index();
_old_set.remove(curr_region);
curr_region->set_free();
curr_region->set_top(curr_region->bottom());
if (curr_region != last_region) {
curr_region = _hrm.next_region_in_heap(curr_region);
} else {
curr_region = NULL;
}
_hrm.shrink_at(curr_index, 1);
uncommitted_regions++;
}
// Notify mark-sweep that this is no longer an archive range.
G1MarkSweep::set_range_archive(ranges[i], false);
}
if (uncommitted_regions != 0) {
ergo_verbose1(ErgoHeapSizing,
"attempt heap shrinking",
ergo_format_reason("uncommitted archive regions")
ergo_format_byte("total size"),
HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
}
decrease_used(size_used);
}
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size, HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
uint* gc_count_before_ret, uint* gc_count_before_ret,
uint* gclocker_retry_count_ret) { uint* gclocker_retry_count_ret) {
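Taken together with check_archive_addresses, alloc_archive_regions, and fill_archive_regions, the new dealloc_archive_regions completes the init-time protocol described in its header comment: fill on a successful archive mapping, dealloc on failure. A hedged sketch of that call sequence with minimal stub types (map_archive_file and the stubs are placeholders, not hotspot code):

#include <cstddef>

struct MemRegion {};
struct G1CollectedHeap {                       // stub exposing this change's entry points
  bool check_archive_addresses(MemRegion*, size_t) { return true; }
  bool alloc_archive_regions(MemRegion*, size_t)   { return true; }
  void fill_archive_regions(MemRegion*, size_t)    {}
  void dealloc_archive_regions(MemRegion*, size_t) {}
};
bool map_archive_file(MemRegion*, size_t) { return false; }  // hypothetical

void init_shared_archive(G1CollectedHeap* g1h, MemRegion* ranges, size_t count) {
  if (!g1h->check_archive_addresses(ranges, count)) return;  // heap cannot hold ranges
  if (!g1h->alloc_archive_regions(ranges, count)) return;    // regions already in use
  if (map_archive_file(ranges, count)) {
    g1h->fill_archive_regions(ranges, count);    // success: pad gaps, keep regions
  } else {
    g1h->dealloc_archive_regions(ranges, count); // failure: uncommit and shrink
  }
}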
@ -2845,9 +2923,9 @@ size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
} }
// For G1 TLABs should not contain humongous objects, so the maximum TLAB size // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
// must be smaller than the humongous object limit. // must be equal to the humongous object limit.
size_t G1CollectedHeap::max_tlab_size() const { size_t G1CollectedHeap::max_tlab_size() const {
return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment); return align_size_down(_humongous_object_threshold_in_words, MinObjAlignment);
} }
size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
@ -4051,7 +4129,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE #endif // YOUNG_LIST_VERBOSE
g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info); g1_policy()->finalize_cset(target_pause_time_ms);
evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());
register_humongous_regions_with_cset(); register_humongous_regions_with_cset();
@ -4175,7 +4255,10 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// investigate this in CR 7178365. // investigate this in CR 7178365.
double sample_end_time_sec = os::elapsedTime(); double sample_end_time_sec = os::elapsedTime();
double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS; double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info); g1_policy()->record_collection_pause_end(pause_time_ms);
evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before());
evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
MemoryService::track_memory_usage(); MemoryService::track_memory_usage();
@ -4501,8 +4584,7 @@ public:
bool only_young, bool claim) bool only_young, bool claim)
: _oop_closure(oop_closure), : _oop_closure(oop_closure),
_oop_in_klass_closure(oop_closure->g1(), _oop_in_klass_closure(oop_closure->g1(),
oop_closure->pss(), oop_closure->pss()),
oop_closure->rp()),
_klass_in_cld_closure(&_oop_in_klass_closure, only_young), _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
_claim(claim) { _claim(claim) {
@ -4531,18 +4613,18 @@ public:
bool only_young = _g1h->collector_state()->gcs_are_young(); bool only_young = _g1h->collector_state()->gcs_are_young();
// Non-IM young GC. // Non-IM young GC.
G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, pss, rp); G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, pss);
G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl, G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
only_young, // Only process dirty klasses. only_young, // Only process dirty klasses.
false); // No need to claim CLDs. false); // No need to claim CLDs.
// IM young GC. // IM young GC.
// Strong roots closures. // Strong roots closures.
G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, pss, rp); G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, pss);
G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl, G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
false, // Process all klasses. false, // Process all klasses.
true); // Need to claim CLDs. true); // Need to claim CLDs.
// Weak roots closures. // Weak roots closures.
G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss, rp); G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss);
G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl, G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
false, // Process all klasses. false, // Process all klasses.
true); // Need to claim CLDs. true); // Need to claim CLDs.
@ -4582,9 +4664,9 @@ public:
worker_id); worker_id);
G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss); G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss);
_root_processor->scan_remembered_sets(&push_heap_rs_cl, _g1h->g1_rem_set()->oops_into_collection_set_do(&push_heap_rs_cl,
weak_root_cl, weak_root_cl,
worker_id); worker_id);
double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec; double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
double term_sec = 0.0; double term_sec = 0.0;
@ -5241,9 +5323,9 @@ public:
G1ParScanThreadState* pss = _pss[worker_id]; G1ParScanThreadState* pss = _pss[worker_id];
pss->set_ref_processor(NULL); pss->set_ref_processor(NULL);
G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss, NULL); G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss);
G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL); G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
@ -5341,9 +5423,9 @@ public:
pss->set_ref_processor(NULL); pss->set_ref_processor(NULL);
assert(pss->queue_is_empty(), "both queue and overflow should be empty"); assert(pss->queue_is_empty(), "both queue and overflow should be empty");
G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss, NULL); G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss);
G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL); G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
@ -5451,9 +5533,9 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadState** per_t
// closures while we're actually processing the discovered // closures while we're actually processing the discovered
// reference objects. // reference objects.
G1ParScanExtRootClosure only_copy_non_heap_cl(this, pss, NULL); G1ParScanExtRootClosure only_copy_non_heap_cl(this, pss);
G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss, NULL); G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss);
OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
@ -757,6 +757,12 @@ public:
// alloc_archive_regions, and after class loading has occurred. // alloc_archive_regions, and after class loading has occurred.
void fill_archive_regions(MemRegion* range, size_t count); void fill_archive_regions(MemRegion* range, size_t count);
// For each of the specified MemRegions, uncommit the containing G1 regions
// which had been allocated by alloc_archive_regions. This should be called
// rather than fill_archive_regions at JVM init time if the archive file
// mapping failed, with the same non-overlapping and sorted MemRegion array.
void dealloc_archive_regions(MemRegion* range, size_t count);
protected: protected:
// Shrink the garbage-first heap by at most the given size (in bytes!). // Shrink the garbage-first heap by at most the given size (in bytes!).
@ -181,15 +181,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
G1ErgoVerbose::set_enabled(false); G1ErgoVerbose::set_enabled(false);
} }
// Verify PLAB sizes
const size_t region_size = HeapRegion::GrainWords;
if (YoungPLABSize > region_size || OldPLABSize > region_size) {
char buffer[128];
jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most " SIZE_FORMAT,
OldPLABSize > region_size ? "Old" : "Young", region_size);
vm_exit_during_initialization(buffer);
}
_recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
_prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
@ -932,7 +923,7 @@ bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc
// Anything below that is considered to be zero // Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001 #define MIN_TIMER_GRANULARITY 0.0000001
void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) { void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
double end_time_sec = os::elapsedTime(); double end_time_sec = os::elapsedTime();
assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(), assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
"otherwise, the subtraction below does not make sense"); "otherwise, the subtraction below does not make sense");
@ -964,9 +955,6 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
_mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
end_time_sec, _g1->gc_tracer_stw()->gc_id()); end_time_sec, _g1->gc_tracer_stw()->gc_id());
evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
if (update_stats) { if (update_stats) {
_trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times()); _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
// this is where we update the allocation rate of the application // this is where we update the allocation rate of the application
@ -1883,7 +1871,7 @@ uint G1CollectorPolicy::calc_max_old_cset_length() {
} }
void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) { void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
double young_start_time_sec = os::elapsedTime(); double young_start_time_sec = os::elapsedTime();
YoungList* young_list = _g1->young_list(); YoungList* young_list = _g1->young_list();
@ -2093,7 +2081,6 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInf
double non_young_end_time_sec = os::elapsedTime(); double non_young_end_time_sec = os::elapsedTime();
phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0); phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
evacuation_info.set_collectionset_regions(cset_region_length());
} }
void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) { void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
@ -604,10 +604,6 @@ public:
virtual G1CollectorPolicy* as_g1_policy() { return this; } virtual G1CollectorPolicy* as_g1_policy() { return this; }
virtual CollectorPolicy::Name kind() {
return CollectorPolicy::G1CollectorPolicyKind;
}
G1CollectorState* collector_state(); G1CollectorState* collector_state();
G1GCPhaseTimes* phase_times() const { return _phase_times; } G1GCPhaseTimes* phase_times() const { return _phase_times; }
@ -634,13 +630,11 @@ public:
virtual HeapWord* satisfy_failed_allocation(size_t size, virtual HeapWord* satisfy_failed_allocation(size_t size,
bool is_tlab); bool is_tlab);
BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0); bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
// Record the start and end of an evacuation pause. // Record the start and end of an evacuation pause.
void record_collection_pause_start(double start_time_sec); void record_collection_pause_start(double start_time_sec);
void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info); void record_collection_pause_end(double pause_time_ms);
// Record the start and end of a full collection. // Record the start and end of a full collection.
void record_full_collection_start(); void record_full_collection_start();
@ -682,6 +676,10 @@ public:
return _bytes_copied_during_gc; return _bytes_copied_during_gc;
} }
size_t collection_set_bytes_used_before() const {
return _collection_set_bytes_used_before;
}
// Determine whether there are candidate regions so that the // Determine whether there are candidate regions so that the
// next GC should be mixed. The two action strings are used // next GC should be mixed. The two action strings are used
// in the ergo output when the method returns true or false. // in the ergo output when the method returns true or false.
@ -691,7 +689,7 @@ public:
// Choose a new collection set. Marks the chosen regions as being // Choose a new collection set. Marks the chosen regions as being
// "in_collection_set", and links them together. The head and number of // "in_collection_set", and links them together. The head and number of
// the collection set are available via access methods. // the collection set are available via access methods.
void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info); void finalize_cset(double target_pause_time_ms);
// The head of the list (via "next_in_collection_set()") representing the // The head of the list (via "next_in_collection_set()") representing the
// current collection set. // current collection set.
@ -54,17 +54,46 @@ void G1EvacStats::adjust_desired_plab_sz() {
_allocated, _wasted, _region_end_waste, _unused, used())); _allocated, _wasted, _region_end_waste, _unused, used()));
_allocated = 1; _allocated = 1;
} }
// We account region end waste fully to PLAB allocation. This is not completely fair,
// but is a conservative assumption because PLABs may be sized flexibly while we
// cannot adjust direct allocations.
// In some cases, wasted_frac may become > 1 but that just reflects the problem
// with region_end_waste.
double wasted_frac = (double)(_unused + _wasted + _region_end_waste) / (double)_allocated;
size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
if (target_refills == 0) {
target_refills = 1;
}
size_t cur_plab_sz = used() / target_refills;
// The size of the PLAB caps the amount of space that can be wasted at the
// end of the collection. In the worst case the last PLAB could be completely
// empty.
// This allows us to calculate the new PLAB size to achieve the
// TargetPLABWastePct given the latest memory usage and that the last buffer
// will be G1LastPLABAverageOccupancy full.
//
// E.g. assume that if in the current GC 100 words were allocated and a
// TargetPLABWastePct of 10 had been set.
//
// So we could waste up to 10 words to meet that percentage. Given that we
// also assume that that buffer is typically half-full, the new desired PLAB
// size is set to 20 words.
//
// The amount of allocation performed should be independent of the number of
// threads, so should the maximum waste we can spend in total. So if
// we used n threads to allocate, each of them can spend maximum waste/n words in
// a first rough approximation. The number of threads only comes into play later
// when actually retrieving the actual desired PLAB size.
//
// After calculating this optimal PLAB size the algorithm applies the usual
// exponential decaying average over this value to guess the next PLAB size.
//
// We account region end waste fully to PLAB allocation (in the calculation of
// what we consider as "used_for_waste_calculation" below). This is not
// completely fair, but is a conservative assumption because PLABs may be sized
// flexibly while we cannot adjust inline allocations.
// Allocation during GC will try to minimize region end waste so this impact
// should be minimal.
//
// We need to cover overflow when calculating the amount of space actually used
// by objects in PLABs when subtracting the region end waste.
// Region end waste may be higher than actual allocation. This may occur if many
// threads do not allocate anything but a few rather large objects. In this
// degenerate case the PLAB size would simply quickly tend to minimum PLAB size,
// which is an okay reaction.
size_t const used_for_waste_calculation = used() > _region_end_waste ? used() - _region_end_waste : 0;
size_t const total_waste_allowed = used_for_waste_calculation * TargetPLABWastePct;
size_t const cur_plab_sz = (double)total_waste_allowed / G1LastPLABAverageOccupancy;
// Take historical weighted average // Take historical weighted average
_filter.sample(cur_plab_sz); _filter.sample(cur_plab_sz);
// Clip from above and below, and align to object boundary // Clip from above and below, and align to object boundary
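The numbers in the new comment can be checked directly. A small standalone computation of the new sizing rule with 100 words used, TargetPLABWastePct = 10, and an assumed last-PLAB occupancy of G1LastPLABAverageOccupancy = 50 percent:

#include <cstdio>
#include <cstddef>

int main() {
  size_t used_for_waste_calculation = 100;   // words used, net of region end waste
  double target_plab_waste_pct = 10.0;
  double last_plab_avg_occupancy = 50.0;     // percent, G1LastPLABAverageOccupancy

  size_t total_waste_allowed = (size_t)(used_for_waste_calculation * target_plab_waste_pct);
  size_t cur_plab_sz = (size_t)(total_waste_allowed / last_plab_avg_occupancy);
  std::printf("desired PLAB size: %zu words\n", cur_plab_sz);  // prints 20
  return 0;
}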
@ -74,7 +74,7 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
assert(rp != NULL, "should be non-NULL"); assert(rp != NULL, "should be non-NULL");
assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition"); assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");
GenMarkSweep::_ref_processor = rp; GenMarkSweep::set_ref_processor(rp);
rp->setup_policy(clear_all_softrefs); rp->setup_policy(clear_all_softrefs);
// When collecting the permanent generation Method*s may be moving, // When collecting the permanent generation Method*s may be moving,
@ -108,7 +108,7 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
JvmtiExport::gc_epilogue(); JvmtiExport::gc_epilogue();
// refs processing: clean slate // refs processing: clean slate
GenMarkSweep::_ref_processor = NULL; GenMarkSweep::set_ref_processor(NULL);
} }
@ -310,9 +310,9 @@ void G1MarkSweep::enable_archive_object_check() {
HeapRegion::GrainBytes); HeapRegion::GrainBytes);
} }
void G1MarkSweep::mark_range_archive(MemRegion range) { void G1MarkSweep::set_range_archive(MemRegion range, bool is_archive) {
assert(_archive_check_enabled, "archive range check not enabled"); assert(_archive_check_enabled, "archive range check not enabled");
_archive_region_map.set_by_address(range, true); _archive_region_map.set_by_address(range, is_archive);
} }
bool G1MarkSweep::in_archive_range(oop object) { bool G1MarkSweep::in_archive_range(oop object) {
@ -58,8 +58,8 @@ class G1MarkSweep : AllStatic {
// Create the _archive_region_map which is used to identify archive objects. // Create the _archive_region_map which is used to identify archive objects.
static void enable_archive_object_check(); static void enable_archive_object_check();
// Mark the regions containing the specified address range as archive regions. // Set the regions containing the specified address range as archive/non-archive.
static void mark_range_archive(MemRegion range); static void set_range_archive(MemRegion range, bool is_archive);
// Check if an object is in an archive region using the _archive_region_map. // Check if an object is in an archive region using the _archive_region_map.
static bool in_archive_range(oop object); static bool in_archive_range(oop object);
@ -125,8 +125,7 @@ private:
template <class T> void do_oop_work(T* p); template <class T> void do_oop_work(T* p);
public: public:
G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
ReferenceProcessor* rp) :
G1ParCopyHelper(g1, par_scan_state) { G1ParCopyHelper(g1, par_scan_state) {
assert(_ref_processor == NULL, "sanity"); assert(_ref_processor == NULL, "sanity");
} }
@ -141,7 +140,6 @@ public:
G1CollectedHeap* g1() { return _g1; }; G1CollectedHeap* g1() { return _g1; };
G1ParScanThreadState* pss() { return _par_scan_state; } G1ParScanThreadState* pss() { return _par_scan_state; }
ReferenceProcessor* rp() { return _ref_processor; };
}; };
typedef G1ParCopyClosure<G1BarrierNone, G1MarkNone> G1ParScanExtRootClosure; typedef G1ParCopyClosure<G1BarrierNone, G1MarkNone> G1ParScanExtRootClosure;
@ -186,6 +186,21 @@ InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop co
return dest(state); return dest(state);
} }
void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
oop const old, size_t word_sz, uint age,
HeapWord * const obj_ptr,
const AllocationContext_t context) const {
G1PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
if (alloc_buf->contains(obj_ptr)) {
_g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
dest_state.value() == InCSetState::Old,
alloc_buf->word_sz());
} else {
_g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz, age,
dest_state.value() == InCSetState::Old);
}
}
oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state, oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
oop const old, oop const old,
markOop const old_mark) { markOop const old_mark) {
@ -219,6 +234,10 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
return handle_evacuation_failure_par(old, old_mark); return handle_evacuation_failure_par(old, old_mark);
} }
} }
if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
// The events are checked individually as part of the actual commit
report_promotion_event(dest_state, old, word_sz, age, obj_ptr, context);
}
} }
assert(obj_ptr != NULL, "when we get here, allocation should have succeeded"); assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
@ -173,6 +173,10 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
bool previous_plab_refill_failed); bool previous_plab_refill_failed);
inline InCSetState next_state(InCSetState const state, markOop const m, uint& age); inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);
void report_promotion_event(InCSetState const dest_state,
oop const old, size_t word_sz, uint age,
HeapWord * const obj_ptr, const AllocationContext_t context) const;
public: public:
oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark); oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);
@ -26,6 +26,7 @@
#include "gc/g1/concurrentG1Refine.hpp" #include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentG1RefineThread.hpp" #include "gc/g1/concurrentG1RefineThread.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp" #include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp" #include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp" #include "gc/g1/g1GCPhaseTimes.hpp"
@ -228,12 +229,15 @@ public:
}; };
void G1RemSet::scanRS(G1ParPushHeapRSClosure* oc, void G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl, OopClosure* non_heap_roots,
uint worker_i) { uint worker_i) {
double rs_time_start = os::elapsedTime(); double rs_time_start = os::elapsedTime();
G1CodeBlobClosure code_root_cl(non_heap_roots);
HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i); HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
ScanRSClosure scanRScl(oc, code_root_cl, worker_i); ScanRSClosure scanRScl(oc, &code_root_cl, worker_i);
_g1->collection_set_iterate_from(startRegion, &scanRScl); _g1->collection_set_iterate_from(startRegion, &scanRScl);
scanRScl.set_try_claimed(); scanRScl.set_try_claimed();
@ -295,7 +299,7 @@ void G1RemSet::cleanupHRRS() {
} }
void G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc, void G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl, OopClosure* non_heap_roots,
uint worker_i) { uint worker_i) {
#if CARD_REPEAT_HISTO #if CARD_REPEAT_HISTO
ct_freq_update_histo_and_reset(); ct_freq_update_histo_and_reset();
@ -318,7 +322,7 @@ void G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set()); DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
updateRS(&into_cset_dcq, worker_i); updateRS(&into_cset_dcq, worker_i);
scanRS(oc, code_root_cl, worker_i); scanRS(oc, non_heap_roots, worker_i);
// We now clear the cached values of _cset_rs_update_cl for this worker // We now clear the cached values of _cset_rs_update_cl for this worker
_cset_rs_update_cl[worker_i] = NULL; _cset_rs_update_cl[worker_i] = NULL;
@ -85,7 +85,7 @@ public:
// invoked "blk->set_region" to set the "from" region correctly // invoked "blk->set_region" to set the "from" region correctly
// beforehand.) // beforehand.)
// //
// Invoke code_root_cl->do_code_blob on the unmarked nmethods // Apply non_heap_roots on the oops of the unmarked nmethods
// on the strong code roots list for each region in the // on the strong code roots list for each region in the
// collection set. // collection set.
// //
@ -95,7 +95,7 @@ public:
// the "i" passed to the calling thread's work(i) function. // the "i" passed to the calling thread's work(i) function.
// In the sequential case this param will be ignored. // In the sequential case this param will be ignored.
void oops_into_collection_set_do(G1ParPushHeapRSClosure* blk, void oops_into_collection_set_do(G1ParPushHeapRSClosure* blk,
CodeBlobClosure* code_root_cl, OopClosure* non_heap_roots,
uint worker_i); uint worker_i);
// Prepare for and cleanup after an oops_into_collection_set_do // Prepare for and cleanup after an oops_into_collection_set_do
@ -107,7 +107,7 @@ public:
void cleanup_after_oops_into_collection_set_do(); void cleanup_after_oops_into_collection_set_do();
void scanRS(G1ParPushHeapRSClosure* oc, void scanRS(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl, OopClosure* non_heap_roots,
uint worker_i); uint worker_i);
void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i); void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);
@ -28,6 +28,7 @@
#include "classfile/systemDictionary.hpp" #include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp" #include "code/codeCache.hpp"
#include "gc/g1/bufferingOopClosure.hpp" #include "gc/g1/bufferingOopClosure.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp" #include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp" #include "gc/g1/g1CollectorState.hpp"
@ -40,57 +41,6 @@
#include "runtime/mutex.hpp" #include "runtime/mutex.hpp"
#include "services/management.hpp" #include "services/management.hpp"
class G1CodeBlobClosure : public CodeBlobClosure {
class HeapRegionGatheringOopClosure : public OopClosure {
G1CollectedHeap* _g1h;
OopClosure* _work;
nmethod* _nm;
template <typename T>
void do_oop_work(T* p) {
_work->do_oop(p);
T oop_or_narrowoop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(oop_or_narrowoop)) {
oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
HeapRegion* hr = _g1h->heap_region_containing_raw(o);
assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
hr->add_strong_code_root(_nm);
}
}
public:
HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
void do_oop(oop* o) {
do_oop_work(o);
}
void do_oop(narrowOop* o) {
do_oop_work(o);
}
void set_nm(nmethod* nm) {
_nm = nm;
}
};
HeapRegionGatheringOopClosure _oc;
public:
G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
void do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
if (!nm->test_set_oops_do_mark()) {
_oc.set_nm(nm);
nm->oops_do(&_oc);
nm->fix_oop_relocations();
}
}
}
};
void G1RootProcessor::worker_has_discovered_all_strong_classes() { void G1RootProcessor::worker_has_discovered_all_strong_classes() {
assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading"); assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
@ -321,14 +271,6 @@ void G1RootProcessor::process_vm_roots(OopClosure* strong_roots,
} }
} }
void G1RootProcessor::scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
OopClosure* scan_non_heap_weak_roots,
uint worker_i) {
G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
_g1h->g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
}
uint G1RootProcessor::n_workers() const { uint G1RootProcessor::n_workers() const {
return _srs.n_threads(); return _srs.n_threads();
} }
@ -107,13 +107,6 @@ public:
CLDClosure* clds, CLDClosure* clds,
CodeBlobClosure* blobs); CodeBlobClosure* blobs);
// Apply scan_rs to all locations in the union of the remembered sets for all
// regions in the collection set
// (having done "set_region" to indicate the region in which the root resides),
void scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
OopClosure* scan_non_heap_weak_roots,
uint worker_i);
// Number of worker threads used by the root processor. // Number of worker threads used by the root processor.
uint n_workers() const; uint n_workers() const;
}; };
@ -27,6 +27,7 @@
#include "gc/g1/g1SATBCardTableModRefBS.hpp" #include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp" #include "gc/g1/heapRegion.hpp"
#include "gc/g1/satbQueue.hpp" #include "gc/g1/satbQueue.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp" #include "runtime/atomic.inline.hpp"
#include "runtime/mutexLocker.hpp" #include "runtime/mutexLocker.hpp"
@ -108,15 +109,7 @@ void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
jbyte *const first = byte_for(mr.start()); jbyte *const first = byte_for(mr.start());
jbyte *const last = byte_after(mr.last()); jbyte *const last = byte_after(mr.last());
// Below we may use an explicit loop instead of memset() because on
// certain platforms memset() can give concurrent readers phantom zeros.
if (UseMemSetInBOT) {
memset(first, g1_young_gen, last - first);
} else {
for (jbyte* i = first; i < last; i++) {
*i = g1_young_gen;
}
}
memset_with_concurrent_readers(first, g1_young_gen, last - first);
} }
#ifndef PRODUCT #ifndef PRODUCT
@ -207,7 +200,7 @@ G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
// Otherwise, log it. // Otherwise, log it.
G1SATBCardTableLoggingModRefBS* g1_bs = G1SATBCardTableLoggingModRefBS* g1_bs =
barrier_set_cast<G1SATBCardTableLoggingModRefBS>(G1CollectedHeap::heap()->barrier_set()); barrier_set_cast<G1SATBCardTableLoggingModRefBS>(G1CollectedHeap::heap()->barrier_set());
g1_bs->write_ref_field_work(field, new_val); g1_bs->write_ref_field_work(field, new_val, false);
} }
void void
@ -147,6 +147,10 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
private: private:
G1SATBCardTableLoggingModRefBSChangedListener _listener; G1SATBCardTableLoggingModRefBSChangedListener _listener;
DirtyCardQueueSet& _dcqs; DirtyCardQueueSet& _dcqs;
protected:
virtual void write_ref_field_work(void* field, oop new_val, bool release);
public: public:
static size_t compute_size(size_t mem_region_size_in_words) { static size_t compute_size(size_t mem_region_size_in_words) {
size_t number_of_slots = (mem_region_size_in_words / card_size_in_words); size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
@ -165,8 +169,6 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); } virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
void write_ref_field_work(void* field, oop new_val, bool release = false);
// Can be called from static contexts. // Can be called from static contexts.
static void write_ref_field_static(void* field, oop new_val); static void write_ref_field_static(void* field, oop new_val);
@ -82,6 +82,11 @@
"If true, enable reference discovery during concurrent " \ "If true, enable reference discovery during concurrent " \
"marking and reference processing at the end of remark.") \ "marking and reference processing at the end of remark.") \
\ \
experimental(double, G1LastPLABAverageOccupancy, 50.0, \
"The expected average occupancy of the last PLAB in " \
"percent.") \
range(0.001, 100.0) \
\
product(size_t, G1SATBBufferSize, 1*K, \ product(size_t, G1SATBBufferSize, 1*K, \
"Number of entries in an SATB log buffer.") \ "Number of entries in an SATB log buffer.") \
\ \
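Since G1LastPLABAverageOccupancy is declared with experimental(...), it should only take effect once experimental options are unlocked. A plausible invocation (flag value shown at its default):

java -XX:+UseG1GC -XX:+UnlockExperimentalVMOptions -XX:G1LastPLABAverageOccupancy=50.0 ...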
@ -68,7 +68,7 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
// or it was allocated after marking finished, then we add it. Otherwise // or it was allocated after marking finished, then we add it. Otherwise
// we can safely ignore the object. // we can safely ignore the object.
if (!g1h->is_obj_dead(oop(cur), _hr)) { if (!g1h->is_obj_dead(oop(cur), _hr)) {
oop_size = oop(cur)->oop_iterate(_rs_scan, mr); oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
} else { } else {
oop_size = _hr->block_size(cur); oop_size = _hr->block_size(cur);
} }
@ -426,7 +426,7 @@ uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
(num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) { (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found); uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);
uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove); shrink_at(idx_last_found + num_last_found - to_remove, to_remove);
cur = idx_last_found; cur = idx_last_found;
removed += to_remove; removed += to_remove;
@ -437,6 +437,17 @@ uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
return removed; return removed;
} }
void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
#ifdef ASSERT
for (uint i = index; i < (index + num_regions); i++) {
assert(is_available(i), err_msg("Expected available region at index %u", i));
assert(at(i)->is_empty(), err_msg("Expected empty region at index %u", i));
assert(at(i)->is_free(), err_msg("Expected free region at index %u", i));
}
#endif
uncommit_regions(index, num_regions);
}
uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const { uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
guarantee(start_idx < _allocated_heapregions_length, "checking"); guarantee(start_idx < _allocated_heapregions_length, "checking");
guarantee(res_idx != NULL, "checking"); guarantee(res_idx != NULL, "checking");
@ -241,6 +241,10 @@ public:
// Return the actual number of uncommitted regions. // Return the actual number of uncommitted regions.
uint shrink_by(uint num_regions_to_remove); uint shrink_by(uint num_regions_to_remove);
// Uncommit a number of regions starting at the specified index, which must be available,
// empty, and free.
void shrink_at(uint index, size_t num_regions);
void verify(); void verify();
// Do some sanity checking. // Do some sanity checking.
@ -35,7 +35,7 @@ private:
// We encode the value of the heap region type so the generation can be // We encode the value of the heap region type so the generation can be
// determined quickly. The tag is split into two parts: // determined quickly. The tag is split into two parts:
// //
// major type (young, humongous) : top N-1 bits // major type (young, old, humongous, archive) : top N-1 bits
// minor type (eden / survivor, starts / cont hum, etc.) : bottom 1 bit // minor type (eden / survivor, starts / cont hum, etc.) : bottom 1 bit
// //
// If there's need to increase the number of minor types in the // If there's need to increase the number of minor types in the
@ -89,7 +89,7 @@ class CheckForUnmarkedObjects : public ObjectClosure {
CheckForUnmarkedOops object_check(_young_gen, _card_table); CheckForUnmarkedOops object_check(_young_gen, _card_table);
obj->oop_iterate_no_header(&object_check); obj->oop_iterate_no_header(&object_check);
if (object_check.has_unmarked_oop()) { if (object_check.has_unmarked_oop()) {
assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object"); guarantee(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
} }
} }
}; };
@ -56,13 +56,7 @@ class CardTableExtension : public CardTableModRefBS {
CardTableExtension(MemRegion whole_heap) : CardTableExtension(MemRegion whole_heap) :
CardTableModRefBS( CardTableModRefBS(
whole_heap, whole_heap,
// Concrete tag should be BarrierSet::CardTableExtension.
// That will presently break things in a bunch of places though.
// The concrete tag is used as a dispatch key in many places, and
// CardTableExtension does not correctly dispatch in some of those
// uses. This will be addressed as part of a reorganization of the
// BarrierSet hierarchy.
BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableExtension))
BarrierSet::FakeRtti(BarrierSet::CardTableExtension))
{ } { }
// Scavenge support // Scavenge support
@ -44,7 +44,7 @@ void ImmutableSpace::oop_iterate(ExtendedOopClosure* cl) {
HeapWord* t = end(); HeapWord* t = end();
// Could call objects iterate, but this is easier. // Could call objects iterate, but this is easier.
while (obj_addr < t) { while (obj_addr < t) {
obj_addr += oop(obj_addr)->oop_iterate(cl); obj_addr += oop(obj_addr)->oop_iterate_size(cl);
} }
} }
Some files were not shown because too many files have changed in this diff.