Merge
commit b6b00269d9
@@ -231,3 +231,4 @@ b5ed503c26ad38869c247c5e32debec217fd056b jdk8-b104
892889f445755790ae90e61775bfb59ddc6182b5 jdk8-b107
74049f7a28b48c14910106a75d9f2504169c352e jdk8-b108
af9a674e12a16da1a4bd53e4990ddb1121a21ef1 jdk8-b109
b5d2bf482a3ea1cca08c994512804ffbc73de0a1 jdk8-b110
@@ -231,3 +231,4 @@ b7e64be81c8a7690703df5711f4fc2375da8a9cb jdk8-b103
0874bb4707b723d5bb108d379c557cf41529d1a7 jdk8-b107
9286a6e61291246d88af713f1ef79adeea30fe2e jdk8-b108
91f47e8da5c60de58ed195e9b57f3bf192a18f83 jdk8-b109
4faa09c7fe555de086dd9048d3c5cc92317d6f45 jdk8-b110
@@ -66,7 +66,7 @@ else
else
# We are building multiple configurations.
# First, find out the valid targets
# Run the makefile with an arbitraty SPEC using -p -q (quiet dry-run and dump rules) to find
# Run the makefile with an arbitrary SPEC using -p -q (quiet dry-run and dump rules) to find
# available PHONY targets. Use this list as valid targets to pass on to the repeated calls.
all_phony_targets=$(filter-out $(global_targets) bundles-only, $(strip $(shell \
$(MAKE) -p -q -f common/makefiles/Main.gmk FRC SPEC=$(firstword $(SPEC)) | \
@@ -1016,8 +1016,8 @@ with_cacerts_file
enable_unlimited_crypto
with_milestone
with_update_version
with_build_number
with_user_release_suffix
with_build_number
with_boot_jdk
with_boot_jdk_jvmargs
with_add_source_root
@@ -1755,10 +1755,10 @@ Optional Packages:
--with-cacerts-file specify alternative cacerts file
--with-milestone Set milestone value for build [internal]
--with-update-version Set update version value for build [b00]
--with-build-number Set build number value for build [b00]
--with-user-release-suffix
Add a custom string to the version string if build
number isn't set.[username_builddateb00]
--with-build-number Set build number value for build [b00]
--with-boot-jdk path to Boot JDK (used to bootstrap build) [probed]
--with-boot-jdk-jvmargs specify JVM arguments to be passed to all
invocations of the Boot JDK, overriding the default
@@ -3818,7 +3818,7 @@ fi
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1379504921
DATE_WHEN_GENERATED=1381162713
###############################################################################
#
@@ -10935,7 +10935,7 @@ BUILD_HEADLESS="BUILD_HEADLESS:=true"
if test "x$SUPPORT_HEADFUL" = xyes; then
# We are building both headful and headless.
headful_msg="inlude support for both headful and headless"
headful_msg="include support for both headful and headless"
fi
if test "x$SUPPORT_HEADFUL" = xno; then
@@ -11048,6 +11048,18 @@ elif test "x$with_update_version" != x; then
fi
# Check whether --with-user-release-suffix was given.
if test "${with_user_release_suffix+set}" = set; then :
withval=$with_user_release_suffix;
fi
if test "x$with_user_release_suffix" = xyes; then
as_fn_error $? "Release suffix must have a value" "$LINENO" 5
elif test "x$with_user_release_suffix" != x; then
USER_RELEASE_SUFFIX="$with_user_release_suffix"
fi
# Check whether --with-build-number was given.
if test "${with_build_number+set}" = set; then :
withval=$with_build_number;
@@ -11058,28 +11070,17 @@ if test "x$with_build_number" = xyes; then
elif test "x$with_build_number" != x; then
JDK_BUILD_NUMBER="$with_build_number"
fi
# Define default USER_RELEASE_SUFFIX if BUILD_NUMBER and USER_RELEASE_SUFFIX are not set
if test "x$JDK_BUILD_NUMBER" = x; then
JDK_BUILD_NUMBER=b00
if test "x$USER_RELEASE_SUFFIX" = x; then
BUILD_DATE=`date '+%Y_%m_%d_%H_%M'`
# Avoid [:alnum:] since it depends on the locale.
CLEAN_USERNAME=`echo "$USER" | $TR -d -c 'abcdefghijklmnopqrstuvqxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'`
USER_RELEASE_SUFFIX=`echo "${CLEAN_USERNAME}_${BUILD_DATE}" | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
fi
fi
# Check whether --with-user-release-suffix was given.
if test "${with_user_release_suffix+set}" = set; then :
withval=$with_user_release_suffix;
fi
if test "x$with_user_release_suffix" = xyes; then
as_fn_error $? "Release suffix must have a value" "$LINENO" 5
elif test "x$with_user_release_suffix" != x; then
USER_RELEASE_SUFFIX="$with_user_release_suffix"
else
BUILD_DATE=`date '+%Y_%m_%d_%H_%M'`
# Avoid [:alnum:] since it depends on the locale.
CLEAN_USERNAME=`echo "$USER" | $TR -d -c 'abcdefghijklmnopqrstuvqxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'`
USER_RELEASE_SUFFIX=`echo "${CLEAN_USERNAME}_${BUILD_DATE}" | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
fi
# Now set the JDK version, milestone, build number etc.
@@ -11095,6 +11096,7 @@ fi
COPYRIGHT_YEAR=`date +'%Y'`
@@ -316,7 +316,7 @@ BUILD_HEADLESS="BUILD_HEADLESS:=true"
if test "x$SUPPORT_HEADFUL" = xyes; then
# We are building both headful and headless.
headful_msg="inlude support for both headful and headless"
headful_msg="include support for both headful and headless"
fi
if test "x$SUPPORT_HEADFUL" = xno; then
@@ -426,6 +426,14 @@ elif test "x$with_update_version" != x; then
JDK_UPDATE_VERSION="$with_update_version"
fi
AC_ARG_WITH(user-release-suffix, [AS_HELP_STRING([--with-user-release-suffix],
[Add a custom string to the version string if build number isn't set.@<:@username_builddateb00@:>@])])
if test "x$with_user_release_suffix" = xyes; then
AC_MSG_ERROR([Release suffix must have a value])
elif test "x$with_user_release_suffix" != x; then
USER_RELEASE_SUFFIX="$with_user_release_suffix"
fi
AC_ARG_WITH(build-number, [AS_HELP_STRING([--with-build-number],
[Set build number value for build @<:@b00@:>@])])
if test "x$with_build_number" = xyes; then
@@ -433,25 +441,19 @@ if test "x$with_build_number" = xyes; then
elif test "x$with_build_number" != x; then
JDK_BUILD_NUMBER="$with_build_number"
fi
# Define default USER_RELEASE_SUFFIX if BUILD_NUMBER and USER_RELEASE_SUFFIX are not set
if test "x$JDK_BUILD_NUMBER" = x; then
JDK_BUILD_NUMBER=b00
if test "x$USER_RELEASE_SUFFIX" = x; then
BUILD_DATE=`date '+%Y_%m_%d_%H_%M'`
# Avoid [:alnum:] since it depends on the locale.
CLEAN_USERNAME=`echo "$USER" | $TR -d -c 'abcdefghijklmnopqrstuvqxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'`
USER_RELEASE_SUFFIX=`echo "${CLEAN_USERNAME}_${BUILD_DATE}" | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
fi
fi
AC_ARG_WITH(user-release-suffix, [AS_HELP_STRING([--with-user-release-suffix],
[Add a custom string to the version string if build number isn't set.@<:@username_builddateb00@:>@])])
if test "x$with_user_release_suffix" = xyes; then
AC_MSG_ERROR([Release suffix must have a value])
elif test "x$with_user_release_suffix" != x; then
USER_RELEASE_SUFFIX="$with_user_release_suffix"
else
BUILD_DATE=`date '+%Y_%m_%d_%H_%M'`
# Avoid [:alnum:] since it depends on the locale.
CLEAN_USERNAME=`echo "$USER" | $TR -d -c 'abcdefghijklmnopqrstuvqxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'`
USER_RELEASE_SUFFIX=`echo "${CLEAN_USERNAME}_${BUILD_DATE}" | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
fi
AC_SUBST(USER_RELEASE_SUFFIX)
# Now set the JDK version, milestone, build number etc.
AC_SUBST(USER_RELEASE_SUFFIX)
AC_SUBST(JDK_MAJOR_VERSION)
AC_SUBST(JDK_MINOR_VERSION)
AC_SUBST(JDK_MICRO_VERSION)
@@ -161,6 +161,7 @@ JDK_RC_PLATFORM_NAME:=@JDK_RC_PLATFORM_NAME@
COMPANY_NAME:=@COMPANY_NAME@
MACOSX_BUNDLE_NAME_BASE=@MACOSX_BUNDLE_NAME_BASE@
MACOSX_BUNDLE_ID_BASE=@MACOSX_BUNDLE_ID_BASE@
USER_RELEASE_SUFFIX=@USER_RELEASE_SUFFIX@
# Different version strings generated from the above information.
JDK_VERSION:=@JDK_VERSION@
@@ -173,8 +174,8 @@ ifeq ($(MILESTONE),)
else
RELEASE=$(JDK_VERSION)-$(MILESTONE)$(BUILD_VARIANT_RELEASE)
endif
ifeq ($(JDK_BUILD_NUMBER),b00)
USER_RELEASE_SUFFIX=@USER_RELEASE_SUFFIX@
ifneq ($(USER_RELEASE_SUFFIX),)
FULL_VERSION=$(RELEASE)-$(USER_RELEASE_SUFFIX)-$(JDK_BUILD_NUMBER)
else
FULL_VERSION=$(RELEASE)-$(JDK_BUILD_NUMBER)
@@ -159,7 +159,7 @@ define SetupArchive
endif
endif
# Utility macros, to make the shell script receipt somewhat easier to dechipher.
# Utility macros, to make the shell script receipt somewhat easier to decipher.
# The capture contents macro finds all files (matching the patterns, typically
# .class and .prp) that are newer than the jar-file, ie the new content to be put into the jar.
@@ -520,7 +520,7 @@ define SetupJavaCompilation
# Using plain javac to batch compile everything.
$1 := $$($1_ALL_COPY_TARGETS) $$($1_ALL_COPY_CLEAN_TARGETS) $$($1_BIN)/_the.batch
# When buliding in batch, put headers in a temp dir to filter out those that actually
# When building in batch, put headers in a temp dir to filter out those that actually
# changed before copying them to the real header dir.
ifneq (,$$($1_HEADERS))
$1_HEADERS_ARG := -h $$($1_HEADERS).tmp
@@ -231,3 +231,4 @@ d411c60a8c2fe8fdc572af907775e90f7eefd513 jdk8-b104
23fc34133152692b725db4bd617b4c8dfd6ccb05 jdk8-b107
a4bb3b4500164748a9c33b2283cfda76d89f25ab jdk8-b108
428428cf5e06163322144cfb5367e1faa86acf20 jdk8-b109
3d2b7ce93c5c2e3db748f29c3d29620a8b3b748a jdk8-b110
@@ -381,3 +381,5 @@ a09fe9d1e016c285307507a5793bc4fa6215e9c9 hs25-b50
566db1b0e6efca31f181456e54c8911d0192410d hs25-b51
c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109
58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52
6209b0ed51c086d4127bac0e086c8f326d1764d7 jdk8-b110
562a3d356de67670b4172b82aca2d30743449e04 hs25-b53
@@ -0,0 +1,56 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.memory;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
public class ProtectionDomainCacheEntry extends VMObject {
private static sun.jvm.hotspot.types.OopField protectionDomainField;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("ProtectionDomainCacheEntry");
protectionDomainField = type.getOopField("_literal");
}
public ProtectionDomainCacheEntry(Address addr) {
super(addr);
}
public Oop protectionDomain() {
return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr));
}
}
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@ import sun.jvm.hotspot.types.*;
public class ProtectionDomainEntry extends VMObject {
private static AddressField nextField;
private static sun.jvm.hotspot.types.OopField protectionDomainField;
private static AddressField pdCacheField;
static {
VM.registerVMInitializedObserver(new Observer() {
@@ -46,7 +46,7 @@ public class ProtectionDomainEntry extends VMObject {
Type type = db.lookupType("ProtectionDomainEntry");
nextField = type.getAddressField("_next");
protectionDomainField = type.getOopField("_protection_domain");
pdCacheField = type.getAddressField("_pd_cache");
}
public ProtectionDomainEntry(Address addr) {
@@ -54,10 +54,12 @@ public class ProtectionDomainEntry extends VMObject {
}
public ProtectionDomainEntry next() {
return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, addr);
return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, nextField.getValue(addr));
}
public Oop protectionDomain() {
return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr));
ProtectionDomainCacheEntry pd_cache = (ProtectionDomainCacheEntry)
VMObjectFactory.newObject(ProtectionDomainCacheEntry.class, pdCacheField.getValue(addr));
return pd_cache.protectionDomain();
}
}
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=53
HS_BUILD_NUMBER=54
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
@@ -37,6 +37,9 @@
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif
// Implementation of StubAssembler
@@ -912,7 +915,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Register tmp2 = G3_scratch;
jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
Label not_already_dirty, restart, refill;
Label not_already_dirty, restart, refill, young_card;
#ifdef _LP64
__ srlx(addr, CardTableModRefBS::card_shift, addr);
@@ -924,9 +927,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ set(rs, cardtable); // cardtable := <card table base>
__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
__ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
__ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
__ bind(young_card);
// We didn't take the branch, so we're already dirty: return.
// Use return-from-leaf
__ retl();
@@ -3752,7 +3752,7 @@ static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
#define __ masm.
address start = __ pc();
Label not_already_dirty, restart, refill;
Label not_already_dirty, restart, refill, young_card;
#ifdef _LP64
__ srlx(O0, CardTableModRefBS::card_shift, O0);
@@ -3763,9 +3763,15 @@ static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
__ set(addrlit, O1); // O1 := <card table base>
__ ldub(O0, O1, O2); // O2 := [O0 + O1]
__ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
__ ldub(O0, O1, O2); // O2 := [O0 + O1]
assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
__ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
__ bind(young_card);
// We didn't take the branch, so we're already dirty: return.
// Use return-from-leaf
__ retl();
@@ -38,6 +38,9 @@
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif
// Implementation of StubAssembler
@@ -1753,13 +1756,17 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
#endif
__ cmpb(Address(card_addr, 0), 0);
__ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
__ jcc(Assembler::equal, done);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
__ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
__ jcc(Assembler::equal, done);
// storing region crossing non-NULL, card is clean.
// dirty card and log.
__ movb(Address(card_addr, 0), 0);
__ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
__ cmpl(queue_index, 0);
__ jcc(Assembler::equal, runtime);
@@ -3389,13 +3389,18 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
const Register card_addr = tmp;
lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
#endif
cmpb(Address(card_addr, 0), 0);
cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
jcc(Assembler::equal, done);
membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
jcc(Assembler::equal, done);
// storing a region crossing, non-NULL oop, card is clean.
// dirty card and log.
movb(Address(card_addr, 0), 0);
movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
cmpl(queue_index, 0);
jcc(Assembler::equal, runtime);
@@ -53,7 +53,7 @@
// Defines Linux-specific default values. The flags are available on all
// platforms, but they may have different default values on other platforms.
//
define_pd_global(bool, UseLargePages, true);
define_pd_global(bool, UseLargePages, false);
define_pd_global(bool, UseLargePagesIndividualAllocation, false);
define_pd_global(bool, UseOSErrorReporting, false);
define_pd_global(bool, UseThreadPriorities, true) ;
@@ -3361,13 +3361,15 @@ bool os::Linux::setup_large_page_type(size_t page_size) {
if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
FLAG_IS_DEFAULT(UseSHM) &&
FLAG_IS_DEFAULT(UseTransparentHugePages)) {
// If UseLargePages is specified on the command line try all methods,
// if it's default, then try only UseTransparentHugePages.
if (FLAG_IS_DEFAULT(UseLargePages)) {
UseTransparentHugePages = true;
} else {
UseHugeTLBFS = UseTransparentHugePages = UseSHM = true;
}
// The type of large pages has not been specified by the user.
// Try UseHugeTLBFS and then UseSHM.
UseHugeTLBFS = UseSHM = true;
// Don't try UseTransparentHugePages since there are known
// performance issues with it turned on. This might change in the future.
UseTransparentHugePages = false;
}
if (UseTransparentHugePages) {
@@ -3393,9 +3395,19 @@ bool os::Linux::setup_large_page_type(size_t page_size) {
}
void os::large_page_init() {
if (!UseLargePages) {
UseHugeTLBFS = false;
if (!UseLargePages &&
!UseTransparentHugePages &&
!UseHugeTLBFS &&
!UseSHM) {
// Not using large pages.
return;
}
if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
// The user explicitly turned off large pages.
// Ignore the rest of the large pages flags.
UseTransparentHugePages = false;
UseHugeTLBFS = false;
UseSHM = false;
return;
}
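The hunk above changes `os::large_page_init()` so that it only bails out early when no large-page mechanism is requested at all, and only force-disables every mechanism when the user explicitly passed `-XX:-UseLargePages`. The following is a minimal standalone sketch of that decision order, not the real HotSpot code; the struct fields are hypothetical stand-ins for the VM's flag globals.

```cpp
#include <cstdio>

// Hypothetical stand-ins for the VM flags this hunk touches.
struct LargePageFlags {
  bool use_large_pages;            // UseLargePages
  bool use_large_pages_is_default; // true when the user did not set it
  bool use_thp;                    // UseTransparentHugePages
  bool use_hugetlbfs;              // UseHugeTLBFS
  bool use_shm;                    // UseSHM
};

// Mirrors the control flow of the patched function:
// 1) nothing requested -> do nothing;
// 2) explicit -UseLargePages -> clear every mechanism and return;
// 3) otherwise continue to pick a large-page type.
void large_page_init_sketch(LargePageFlags& f) {
  if (!f.use_large_pages && !f.use_thp && !f.use_hugetlbfs && !f.use_shm) {
    return;  // not using large pages at all
  }
  if (!f.use_large_pages_is_default && !f.use_large_pages) {
    // The user explicitly turned large pages off: ignore the other flags.
    f.use_thp = f.use_hugetlbfs = f.use_shm = false;
    return;
  }
  std::puts("would continue with setup_large_page_type()");
}
```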
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "utilities/hashtable.inline.hpp"
@@ -38,17 +39,21 @@ Dictionary::Dictionary(int table_size)
: TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry)) {
_current_class_index = 0;
_current_class_entry = NULL;
_pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize);
};
Dictionary::Dictionary(int table_size, HashtableBucket<mtClass>* t,
int number_of_entries)
: TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry), t, number_of_entries) {
_current_class_index = 0;
_current_class_entry = NULL;
_pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize);
};
ProtectionDomainCacheEntry* Dictionary::cache_get(oop protection_domain) {
return _pd_cache_table->get(protection_domain);
}
DictionaryEntry* Dictionary::new_entry(unsigned int hash, Klass* klass,
ClassLoaderData* loader_data) {
@@ -105,11 +110,12 @@ bool DictionaryEntry::contains_protection_domain(oop protection_domain) const {
}
void DictionaryEntry::add_protection_domain(oop protection_domain) {
void DictionaryEntry::add_protection_domain(Dictionary* dict, oop protection_domain) {
assert_locked_or_safepoint(SystemDictionary_lock);
if (!contains_protection_domain(protection_domain)) {
ProtectionDomainCacheEntry* entry = dict->cache_get(protection_domain);
ProtectionDomainEntry* new_head =
new ProtectionDomainEntry(protection_domain, _pd_set);
new ProtectionDomainEntry(entry, _pd_set);
// Warning: Preserve store ordering. The SystemDictionary is read
// without locks. The new ProtectionDomainEntry must be
// complete before other threads can be allowed to see it
@@ -193,7 +199,10 @@ bool Dictionary::do_unloading() {
void Dictionary::always_strong_oops_do(OopClosure* blk) {
// Follow all system classes and temporary placeholders in dictionary
// Follow all system classes and temporary placeholders in dictionary; only
// protection domain oops contain references into the heap. In a first
// pass over the system dictionary determine which need to be treated as
// strongly reachable and mark them as such.
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry *probe = bucket(index);
probe != NULL;
@@ -201,10 +210,13 @@ void Dictionary::always_strong_oops_do(OopClosure* blk) {
Klass* e = probe->klass();
ClassLoaderData* loader_data = probe->loader_data();
if (is_strongly_reachable(loader_data, e)) {
probe->protection_domain_set_oops_do(blk);
probe->set_strongly_reachable();
}
}
}
// Then iterate over the protection domain cache to apply the closure on the
// previously marked ones.
_pd_cache_table->always_strong_oops_do(blk);
}
@@ -266,18 +278,12 @@ void Dictionary::classes_do(void f(Klass*, ClassLoaderData*)) {
}
}
void Dictionary::oops_do(OopClosure* f) {
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
probe->protection_domain_set_oops_do(f);
}
}
// Only the protection domain oops contain references into the heap. Iterate
// over all of them.
_pd_cache_table->oops_do(f);
}
void Dictionary::methods_do(void f(Method*)) {
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry* probe = bucket(index);
@@ -292,6 +298,11 @@ void Dictionary::methods_do(void f(Method*)) {
}
}
void Dictionary::unlink(BoolObjectClosure* is_alive) {
// Only the protection domain cache table may contain references to the heap
// that need to be unlinked.
_pd_cache_table->unlink(is_alive);
}
Klass* Dictionary::try_get_next_class() {
while (true) {
@@ -306,7 +317,6 @@ Klass* Dictionary::try_get_next_class() {
// never reached
}
// Add a loaded class to the system dictionary.
// Readers of the SystemDictionary aren't always locked, so _buckets
// is volatile. The store of the next field in the constructor is
@@ -396,7 +406,7 @@ void Dictionary::add_protection_domain(int index, unsigned int hash,
assert(protection_domain() != NULL,
"real protection domain should be present");
entry->add_protection_domain(protection_domain());
entry->add_protection_domain(this, protection_domain());
assert(entry->contains_protection_domain(protection_domain()),
"now protection domain should be present");
@@ -446,6 +456,146 @@ void Dictionary::reorder_dictionary() {
}
}
ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size)
: Hashtable<oop, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry))
{
}
void ProtectionDomainCacheTable::unlink(BoolObjectClosure* is_alive) {
assert(SafepointSynchronize::is_at_safepoint(), "must be");
for (int i = 0; i < table_size(); ++i) {
ProtectionDomainCacheEntry** p = bucket_addr(i);
ProtectionDomainCacheEntry* entry = bucket(i);
while (entry != NULL) {
if (is_alive->do_object_b(entry->literal())) {
p = entry->next_addr();
} else {
*p = entry->next();
free_entry(entry);
}
entry = *p;
}
}
}
void ProtectionDomainCacheTable::oops_do(OopClosure* f) {
for (int index = 0; index < table_size(); index++) {
for (ProtectionDomainCacheEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
probe->oops_do(f);
}
}
}
uint ProtectionDomainCacheTable::bucket_size() {
return sizeof(ProtectionDomainCacheEntry);
}
#ifndef PRODUCT
void ProtectionDomainCacheTable::print() {
tty->print_cr("Protection domain cache table (table_size=%d, classes=%d)",
table_size(), number_of_entries());
for (int index = 0; index < table_size(); index++) {
for (ProtectionDomainCacheEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
probe->print();
}
}
}
void ProtectionDomainCacheEntry::print() {
tty->print_cr("entry "PTR_FORMAT" value "PTR_FORMAT" strongly_reachable %d next "PTR_FORMAT,
this, (void*)literal(), _strongly_reachable, next());
}
#endif
void ProtectionDomainCacheTable::verify() {
int element_count = 0;
for (int index = 0; index < table_size(); index++) {
for (ProtectionDomainCacheEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
probe->verify();
element_count++;
}
}
guarantee(number_of_entries() == element_count,
"Verify of protection domain cache table failed");
debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
}
void ProtectionDomainCacheEntry::verify() {
guarantee(literal()->is_oop(), "must be an oop");
}
void ProtectionDomainCacheTable::always_strong_oops_do(OopClosure* f) {
// the caller marked the protection domain cache entries that we need to apply
// the closure on. Only process them.
for (int index = 0; index < table_size(); index++) {
for (ProtectionDomainCacheEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
if (probe->is_strongly_reachable()) {
probe->reset_strongly_reachable();
probe->oops_do(f);
}
}
}
}
ProtectionDomainCacheEntry* ProtectionDomainCacheTable::get(oop protection_domain) {
unsigned int hash = compute_hash(protection_domain);
int index = hash_to_index(hash);
ProtectionDomainCacheEntry* entry = find_entry(index, protection_domain);
if (entry == NULL) {
entry = add_entry(index, hash, protection_domain);
}
return entry;
}
ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, oop protection_domain) {
for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) {
if (e->protection_domain() == protection_domain) {
return e;
}
}
return NULL;
}
ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, unsigned int hash, oop protection_domain) {
assert_locked_or_safepoint(SystemDictionary_lock);
assert(index == index_for(protection_domain), "incorrect index?");
assert(find_entry(index, protection_domain) == NULL, "no double entry");
ProtectionDomainCacheEntry* p = new_entry(hash, protection_domain);
Hashtable<oop, mtClass>::add_entry(index, p);
return p;
}
void ProtectionDomainCacheTable::free(ProtectionDomainCacheEntry* to_delete) {
unsigned int hash = compute_hash(to_delete->protection_domain());
int index = hash_to_index(hash);
ProtectionDomainCacheEntry** p = bucket_addr(index);
ProtectionDomainCacheEntry* entry = bucket(index);
while (true) {
assert(entry != NULL, "sanity");
if (entry == to_delete) {
*p = entry->next();
Hashtable<oop, mtClass>::free_entry(entry);
break;
} else {
p = entry->next_addr();
entry = *p;
}
}
}
SymbolPropertyTable::SymbolPropertyTable(int table_size)
: Hashtable<Symbol*, mtSymbol>(table_size, sizeof(SymbolPropertyEntry))
{
@@ -532,11 +682,13 @@ void Dictionary::print() {
tty->cr();
}
}
tty->cr();
_pd_cache_table->print();
tty->cr();
}
#endif
void Dictionary::verify() {
guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");
@@ -563,5 +715,7 @@ void Dictionary::verify() {
guarantee(number_of_entries() == element_count,
"Verify of system dictionary failed");
debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
_pd_cache_table->verify();
}
@@ -27,11 +27,14 @@
#include "classfile/systemDictionary.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/hashtable.hpp"
class DictionaryEntry;
class PSPromotionManager;
class ProtectionDomainCacheTable;
class ProtectionDomainCacheEntry;
class BoolObjectClosure;
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// The data structure for the system dictionary (and the shared system
@@ -45,6 +48,8 @@ private:
// pointer to the current hash table entry.
static DictionaryEntry* _current_class_entry;
ProtectionDomainCacheTable* _pd_cache_table;
DictionaryEntry* get_entry(int index, unsigned int hash,
Symbol* name, ClassLoaderData* loader_data);
@@ -93,6 +98,7 @@ public:
void methods_do(void f(Method*));
void unlink(BoolObjectClosure* is_alive);
// Classes loaded by the bootstrap loader are always strongly reachable.
// If we're not doing class unloading, all classes are strongly reachable.
@@ -118,6 +124,7 @@ public:
// Sharing support
void reorder_dictionary();
ProtectionDomainCacheEntry* cache_get(oop protection_domain);
#ifndef PRODUCT
void print();
@@ -126,21 +133,112 @@ public:
};
// The following classes can be in dictionary.cpp, but we need these
// to be in header file so that SA's vmStructs can access.
// to be in header file so that SA's vmStructs can access them.
class ProtectionDomainCacheEntry : public HashtableEntry<oop, mtClass> {
friend class VMStructs;
private:
// Flag indicating whether this protection domain entry is strongly reachable.
// Used during iterating over the system dictionary to remember oops that need
// to be updated.
bool _strongly_reachable;
public:
oop protection_domain() { return literal(); }
void init() {
_strongly_reachable = false;
}
ProtectionDomainCacheEntry* next() {
return (ProtectionDomainCacheEntry*)HashtableEntry<oop, mtClass>::next();
}
ProtectionDomainCacheEntry** next_addr() {
return (ProtectionDomainCacheEntry**)HashtableEntry<oop, mtClass>::next_addr();
}
void oops_do(OopClosure* f) {
f->do_oop(literal_addr());
}
void set_strongly_reachable() { _strongly_reachable = true; }
bool is_strongly_reachable() { return _strongly_reachable; }
void reset_strongly_reachable() { _strongly_reachable = false; }
void print() PRODUCT_RETURN;
void verify();
};
// The ProtectionDomainCacheTable contains all protection domain oops. The system
// dictionary entries reference its entries instead of having references to oops
// directly.
// This is used to speed up system dictionary iteration: the oops in the
// protection domain are the only ones referring the Java heap. So when there is
// need to update these, instead of going over every entry of the system dictionary,
// we only need to iterate over this set.
// The amount of different protection domains used is typically magnitudes smaller
// than the number of system dictionary entries (loaded classes).
class ProtectionDomainCacheTable : public Hashtable<oop, mtClass> {
friend class VMStructs;
private:
ProtectionDomainCacheEntry* bucket(int i) {
return (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::bucket(i);
}
// The following method is not MT-safe and must be done under lock.
ProtectionDomainCacheEntry** bucket_addr(int i) {
return (ProtectionDomainCacheEntry**) Hashtable<oop, mtClass>::bucket_addr(i);
}
ProtectionDomainCacheEntry* new_entry(unsigned int hash, oop protection_domain) {
ProtectionDomainCacheEntry* entry = (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::new_entry(hash, protection_domain);
entry->init();
return entry;
}
static unsigned int compute_hash(oop protection_domain) {
return (unsigned int)(protection_domain->identity_hash());
}
int index_for(oop protection_domain) {
return hash_to_index(compute_hash(protection_domain));
}
ProtectionDomainCacheEntry* add_entry(int index, unsigned int hash, oop protection_domain);
ProtectionDomainCacheEntry* find_entry(int index, oop protection_domain);
public:
ProtectionDomainCacheTable(int table_size);
ProtectionDomainCacheEntry* get(oop protection_domain);
void free(ProtectionDomainCacheEntry* entry);
void unlink(BoolObjectClosure* cl);
// GC support
void oops_do(OopClosure* f);
void always_strong_oops_do(OopClosure* f);
static uint bucket_size();
void print() PRODUCT_RETURN;
void verify();
};
class ProtectionDomainEntry :public CHeapObj<mtClass> {
friend class VMStructs;
public:
ProtectionDomainEntry* _next;
oop _protection_domain;
ProtectionDomainCacheEntry* _pd_cache;
ProtectionDomainEntry(oop protection_domain, ProtectionDomainEntry* next) {
_protection_domain = protection_domain;
_next = next;
ProtectionDomainEntry(ProtectionDomainCacheEntry* pd_cache, ProtectionDomainEntry* next) {
_pd_cache = pd_cache;
_next = next;
}
ProtectionDomainEntry* next() { return _next; }
oop protection_domain() { return _protection_domain; }
oop protection_domain() { return _pd_cache->protection_domain(); }
};
// An entry in the system dictionary, this describes a class as
@@ -151,6 +249,24 @@ class DictionaryEntry : public HashtableEntry<Klass*, mtClass> {
private:
// Contains the set of approved protection domains that can access
// this system dictionary entry.
//
// This protection domain set is a set of tuples:
//
// (InstanceKlass C, initiating class loader ICL, Protection Domain PD)
//
// [Note that C.protection_domain(), which is stored in the java.lang.Class
// mirror of C, is NOT the same as PD]
//
// If such an entry (C, ICL, PD) exists in the table, it means that
// it is okay for a class Foo to reference C, where
//
// Foo.protection_domain() == PD, and
// Foo's defining class loader == ICL
//
// The usage of the PD set can be seen in SystemDictionary::validate_protection_domain()
// It is essentially a cache to avoid repeated Java up-calls to
// ClassLoader.checkPackageAccess().
//
ProtectionDomainEntry* _pd_set;
ClassLoaderData* _loader_data;
@@ -158,7 +274,7 @@ class DictionaryEntry : public HashtableEntry<Klass*, mtClass> {
// Tells whether a protection is in the approved set.
bool contains_protection_domain(oop protection_domain) const;
// Adds a protection domain to the approved set.
void add_protection_domain(oop protection_domain);
void add_protection_domain(Dictionary* dict, oop protection_domain);
Klass* klass() const { return (Klass*)literal(); }
Klass** klass_addr() { return (Klass**)literal_addr(); }
@@ -189,12 +305,11 @@ class DictionaryEntry : public HashtableEntry<Klass*, mtClass> {
: contains_protection_domain(protection_domain());
}
void protection_domain_set_oops_do(OopClosure* f) {
void set_strongly_reachable() {
for (ProtectionDomainEntry* current = _pd_set;
current != NULL;
current = current->_next) {
f->do_oop(&(current->_protection_domain));
current->_pd_cache->set_strongly_reachable();
}
}
@@ -202,7 +317,7 @@ class DictionaryEntry : public HashtableEntry<Klass*, mtClass> {
for (ProtectionDomainEntry* current = _pd_set;
current != NULL;
current = current->_next) {
current->_protection_domain->verify();
current->_pd_cache->protection_domain()->verify();
}
}
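The comment block in this header is the key design note of the change: dictionary entries no longer hold protection-domain oops directly but point at shared ProtectionDomainCacheEntry objects, so the GC only has to visit the (much smaller) cache table. A toy C++17 model of that indirection follows; all names are invented and it is only meant to illustrate why `oops_do` can now skip the per-class entries entirely.

```cpp
#include <functional>
#include <unordered_map>
#include <vector>

// "Oop" stands in for a reference into the Java heap.
using Oop = void*;

struct CacheEntry {                 // analogue of ProtectionDomainCacheEntry
  Oop protection_domain;
  bool strongly_reachable = false;
};

struct ClassEntry {                 // analogue of DictionaryEntry
  std::vector<CacheEntry*> pd_set;  // shares CacheEntry objects, owns nothing
};

struct PdCache {                    // analogue of ProtectionDomainCacheTable
  std::unordered_map<Oop, CacheEntry> table;

  // Intern a protection domain: every class entry referencing the same oop
  // ends up pointing at the same CacheEntry.
  CacheEntry* get(Oop pd) {
    return &table.try_emplace(pd, CacheEntry{pd}).first->second;
  }

  // GC walks only this table, never the per-class pd_set lists.
  void oops_do(const std::function<void(Oop*)>& f) {
    for (auto& [key, entry] : table) f(&entry.protection_domain);
  }
};
```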
@@ -1697,6 +1697,24 @@ int SystemDictionary::calculate_systemdictionary_size(int classcount) {
return newsize;
}
#ifdef ASSERT
class VerifySDReachableAndLiveClosure : public OopClosure {
private:
BoolObjectClosure* _is_alive;
template <class T> void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
guarantee(_is_alive->do_object_b(obj), "Oop in system dictionary must be live");
}
public:
VerifySDReachableAndLiveClosure(BoolObjectClosure* is_alive) : OopClosure(), _is_alive(is_alive) { }
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};
#endif
// Assumes classes in the SystemDictionary are only unloaded at a safepoint
// Note: anonymous classes are not in the SD.
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
@@ -1707,7 +1725,15 @@ bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
unloading_occurred = dictionary()->do_unloading();
constraints()->purge_loader_constraints();
resolution_errors()->purge_resolution_errors();
}
}
// Oops referenced by the system dictionary may get unreachable independently
// of the class loader (eg. cached protection domain oops). So we need to
// explicitly unlink them here instead of in Dictionary::do_unloading.
dictionary()->unlink(is_alive);
#ifdef ASSERT
VerifySDReachableAndLiveClosure cl(is_alive);
dictionary()->oops_do(&cl);
#endif
return unloading_occurred;
}
@@ -6035,7 +6035,11 @@ void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
// is dirty.
G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
MemRegion mr(hr->bottom(), hr->pre_dummy_top());
ct_bs->verify_dirty_region(mr);
if (hr->is_young()) {
ct_bs->verify_g1_young_region(mr);
} else {
ct_bs->verify_dirty_region(mr);
}
}
void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
@@ -29,6 +29,7 @@
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "utilities/taskqueue.hpp"
@@ -134,7 +135,7 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
MemRegion mr(start, end);
g1_barrier_set()->dirty(mr);
g1_barrier_set()->g1_mark_as_young(mr);
}
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
@@ -319,10 +319,10 @@ G1CollectorPolicy::G1CollectorPolicy() :
}
void G1CollectorPolicy::initialize_flags() {
set_min_alignment(HeapRegion::GrainBytes);
_min_alignment = HeapRegion::GrainBytes;
size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size));
_max_alignment = MAX3(card_table_alignment, _min_alignment, page_size);
if (SurvivorRatio < 1) {
vm_exit_during_initialization("Invalid survivor ratio specified");
}
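This hunk only swaps the `set_min_alignment()`/`set_max_alignment()` setters for direct assignment to the `_min_alignment`/`_max_alignment` fields; the computation itself is unchanged: the maximum alignment is the largest of the card-table constraint, the minimum alignment, and the OS page size. A small illustrative worked example with made-up byte values (the real numbers come from HeapRegion, GenRemSet and os::):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t region_grain_bytes   = 1u << 20;  // pretend HeapRegion::GrainBytes = 1 MiB
  const std::size_t card_table_alignment = 1u << 12;  // pretend card-table constraint = 4 KiB
  const std::size_t page_size            = 1u << 21;  // pretend 2 MiB large pages

  std::size_t min_alignment = region_grain_bytes;
  std::size_t max_alignment = std::max({card_table_alignment, min_alignment, page_size});

  // With these values the 2 MiB page size wins, so the heap is aligned to 2 MiB.
  std::printf("min=%zu max=%zu\n", min_alignment, max_alignment);
  return 0;
}
```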
@@ -70,6 +70,12 @@ bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
return false;
}
if (val == g1_young_gen) {
// the card is for a young gen region. We don't need to keep track of all pointers into young
return false;
}
// Cached bit can be installed either on a clean card or on a claimed card.
jbyte new_val = val;
if (val == clean_card_val()) {
@@ -85,6 +91,19 @@ bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
return true;
}
void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
jbyte *const first = byte_for(mr.start());
jbyte *const last = byte_after(mr.last());
memset(first, g1_young_gen, last - first);
}
#ifndef PRODUCT
void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
verify_region(mr, g1_young_gen, true);
}
#endif
G1SATBCardTableLoggingModRefBS::
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
int max_covered_regions) :
@@ -97,7 +116,11 @@ G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
void
G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field,
oop new_val) {
jbyte* byte = byte_for(field);
volatile jbyte* byte = byte_for(field);
if (*byte == g1_young_gen) {
return;
}
OrderAccess::storeload();
if (*byte != dirty_card) {
*byte = dirty_card;
Thread* thr = Thread::current();
@@ -129,7 +152,7 @@ G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
void
G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
jbyte* byte = byte_for(mr.start());
volatile jbyte* byte = byte_for(mr.start());
jbyte* last_byte = byte_for(mr.last());
Thread* thr = Thread::current();
if (whole_heap) {
@@ -138,25 +161,35 @@ G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
byte++;
}
} else {
// Enqueue if necessary.
if (thr->is_Java_thread()) {
JavaThread* jt = (JavaThread*)thr;
while (byte <= last_byte) {
if (*byte != dirty_card) {
*byte = dirty_card;
jt->dirty_card_queue().enqueue(byte);
// skip all consecutive young cards
for (; byte <= last_byte && *byte == g1_young_gen; byte++);
if (byte <= last_byte) {
OrderAccess::storeload();
// Enqueue if necessary.
if (thr->is_Java_thread()) {
JavaThread* jt = (JavaThread*)thr;
for (; byte <= last_byte; byte++) {
if (*byte == g1_young_gen) {
continue;
}
if (*byte != dirty_card) {
*byte = dirty_card;
jt->dirty_card_queue().enqueue(byte);
}
}
byte++;
}
} else {
MutexLockerEx x(Shared_DirtyCardQ_lock,
Mutex::_no_safepoint_check_flag);
while (byte <= last_byte) {
if (*byte != dirty_card) {
*byte = dirty_card;
_dcqs.shared_dirty_card_queue()->enqueue(byte);
} else {
MutexLockerEx x(Shared_DirtyCardQ_lock,
Mutex::_no_safepoint_check_flag);
for (; byte <= last_byte; byte++) {
if (*byte == g1_young_gen) {
continue;
}
if (*byte != dirty_card) {
*byte = dirty_card;
_dcqs.shared_dirty_card_queue()->enqueue(byte);
}
}
byte++;
}
}
}
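Taken together with the assembler hunks earlier in this commit, these changes make the G1 post-write-barrier filter on a new "young" card value: stores into young regions never need to be remembered, so the barrier can return before the StoreLoad fence and the dirty-card enqueue. The following is a compressed standalone sketch of that slow-path logic, not the HotSpot code itself; the enum values and the enqueue callback are invented for illustration.

```cpp
#include <atomic>

// Invented card values; the real ones come from CardTableModRefBS and the
// g1_young_gen enum added in this commit.
enum CardValue : signed char { clean_card = -1, dirty_card = 0, g1_young_card = 32 };

// 'card' is the card-table byte covering the written field; 'enqueue' hands a
// newly dirtied card to the concurrent-refinement queue.
template <typename EnqueueFn>
void post_barrier_sketch(volatile signed char* card, EnqueueFn enqueue) {
  if (*card == g1_young_card) {
    return;  // writes into young regions are never tracked
  }
  // Stands in for the StoreLoad barrier (OrderAccess::storeload()) that orders
  // the reference store against the re-read of the card.
  std::atomic_thread_fence(std::memory_order_seq_cst);
  if (*card != dirty_card) {
    *card = dirty_card;  // dirty the card once...
    enqueue(card);       // ...and log it for refinement
  }
}
```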
@@ -38,7 +38,14 @@ class DirtyCardQueueSet;
// snapshot-at-the-beginning marking.
class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
protected:
enum G1CardValues {
g1_young_gen = CT_MR_BS_last_reserved << 1
};
public:
static int g1_young_card_val() { return g1_young_gen; }
// Add "pre_val" to a set of objects that may have been disconnected from the
// pre-marking object graph.
static void enqueue(oop pre_val);
@@ -118,6 +125,9 @@ public:
_byte_map[card_index] = val;
}
void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
void g1_mark_as_young(const MemRegion& mr);
bool mark_card_deferred(size_t card_index);
bool is_card_deferred(size_t card_index) {
@@ -80,6 +80,10 @@ public:
void reset() { if (_buf != NULL) _index = _sz; }
void enqueue(volatile void* ptr) {
enqueue((void*)(ptr));
}
// Enqueues the given "obj".
void enqueue(void* ptr) {
if (!_active) return;
@@ -214,9 +214,6 @@ class VM_CollectForMetadataAllocation: public VM_GC_Operation {
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
_loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
}
~VM_CollectForMetadataAllocation() {
MetaspaceGC::set_expand_after_GC(false);
}
virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
virtual void doit();
MetaWord* result() const { return _result; }
@@ -202,12 +202,6 @@ void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
ShouldNotReachHere(); // Unexpected use of this function
}
}
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(
ClassLoaderData* loader_data,
size_t size, Metaspace::MetadataType mdtype) {
return collector_policy()->satisfy_failed_metadata_allocation(loader_data, size, mdtype);
}
void CollectedHeap::pre_initialize() {
// Used for ReduceInitialCardMarks (when COMPILER2 is used);
@@ -475,11 +475,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// the context of the vm thread.
virtual void collect_as_vm_thread(GCCause::Cause cause);
// Callback from VM_CollectForMetadataAllocation operation.
MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
size_t size,
Metaspace::MetadataType mdtype);
// Returns the barrier set for this heap
BarrierSet* barrier_set() { return _barrier_set; }
@ -47,85 +47,53 @@
|
||||
|
||||
// CollectorPolicy methods.
|
||||
|
||||
// Align down. If the aligning result in 0, return 'alignment'.
|
||||
static size_t restricted_align_down(size_t size, size_t alignment) {
|
||||
return MAX2(alignment, align_size_down_(size, alignment));
|
||||
}
|
||||
|
||||
void CollectorPolicy::initialize_flags() {
|
||||
assert(max_alignment() >= min_alignment(),
|
||||
err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
|
||||
max_alignment(), min_alignment()));
|
||||
assert(max_alignment() % min_alignment() == 0,
|
||||
err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
|
||||
max_alignment(), min_alignment()));
|
||||
assert(_max_alignment >= _min_alignment,
|
||||
err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
|
||||
_max_alignment, _min_alignment));
|
||||
assert(_max_alignment % _min_alignment == 0,
|
||||
err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
|
||||
_max_alignment, _min_alignment));
|
||||
|
||||
if (MaxHeapSize < InitialHeapSize) {
|
||||
vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
|
||||
}
|
||||
|
||||
// Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
|
||||
// override if MaxMetaspaceSize was set on the command line or not.
|
||||
// This information is needed later to conform to the specification of the
|
||||
// java.lang.management.MemoryUsage API.
|
||||
//
|
||||
// Ideally, we would be able to set the default value of MaxMetaspaceSize in
|
||||
// globals.hpp to the aligned value, but this is not possible, since the
|
||||
// alignment depends on other flags being parsed.
|
||||
MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, max_alignment());
|
||||
|
||||
if (MetaspaceSize > MaxMetaspaceSize) {
|
||||
MetaspaceSize = MaxMetaspaceSize;
|
||||
}
|
||||
|
||||
MetaspaceSize = restricted_align_down(MetaspaceSize, min_alignment());
|
||||
|
||||
assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");
|
||||
|
||||
MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment());
|
||||
MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment());
|
||||
|
||||
MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());
|
||||
|
||||
assert(MetaspaceSize % min_alignment() == 0, "metapace alignment");
|
||||
assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment");
|
||||
if (MetaspaceSize < 256*K) {
|
||||
vm_exit_during_initialization("Too small initial Metaspace size");
|
||||
}
|
||||
MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, _min_alignment);
|
||||
}
|
||||
|
||||
void CollectorPolicy::initialize_size_info() {
|
||||
// User inputs from -mx and ms must be aligned
|
||||
set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment()));
|
||||
set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment()));
|
||||
set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));
|
||||
_min_heap_byte_size = align_size_up(Arguments::min_heap_size(), _min_alignment);
|
||||
_initial_heap_byte_size = align_size_up(InitialHeapSize, _min_alignment);
|
||||
_max_heap_byte_size = align_size_up(MaxHeapSize, _max_alignment);
|
||||
|
||||
// Check heap parameter properties
|
||||
if (initial_heap_byte_size() < M) {
|
||||
if (_initial_heap_byte_size < M) {
|
||||
vm_exit_during_initialization("Too small initial heap");
|
||||
}
|
||||
// Check heap parameter properties
|
||||
if (min_heap_byte_size() < M) {
|
||||
if (_min_heap_byte_size < M) {
|
||||
vm_exit_during_initialization("Too small minimum heap");
|
||||
}
|
||||
if (initial_heap_byte_size() <= NewSize) {
|
||||
if (_initial_heap_byte_size <= NewSize) {
|
||||
// make sure there is at least some room in old space
|
||||
vm_exit_during_initialization("Too small initial heap for new size specified");
|
||||
}
|
||||
if (max_heap_byte_size() < min_heap_byte_size()) {
|
||||
if (_max_heap_byte_size < _min_heap_byte_size) {
|
||||
vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
|
||||
}
|
||||
if (initial_heap_byte_size() < min_heap_byte_size()) {
|
||||
if (_initial_heap_byte_size < _min_heap_byte_size) {
|
||||
vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
|
||||
}
|
||||
if (max_heap_byte_size() < initial_heap_byte_size()) {
|
||||
if (_max_heap_byte_size < _initial_heap_byte_size) {
|
||||
vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
|
||||
}
|
||||
|
||||
if (PrintGCDetails && Verbose) {
|
||||
gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
|
||||
SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
|
||||
min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
|
||||
_min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
|
||||
}
|
||||
}
|
||||
|
||||
@ -180,15 +148,15 @@ size_t CollectorPolicy::compute_max_alignment() {
|
||||
|
||||
size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  size_t x = base_size / (NewRatio+1);
  size_t new_gen_size = x > min_alignment() ?
    align_size_down(x, min_alignment()) :
    min_alignment();
  size_t new_gen_size = x > _min_alignment ?
    align_size_down(x, _min_alignment) :
    _min_alignment;
  return new_gen_size;
}
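As shown above, scale_by_NewRatio_aligned keeps one of NewRatio + 1 parts of the given heap size for the young generation and aligns it down, never going below the minimum alignment. A hedged numeric sketch with example values only; the 512 MB heap, ratio 2 and 64 KB alignment are assumptions for illustration.

public class NewRatioSketch {
  static long alignDown(long size, long alignment) { return size & ~(alignment - 1); }

  // One part out of (newRatio + 1), aligned down, but never below minAlignment.
  static long scaleByNewRatio(long baseSize, int newRatio, long minAlignment) {
    long x = baseSize / (newRatio + 1);
    return x > minAlignment ? alignDown(x, minAlignment) : minAlignment;
  }

  public static void main(String[] args) {
    long heap = 512L * 1024 * 1024;                          // 512 MB heap, illustrative
    System.out.println(scaleByNewRatio(heap, 2, 64 * 1024)); // 178913280 bytes, roughly a third
  }
}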
|
||||
|
||||
size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
|
||||
size_t maximum_size) {
|
||||
size_t alignment = min_alignment();
|
||||
size_t alignment = _min_alignment;
|
||||
size_t max_minus = maximum_size - alignment;
|
||||
return desired_size < max_minus ? desired_size : max_minus;
|
||||
}
|
||||
@ -207,8 +175,8 @@ void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
|
||||
|
||||
void GenCollectorPolicy::initialize_flags() {
|
||||
// All sizes must be multiples of the generation granularity.
|
||||
set_min_alignment((uintx) Generation::GenGrain);
|
||||
set_max_alignment(compute_max_alignment());
|
||||
_min_alignment = (uintx) Generation::GenGrain;
|
||||
_max_alignment = compute_max_alignment();
|
||||
|
||||
CollectorPolicy::initialize_flags();
|
||||
|
||||
@ -218,26 +186,26 @@ void GenCollectorPolicy::initialize_flags() {
|
||||
if (NewSize > MaxNewSize) {
|
||||
MaxNewSize = NewSize;
|
||||
}
|
||||
NewSize = align_size_down(NewSize, min_alignment());
|
||||
MaxNewSize = align_size_down(MaxNewSize, min_alignment());
|
||||
NewSize = align_size_down(NewSize, _min_alignment);
|
||||
MaxNewSize = align_size_down(MaxNewSize, _min_alignment);
|
||||
|
||||
// Check validity of heap flags
|
||||
assert(NewSize % min_alignment() == 0, "eden space alignment");
|
||||
assert(MaxNewSize % min_alignment() == 0, "survivor space alignment");
|
||||
assert(NewSize % _min_alignment == 0, "eden space alignment");
|
||||
assert(MaxNewSize % _min_alignment == 0, "survivor space alignment");
|
||||
|
||||
if (NewSize < 3*min_alignment()) {
|
||||
if (NewSize < 3 * _min_alignment) {
|
||||
// make sure there is room for eden and two survivor spaces
|
||||
vm_exit_during_initialization("Too small new size specified");
|
||||
}
|
||||
if (SurvivorRatio < 1 || NewRatio < 1) {
|
||||
vm_exit_during_initialization("Invalid heap ratio specified");
|
||||
vm_exit_during_initialization("Invalid young gen ratio specified");
|
||||
}
|
||||
}
|
||||
|
||||
void TwoGenerationCollectorPolicy::initialize_flags() {
|
||||
GenCollectorPolicy::initialize_flags();
|
||||
|
||||
OldSize = align_size_down(OldSize, min_alignment());
|
||||
OldSize = align_size_down(OldSize, _min_alignment);
|
||||
|
||||
if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
|
||||
// NewRatio will be used later to set the young generation size so we use
|
||||
@ -246,11 +214,11 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
|
||||
assert(NewRatio > 0, "NewRatio should have been set up earlier");
|
||||
size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);
|
||||
|
||||
calculated_heapsize = align_size_up(calculated_heapsize, max_alignment());
|
||||
calculated_heapsize = align_size_up(calculated_heapsize, _max_alignment);
|
||||
MaxHeapSize = calculated_heapsize;
|
||||
InitialHeapSize = calculated_heapsize;
|
||||
}
|
||||
MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
|
||||
MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
|
||||
|
||||
// adjust max heap size if necessary
|
||||
if (NewSize + OldSize > MaxHeapSize) {
|
||||
@ -260,18 +228,18 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
|
||||
uintx calculated_size = NewSize + OldSize;
|
||||
double shrink_factor = (double) MaxHeapSize / calculated_size;
|
||||
// align
|
||||
NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
|
||||
NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
|
||||
// OldSize is already aligned because above we aligned MaxHeapSize to
|
||||
// max_alignment(), and we just made sure that NewSize is aligned to
|
||||
// min_alignment(). In initialize_flags() we verified that max_alignment()
|
||||
// is a multiple of min_alignment().
|
||||
// _max_alignment, and we just made sure that NewSize is aligned to
|
||||
// _min_alignment. In initialize_flags() we verified that _max_alignment
|
||||
// is a multiple of _min_alignment.
|
||||
OldSize = MaxHeapSize - NewSize;
|
||||
} else {
|
||||
MaxHeapSize = NewSize + OldSize;
|
||||
}
|
||||
}
|
||||
// need to do this again
|
||||
MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
|
||||
MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
|
||||
|
||||
// adjust max heap size if necessary
|
||||
if (NewSize + OldSize > MaxHeapSize) {
|
||||
@ -281,24 +249,24 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
|
||||
uintx calculated_size = NewSize + OldSize;
|
||||
double shrink_factor = (double) MaxHeapSize / calculated_size;
|
||||
// align
|
||||
NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
|
||||
NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
|
||||
// OldSize is already aligned because above we aligned MaxHeapSize to
|
||||
// max_alignment(), and we just made sure that NewSize is aligned to
|
||||
// min_alignment(). In initialize_flags() we verified that max_alignment()
|
||||
// is a multiple of min_alignment().
|
||||
// _max_alignment, and we just made sure that NewSize is aligned to
|
||||
// _min_alignment. In initialize_flags() we verified that _max_alignment
|
||||
// is a multiple of _min_alignment.
|
||||
OldSize = MaxHeapSize - NewSize;
|
||||
} else {
|
||||
MaxHeapSize = NewSize + OldSize;
|
||||
}
|
||||
}
|
||||
// need to do this again
|
||||
MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
|
||||
MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
|
||||
|
||||
always_do_update_barrier = UseConcMarkSweepGC;
|
||||
|
||||
// Check validity of heap flags
|
||||
assert(OldSize % min_alignment() == 0, "old space alignment");
|
||||
assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
|
||||
assert(OldSize % _min_alignment == 0, "old space alignment");
|
||||
assert(MaxHeapSize % _max_alignment == 0, "maximum heap alignment");
|
||||
}
|
||||
|
||||
// Values set on the command line win over any ergonomically
|
||||
@ -313,7 +281,7 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
|
||||
void GenCollectorPolicy::initialize_size_info() {
|
||||
CollectorPolicy::initialize_size_info();
|
||||
|
||||
// min_alignment() is used for alignment within a generation.
|
||||
// _min_alignment is used for alignment within a generation.
|
||||
// There is additional alignment done down stream for some
|
||||
// collectors that sometimes causes unwanted rounding up of
|
||||
// generations sizes.
|
||||
@ -322,18 +290,18 @@ void GenCollectorPolicy::initialize_size_info() {
|
||||
|
||||
size_t max_new_size = 0;
|
||||
if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
|
||||
if (MaxNewSize < min_alignment()) {
|
||||
max_new_size = min_alignment();
|
||||
if (MaxNewSize < _min_alignment) {
|
||||
max_new_size = _min_alignment;
|
||||
}
|
||||
if (MaxNewSize >= max_heap_byte_size()) {
|
||||
max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
|
||||
min_alignment());
|
||||
if (MaxNewSize >= _max_heap_byte_size) {
|
||||
max_new_size = align_size_down(_max_heap_byte_size - _min_alignment,
|
||||
_min_alignment);
|
||||
warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
|
||||
"greater than the entire heap (" SIZE_FORMAT "k). A "
|
||||
"new generation size of " SIZE_FORMAT "k will be used.",
|
||||
MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
|
||||
MaxNewSize/K, _max_heap_byte_size/K, max_new_size/K);
|
||||
} else {
|
||||
max_new_size = align_size_down(MaxNewSize, min_alignment());
|
||||
max_new_size = align_size_down(MaxNewSize, _min_alignment);
|
||||
}
|
||||
|
||||
// The case for FLAG_IS_ERGO(MaxNewSize) could be treated
|
||||
@ -351,7 +319,7 @@ void GenCollectorPolicy::initialize_size_info() {
|
||||
// just accept those choices. The choices currently made are
|
||||
// not always "wise".
|
||||
} else {
|
||||
max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
|
||||
max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size);
|
||||
// Bound the maximum size by NewSize below (since it historically
|
||||
// would have been NewSize and because the NewRatio calculation could
|
||||
// yield a size that is too small) and bound it by MaxNewSize above.
|
||||
@ -364,13 +332,13 @@ void GenCollectorPolicy::initialize_size_info() {
|
||||
// Given the maximum gen0 size, determine the initial and
|
||||
// minimum gen0 sizes.
|
||||
|
||||
if (max_heap_byte_size() == min_heap_byte_size()) {
|
||||
if (_max_heap_byte_size == _min_heap_byte_size) {
|
||||
// The maximum and minimum heap sizes are the same so
|
||||
// the generations minimum and initial must be the
|
||||
// same as its maximum.
|
||||
set_min_gen0_size(max_new_size);
|
||||
set_initial_gen0_size(max_new_size);
|
||||
set_max_gen0_size(max_new_size);
|
||||
_min_gen0_size = max_new_size;
|
||||
_initial_gen0_size = max_new_size;
|
||||
_max_gen0_size = max_new_size;
|
||||
} else {
|
||||
size_t desired_new_size = 0;
|
||||
if (!FLAG_IS_DEFAULT(NewSize)) {
|
||||
@ -391,43 +359,37 @@ void GenCollectorPolicy::initialize_size_info() {
|
||||
// Use the default NewSize as the floor for these values. If
|
||||
// NewRatio is overly large, the resulting sizes can be too
|
||||
// small.
|
||||
_min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
|
||||
NewSize);
|
||||
_min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
|
||||
desired_new_size =
|
||||
MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
|
||||
NewSize);
|
||||
MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
|
||||
}
|
||||
|
||||
assert(_min_gen0_size > 0, "Sanity check");
|
||||
set_initial_gen0_size(desired_new_size);
|
||||
set_max_gen0_size(max_new_size);
|
||||
_initial_gen0_size = desired_new_size;
|
||||
_max_gen0_size = max_new_size;
|
||||
|
||||
// At this point the desirable initial and minimum sizes have been
|
||||
// determined without regard to the maximum sizes.
|
||||
|
||||
// Bound the sizes by the corresponding overall heap sizes.
|
||||
set_min_gen0_size(
|
||||
bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
|
||||
set_initial_gen0_size(
|
||||
bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
|
||||
set_max_gen0_size(
|
||||
bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));
|
||||
_min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size);
|
||||
_initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size);
|
||||
_max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);
|
||||
|
||||
// At this point all three sizes have been checked against the
|
||||
// maximum sizes but have not been checked for consistency
|
||||
// among the three.
|
||||
|
||||
// Final check min <= initial <= max
|
||||
set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
|
||||
set_initial_gen0_size(
|
||||
MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
|
||||
set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
|
||||
_min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
|
||||
_initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size);
|
||||
_min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
|
||||
}
|
||||
|
||||
if (PrintGCDetails && Verbose) {
|
||||
gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
|
||||
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
|
||||
min_gen0_size(), initial_gen0_size(), max_gen0_size());
|
||||
_min_gen0_size, _initial_gen0_size, _max_gen0_size);
|
||||
}
|
||||
}
|
||||
|
||||
@ -447,19 +409,17 @@ bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
|
||||
|
||||
if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
|
||||
if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
|
||||
(heap_size >= min_gen1_size + min_alignment())) {
|
||||
(heap_size >= min_gen1_size + _min_alignment)) {
|
||||
// Adjust gen0 down to accommodate min_gen1_size
|
||||
*gen0_size_ptr = heap_size - min_gen1_size;
|
||||
*gen0_size_ptr =
|
||||
MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
|
||||
min_alignment());
|
||||
MAX2((uintx)align_size_down(*gen0_size_ptr, _min_alignment), _min_alignment);
|
||||
assert(*gen0_size_ptr > 0, "Min gen0 is too large");
|
||||
result = true;
|
||||
} else {
|
||||
*gen1_size_ptr = heap_size - *gen0_size_ptr;
|
||||
*gen1_size_ptr =
|
||||
MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
|
||||
min_alignment());
|
||||
MAX2((uintx)align_size_down(*gen1_size_ptr, _min_alignment), _min_alignment);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
@ -480,10 +440,9 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
|
||||
// The maximum gen1 size can be determined from the maximum gen0
|
||||
// and maximum heap size since no explicit flags exits
|
||||
// for setting the gen1 maximum.
|
||||
_max_gen1_size = max_heap_byte_size() - _max_gen0_size;
|
||||
_max_gen1_size = _max_heap_byte_size - _max_gen0_size;
|
||||
_max_gen1_size =
|
||||
MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
|
||||
min_alignment());
|
||||
MAX2((uintx)align_size_down(_max_gen1_size, _min_alignment), _min_alignment);
|
||||
// If no explicit command line flag has been set for the
|
||||
// gen1 size, use what is left for gen1.
|
||||
if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
|
||||
@ -492,70 +451,66 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
|
||||
// with the overall heap size). In either case make
|
||||
// the minimum, maximum and initial sizes consistent
|
||||
// with the gen0 sizes and the overall heap sizes.
|
||||
assert(min_heap_byte_size() > _min_gen0_size,
|
||||
assert(_min_heap_byte_size > _min_gen0_size,
|
||||
"gen0 has an unexpected minimum size");
|
||||
set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
|
||||
set_min_gen1_size(
|
||||
MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
|
||||
min_alignment()));
|
||||
set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
|
||||
set_initial_gen1_size(
|
||||
MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
|
||||
min_alignment()));
|
||||
|
||||
_min_gen1_size = _min_heap_byte_size - _min_gen0_size;
|
||||
_min_gen1_size =
|
||||
MAX2((uintx)align_size_down(_min_gen1_size, _min_alignment), _min_alignment);
|
||||
_initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size;
|
||||
_initial_gen1_size =
|
||||
MAX2((uintx)align_size_down(_initial_gen1_size, _min_alignment), _min_alignment);
|
||||
} else {
|
||||
// It's been explicitly set on the command line. Use the
|
||||
// OldSize and then determine the consequences.
|
||||
set_min_gen1_size(OldSize);
|
||||
set_initial_gen1_size(OldSize);
|
||||
_min_gen1_size = OldSize;
|
||||
_initial_gen1_size = OldSize;
|
||||
|
||||
// If the user has explicitly set an OldSize that is inconsistent
|
||||
// with other command line flags, issue a warning.
|
||||
// The generation minimums and the overall heap minimum should
|
||||
// be within one heap alignment.
|
||||
if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
|
||||
min_heap_byte_size()) {
|
||||
if ((_min_gen1_size + _min_gen0_size + _min_alignment) < _min_heap_byte_size) {
|
||||
warning("Inconsistency between minimum heap size and minimum "
|
||||
"generation sizes: using minimum heap = " SIZE_FORMAT,
|
||||
min_heap_byte_size());
|
||||
"generation sizes: using minimum heap = " SIZE_FORMAT,
|
||||
_min_heap_byte_size);
|
||||
}
|
||||
if ((OldSize > _max_gen1_size)) {
|
||||
warning("Inconsistency between maximum heap size and maximum "
|
||||
"generation sizes: using maximum heap = " SIZE_FORMAT
|
||||
" -XX:OldSize flag is being ignored",
|
||||
max_heap_byte_size());
|
||||
"generation sizes: using maximum heap = " SIZE_FORMAT
|
||||
" -XX:OldSize flag is being ignored",
|
||||
_max_heap_byte_size);
|
||||
}
|
||||
// If there is an inconsistency between the OldSize and the minimum and/or
|
||||
// initial size of gen0, since OldSize was explicitly set, OldSize wins.
|
||||
if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
|
||||
min_heap_byte_size(), OldSize)) {
|
||||
_min_heap_byte_size, OldSize)) {
|
||||
if (PrintGCDetails && Verbose) {
|
||||
gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
|
||||
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
|
||||
min_gen0_size(), initial_gen0_size(), max_gen0_size());
|
||||
_min_gen0_size, _initial_gen0_size, _max_gen0_size);
|
||||
}
|
||||
}
|
||||
// Initial size
|
||||
if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
|
||||
initial_heap_byte_size(), OldSize)) {
|
||||
_initial_heap_byte_size, OldSize)) {
|
||||
if (PrintGCDetails && Verbose) {
|
||||
gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
|
||||
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
|
||||
min_gen0_size(), initial_gen0_size(), max_gen0_size());
|
||||
_min_gen0_size, _initial_gen0_size, _max_gen0_size);
|
||||
}
|
||||
}
|
||||
}
|
||||
// Enforce the maximum gen1 size.
|
||||
set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));
|
||||
_min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);
|
||||
|
||||
// Check that min gen1 <= initial gen1 <= max gen1
|
||||
set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
|
||||
set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));
|
||||
_initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
|
||||
_initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);
|
||||
|
||||
if (PrintGCDetails && Verbose) {
|
||||
gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 "
|
||||
SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT,
|
||||
min_gen1_size(), initial_gen1_size(), max_gen1_size());
|
||||
_min_gen1_size, _initial_gen1_size, _max_gen1_size);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -101,17 +101,12 @@ class CollectorPolicy : public CHeapObj<mtGC> {
|
||||
// Return maximum heap alignment that may be imposed by the policy
|
||||
static size_t compute_max_alignment();
|
||||
|
||||
void set_min_alignment(size_t align) { _min_alignment = align; }
|
||||
size_t min_alignment() { return _min_alignment; }
|
||||
void set_max_alignment(size_t align) { _max_alignment = align; }
|
||||
size_t max_alignment() { return _max_alignment; }
|
||||
|
||||
size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
|
||||
void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; }
|
||||
size_t max_heap_byte_size() { return _max_heap_byte_size; }
|
||||
void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; }
|
||||
size_t min_heap_byte_size() { return _min_heap_byte_size; }
|
||||
void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; }
|
||||
|
||||
enum Name {
|
||||
CollectorPolicyKind,
|
||||
@ -248,12 +243,9 @@ class GenCollectorPolicy : public CollectorPolicy {
|
||||
|
||||
public:
|
||||
// Accessors
|
||||
size_t min_gen0_size() { return _min_gen0_size; }
|
||||
void set_min_gen0_size(size_t v) { _min_gen0_size = v; }
|
||||
size_t min_gen0_size() { return _min_gen0_size; }
|
||||
size_t initial_gen0_size() { return _initial_gen0_size; }
|
||||
void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; }
|
||||
size_t max_gen0_size() { return _max_gen0_size; }
|
||||
void set_max_gen0_size(size_t v) { _max_gen0_size = v; }
|
||||
size_t max_gen0_size() { return _max_gen0_size; }
|
||||
|
||||
virtual int number_of_generations() = 0;
|
||||
|
||||
@ -302,12 +294,9 @@ class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
|
||||
|
||||
public:
|
||||
// Accessors
|
||||
size_t min_gen1_size() { return _min_gen1_size; }
|
||||
void set_min_gen1_size(size_t v) { _min_gen1_size = v; }
|
||||
size_t min_gen1_size() { return _min_gen1_size; }
|
||||
size_t initial_gen1_size() { return _initial_gen1_size; }
|
||||
void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; }
|
||||
size_t max_gen1_size() { return _max_gen1_size; }
|
||||
void set_max_gen1_size(size_t v) { _max_gen1_size = v; }
|
||||
size_t max_gen1_size() { return _max_gen1_size; }
|
||||
|
||||
// Inherited methods
|
||||
TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
|
||||
|
@ -26,6 +26,7 @@
|
||||
#define SHARE_VM_MEMORY_FILEMAP_HPP
|
||||
|
||||
#include "memory/metaspaceShared.hpp"
|
||||
#include "memory/metaspace.hpp"
|
||||
|
||||
// Layout of the file:
|
||||
// header: dump of archive instance plus versioning info, datestamp, etc.
|
||||
|
File diff suppressed because it is too large
@ -87,9 +87,10 @@ class Metaspace : public CHeapObj<mtClass> {
|
||||
friend class MetaspaceAux;
|
||||
|
||||
public:
|
||||
enum MetadataType {ClassType = 0,
|
||||
NonClassType = ClassType + 1,
|
||||
MetadataTypeCount = ClassType + 2
|
||||
enum MetadataType {
|
||||
ClassType,
|
||||
NonClassType,
|
||||
MetadataTypeCount
|
||||
};
|
||||
enum MetaspaceType {
|
||||
StandardMetaspaceType,
|
||||
@ -103,6 +104,9 @@ class Metaspace : public CHeapObj<mtClass> {
|
||||
private:
|
||||
void initialize(Mutex* lock, MetaspaceType type);
|
||||
|
||||
// Get the first chunk for a Metaspace. Used for
|
||||
// special cases such as the boot class loader, reflection
|
||||
// class loader and anonymous class loader.
|
||||
Metachunk* get_initialization_chunk(MetadataType mdtype,
|
||||
size_t chunk_word_size,
|
||||
size_t chunk_bunch);
|
||||
@ -123,6 +127,9 @@ class Metaspace : public CHeapObj<mtClass> {
|
||||
static size_t _first_chunk_word_size;
|
||||
static size_t _first_class_chunk_word_size;
|
||||
|
||||
static size_t _commit_alignment;
|
||||
static size_t _reserve_alignment;
|
||||
|
||||
SpaceManager* _vsm;
|
||||
SpaceManager* vsm() const { return _vsm; }
|
||||
|
||||
@ -191,12 +198,17 @@ class Metaspace : public CHeapObj<mtClass> {
|
||||
Metaspace(Mutex* lock, MetaspaceType type);
|
||||
~Metaspace();
|
||||
|
||||
// Initialize globals for Metaspace
|
||||
static void ergo_initialize();
|
||||
static void global_initialize();
|
||||
|
||||
static size_t first_chunk_word_size() { return _first_chunk_word_size; }
|
||||
static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
|
||||
|
||||
  static size_t reserve_alignment()       { return _reserve_alignment; }
  static size_t reserve_alignment_words() { return _reserve_alignment / BytesPerWord; }
  static size_t commit_alignment()        { return _commit_alignment; }
  static size_t commit_alignment_words()  { return _commit_alignment / BytesPerWord; }
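The accessors just added expose each alignment both in bytes and in words via a BytesPerWord division. A tiny hedged illustration of that conversion; the 8-byte word size and the 2 MB value are assumptions for a 64-bit platform, not taken from this patch.

public class AlignmentWordsSketch {
  static final int BYTES_PER_WORD = 8;               // assumed 64-bit word size

  public static void main(String[] args) {
    long reserveAlignmentBytes = 2L * 1024 * 1024;   // assumed 2 MB reservation granularity
    long reserveAlignmentWords = reserveAlignmentBytes / BYTES_PER_WORD;
    System.out.println(reserveAlignmentWords);       // 262144 words
  }
}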
|
||||
|
||||
char* bottom() const;
|
||||
size_t used_words_slow(MetadataType mdtype) const;
|
||||
size_t free_words_slow(MetadataType mdtype) const;
|
||||
@ -219,6 +231,9 @@ class Metaspace : public CHeapObj<mtClass> {
|
||||
static void purge(MetadataType mdtype);
|
||||
static void purge();
|
||||
|
||||
static void report_metadata_oome(ClassLoaderData* loader_data, size_t word_size,
|
||||
MetadataType mdtype, TRAPS);
|
||||
|
||||
void print_on(outputStream* st) const;
|
||||
// Debugging support
|
||||
void verify();
|
||||
@ -352,17 +367,10 @@ class MetaspaceAux : AllStatic {
|
||||
|
||||
class MetaspaceGC : AllStatic {

  // The current high-water-mark for inducing a GC. When
  // the capacity of all space in the virtual lists reaches this value,
  // a GC is induced and the value is increased. This should be changed
  // to the space actually used for allocations to avoid affects of
  // fragmentation losses to partially used chunks. Size is in words.
  static size_t _capacity_until_GC;

  // After a GC is done any allocation that fails should try to expand
  // the capacity of the Metaspaces. This flag is set during attempts
  // to allocate in the VMGCOperation that does the GC.
  static bool _expand_after_GC;
  // The current high-water-mark for inducing a GC.
  // When committed memory of all metaspaces reaches this value,
  // a GC is induced and the value is increased. Size is in bytes.
  static volatile intptr_t _capacity_until_GC;
|
||||
|
||||
// For a CMS collection, signal that a concurrent collection should
|
||||
// be started.
|
||||
@ -370,20 +378,16 @@ class MetaspaceGC : AllStatic {
|
||||
|
||||
static uint _shrink_factor;
|
||||
|
||||
static void set_capacity_until_GC(size_t v) { _capacity_until_GC = v; }
|
||||
|
||||
static size_t shrink_factor() { return _shrink_factor; }
|
||||
void set_shrink_factor(uint v) { _shrink_factor = v; }
|
||||
|
||||
public:
|
||||
|
||||
static size_t capacity_until_GC() { return _capacity_until_GC; }
|
||||
static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; }
|
||||
static void dec_capacity_until_GC(size_t v) {
|
||||
_capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0;
|
||||
}
|
||||
static bool expand_after_GC() { return _expand_after_GC; }
|
||||
static void set_expand_after_GC(bool v) { _expand_after_GC = v; }
|
||||
static void initialize() { _capacity_until_GC = MetaspaceSize; }
|
||||
|
||||
  static size_t capacity_until_GC();
  static size_t inc_capacity_until_GC(size_t v);
  static size_t dec_capacity_until_GC(size_t v);
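Per the reworked comment above, _capacity_until_GC is now a committed-bytes high-water mark: when committed metaspace reaches it a GC is induced and the mark is raised (or later lowered). A rough Java analogue of that bookkeeping, purely illustrative and not HotSpot code; the 21 MB starting value stands in for the default MetaspaceSize.

import java.util.concurrent.atomic.AtomicLong;

public class CapacityUntilGCSketch {
  // Committed-bytes threshold at which a metaspace GC would be induced.
  private final AtomicLong capacityUntilGC = new AtomicLong(21L * 1024 * 1024);

  boolean shouldInduceGC(long committedBytes) {
    return committedBytes >= capacityUntilGC.get();
  }
  long incCapacityUntilGC(long bytes) {   // analogous to inc_capacity_until_GC
    return capacityUntilGC.addAndGet(bytes);
  }
  long decCapacityUntilGC(long bytes) {   // clamps at zero, like the old dec_capacity_until_GC
    return capacityUntilGC.updateAndGet(cur -> Math.max(0, cur - bytes));
  }
}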
|
||||
|
||||
static bool should_concurrent_collect() { return _should_concurrent_collect; }
|
||||
static void set_should_concurrent_collect(bool v) {
|
||||
@ -391,11 +395,14 @@ class MetaspaceGC : AllStatic {
|
||||
}
|
||||
|
||||
// The amount to increase the high-water-mark (_capacity_until_GC)
|
||||
static size_t delta_capacity_until_GC(size_t word_size);
|
||||
static size_t delta_capacity_until_GC(size_t bytes);
|
||||
|
||||
// It is expected that this will be called when the current capacity
|
||||
// has been used and a GC should be considered.
|
||||
static bool should_expand(VirtualSpaceList* vsl, size_t word_size);
|
||||
// Tells if we have can expand metaspace without hitting set limits.
|
||||
static bool can_expand(size_t words, bool is_class);
|
||||
|
||||
// Returns amount that we can expand without hitting a GC,
|
||||
// measured in words.
|
||||
static size_t allowed_expansion();
|
||||
|
||||
// Calculate the new high-water mark at which to induce
|
||||
// a GC.
|
||||
|
@ -3713,7 +3713,8 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,
  Node* no_base = __ top();
  float likely = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);
  Node* zero = __ ConI(0);
  Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val());
  Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val());
  Node* zeroX = __ ConX(0);

  // Get the alias_index for raw card-mark memory
@ -3769,8 +3770,16 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,
      // load the original value of the card
      Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);

      __ if_then(card_val, BoolTest::ne, zero); {
        g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
      __ if_then(card_val, BoolTest::ne, young_card); {
        sync_kit(ideal);
        // Use Op_MemBarVolatile to achieve the effect of a StoreLoad barrier.
        insert_mem_bar(Op_MemBarVolatile, oop_store);
        __ sync_kit(this);

        Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
        __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
          g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
        } __ end_if();
      } __ end_if();
    } __ end_if();
  } __ end_if();
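The hunk above changes the G1 post-barrier so that cards already marked young are skipped, and a StoreLoad barrier plus a reload filters out cards that are already dirty before g1_mark_card runs. A simplified Java-flavoured sketch of that control flow; the card encodings and helper names are stand-ins, not the C2 IR or the real card values.

public class G1PostBarrierSketch {
  static final byte YOUNG_CARD = 2;   // illustrative encoding
  static final byte DIRTY_CARD = 0;   // illustrative encoding

  static void postBarrier(byte[] cardTable, int cardIndex) {
    byte card = cardTable[cardIndex];
    if (card != YOUNG_CARD) {
      // StoreLoad barrier, corresponding to the Op_MemBarVolatile inserted above.
      java.lang.invoke.VarHandle.fullFence();
      byte reloaded = cardTable[cardIndex];
      if (reloaded != DIRTY_CARD) {
        cardTable[cardIndex] = DIRTY_CARD;   // stand-in for g1_mark_card()
      }
    }
  }
}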
|
||||
|
@ -2657,16 +2657,16 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
|
||||
FLAG_SET_CMDLINE(bool, BackgroundCompilation, false);
|
||||
// -Xmn for compatibility with other JVM vendors
|
||||
} else if (match_option(option, "-Xmn", &tail)) {
|
||||
julong long_initial_eden_size = 0;
|
||||
ArgsRange errcode = parse_memory_size(tail, &long_initial_eden_size, 1);
|
||||
julong long_initial_young_size = 0;
|
||||
ArgsRange errcode = parse_memory_size(tail, &long_initial_young_size, 1);
|
||||
if (errcode != arg_in_range) {
|
||||
jio_fprintf(defaultStream::error_stream(),
|
||||
"Invalid initial eden size: %s\n", option->optionString);
|
||||
"Invalid initial young generation size: %s\n", option->optionString);
|
||||
describe_range_error(errcode);
|
||||
return JNI_EINVAL;
|
||||
}
|
||||
FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_eden_size);
|
||||
FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_eden_size);
|
||||
FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_young_size);
|
||||
FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_young_size);
|
||||
// -Xms
|
||||
} else if (match_option(option, "-Xms", &tail)) {
|
||||
julong long_initial_heap_size = 0;
|
||||
@ -3666,6 +3666,9 @@ jint Arguments::apply_ergo() {
|
||||
assert(verify_serial_gc_flags(), "SerialGC unset");
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// Initialize Metaspace flags and alignments.
|
||||
Metaspace::ergo_initialize();
|
||||
|
||||
// Set bytecode rewriting flags
|
||||
set_bytecode_flags();
|
||||
|
||||
|
File diff suppressed because it is too large
@ -368,8 +368,15 @@ VirtualSpace::VirtualSpace() {
|
||||
|
||||
|
||||
bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
|
||||
const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1);
|
||||
return initialize_with_granularity(rs, committed_size, max_commit_granularity);
|
||||
}
|
||||
|
||||
bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
|
||||
if(!rs.is_reserved()) return false; // allocation failed.
|
||||
assert(_low_boundary == NULL, "VirtualSpace already initialized");
|
||||
assert(max_commit_granularity > 0, "Granularity must be non-zero.");
|
||||
|
||||
_low_boundary = rs.base();
|
||||
_high_boundary = low_boundary() + rs.size();
|
||||
|
||||
@ -390,7 +397,7 @@ bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
|
||||
// No attempt is made to force large page alignment at the very top and
|
||||
// bottom of the space if they are not aligned so already.
|
||||
_lower_alignment = os::vm_page_size();
|
||||
_middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
|
||||
_middle_alignment = max_commit_granularity;
|
||||
_upper_alignment = os::vm_page_size();
|
||||
|
||||
// End of each region
|
||||
@ -966,17 +973,52 @@ void TestReservedSpace_test() {
|
||||
|
||||
|
||||
class TestVirtualSpace : AllStatic {
|
||||
enum TestLargePages {
|
||||
Default,
|
||||
Disable,
|
||||
Reserve,
|
||||
Commit
|
||||
};
|
||||
|
||||
static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
|
||||
switch(mode) {
|
||||
default:
|
||||
case Default:
|
||||
case Reserve:
|
||||
return ReservedSpace(reserve_size_aligned);
|
||||
case Disable:
|
||||
case Commit:
|
||||
return ReservedSpace(reserve_size_aligned,
|
||||
os::vm_allocation_granularity(),
|
||||
/* large */ false, /* exec */ false);
|
||||
}
|
||||
}
|
||||
|
||||
static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
|
||||
switch(mode) {
|
||||
default:
|
||||
case Default:
|
||||
case Reserve:
|
||||
return vs.initialize(rs, 0);
|
||||
case Disable:
|
||||
return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
|
||||
case Commit:
|
||||
return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), rs.size(), 1));
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size) {
|
||||
static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
|
||||
TestLargePages mode = Default) {
|
||||
size_t granularity = os::vm_allocation_granularity();
|
||||
size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
|
||||
|
||||
ReservedSpace reserved(reserve_size_aligned);
|
||||
ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
|
||||
|
||||
assert(reserved.is_reserved(), "Must be");
|
||||
|
||||
VirtualSpace vs;
|
||||
bool initialized = vs.initialize(reserved, 0);
|
||||
bool initialized = initialize_virtual_space(vs, reserved, mode);
|
||||
assert(initialized, "Failed to initialize VirtualSpace");
|
||||
|
||||
vs.expand_by(commit_size, false);
|
||||
@ -986,7 +1028,10 @@ class TestVirtualSpace : AllStatic {
|
||||
} else {
|
||||
assert_ge(vs.actual_committed_size(), commit_size);
|
||||
// Approximate the commit granularity.
|
||||
size_t commit_granularity = UseLargePages ? os::large_page_size() : os::vm_page_size();
|
||||
// Make sure that we don't commit using large pages
|
||||
// if large pages has been disabled for this VirtualSpace.
|
||||
size_t commit_granularity = (mode == Disable || !UseLargePages) ?
|
||||
os::vm_page_size() : os::large_page_size();
|
||||
assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
|
||||
}
|
||||
|
||||
@ -1042,9 +1087,40 @@ class TestVirtualSpace : AllStatic {
|
||||
test_virtual_space_actual_committed_space(10 * M, 10 * M);
|
||||
}
|
||||
|
||||
static void test_virtual_space_disable_large_pages() {
|
||||
if (!UseLargePages) {
|
||||
return;
|
||||
}
|
||||
// These test cases verify that if we force VirtualSpace to disable large pages
|
||||
test_virtual_space_actual_committed_space(10 * M, 0, Disable);
|
||||
test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
|
||||
test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
|
||||
test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
|
||||
test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
|
||||
test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
|
||||
test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
|
||||
|
||||
test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
|
||||
test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
|
||||
test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
|
||||
test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
|
||||
test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
|
||||
test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
|
||||
test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
|
||||
|
||||
test_virtual_space_actual_committed_space(10 * M, 0, Commit);
|
||||
test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
|
||||
test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
|
||||
test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
|
||||
test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
|
||||
test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
|
||||
test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
|
||||
}
|
||||
|
||||
static void test_virtual_space() {
|
||||
test_virtual_space_actual_committed_space();
|
||||
test_virtual_space_actual_committed_space_one_large_page();
|
||||
test_virtual_space_disable_large_pages();
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -178,6 +178,7 @@ class VirtualSpace VALUE_OBJ_CLASS_SPEC {
|
||||
public:
|
||||
// Initialization
|
||||
VirtualSpace();
|
||||
bool initialize_with_granularity(ReservedSpace rs, size_t committed_byte_size, size_t max_commit_ganularity);
|
||||
bool initialize(ReservedSpace rs, size_t committed_byte_size);
|
||||
|
||||
// Destruction
|
||||
|
@ -716,11 +716,17 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
|
||||
nonstatic_field(PlaceholderEntry, _loader_data, ClassLoaderData*) \
|
||||
\
|
||||
/**************************/ \
|
||||
/* ProctectionDomainEntry */ \
|
||||
/* ProtectionDomainEntry */ \
|
||||
/**************************/ \
|
||||
\
|
||||
nonstatic_field(ProtectionDomainEntry, _next, ProtectionDomainEntry*) \
|
||||
nonstatic_field(ProtectionDomainEntry, _protection_domain, oop) \
|
||||
nonstatic_field(ProtectionDomainEntry, _pd_cache, ProtectionDomainCacheEntry*) \
|
||||
\
|
||||
/*******************************/ \
|
||||
/* ProtectionDomainCacheEntry */ \
|
||||
/*******************************/ \
|
||||
\
|
||||
nonstatic_field(ProtectionDomainCacheEntry, _literal, oop) \
|
||||
\
|
||||
/*************************/ \
|
||||
/* LoaderConstraintEntry */ \
|
||||
@ -1563,6 +1569,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
|
||||
declare_toplevel_type(SystemDictionary) \
|
||||
declare_toplevel_type(vmSymbols) \
|
||||
declare_toplevel_type(ProtectionDomainEntry) \
|
||||
declare_toplevel_type(ProtectionDomainCacheEntry) \
|
||||
\
|
||||
declare_toplevel_type(GenericGrowableArray) \
|
||||
declare_toplevel_type(GrowableArray<int>) \
|
||||
|
@ -148,6 +148,12 @@ public:
|
||||
static void track_code_cache_memory_usage() {
|
||||
track_memory_pool_usage(_code_heap_pool);
|
||||
}
|
||||
static void track_metaspace_memory_usage() {
|
||||
track_memory_pool_usage(_metaspace_pool);
|
||||
}
|
||||
static void track_compressed_class_memory_usage() {
|
||||
track_memory_pool_usage(_compressed_class_pool);
|
||||
}
|
||||
static void track_memory_pool_usage(MemoryPool* pool);
|
||||
|
||||
static void gc_begin(bool fullGC, bool recordGCBeginTime,
|
||||
|
@ -326,12 +326,15 @@ typedef jlong s8;
|
||||
|
||||
const int max_method_code_size = 64*K - 1; // JVM spec, 2nd ed. section 4.8.1 (p.134)
|
||||
|
||||
// Default ProtectionDomainCacheSize values
|
||||
|
||||
const int defaultProtectionDomainCacheSize = NOT_LP64(137) LP64_ONLY(2017);
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
// Default and minimum StringTableSize values
|
||||
|
||||
const int defaultStringTableSize = NOT_LP64(1009) LP64_ONLY(60013);
|
||||
const int minimumStringTableSize=1009;
|
||||
const int minimumStringTableSize = 1009;
|
||||
|
||||
const int defaultSymbolTableSize = 20011;
|
||||
const int minimumSymbolTableSize = 1009;
|
||||
|
hotspot/test/runtime/memory/LargePages/TestLargePagesFlags.java (new file, 389 lines)
@ -0,0 +1,389 @@
|
||||
/*
|
||||
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
/* @test TestLargePagesFlags
 * @summary Tests how large pages are chosen depending on the given large pages flag combinations.
 * @library /testlibrary
 * @run main TestLargePagesFlags
 */
|
||||
|
||||
import com.oracle.java.testlibrary.OutputAnalyzer;
|
||||
import com.oracle.java.testlibrary.Platform;
|
||||
import com.oracle.java.testlibrary.ProcessTools;
|
||||
import java.util.ArrayList;
|
||||
|
||||
public class TestLargePagesFlags {
|
||||
|
||||
public static void main(String [] args) throws Exception {
|
||||
if (!Platform.isLinux()) {
|
||||
System.out.println("Skipping. TestLargePagesFlags has only been implemented for Linux.");
|
||||
return;
|
||||
}
|
||||
|
||||
testUseTransparentHugePages();
|
||||
testUseHugeTLBFS();
|
||||
testUseSHM();
|
||||
testCombinations();
|
||||
}
|
||||
|
||||
public static void testUseTransparentHugePages() throws Exception {
|
||||
if (!canUse(UseTransparentHugePages(true))) {
|
||||
System.out.println("Skipping testUseTransparentHugePages");
|
||||
return;
|
||||
}
|
||||
|
||||
// -XX:-UseLargePages overrides all other flags.
|
||||
new FlagTester()
|
||||
.use(UseLargePages(false),
|
||||
UseTransparentHugePages(true))
|
||||
.expect(
|
||||
UseLargePages(false),
|
||||
UseTransparentHugePages(false),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(false));
|
||||
|
||||
// Explicitly turn on UseTransparentHugePages.
|
||||
new FlagTester()
|
||||
.use(UseTransparentHugePages(true))
|
||||
.expect(
|
||||
UseLargePages(true),
|
||||
UseTransparentHugePages(true),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(false));
|
||||
|
||||
new FlagTester()
|
||||
.use(UseLargePages(true),
|
||||
UseTransparentHugePages(true))
|
||||
.expect(
|
||||
UseLargePages(true),
|
||||
UseTransparentHugePages(true),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(false));
|
||||
|
||||
// Setting a specific large pages flag will turn
|
||||
// off heuristics to choose large pages type.
|
||||
new FlagTester()
|
||||
.use(UseLargePages(true),
|
||||
UseTransparentHugePages(false))
|
||||
.expect(
|
||||
UseLargePages(false),
|
||||
UseTransparentHugePages(false),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(false));
|
||||
|
||||
// Don't turn on UseTransparentHugePages
|
||||
// unless the user explicitly asks for them.
|
||||
new FlagTester()
|
||||
.use(UseLargePages(true))
|
||||
.expect(
|
||||
UseTransparentHugePages(false));
|
||||
}
|
||||
|
||||
public static void testUseHugeTLBFS() throws Exception {
|
||||
if (!canUse(UseHugeTLBFS(true))) {
|
||||
System.out.println("Skipping testUseHugeTLBFS");
|
||||
return;
|
||||
}
|
||||
|
||||
// -XX:-UseLargePages overrides all other flags.
|
||||
new FlagTester()
|
||||
.use(UseLargePages(false),
|
||||
UseHugeTLBFS(true))
|
||||
.expect(
|
||||
UseLargePages(false),
|
||||
UseTransparentHugePages(false),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(false));
|
||||
|
||||
// Explicitly turn on UseHugeTLBFS.
|
||||
new FlagTester()
|
||||
.use(UseHugeTLBFS(true))
|
||||
.expect(
|
||||
UseLargePages(true),
|
||||
UseTransparentHugePages(false),
|
||||
UseHugeTLBFS(true),
|
||||
UseSHM(false));
|
||||
|
||||
new FlagTester()
|
||||
.use(UseLargePages(true),
|
||||
UseHugeTLBFS(true))
|
||||
.expect(
|
||||
UseLargePages(true),
|
||||
UseTransparentHugePages(false),
|
||||
UseHugeTLBFS(true),
|
||||
UseSHM(false));
|
||||
|
||||
// Setting a specific large pages flag will turn
|
||||
// off heuristics to choose large pages type.
|
||||
new FlagTester()
|
||||
.use(UseLargePages(true),
|
||||
UseHugeTLBFS(false))
|
||||
.expect(
|
||||
UseLargePages(false),
|
||||
UseTransparentHugePages(false),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(false));
|
||||
|
||||
// Using UseLargePages will default to UseHugeTLBFS large pages.
|
||||
new FlagTester()
|
||||
.use(UseLargePages(true))
|
||||
.expect(
|
||||
UseLargePages(true),
|
||||
UseTransparentHugePages(false),
|
||||
UseHugeTLBFS(true),
|
||||
UseSHM(false));
|
||||
}
|
||||
|
||||
public static void testUseSHM() throws Exception {
|
||||
if (!canUse(UseSHM(true))) {
|
||||
System.out.println("Skipping testUseSHM");
|
||||
return;
|
||||
}
|
||||
|
||||
// -XX:-UseLargePages overrides all other flags.
|
||||
new FlagTester()
|
||||
.use(UseLargePages(false),
|
||||
UseSHM(true))
|
||||
.expect(
|
||||
UseLargePages(false),
|
||||
UseTransparentHugePages(false),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(false));
|
||||
|
||||
// Explicitly turn on UseSHM.
|
||||
new FlagTester()
|
||||
.use(UseSHM(true))
|
||||
.expect(
|
||||
UseLargePages(true),
|
||||
UseTransparentHugePages(false),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(true)) ;
|
||||
|
||||
new FlagTester()
|
||||
.use(UseLargePages(true),
|
||||
UseSHM(true))
|
||||
.expect(
|
||||
UseLargePages(true),
|
||||
UseTransparentHugePages(false),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(true)) ;
|
||||
|
||||
// Setting a specific large pages flag will turn
|
||||
// off heuristics to choose large pages type.
|
||||
new FlagTester()
|
||||
.use(UseLargePages(true),
|
||||
UseSHM(false))
|
||||
.expect(
|
||||
UseLargePages(false),
|
||||
UseTransparentHugePages(false),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(false));
|
||||
|
||||
// Setting UseLargePages can allow the system to choose
|
||||
// UseHugeTLBFS instead of UseSHM, but never UseTransparentHugePages.
|
||||
new FlagTester()
|
||||
.use(UseLargePages(true))
|
||||
.expect(
|
||||
UseLargePages(true),
|
||||
UseTransparentHugePages(false));
|
||||
}
|
||||
|
||||
public static void testCombinations() throws Exception {
|
||||
if (!canUse(UseSHM(true)) || !canUse(UseHugeTLBFS(true))) {
|
||||
System.out.println("Skipping testUseHugeTLBFSAndUseSHMCombination");
|
||||
return;
|
||||
}
|
||||
|
||||
// UseHugeTLBFS takes precedence over SHM.
|
||||
|
||||
new FlagTester()
|
||||
.use(UseLargePages(true),
|
||||
UseHugeTLBFS(true),
|
||||
UseSHM(true))
|
||||
.expect(
|
||||
UseLargePages(true),
|
||||
UseTransparentHugePages(false),
|
||||
UseHugeTLBFS(true),
|
||||
UseSHM(false));
|
||||
|
||||
new FlagTester()
|
||||
.use(UseLargePages(true),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(true))
|
||||
.expect(
|
||||
UseLargePages(true),
|
||||
UseTransparentHugePages(false),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(true));
|
||||
|
||||
new FlagTester()
|
||||
.use(UseLargePages(true),
|
||||
UseHugeTLBFS(true),
|
||||
UseSHM(false))
|
||||
.expect(
|
||||
UseLargePages(true),
|
||||
UseTransparentHugePages(false),
|
||||
UseHugeTLBFS(true),
|
||||
UseSHM(false));
|
||||
|
||||
new FlagTester()
|
||||
.use(UseLargePages(true),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(false))
|
||||
.expect(
|
||||
UseLargePages(false),
|
||||
UseTransparentHugePages(false),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(false));
|
||||
|
||||
|
||||
if (!canUse(UseTransparentHugePages(true))) {
|
||||
return;
|
||||
}
|
||||
|
||||
// UseTransparentHugePages takes precedence.
|
||||
|
||||
new FlagTester()
|
||||
.use(UseLargePages(true),
|
||||
UseTransparentHugePages(true),
|
||||
UseHugeTLBFS(true),
|
||||
UseSHM(true))
|
||||
.expect(
|
||||
UseLargePages(true),
|
||||
UseTransparentHugePages(true),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(false));
|
||||
|
||||
new FlagTester()
|
||||
.use(UseTransparentHugePages(true),
|
||||
UseHugeTLBFS(true),
|
||||
UseSHM(true))
|
||||
.expect(
|
||||
UseLargePages(true),
|
||||
UseTransparentHugePages(true),
|
||||
UseHugeTLBFS(false),
|
||||
UseSHM(false));
|
||||
}
|
||||
|
||||
private static class FlagTester {
|
||||
private Flag [] useFlags;
|
||||
|
||||
public FlagTester use(Flag... useFlags) {
|
||||
this.useFlags = useFlags;
|
||||
return this;
|
||||
}
|
||||
|
||||
public void expect(Flag... expectedFlags) throws Exception {
|
||||
if (useFlags == null) {
|
||||
throw new IllegalStateException("Must run use() before expect()");
|
||||
}
|
||||
|
||||
OutputAnalyzer output = executeNewJVM(useFlags);
|
||||
|
||||
for (Flag flag : expectedFlags) {
|
||||
System.out.println("Looking for: " + flag.flagString());
|
||||
String strValue = output.firstMatch(".* " + flag.name() + " .* :?= (\\S+).*", 1);
|
||||
|
||||
if (strValue == null) {
|
||||
throw new RuntimeException("Flag " + flag.name() + " couldn't be found");
|
||||
}
|
||||
|
||||
if (!flag.value().equals(strValue)) {
|
||||
throw new RuntimeException("Wrong value for: " + flag.name()
|
||||
+ " expected: " + flag.value()
|
||||
+ " got: " + strValue);
|
||||
}
|
||||
}
|
||||
|
||||
output.shouldHaveExitValue(0);
|
||||
}
|
||||
}
|
||||
|
||||
private static OutputAnalyzer executeNewJVM(Flag... flags) throws Exception {
|
||||
ArrayList<String> args = new ArrayList<>();
|
||||
for (Flag flag : flags) {
|
||||
args.add(flag.flagString());
|
||||
}
|
||||
args.add("-XX:+PrintFlagsFinal");
|
||||
args.add("-version");
|
||||
|
||||
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args.toArray(new String[args.size()]));
|
||||
OutputAnalyzer output = new OutputAnalyzer(pb.start());
|
||||
|
||||
return output;
|
||||
}
|
||||
|
||||
private static boolean canUse(Flag flag) {
|
||||
try {
|
||||
new FlagTester().use(flag).expect(flag);
|
||||
} catch (Exception e) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
private static Flag UseLargePages(boolean value) {
|
||||
return new BooleanFlag("UseLargePages", value);
|
||||
}
|
||||
|
||||
private static Flag UseTransparentHugePages(boolean value) {
|
||||
return new BooleanFlag("UseTransparentHugePages", value);
|
||||
}
|
||||
|
||||
private static Flag UseHugeTLBFS(boolean value) {
|
||||
return new BooleanFlag("UseHugeTLBFS", value);
|
||||
}
|
||||
|
||||
private static Flag UseSHM(boolean value) {
|
||||
return new BooleanFlag("UseSHM", value);
|
||||
}
|
||||
|
||||
private static class BooleanFlag implements Flag {
|
||||
private String name;
|
||||
private boolean value;
|
||||
|
||||
BooleanFlag(String name, boolean value) {
|
||||
this.name = name;
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
public String flagString() {
|
||||
return "-XX:" + (value ? "+" : "-") + name;
|
||||
}
|
||||
|
||||
public String name() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public String value() {
|
||||
return Boolean.toString(value);
|
||||
}
|
||||
}
|
||||
|
||||
  private static interface Flag {
    public String flagString();
    public String name();
    public String value();
  }
}
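Taken together, the expectations in this new test describe a precedence order among the large-page flags: -XX:-UseLargePages wins outright, then UseTransparentHugePages, then UseHugeTLBFS, then UseSHM, with plain +UseLargePages defaulting to UseHugeTLBFS on Linux. A hedged summary of that order as the test asserts it; this is a sketch of the observed behaviour, not HotSpot's actual selection code.

public class LargePageKindSketch {
  static String chosenKind(boolean useLargePages, boolean thp, boolean hugeTlbfs, boolean shm) {
    if (!useLargePages) return "none";
    if (thp)            return "UseTransparentHugePages";
    if (hugeTlbfs)      return "UseHugeTLBFS";
    if (shm)            return "UseSHM";
    return "none";      // all specific kinds explicitly disabled turns large pages off
  }

  public static void main(String[] args) {
    System.out.println(chosenKind(true, false, true, true));   // UseHugeTLBFS
  }
}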
|
@ -231,3 +231,4 @@ d3be8e3b429df917e72c1c23e7920c651219b587 jdk8-b106
|
||||
d6a32e3831aab20a9a3bc78cdc0a60aaad725c6c jdk8-b107
|
||||
8ade3eed63da87067a7137c111f684a821e9e531 jdk8-b108
|
||||
02bfab2aa93899e0f02584f1e85537485a196553 jdk8-b109
|
||||
4c84c5b447b09aff27f3b72667ab3a5401e85968 jdk8-b110
|
||||
|
@ -231,3 +231,4 @@ b1fb4612a2caea52b5661b87509e560fa044b194 jdk8-b98
|
||||
e3c9328f75638289a342ce15fbe532f05078946e jdk8-b107
|
||||
d1ea68556fd7925a3c7078dd9f77c6ca73d5aa9e jdk8-b108
|
||||
df5d4d01642572e77fd3c01e4c8703ed3f6eec87 jdk8-b109
|
||||
cc682329886be2fc26220fc30597ee4e5bba43ed jdk8-b110
|
||||
|
@ -231,3 +231,4 @@ c817276bd870dfe1dcc3a3dbbc092436b6907f75 jdk8-b106
|
||||
eea685b9ccaa1980e0a7e07d6a3a84bcc7e9ab82 jdk8-b107
|
||||
006aaa5f069e7dd98fccdc696866c9f8582c087c jdk8-b108
|
||||
946f3fd5f8bf0ccd180c258d25e5837fa1bf004c jdk8-b109
|
||||
54e099776f08430d3a7f4feabd9f2ba886b55320 jdk8-b110
|
||||
|
@ -863,26 +863,13 @@ SRC_ZIP_INCLUDES = \
|
||||
com/sun/java_cup \
|
||||
com/sun/javadoc \
|
||||
com/sun/java/swing \
|
||||
com/sun/jlex \
|
||||
com/sun/jmx \
|
||||
com/sun/naming \
|
||||
com/sun/org/apache \
|
||||
com/sun/security/auth \
|
||||
com/sun/security/jgss \
|
||||
com/sun/source \
|
||||
java/applet \
|
||||
java/awt \
|
||||
java/beans \
|
||||
java/io \
|
||||
java/lang \
|
||||
java/math \
|
||||
java/net \
|
||||
java/nio \
|
||||
java/rmi \
|
||||
java/security \
|
||||
java/sql \
|
||||
java/text \
|
||||
java/util \
|
||||
java \
|
||||
javax/accessibility \
|
||||
javax/annotation \
|
||||
javax/imageio \
|
||||
@ -902,7 +889,7 @@ SRC_ZIP_INCLUDES = \
|
||||
org/omg \
|
||||
org/w3c/dom \
|
||||
org/xml/sax \
|
||||
sunw
|
||||
#
|
||||
|
||||
SRC_ZIP_SRCS = $(JDK_TOPDIR)/src/share/classes $(JDK_TOPDIR)/src/$(OPENJDK_TARGET_OS_API_DIR)/classes
|
||||
SRC_ZIP_SRCS += $(JDK_OUTPUTDIR)/gensrc
|
||||
|
@ -25,7 +25,6 @@
|
||||
|
||||
# Cache all finds needed for this file. Only used on windows.
|
||||
$(eval $(call FillCacheFind,$(JDK_TOPDIR)/make/tools \
|
||||
$(JDK_TOPDIR)/src/solaris/classes \
|
||||
$(JDK_TOPDIR)/makefiles/sun))
|
||||
|
||||
TOOLS_SRC:=$(JDK_TOPDIR)/make/tools/src \
|
||||
|
@ -231,3 +231,4 @@ fcd768844b9926c5f994292ec6350c20cc7c0f76 jdk8-b106
|
||||
3f274927ec1863544b8214262ab02b7de2970da6 jdk8-b107
|
||||
252f872b8a2f81a416f9127e77924ca56a4578b0 jdk8-b108
|
||||
985abf1cd327169a317d4ff4f318a8162a5cd47d jdk8-b109
|
||||
41541097533aa3933a018c8c1c426c1871dfd76e jdk8-b110
|
||||
|
@ -2704,11 +2704,11 @@ do
|
||||
rm -f $WDIR/$DIR/$F.html
|
||||
|
||||
its_a_jar=
|
||||
if expr $F : '.*\.jar' >/dev/null; then
|
||||
if expr $F : '.*\.jar' \| $F : '.*\.zip' >/dev/null; then
|
||||
its_a_jar=1
|
||||
# It's a JAR file, let's do it differntly
|
||||
# It's a JAR or ZIP file, let's do it differently
|
||||
if [[ -z $JAR ]]; then
|
||||
print "No access to jar, so can't produce diffs for jar files"
|
||||
print "No access to jar, so can't produce diffs for jar or zip files"
|
||||
else
|
||||
if [ -f $ofile ]; then
|
||||
$JAR -tvf $ofile >"$ofile".lst
|
||||
|
@ -219,3 +219,4 @@ bf70cbd2c8369fd97ffdfcbe1a80dbc2797408ee jdk8-b106
|
||||
f35e1255024b66f7cf82517798f45f6e194e5567 jdk8-b107
|
||||
445ad3f6d3b4ba62ebc483323e1919110a304053 jdk8-b108
|
||||
6ec2f9e5ed5bd60c2900976e6a54fdcac2f37e9e jdk8-b109
|
||||
d49a8c2173f5f90c9a39cc4af8e03cfa8f35ee4c jdk8-b110
|
||||
|