commit ff4b3de36b

    Merge
@@ -214,3 +214,4 @@ e1a929afcfc492470d50be0b6b0e8dc77d3760b9 jdk8-b88
 69b773a221b956a3386933ecdbfeccee0edeac47 jdk8-b90
 cb51fb4789ac0b8be4056482077ddfb8f3bd3805 jdk8-b91
 3a36c926a7aafa9d4a892a45ef3678e87ad8359b jdk8-b92
+27c51c6e31c1ef36afa0e6efb031f9b13f26c12b jdk8-b93
File diff suppressed because it is too large
@@ -351,11 +351,7 @@ AC_ARG_WITH(cacerts-file, [AS_HELP_STRING([--with-cacerts-file],
 if test "x$with_cacerts_file" != x; then
 CACERTS_FILE=$with_cacerts_file
 else
-if test "x$OPENJDK" = "xtrue"; then
-CACERTS_FILE=${SRC_ROOT}/jdk/src/share/lib/security/cacerts
-else
-CACERTS_FILE=${SRC_ROOT}/jdk/src/closed/share/lib/security/cacerts.internal
-fi
+CACERTS_FILE=${SRC_ROOT}/jdk/src/share/lib/security/cacerts
 fi
 AC_SUBST(CACERTS_FILE)

@@ -91,6 +91,15 @@ AC_DEFUN([TOOLCHAIN_FIND_VISUAL_STUDIO_BAT_FILE],
 AC_MSG_ERROR([Cannot locate a valid Visual Studio installation])
 fi

+if test "x$VS100COMNTOOLS" != x; then
+TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([$VS100COMNTOOLS/../..], [VS100COMNTOOLS variable])
+fi
+if test "x$PROGRAMFILES" != x; then
+TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([$PROGRAMFILES/Microsoft Visual Studio 10.0], [well-known name])
+fi
+TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([C:/Program Files/Microsoft Visual Studio 10.0], [well-known name])
+TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([C:/Program Files (x86)/Microsoft Visual Studio 10.0], [well-known name])
+
 if test "x$ProgramW6432" != x; then
 TOOLCHAIN_CHECK_POSSIBLE_WIN_SDK_ROOT([$ProgramW6432/Microsoft SDKs/Windows/v7.1/Bin], [well-known name])
 fi
@@ -102,15 +111,6 @@ AC_DEFUN([TOOLCHAIN_FIND_VISUAL_STUDIO_BAT_FILE],
 fi
 TOOLCHAIN_CHECK_POSSIBLE_WIN_SDK_ROOT([C:/Program Files/Microsoft SDKs/Windows/v7.1/Bin], [well-known name])
 TOOLCHAIN_CHECK_POSSIBLE_WIN_SDK_ROOT([C:/Program Files (x86)/Microsoft SDKs/Windows/v7.1/Bin], [well-known name])
-
-if test "x$VS100COMNTOOLS" != x; then
-TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([$VS100COMNTOOLS/../..], [VS100COMNTOOLS variable])
-fi
-if test "x$PROGRAMFILES" != x; then
-TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([$PROGRAMFILES/Microsoft Visual Studio 10.0], [well-known name])
-fi
-TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([C:/Program Files/Microsoft Visual Studio 10.0], [well-known name])
-TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([C:/Program Files (x86)/Microsoft Visual Studio 10.0], [well-known name])
 ])

 # Check if the VS env variables were setup prior to running configure.
@@ -248,10 +248,23 @@ AC_DEFUN([TOOLCHAIN_SETUP_VISUAL_STUDIO_ENV],
 AC_MSG_NOTICE([Warning: msvcr100.dll not found in VCINSTALLDIR: $VCINSTALLDIR])
 fi
 fi
+# Try some fallback alternatives
 if test "x$MSVCR_DLL" = x; then
-if test -f "$SYSTEMROOT/system32/msvcr100.dll"; then
-AC_MSG_NOTICE([msvcr100.dll found in $SYSTEMROOT/system32])
-MSVCR_DLL="$SYSTEMROOT/system32/msvcr100.dll"
+# If visual studio express is installed, there is usually one with the debugger
+if test "x$VS100COMNTOOLS" != x; then
+if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
+MSVCR_DLL=`find "$VS100COMNTOOLS/.." -name msvcr100.dll | grep -i x64 | head --lines 1`
+AC_MSG_NOTICE([msvcr100.dll found in $VS100COMNTOOLS..: $VS100COMNTOOLS..])
+fi
+fi
+fi
+if test "x$MSVCR_DLL" = x; then
+if test "x$OPENJDK_TARGET_CPU_BITS" = x32; then
+# Fallback for 32bit builds, look in the windows directory.
+if test -f "$SYSTEMROOT/system32/msvcr100.dll"; then
+AC_MSG_NOTICE([msvcr100.dll found in $SYSTEMROOT/system32])
+MSVCR_DLL="$SYSTEMROOT/system32/msvcr100.dll"
+fi
 fi
 fi
 fi
@@ -214,3 +214,4 @@ fe4150590ee597f4e125fea950aa3b352622cc2d jdk8-b89
 c8286839d0df04aba819ec4bef12b86babccf30e jdk8-b90
 8f7ffb296385f85a4a6d53f9f2d4a7b13a8fa1ff jdk8-b91
 717aa26f8e0a1c0e768aebb3a763aca56db0c83e jdk8-b92
+8dc9d7ccbb2d77fd89bc321bb02e67c152aca257 jdk8-b93
@@ -346,3 +346,6 @@ b19517cecc2e91636d7c16ba2f35e3d3dc628099 hs25-b33
 7cbdf0e3725c0c56a2ff7540fc70b6d4b5890d04 jdk8-b91
 38da9f4f67096745f851318d792d6468aa1f6cf8 hs25-b34
 092018493d3bbeb1c24278fd8c40ff3d76e1fed7 jdk8-b92
+573d86d412cd9d3df7912194c1a540be50e9544e jdk8-b93
+b786c04b7be15194febe88dc1f0c9443e737a84b hs25-b35
+3c78a14da19d26d6937af5f98b97e2a21c653b04 hs25-b36
@@ -96,9 +96,10 @@ public class DictionaryEntry extends sun.jvm.hotspot.utilities.HashtableEntry {

 public boolean containsProtectionDomain(Oop protectionDomain) {
 InstanceKlass ik = (InstanceKlass) klass();
-if (protectionDomain.equals(ik.getProtectionDomain())) {
-return true; // Succeeds trivially
-}
+// Currently unimplemented and not used.
+// if (protectionDomain.equals(ik.getJavaMirror().getProtectionDomain())) {
+//   return true; // Succeeds trivially
+// }
 for (ProtectionDomainEntry current = pdSet(); current != null;
 current = current.next()) {
 if (protectionDomain.equals(current.protectionDomain())) {
@@ -75,8 +75,6 @@ public class InstanceKlass extends Klass {
 javaFieldsCount = new CIntField(type.getCIntegerField("_java_fields_count"), 0);
 constants = new MetadataField(type.getAddressField("_constants"), 0);
 classLoaderData = type.getAddressField("_class_loader_data");
-protectionDomain = new OopField(type.getOopField("_protection_domain"), 0);
-signers = new OopField(type.getOopField("_signers"), 0);
 sourceFileName = type.getAddressField("_source_file_name");
 sourceDebugExtension = type.getAddressField("_source_debug_extension");
 innerClasses = type.getAddressField("_inner_classes");
@@ -136,8 +134,6 @@ public class InstanceKlass extends Klass {
 private static CIntField javaFieldsCount;
 private static MetadataField constants;
 private static AddressField classLoaderData;
-private static OopField protectionDomain;
-private static OopField signers;
 private static AddressField sourceFileName;
 private static AddressField sourceDebugExtension;
 private static AddressField innerClasses;
@@ -350,8 +346,6 @@ public class InstanceKlass extends Klass {
 public ConstantPool getConstants() { return (ConstantPool) constants.getValue(this); }
 public ClassLoaderData getClassLoaderData() { return ClassLoaderData.instantiateWrapperFor(classLoaderData.getValue(getAddress())); }
 public Oop getClassLoader() { return getClassLoaderData().getClassLoader(); }
-public Oop getProtectionDomain() { return protectionDomain.getValue(this); }
-public ObjArray getSigners() { return (ObjArray) signers.getValue(this); }
 public Symbol getSourceFileName() { return getSymbol(sourceFileName); }
 public String getSourceDebugExtension(){ return CStringUtilities.getString(sourceDebugExtension.getValue(getAddress())); }
 public long getNonstaticFieldSize() { return nonstaticFieldSize.getValue(this); }
@@ -541,8 +535,6 @@ public class InstanceKlass extends Klass {
 // visitor.doOop(methods, true);
 // visitor.doOop(localInterfaces, true);
 // visitor.doOop(transitiveInterfaces, true);
-visitor.doOop(protectionDomain, true);
-visitor.doOop(signers, true);
 visitor.doCInt(nonstaticFieldSize, true);
 visitor.doCInt(staticFieldSize, true);
 visitor.doCInt(staticOopFieldCount, true);
@@ -204,13 +204,13 @@ public class HeapGXLWriter extends AbstractHeapGraphWriter {
 Oop loader = ik.getClassLoader();
 writeEdge(instance, loader, "loaded-by");

-// write signers
-Oop signers = ik.getSigners();
-writeEdge(instance, signers, "signed-by");
+// write signers NYI
+// Oop signers = ik.getJavaMirror().getSigners();
+writeEdge(instance, null, "signed-by");

-// write protection domain
-Oop protectionDomain = ik.getProtectionDomain();
-writeEdge(instance, protectionDomain, "protection-domain");
+// write protection domain NYI
+// Oop protectionDomain = ik.getJavaMirror().getProtectionDomain();
+writeEdge(instance, null, "protection-domain");

 // write edges for static reference fields from this class
 for (Iterator itr = refFields.iterator(); itr.hasNext();) {
@@ -477,8 +477,8 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
 if (k instanceof InstanceKlass) {
 InstanceKlass ik = (InstanceKlass) k;
 writeObjectID(ik.getClassLoader());
-writeObjectID(ik.getSigners());
-writeObjectID(ik.getProtectionDomain());
+writeObjectID(null); // ik.getJavaMirror().getSigners());
+writeObjectID(null); // ik.getJavaMirror().getProtectionDomain());
 // two reserved id fields
 writeObjectID(null);
 writeObjectID(null);
@@ -516,8 +516,8 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
 if (bottomKlass instanceof InstanceKlass) {
 InstanceKlass ik = (InstanceKlass) bottomKlass;
 writeObjectID(ik.getClassLoader());
-writeObjectID(ik.getSigners());
-writeObjectID(ik.getProtectionDomain());
+writeObjectID(null); // ik.getJavaMirror().getSigners());
+writeObjectID(null); // ik.getJavaMirror().getProtectionDomain());
 } else {
 writeObjectID(null);
 writeObjectID(null);
@@ -47,8 +47,6 @@ public class JSJavaInstanceKlass extends JSJavaKlass {
 private static final int FIELD_IS_SYNTHETIC = 13;
 private static final int FIELD_IS_INTERFACE = 14;
 private static final int FIELD_CLASS_LOADER = 15;
-private static final int FIELD_PROTECTION_DOMAIN = 16;
-private static final int FIELD_SIGNERS = 17;
 private static final int FIELD_STATICS = 18;
 private static final int FIELD_UNDEFINED = -1;

@@ -100,10 +98,6 @@ public class JSJavaInstanceKlass extends JSJavaKlass {
 return Boolean.valueOf(ik.isInterface());
 case FIELD_CLASS_LOADER:
 return factory.newJSJavaObject(ik.getClassLoader());
-case FIELD_PROTECTION_DOMAIN:
-return factory.newJSJavaObject(ik.getProtectionDomain());
-case FIELD_SIGNERS:
-return factory.newJSJavaObject(ik.getSigners());
 case FIELD_STATICS:
 return getStatics();
 case FIELD_UNDEFINED:
@@ -246,8 +240,6 @@ public class JSJavaInstanceKlass extends JSJavaKlass {
 addField("isSynthetic", FIELD_IS_SYNTHETIC);
 addField("isInterface", FIELD_IS_INTERFACE);
 addField("classLoader", FIELD_CLASS_LOADER);
-addField("protectionDomain", FIELD_PROTECTION_DOMAIN);
-addField("signers", FIELD_SIGNERS);
 addField("statics", FIELD_STATICS);
 }

@@ -69,7 +69,7 @@ CXXFLAGS += -DASSERT
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 # Compiler warnings are treated as errors
 ifneq ($(COMPILER_WARNINGS_FATAL),false)
-CFLAGS_WARN = -Werror
+CFLAGS_WARN = $(WARNINGS_ARE_ERRORS)
 endif
 CFLAGS += $(CFLAGS_WARN)

@@ -71,6 +71,11 @@ ifeq ($(SPEC),)
 CC = $(CC32)
 endif

+ifeq ($(USE_CLANG), true)
+CXX = clang++
+CC = clang
+endif
+
 HOSTCXX = $(CXX)
 HOSTCC = $(CC)
 endif
@@ -79,21 +84,79 @@ ifeq ($(SPEC),)
 endif


+ifeq ($(USE_CLANG), true)
+CC_VER_MAJOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f1)
+CC_VER_MINOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f2)
+else
 # -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only
 # prints the numbers (e.g. "2.95", "3.2.1")
 CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1)
 CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2)
-
-# check for precompiled headers support
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0"
-# Allow the user to turn off precompiled headers from the command line.
-ifneq ($(USE_PRECOMPILED_HEADER),0)
-PRECOMPILED_HEADER_DIR=.
-PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
-PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
-endif
 endif

+ifeq ($(USE_CLANG), true)
+# clang has precompiled headers support by default, but the user can switch
+# it off by using 'USE_PRECOMPILED_HEADER=0'.
+ifdef LP64
+ifeq ($(USE_PRECOMPILED_HEADER),)
+USE_PRECOMPILED_HEADER=1
+endif
+else
+# We don't support precompiled headers on 32-bit builds because there some files are
+# compiled with -fPIC while others are compiled without (see 'NONPIC_OBJ_FILES' rules.make)
+# Clang produces an error if the PCH file was compiled with other options than the actual compilation unit.
+USE_PRECOMPILED_HEADER=0
+endif
+
+ifeq ($(USE_PRECOMPILED_HEADER),1)
+
+ifndef LP64
+$(error " Precompiled Headers only supported on 64-bit platforms!")
+endif
+
+PRECOMPILED_HEADER_DIR=.
+PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
+PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.pch
+
+PCH_FLAG = -include precompiled.hpp
+PCH_FLAG/DEFAULT = $(PCH_FLAG)
+PCH_FLAG/NO_PCH = -DNO_PCH
+PCH_FLAG/BY_FILE = $(PCH_FLAG/$@)$(PCH_FLAG/DEFAULT$(PCH_FLAG/$@))
+
+VM_PCH_FLAG/LIBJVM = $(PCH_FLAG/BY_FILE)
+VM_PCH_FLAG/AOUT =
+VM_PCH_FLAG = $(VM_PCH_FLAG/$(LINK_INTO))
+
+# We only use precompiled headers for the JVM build
+CFLAGS += $(VM_PCH_FLAG)
+
+# There are some files which don't like precompiled headers
+# The following files are build with 'OPT_CFLAGS/NOOPT' (-O0) in the opt build.
+# But Clang doesn't support a precompiled header which was compiled with -O3
+# to be used in a compilation unit which uses '-O0'. We could also prepare an
+# extra '-O0' PCH file for the opt build and use it here, but it's probably
+# not worth the effort as long as only two files need this special handling.
+PCH_FLAG/loopTransform.o = $(PCH_FLAG/NO_PCH)
+PCH_FLAG/sharedRuntimeTrig.o = $(PCH_FLAG/NO_PCH)
+PCH_FLAG/sharedRuntimeTrans.o = $(PCH_FLAG/NO_PCH)
+
+endif
+else # ($(USE_CLANG), true)
+# check for precompiled headers support
+ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0"
+# Allow the user to turn off precompiled headers from the command line.
+ifneq ($(USE_PRECOMPILED_HEADER),0)
+PRECOMPILED_HEADER_DIR=.
+PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
+PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
+endif
+endif
+endif
+
+# -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
+ifeq ($(USE_PRECOMPILED_HEADER),0)
+CFLAGS += -DDONT_USE_PRECOMPILED_HEADER
+endif
+

 #------------------------------------------------------------------------
 # Compiler flags
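As context for the hunk above: GCC reports a plain version string via -dumpversion, while Clang's version has to be scraped from the output of the compiler's -v banner, which is why the detection is now split on USE_CLANG. The following is a minimal standalone sketch of that detection logic, lifted from the diff itself; it only assumes that $(CC) points at either gcc or clang, and the version-check target is illustrative, not part of the original makefiles.

# Sketch: derive CC_VER_MAJOR/CC_VER_MINOR for either toolchain.
CC ?= gcc
USE_CLANG ?=

ifeq ($(USE_CLANG), true)
  # clang prints e.g. "clang version 3.4 ..." in its -v banner
  CC_VER_MAJOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f1)
  CC_VER_MINOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f2)
else
  # gcc -dumpversion prints e.g. "4.8.2" (or "egcs-2.91.66" on very old releases)
  CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1)
  CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2)
endif

version-check:
	@echo "major=$(CC_VER_MAJOR) minor=$(CC_VER_MINOR)"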
@@ -115,17 +178,31 @@ endif
 CFLAGS += $(VM_PICFLAG)
 CFLAGS += -fno-rtti
 CFLAGS += -fno-exceptions
-CFLAGS += -pthread
-CFLAGS += -fcheck-new
-# version 4 and above support fvisibility=hidden (matches jni_x86.h file)
-# except 4.1.2 gives pointless warnings that can't be disabled (afaik)
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
-CFLAGS += -fvisibility=hidden
+ifeq ($(USE_CLANG),)
+CFLAGS += -pthread
+CFLAGS += -fcheck-new
+# version 4 and above support fvisibility=hidden (matches jni_x86.h file)
+# except 4.1.2 gives pointless warnings that can't be disabled (afaik)
+ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+CFLAGS += -fvisibility=hidden
+endif
+else
+CFLAGS += -fvisibility=hidden
+endif
+
+ifeq ($(USE_CLANG), true)
+# Before Clang 3.1, we had to pass the stack alignment specification directly to llvm with the help of '-mllvm'
+# Starting with version 3.1, Clang understands the '-mstack-alignment' (and rejects '-mllvm -stack-alignment')
+ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 1 \) \))" "0"
+STACK_ALIGNMENT_OPT = -mno-omit-leaf-frame-pointer -mstack-alignment=16
+else
+STACK_ALIGNMENT_OPT = -mno-omit-leaf-frame-pointer -mllvm -stack-alignment=16
+endif
 endif

 ARCHFLAG = $(ARCHFLAG/$(BUILDARCH))
 ARCHFLAG/i486 = -m32 -march=i586
-ARCHFLAG/amd64 = -m64
+ARCHFLAG/amd64 = -m64 $(STACK_ALIGNMENT_OPT)
 ARCHFLAG/ia64 =
 ARCHFLAG/sparc = -m32 -mcpu=v9
 ARCHFLAG/sparcv9 = -m64 -mcpu=v9
@@ -163,14 +240,25 @@ ifneq ($(COMPILER_WARNINGS_FATAL),false)
 WARNINGS_ARE_ERRORS = -Werror
 endif

-# Except for a few acceptable ones
-# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
-# conversions which might affect the values. To avoid that, we need to turn
-# it off explicitly.
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+ifeq ($(USE_CLANG), true)
+# However we need to clean the code up before we can unrestrictedly enable this option with Clang
+WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
+WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-compare
+# Not yet supported by clang in Xcode 4.6.2
+# WARNINGS_ARE_ERRORS += -Wno-tautological-constant-out-of-range-compare
+WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess
+WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
+endif
+
 WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef
-else
-WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef
+
+ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
+# conversions which might affect the values. Only enable it in earlier versions.
+WARNING_FLAGS = -Wunused-function
+ifeq ($(USE_CLANG),)
+WARNINGS_FLAGS += -Wconversion
+endif
+endif
 endif

 CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
@@ -214,14 +302,24 @@ endif

 OPT_CFLAGS/NOOPT=-O0

-# 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation.
-ifneq "$(shell expr \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) = 3 \) \))" "0"
-OPT_CFLAGS/mulnode.o += -O0
+# Work around some compiler bugs.
+ifeq ($(USE_CLANG), true)
+ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1)
+OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
+endif
+else
+# 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation.
+ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 3), 1)
+OPT_CFLAGS/mulnode.o += $(OPT_CFLAGS/NOOPT)
+endif
 endif

 # Flags for generating make dependency flags.
-ifneq ("${CC_VER_MAJOR}", "2")
-DEPFLAGS = -fpch-deps -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
+DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
+ifeq ($(USE_CLANG),)
+ifneq ($(CC_VER_MAJOR), 2)
+DEPFLAGS += -fpch-deps
+endif
 endif

 # -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
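For background on the DEPFLAGS change above: -MMD -MP -MF <file> asks the compiler to write a make dependency fragment next to each object, and -fpch-deps (a GCC-only option, hence the new ifeq ($(USE_CLANG),) guard) additionally records headers reached through a precompiled header. The sketch below shows one common way such fragments are generated and consumed; the directory, source names and rule layout are placeholders, not the HotSpot build.

# Sketch: per-object dependency files via -MMD/-MP/-MF, with gcc-only -fpch-deps.
DEP_DIR := .deps
SRCS    := foo.cpp bar.cpp
OBJS    := $(SRCS:.cpp=.o)

DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
ifeq ($(USE_CLANG),)
  # -fpch-deps is understood by gcc but not by clang
  DEPFLAGS += -fpch-deps
endif

all: $(OBJS)

%.o: %.cpp | $(DEP_DIR)
	$(CXX) $(CXXFLAGS) $(DEPFLAGS) -c $< -o $@

$(DEP_DIR):
	mkdir -p $@

# Pull in dependency fragments produced by earlier compilations, if any.
-include $(wildcard $(DEP_DIR)/*.d)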
@@ -249,13 +347,15 @@ endif
 # statically link libstdc++.so, work with gcc but ignored by g++
 STATIC_STDCXX = -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic

-# statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x.
-ifneq ("${CC_VER_MAJOR}", "2")
-STATIC_LIBGCC += -static-libgcc
-endif
+ifeq ($(USE_CLANG),)
+# statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x.
+ifneq ("${CC_VER_MAJOR}", "2")
+STATIC_LIBGCC += -static-libgcc
+endif

 ifeq ($(BUILDARCH), ia64)
 LFLAGS += -Wl,-relax
+endif
 endif

 # Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file.
@@ -296,25 +396,31 @@ endif
 #------------------------------------------------------------------------
 # Debug flags

-# Use the stabs format for debugging information (this is the default
-# on gcc-2.91). It's good enough, has all the information about line
-# numbers and local variables, and libjvm.so is only about 16M.
-# Change this back to "-g" if you want the most expressive format.
-# (warning: that could easily inflate libjvm.so to 150M!)
-# Note: The Itanium gcc compiler crashes when using -gstabs.
-DEBUG_CFLAGS/ia64 = -g
-DEBUG_CFLAGS/amd64 = -g
-DEBUG_CFLAGS/arm = -g
-DEBUG_CFLAGS/ppc = -g
-DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
-ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
-DEBUG_CFLAGS += -gstabs
+ifeq ($(USE_CLANG), true)
+# Restrict the debug information created by Clang to avoid
+# too big object files and speed the build up a little bit
+# (see http://llvm.org/bugs/show_bug.cgi?id=7554)
+CFLAGS += -flimit-debug-info
 endif

-# DEBUG_BINARIES overrides everything, use full -g debug information
+# DEBUG_BINARIES uses full -g debug information for all configs
 ifeq ($(DEBUG_BINARIES), true)
-DEBUG_CFLAGS = -g
-CFLAGS += $(DEBUG_CFLAGS)
+CFLAGS += -g
+else
+# Use the stabs format for debugging information (this is the default
+# on gcc-2.91). It's good enough, has all the information about line
+# numbers and local variables, and libjvm.so is only about 16M.
+# Change this back to "-g" if you want the most expressive format.
+# (warning: that could easily inflate libjvm.so to 150M!)
+# Note: The Itanium gcc compiler crashes when using -gstabs.
+DEBUG_CFLAGS/ia64 = -g
+DEBUG_CFLAGS/amd64 = -g
+DEBUG_CFLAGS/arm = -g
+DEBUG_CFLAGS/ppc = -g
+DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
+ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
+DEBUG_CFLAGS += -gstabs
+endif
 endif

 # If we are building HEADLESS, pass on to VM
@@ -126,7 +126,11 @@ ifneq ($(OS_VENDOR), Darwin)
 LFLAGS += -Xlinker -z -Xlinker noexecstack
 endif

-LIBS += -lm -pthread
+LIBS += -lm
+
+ifeq ($(USE_CLANG),)
+LIBS += -pthread
+endif

 # By default, link the *.o into the library, not the executable.
 LINK_INTO$(LINK_INTO) = LIBJVM
@@ -25,7 +25,7 @@ ifeq ($(INCLUDE_JVMTI), false)
 CXXFLAGS += -DINCLUDE_JVMTI=0
 CFLAGS += -DINCLUDE_JVMTI=0

-Src_Files_EXCLUDE += jvmtiGetLoadedClasses.cpp forte.cpp jvmtiThreadState.cpp jvmtiExtensions.cpp \
+Src_Files_EXCLUDE += jvmtiGetLoadedClasses.cpp jvmtiThreadState.cpp jvmtiExtensions.cpp \
 jvmtiImpl.cpp jvmtiManageCapabilities.cpp jvmtiRawMonitor.cpp jvmtiUtil.cpp jvmtiTrace.cpp \
 jvmtiCodeBlobEvents.cpp jvmtiEnv.cpp jvmtiRedefineClasses.cpp jvmtiEnvBase.cpp jvmtiEnvThreadState.cpp \
 jvmtiTagMap.cpp jvmtiEventController.cpp evmCompat.cpp jvmtiEnter.xsl jvmtiExport.cpp \
@@ -87,7 +87,7 @@ ifeq ($(INCLUDE_ALL_GCS), false)
 g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \
 g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
 g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
-g1RemSet.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
+g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
 heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
 ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
 adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013

 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=34
+HS_BUILD_NUMBER=36

 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
@@ -68,7 +68,7 @@ CXXFLAGS += -DASSERT

 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 # Compiler warnings are treated as errors
-CFLAGS_WARN = -Werror
+CFLAGS_WARN = $(WARNINGS_ARE_ERRORS)
 CFLAGS += $(CFLAGS_WARN)

 OBJECTNAMES = \
@@ -36,8 +36,14 @@ ifeq ($(SPEC),)
 HOSTCC = gcc
 STRIP = $(ALT_COMPILER_PATH)/strip
 else
-CXX = g++
-CC = gcc
+ifeq ($(USE_CLANG), true)
+CXX = clang++
+CC = clang
+else
+CXX = g++
+CC = gcc
+endif
+
 HOSTCXX = $(CXX)
 HOSTCC = $(CC)
 STRIP = strip
@@ -46,19 +52,79 @@ ifeq ($(SPEC),)
 endif


+ifeq ($(USE_CLANG), true)
+CC_VER_MAJOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f1)
+CC_VER_MINOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f2)
+else
 # -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only
 # prints the numbers (e.g. "2.95", "3.2.1")
 CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1)
 CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2)
-
-# check for precompiled headers support
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0"
-# Allow the user to turn off precompiled headers from the command line.
-ifneq ($(USE_PRECOMPILED_HEADER),0)
-PRECOMPILED_HEADER_DIR=.
-PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
-PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
 endif

+ifeq ($(USE_CLANG), true)
+# Clang has precompiled headers support by default, but the user can switch
+# it off by using 'USE_PRECOMPILED_HEADER=0'.
+ifdef LP64
+ifeq ($(USE_PRECOMPILED_HEADER),)
+USE_PRECOMPILED_HEADER=1
+endif
+else
+# We don't support precompiled headers on 32-bit builds because there some files are
+# compiled with -fPIC while others are compiled without (see 'NONPIC_OBJ_FILES' rules.make)
+# Clang produces an error if the PCH file was compiled with other options than the actual compilation unit.
+USE_PRECOMPILED_HEADER=0
+endif
+
+ifeq ($(USE_PRECOMPILED_HEADER),1)
+
+ifndef LP64
+$(error " Precompiled Headers only supported on 64-bit platforms!")
+endif
+
+PRECOMPILED_HEADER_DIR=.
+PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
+PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.pch
+
+PCH_FLAG = -include precompiled.hpp
+PCH_FLAG/DEFAULT = $(PCH_FLAG)
+PCH_FLAG/NO_PCH = -DNO_PCH
+PCH_FLAG/BY_FILE = $(PCH_FLAG/$@)$(PCH_FLAG/DEFAULT$(PCH_FLAG/$@))
+
+VM_PCH_FLAG/LIBJVM = $(PCH_FLAG/BY_FILE)
+VM_PCH_FLAG/AOUT =
+VM_PCH_FLAG = $(VM_PCH_FLAG/$(LINK_INTO))
+
+# We only use precompiled headers for the JVM build
+CFLAGS += $(VM_PCH_FLAG)
+
+# There are some files which don't like precompiled headers
+# The following files are build with 'OPT_CFLAGS/NOOPT' (-O0) in the opt build.
+# But Clang doesn't support a precompiled header which was compiled with -O3
+# to be used in a compilation unit which uses '-O0'. We could also prepare an
+# extra '-O0' PCH file for the opt build and use it here, but it's probably
+# not worth the effoert as long as only two files need this special handling.
+PCH_FLAG/loopTransform.o = $(PCH_FLAG/NO_PCH)
+PCH_FLAG/sharedRuntimeTrig.o = $(PCH_FLAG/NO_PCH)
+PCH_FLAG/sharedRuntimeTrans.o = $(PCH_FLAG/NO_PCH)
+
+endif
+else # ($(USE_CLANG), true)
+# check for precompiled headers support
+ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0"
+# Allow the user to turn off precompiled headers from the command line.
+ifneq ($(USE_PRECOMPILED_HEADER),0)
+PRECOMPILED_HEADER_DIR=.
+PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
+PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
+endif
+endif
+endif
+
+# -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
+ifeq ($(USE_PRECOMPILED_HEADER),0)
+CFLAGS += -DDONT_USE_PRECOMPILED_HEADER
 endif


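The PCH_FLAG/BY_FILE line added in the hunks above is a GNU make lookup idiom: a per-target override is used when it is defined, otherwise the value falls back to PCH_FLAG/DEFAULT. A small self-contained sketch of the same pattern follows; the target names in the demo rule are illustrative and not real JVM objects.

# Sketch of the per-target-with-default lookup behind PCH_FLAG/BY_FILE.
PCH_FLAG         = -include precompiled.hpp
PCH_FLAG/DEFAULT = $(PCH_FLAG)
PCH_FLAG/NO_PCH  = -DNO_PCH

# Opt a specific object out of the precompiled header.
PCH_FLAG/loopTransform.o = $(PCH_FLAG/NO_PCH)

# If PCH_FLAG/$@ is non-empty it is used, and the second lookup becomes an
# undefined variable (PCH_FLAG/DEFAULT<override>) that expands to nothing;
# if PCH_FLAG/$@ is empty, the expression reduces to PCH_FLAG/DEFAULT.
PCH_FLAG/BY_FILE = $(PCH_FLAG/$@)$(PCH_FLAG/DEFAULT$(PCH_FLAG/$@))

%.o:
	@echo "$@ gets: $(PCH_FLAG/BY_FILE)"

demo: loopTransform.o mulnode.o
.PHONY: demo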
@@ -83,16 +149,30 @@ CFLAGS += $(VM_PICFLAG)
 CFLAGS += -fno-rtti
 CFLAGS += -fno-exceptions
 CFLAGS += -D_REENTRANT
-CFLAGS += -fcheck-new
-# version 4 and above support fvisibility=hidden (matches jni_x86.h file)
-# except 4.1.2 gives pointless warnings that can't be disabled (afaik)
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
-CFLAGS += -fvisibility=hidden
+ifeq ($(USE_CLANG),)
+CFLAGS += -fcheck-new
+# version 4 and above support fvisibility=hidden (matches jni_x86.h file)
+# except 4.1.2 gives pointless warnings that can't be disabled (afaik)
+ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+CFLAGS += -fvisibility=hidden
+endif
+else
+CFLAGS += -fvisibility=hidden
+endif
+
+ifeq ($(USE_CLANG), true)
+# Before Clang 3.1, we had to pass the stack alignment specification directly to llvm with the help of '-mllvm'
+# Starting with version 3.1, Clang understands the '-mstack-alignment' (and rejects '-mllvm -stack-alignment')
+ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 1 \) \))" "0"
+STACK_ALIGNMENT_OPT = -mno-omit-leaf-frame-pointer -mstack-alignment=16
+else
+STACK_ALIGNMENT_OPT = -mno-omit-leaf-frame-pointer -mllvm -stack-alignment=16
+endif
 endif

 ARCHFLAG = $(ARCHFLAG/$(BUILDARCH))
 ARCHFLAG/i486 = -m32 -march=i586
-ARCHFLAG/amd64 = -m64
+ARCHFLAG/amd64 = -m64 $(STACK_ALIGNMENT_OPT)
 ARCHFLAG/ia64 =
 ARCHFLAG/sparc = -m32 -mcpu=v9
 ARCHFLAG/sparcv9 = -m64 -mcpu=v9
@@ -126,12 +206,22 @@ endif
 # Compiler warnings are treated as errors
 WARNINGS_ARE_ERRORS = -Werror

+ifeq ($(USE_CLANG), true)
+# However we need to clean the code up before we can unrestrictedly enable this option with Clang
+WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
+WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-constant-out-of-range-compare -Wno-tautological-compare
+WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess
+WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
+endif
+
 WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function

+ifeq ($(USE_CLANG),)
 # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
 # conversions which might affect the values. Only enable it in earlier versions.
 ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
 WARNING_FLAGS += -Wconversion
 endif
+endif

 CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
@@ -165,19 +255,24 @@ endif

 OPT_CFLAGS/NOOPT=-O0

-# 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation.
-ifneq "$(shell expr \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) = 3 \) \))" "0"
-OPT_CFLAGS/mulnode.o += -O0
+# Work around some compiler bugs.
+ifeq ($(USE_CLANG), true)
+ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1)
+OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
+endif
+else
+# 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation.
+ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 3), 1)
+OPT_CFLAGS/mulnode.o += $(OPT_CFLAGS/NOOPT)
+endif
 endif

 # Flags for generating make dependency flags.
-ifneq ("${CC_VER_MAJOR}", "2")
-DEPFLAGS = -fpch-deps -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
-endif
-
-# -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
-ifeq ($(USE_PRECOMPILED_HEADER),0)
-CFLAGS += -DDONT_USE_PRECOMPILED_HEADER
+DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
+ifeq ($(USE_CLANG),)
+ifneq ("${CC_VER_MAJOR}", "2")
+DEPFLAGS += -fpch-deps
+endif
 endif

 #------------------------------------------------------------------------
@@ -186,24 +281,33 @@ endif
 # statically link libstdc++.so, work with gcc but ignored by g++
 STATIC_STDCXX = -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic

-# statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x.
-ifneq ("${CC_VER_MAJOR}", "2")
-STATIC_LIBGCC += -static-libgcc
-endif
+ifeq ($(USE_CLANG),)
+# statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x.
+ifneq ("${CC_VER_MAJOR}", "2")
+STATIC_LIBGCC += -static-libgcc
+endif
+
 ifeq ($(BUILDARCH), ia64)
 LFLAGS += -Wl,-relax
+endif
 endif

 # Enable linker optimization
 LFLAGS += -Xlinker -O1

-# If this is a --hash-style=gnu system, use --hash-style=both
-# The gnu .hash section won't work on some Linux systems like SuSE 10.
-_HAS_HASH_STYLE_GNU:=$(shell $(CC) -dumpspecs | grep -- '--hash-style=gnu')
-ifneq ($(_HAS_HASH_STYLE_GNU),)
+ifeq ($(USE_CLANG),)
+# If this is a --hash-style=gnu system, use --hash-style=both
+# The gnu .hash section won't work on some Linux systems like SuSE 10.
+_HAS_HASH_STYLE_GNU:=$(shell $(CC) -dumpspecs | grep -- '--hash-style=gnu')
+ifneq ($(_HAS_HASH_STYLE_GNU),)
+LDFLAGS_HASH_STYLE = -Wl,--hash-style=both
+endif
+else
+# Don't know how to find out the 'hash style' of a system as '-dumpspecs'
+# doesn't work for Clang. So for now we'll alwys use --hash-style=both
 LDFLAGS_HASH_STYLE = -Wl,--hash-style=both
 endif

 LFLAGS += $(LDFLAGS_HASH_STYLE)

 # Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file.
@@ -221,6 +325,13 @@ AOUT_FLAGS += -Xlinker -export-dynamic
 #------------------------------------------------------------------------
 # Debug flags

+ifeq ($(USE_CLANG), true)
+# Restrict the debug information created by Clang to avoid
+# too big object files and speed the build up a little bit
+# (see http://llvm.org/bugs/show_bug.cgi?id=7554)
+CFLAGS += -flimit-debug-info
+endif
+
 # DEBUG_BINARIES uses full -g debug information for all configs
 ifeq ($(DEBUG_BINARIES), true)
 CFLAGS += -g
@@ -237,7 +348,12 @@ else
 DEBUG_CFLAGS/ppc = -g
 DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
 ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
-DEBUG_CFLAGS += -gstabs
+ifeq ($(USE_CLANG), true)
+# Clang doesn't understand -gstabs
+OPT_CFLAGS += -g
+else
+OPT_CFLAGS += -gstabs
+endif
 endif

 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
@@ -247,7 +363,12 @@ else
 FASTDEBUG_CFLAGS/ppc = -g
 FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
 ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
-FASTDEBUG_CFLAGS += -gstabs
+ifeq ($(USE_CLANG), true)
+# Clang doesn't understand -gstabs
+OPT_CFLAGS += -g
+else
+OPT_CFLAGS += -gstabs
+endif
 endif

 OPT_CFLAGS/ia64 = -g
@@ -256,7 +377,12 @@ else
 OPT_CFLAGS/ppc = -g
 OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
 ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
-OPT_CFLAGS += -gstabs
+ifeq ($(USE_CLANG), true)
+# Clang doesn't understand -gstabs
+OPT_CFLAGS += -g
+else
+OPT_CFLAGS += -gstabs
+endif
 endif
 endif
 endif
@@ -1065,7 +1065,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
 const int slop_factor = 2*wordSize;

 const int fixed_size = ((sizeof(BytecodeInterpreter) + slop_factor) >> LogBytesPerWord) + // what is the slop factor?
-//6815692//Method::extra_stack_words() + // extra push slots for MH adapters
+Method::extra_stack_entries() + // extra stack for jsr 292
 frame::memory_parameter_word_sp_offset + // register save area + param window
 (native ? frame::interpreter_frame_extra_outgoing_argument_words : 0); // JNI, class

@@ -1221,9 +1221,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
 // Full size expression stack
 __ ld_ptr(constMethod, O3);
 __ lduh(O3, in_bytes(ConstMethod::max_stack_offset()), O3);
-guarantee(!EnableInvokeDynamic, "no support yet for java.lang.invoke.MethodHandle"); //6815692
-//6815692//if (EnableInvokeDynamic)
-//6815692// __ inc(O3, Method::extra_stack_entries());
+__ inc(O3, Method::extra_stack_entries());
 __ sll(O3, LogBytesPerWord, O3);
 __ sub(O2, O3, O3);
 // __ sub(O3, wordSize, O3); // so prepush doesn't look out of bounds
@@ -2084,9 +2082,7 @@ static int size_activation_helper(int callee_extra_locals, int max_stack, int mo

 const int fixed_size = sizeof(BytecodeInterpreter)/wordSize + // interpreter state object
 frame::memory_parameter_word_sp_offset; // register save area + param window
-const int extra_stack = 0; //6815692//Method::extra_stack_entries();
 return (round_to(max_stack +
-extra_stack +
 slop_factor +
 fixed_size +
 monitor_size +
@@ -2173,8 +2169,7 @@ void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
 // Need +1 here because stack_base points to the word just above the first expr stack entry
 // and stack_limit is supposed to point to the word just below the last expr stack entry.
 // See generate_compute_interpreter_state.
-int extra_stack = 0; //6815692//Method::extra_stack_entries();
-to_fill->_stack_limit = stack_base - (method->max_stack() + 1 + extra_stack);
+to_fill->_stack_limit = stack_base - (method->max_stack() + 1);
 to_fill->_monitor_base = (BasicObjectLock*) monitor_base;

 // sparc specific
@@ -521,7 +521,7 @@ void InterpreterMacroAssembler::empty_expression_stack() {
 // Compute max expression stack+register save area
 ld_ptr(Lmethod, in_bytes(Method::const_offset()), Gframe_size);
 lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size); // Load max stack.
-add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );
+add(Gframe_size, frame::memory_parameter_word_sp_offset+Method::extra_stack_entries(), Gframe_size );

 //
 // now set up a stack frame with the size computed above
@@ -507,7 +507,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {

 const int extra_space =
 rounded_vm_local_words + // frame local scratch space
-//6815692//Method::extra_stack_words() + // extra push slots for MH adapters
+Method::extra_stack_entries() + // extra stack for jsr 292
 frame::memory_parameter_word_sp_offset + // register save area
 (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

@@ -1558,7 +1558,6 @@ static int size_activation_helper(int callee_extra_locals, int max_stack, int mo
 round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
 const int max_stack_words = max_stack * Interpreter::stackElementWords;
 return (round_to((max_stack_words
-//6815692//+ Method::extra_stack_words()
 + rounded_vm_local_words
 + frame::memory_parameter_word_sp_offset), WordsPerLong)
 // already rounded
@@ -539,12 +539,11 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register

 // compute full expression stack limit

-const int extra_stack = 0; //6815692//Method::extra_stack_words();
 __ movptr(rdx, Address(rbx, Method::const_offset()));
 __ load_unsigned_short(rdx, Address(rdx, ConstMethod::max_stack_offset())); // get size of expression stack in words
 __ negptr(rdx); // so we can subtract in next step
 // Allocate expression stack
-__ lea(rsp, Address(rsp, rdx, Address::times_ptr, -extra_stack));
+__ lea(rsp, Address(rsp, rdx, Address::times_ptr, -Method::extra_stack_words()));
 __ movptr(STATE(_stack_limit), rsp);
 }

@ -692,10 +691,9 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
|
|||||||
// Always give one monitor to allow us to start interp if sync method.
|
// Always give one monitor to allow us to start interp if sync method.
|
||||||
// Any additional monitors need a check when moving the expression stack
|
// Any additional monitors need a check when moving the expression stack
|
||||||
const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
|
const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
|
||||||
const int extra_stack = 0; //6815692//Method::extra_stack_entries();
|
|
||||||
__ movptr(rax, Address(rbx, Method::const_offset()));
|
__ movptr(rax, Address(rbx, Method::const_offset()));
|
||||||
__ load_unsigned_short(rax, Address(rax, ConstMethod::max_stack_offset())); // get size of expression stack in words
|
__ load_unsigned_short(rax, Address(rax, ConstMethod::max_stack_offset())); // get size of expression stack in words
|
||||||
__ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), extra_stack + one_monitor));
|
__ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), one_monitor+Method::extra_stack_words()));
|
||||||
__ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size));
|
__ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size));
|
||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
@ -2265,8 +2263,7 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
|
|||||||
const int overhead_size = sizeof(BytecodeInterpreter)/wordSize +
|
const int overhead_size = sizeof(BytecodeInterpreter)/wordSize +
|
||||||
( frame::sender_sp_offset - frame::link_offset) + 2;
|
( frame::sender_sp_offset - frame::link_offset) + 2;
|
||||||
|
|
||||||
const int extra_stack = 0; //6815692//Method::extra_stack_entries();
|
const int method_stack = (method->max_locals() + method->max_stack()) *
|
||||||
const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
|
|
||||||
Interpreter::stackElementWords;
|
Interpreter::stackElementWords;
|
||||||
return overhead_size + method_stack + stub_code;
|
return overhead_size + method_stack + stub_code;
|
||||||
}
|
}
|
||||||
@ -2331,8 +2328,7 @@ void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
|
|||||||
// Need +1 here because stack_base points to the word just above the first expr stack entry
|
// Need +1 here because stack_base points to the word just above the first expr stack entry
|
||||||
// and stack_limit is supposed to point to the word just below the last expr stack entry.
|
// and stack_limit is supposed to point to the word just below the last expr stack entry.
|
||||||
// See generate_compute_interpreter_state.
|
// See generate_compute_interpreter_state.
|
||||||
int extra_stack = 0; //6815692//Method::extra_stack_entries();
|
to_fill->_stack_limit = stack_base - (method->max_stack() + 1);
|
||||||
to_fill->_stack_limit = stack_base - (method->max_stack() + extra_stack + 1);
|
|
||||||
to_fill->_monitor_base = (BasicObjectLock*) monitor_base;
|
to_fill->_monitor_base = (BasicObjectLock*) monitor_base;
|
||||||
|
|
||||||
to_fill->_self_link = to_fill;
|
to_fill->_self_link = to_fill;
|
||||||
@ -2380,8 +2376,7 @@ int AbstractInterpreter::layout_activation(Method* method,
|
|||||||
monitor_size);
|
monitor_size);
|
||||||
|
|
||||||
// Now with full size expression stack
|
// Now with full size expression stack
|
||||||
int extra_stack = 0; //6815692//Method::extra_stack_entries();
|
int full_frame_size = short_frame_size + method->max_stack() * BytesPerWord;
|
||||||
int full_frame_size = short_frame_size + (method->max_stack() + extra_stack) * BytesPerWord;
|
|
||||||
|
|
||||||
// and now with only live portion of the expression stack
|
// and now with only live portion of the expression stack
|
||||||
short_frame_size = short_frame_size + tempcount * BytesPerWord;
|
short_frame_size = short_frame_size + tempcount * BytesPerWord;
|
||||||
|
@ -1565,8 +1565,7 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
|
|||||||
// be sure to change this if you add/subtract anything to/from the overhead area
|
// be sure to change this if you add/subtract anything to/from the overhead area
|
||||||
const int overhead_size = -frame::interpreter_frame_initial_sp_offset;
|
const int overhead_size = -frame::interpreter_frame_initial_sp_offset;
|
||||||
|
|
||||||
const int extra_stack = Method::extra_stack_entries();
|
const int method_stack = (method->max_locals() + method->max_stack()) *
|
||||||
const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
|
|
||||||
Interpreter::stackElementWords;
|
Interpreter::stackElementWords;
|
||||||
return overhead_size + method_stack + stub_code;
|
return overhead_size + method_stack + stub_code;
|
||||||
}
|
}
|
||||||
|
@ -1574,8 +1574,7 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
|
|||||||
-(frame::interpreter_frame_initial_sp_offset) + entry_size;
|
-(frame::interpreter_frame_initial_sp_offset) + entry_size;
|
||||||
|
|
||||||
const int stub_code = frame::entry_frame_after_call_words;
|
const int stub_code = frame::entry_frame_after_call_words;
|
||||||
const int extra_stack = Method::extra_stack_entries();
|
const int method_stack = (method->max_locals() + method->max_stack()) *
|
||||||
const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
|
|
||||||
Interpreter::stackElementWords;
|
Interpreter::stackElementWords;
|
||||||
return (overhead_size + method_stack + stub_code);
|
return (overhead_size + method_stack + stub_code);
|
||||||
}
|
}
|
||||||
|
@ -626,8 +626,6 @@ void os::Bsd::hotspot_sigmask(Thread* thread) {
|
|||||||
//////////////////////////////////////////////////////////////////////////////
|
//////////////////////////////////////////////////////////////////////////////
|
||||||
// create new thread
|
// create new thread
|
||||||
|
|
||||||
static address highest_vm_reserved_address();
|
|
||||||
|
|
||||||
// check if it's safe to start a new thread
|
// check if it's safe to start a new thread
|
||||||
static bool _thread_safety_check(Thread* thread) {
|
static bool _thread_safety_check(Thread* thread) {
|
||||||
return true;
|
return true;
|
||||||
@ -935,10 +933,10 @@ jlong os::elapsed_frequency() {
|
|||||||
return (1000 * 1000);
|
return (1000 * 1000);
|
||||||
}
|
}
|
||||||
|
|
||||||
// XXX: For now, code this as if BSD does not support vtime.
|
bool os::supports_vtime() { return true; }
|
||||||
bool os::supports_vtime() { return false; }
|
|
||||||
bool os::enable_vtime() { return false; }
|
bool os::enable_vtime() { return false; }
|
||||||
bool os::vtime_enabled() { return false; }
|
bool os::vtime_enabled() { return false; }
|
||||||
|
|
||||||
double os::elapsedVTime() {
|
double os::elapsedVTime() {
|
||||||
// better than nothing, but not much
|
// better than nothing, but not much
|
||||||
return elapsedTime();
|
return elapsedTime();
|
||||||
@ -2112,10 +2110,6 @@ bool os::pd_release_memory(char* addr, size_t size) {
|
|||||||
return anon_munmap(addr, size);
|
return anon_munmap(addr, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
static address highest_vm_reserved_address() {
|
|
||||||
return _highest_vm_reserved_address;
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool bsd_mprotect(char* addr, size_t size, int prot) {
|
static bool bsd_mprotect(char* addr, size_t size, int prot) {
|
||||||
// Bsd wants the mprotect address argument to be page aligned.
|
// Bsd wants the mprotect address argument to be page aligned.
|
||||||
char* bottom = (char*)align_size_down((intptr_t)addr, os::Bsd::page_size());
|
char* bottom = (char*)align_size_down((intptr_t)addr, os::Bsd::page_size());
|
||||||
@ -2159,43 +2153,6 @@ bool os::Bsd::hugetlbfs_sanity_check(bool warn, size_t page_size) {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the coredump_filter bits to include largepages in core dump (bit 6)
|
|
||||||
*
|
|
||||||
* From the coredump_filter documentation:
|
|
||||||
*
|
|
||||||
* - (bit 0) anonymous private memory
|
|
||||||
* - (bit 1) anonymous shared memory
|
|
||||||
* - (bit 2) file-backed private memory
|
|
||||||
* - (bit 3) file-backed shared memory
|
|
||||||
* - (bit 4) ELF header pages in file-backed private memory areas (it is
|
|
||||||
* effective only if the bit 2 is cleared)
|
|
||||||
* - (bit 5) hugetlb private memory
|
|
||||||
* - (bit 6) hugetlb shared memory
|
|
||||||
*/
|
|
||||||
static void set_coredump_filter(void) {
|
|
||||||
FILE *f;
|
|
||||||
long cdm;
|
|
||||||
|
|
||||||
if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (fscanf(f, "%lx", &cdm) != 1) {
|
|
||||||
fclose(f);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
rewind(f);
|
|
||||||
|
|
||||||
if ((cdm & LARGEPAGES_BIT) == 0) {
|
|
||||||
cdm |= LARGEPAGES_BIT;
|
|
||||||
fprintf(f, "%#lx", cdm);
|
|
||||||
}
|
|
||||||
|
|
||||||
fclose(f);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Large page support
|
// Large page support
|
||||||
|
|
||||||
static size_t _large_page_size = 0;
|
static size_t _large_page_size = 0;
|
||||||
@ -3030,6 +2987,19 @@ void os::Bsd::set_signal_handler(int sig, bool set_installed) {
|
|||||||
sigAct.sa_sigaction = signalHandler;
|
sigAct.sa_sigaction = signalHandler;
|
||||||
sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
|
sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
|
||||||
}
|
}
|
||||||
|
#if __APPLE__
|
||||||
|
// Needed for main thread as XNU (Mac OS X kernel) will only deliver SIGSEGV
|
||||||
|
// (which starts as SIGBUS) on main thread with faulting address inside "stack+guard pages"
|
||||||
|
// if the signal handler declares it will handle it on alternate stack.
|
||||||
|
// Notice we only declare we will handle it on alt stack, but we are not
|
||||||
|
// actually going to use real alt stack - this is just a workaround.
|
||||||
|
// Please see ux_exception.c, method catch_mach_exception_raise for details
|
||||||
|
// link http://www.opensource.apple.com/source/xnu/xnu-2050.18.24/bsd/uxkern/ux_exception.c
|
||||||
|
if (sig == SIGSEGV) {
|
||||||
|
sigAct.sa_flags |= SA_ONSTACK;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
// Save flags, which are set by ours
|
// Save flags, which are set by ours
|
||||||
assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
|
assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
|
||||||
sigflags[sig] = sigAct.sa_flags;
|
sigflags[sig] = sigAct.sa_flags;
|
||||||
|
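Note that the hunk above only ORs SA_ONSTACK into the flags for SIGSEGV on OS X; no real alternate stack is installed. A minimal standalone sketch of the same sigaction setup, not HotSpot code (the handler and its output are illustrative only):

    #include <signal.h>
    #include <cstdio>
    #include <cstdlib>

    // Print-and-exit handler; fprintf is not async-signal-safe, which is
    // acceptable for a demonstration but not for production code.
    static void segv_handler(int sig, siginfo_t* info, void*) {
      std::fprintf(stderr, "signal %d at address %p\n", sig, info->si_addr);
      std::_Exit(1);
    }

    int main() {
      struct sigaction sigAct;
      sigemptyset(&sigAct.sa_mask);
      sigAct.sa_sigaction = segv_handler;
      sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
    #if defined(__APPLE__)
      // Same workaround as in the hunk above: declare (but do not install)
      // an alternate stack so XNU delivers in-stack faults to the handler.
      sigAct.sa_flags |= SA_ONSTACK;
    #endif
      if (sigaction(SIGSEGV, &sigAct, NULL) != 0) {
        std::perror("sigaction");
        return 1;
      }
      std::puts("SIGSEGV handler installed");
      return 0;
    }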
@ -101,6 +101,12 @@
|
|||||||
# include <inttypes.h>
|
# include <inttypes.h>
|
||||||
# include <sys/ioctl.h>
|
# include <sys/ioctl.h>
|
||||||
|
|
||||||
|
// if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
|
||||||
|
// getrusage() is prepared to handle the associated failure.
|
||||||
|
#ifndef RUSAGE_THREAD
|
||||||
|
#define RUSAGE_THREAD (1) /* only the calling thread */
|
||||||
|
#endif
|
||||||
|
|
||||||
#define MAX_PATH (2 * K)
|
#define MAX_PATH (2 * K)
|
||||||
|
|
||||||
// for timer info max values which include all bits
|
// for timer info max values which include all bits
|
||||||
@ -1336,15 +1342,19 @@ jlong os::elapsed_frequency() {
|
|||||||
return (1000 * 1000);
|
return (1000 * 1000);
|
||||||
}
|
}
|
||||||
|
|
||||||
// For now, we say that linux does not support vtime. I have no idea
|
bool os::supports_vtime() { return true; }
|
||||||
// whether it can actually be made to (DLD, 9/13/05).
|
|
||||||
|
|
||||||
bool os::supports_vtime() { return false; }
|
|
||||||
bool os::enable_vtime() { return false; }
|
bool os::enable_vtime() { return false; }
|
||||||
bool os::vtime_enabled() { return false; }
|
bool os::vtime_enabled() { return false; }
|
||||||
|
|
||||||
double os::elapsedVTime() {
|
double os::elapsedVTime() {
|
||||||
// better than nothing, but not much
|
struct rusage usage;
|
||||||
return elapsedTime();
|
int retval = getrusage(RUSAGE_THREAD, &usage);
|
||||||
|
if (retval == 0) {
|
||||||
|
return (double) (usage.ru_utime.tv_sec + usage.ru_stime.tv_sec) + (double) (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000 * 1000);
|
||||||
|
} else {
|
||||||
|
// better than nothing, but not much
|
||||||
|
return elapsedTime();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
jlong os::javaTimeMillis() {
|
jlong os::javaTimeMillis() {
|
||||||
|
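For reference, the per-thread CPU time that the new os::elapsedVTime() reports can be reproduced in a small standalone program. This is a sketch under the same RUSAGE_THREAD assumption as the hunk above, not HotSpot code:

    #include <sys/resource.h>
    #include <cstdio>

    // User + system CPU time of the calling thread, in seconds, or -1.0 if
    // getrusage(RUSAGE_THREAD, ...) is unavailable or fails.
    static double thread_cpu_seconds() {
    #ifdef RUSAGE_THREAD
      struct rusage usage;
      if (getrusage(RUSAGE_THREAD, &usage) == 0) {
        return (double)(usage.ru_utime.tv_sec  + usage.ru_stime.tv_sec) +
               (double)(usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000 * 1000);
      }
    #endif
      return -1.0;  // a caller would fall back to wall-clock time, as the hunk does
    }

    int main() {
      std::printf("thread CPU time: %f s\n", thread_cpu_seconds());
      return 0;
    }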
@@ -813,15 +813,21 @@ FILETIME java_to_windows_time(jlong l) {
 return result;
 }
 
-// For now, we say that Windows does not support vtime. I have no idea
-// whether it can actually be made to (DLD, 9/13/05).
-bool os::supports_vtime() { return false; }
+bool os::supports_vtime() { return true; }
 bool os::enable_vtime() { return false; }
 bool os::vtime_enabled() { return false; }
 
 double os::elapsedVTime() {
-// better than nothing, but not much
-return elapsedTime();
+FILETIME created;
+FILETIME exited;
+FILETIME kernel;
+FILETIME user;
+if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
+// the resolution of windows_to_java_time() should be sufficient (ms)
+return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
+} else {
+return elapsedTime();
+}
 }
 
 jlong os::javaTimeMillis() {
|
|||||||
MINIDUMP_TYPE dumpType;
|
MINIDUMP_TYPE dumpType;
|
||||||
static const char* cwd;
|
static const char* cwd;
|
||||||
|
|
||||||
|
// Default is to always create dump for debug builds, on product builds only dump on server versions of Windows.
|
||||||
|
#ifndef ASSERT
|
||||||
// If running on a client version of Windows and user has not explicitly enabled dumping
|
// If running on a client version of Windows and user has not explicitly enabled dumping
|
||||||
if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
|
if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
|
||||||
VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
|
VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
|
||||||
@ -953,6 +961,12 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
|
|||||||
VMError::report_coredump_status("Minidump has been disabled from the command line", false);
|
VMError::report_coredump_status("Minidump has been disabled from the command line", false);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
#else
|
||||||
|
if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
|
||||||
|
VMError::report_coredump_status("Minidump has been disabled from the command line", false);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
|
dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
|
||||||
|
|
||||||
@ -1004,7 +1018,21 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
|
|||||||
// the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
|
// the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
|
||||||
if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
|
if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
|
||||||
_MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
|
_MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
|
||||||
VMError::report_coredump_status("Call to MiniDumpWriteDump() failed", false);
|
DWORD error = GetLastError();
|
||||||
|
LPTSTR msgbuf = NULL;
|
||||||
|
|
||||||
|
if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
|
||||||
|
FORMAT_MESSAGE_FROM_SYSTEM |
|
||||||
|
FORMAT_MESSAGE_IGNORE_INSERTS,
|
||||||
|
NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {
|
||||||
|
|
||||||
|
jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
|
||||||
|
LocalFree(msgbuf);
|
||||||
|
} else {
|
||||||
|
// Call to FormatMessage failed, just include the result from GetLastError
|
||||||
|
jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
|
||||||
|
}
|
||||||
|
VMError::report_coredump_status(buffer, false);
|
||||||
} else {
|
} else {
|
||||||
VMError::report_coredump_status(buffer, true);
|
VMError::report_coredump_status(buffer, true);
|
||||||
}
|
}
|
||||||
|
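A standalone sketch of the FormatMessage pattern used above to turn GetLastError() into a readable message, not HotSpot code (it uses the ANSI FormatMessageA variant and plain fprintf instead of jio_snprintf, and the failing call is just an example):

    #include <windows.h>
    #include <cstdio>

    // Report the last Win32 error for the named operation, with text if possible.
    static void report_last_error(const char* what) {
      DWORD error = GetLastError();
      LPSTR msgbuf = NULL;
      if (FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                         FORMAT_MESSAGE_FROM_SYSTEM |
                         FORMAT_MESSAGE_IGNORE_INSERTS,
                         NULL, error, 0, (LPSTR)&msgbuf, 0, NULL) != 0) {
        std::fprintf(stderr, "%s failed (Error 0x%lx: %s)\n", what, (unsigned long)error, msgbuf);
        LocalFree(msgbuf);  // FormatMessage allocated the buffer for us
      } else {
        // FormatMessage itself failed; fall back to the numeric code only.
        std::fprintf(stderr, "%s failed (Error 0x%lx)\n", what, (unsigned long)error);
      }
    }

    int main() {
      // Force a failure so there is something to report.
      HANDLE h = CreateFileA("no_such_file_hopefully.tmp", GENERIC_READ, 0, NULL,
                             OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
      if (h == INVALID_HANDLE_VALUE) {
        report_last_error("CreateFileA()");
      } else {
        CloseHandle(h);
      }
      return 0;
    }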
@@ -241,7 +241,7 @@ acb_CopyLeft:
 jbe 2f # <= 32 dwords
 rep; smovl
 jmp 4f
-.=.+8
+.space 8
 2: subl %esi,%edi
 .p2align 4,,15
 3: movl (%esi),%edx
@@ -378,7 +378,7 @@ acs_CopyRight:
 rep; smovl
 jmp 4f
 # copy aligned dwords
-.=.+5
+.space 5
 2: subl %esi,%edi
 .p2align 4,,15
 3: movl (%esi),%edx
@@ -454,7 +454,7 @@ ci_CopyRight:
 popl %edi
 popl %esi
 ret
-.=.+10
+.space 10
 2: subl %esi,%edi
 jmp 4f
 .p2align 4,,15
@@ -93,6 +93,10 @@ address os::current_stack_pointer() {
 register void *esp;
 __asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
 return (address) ((char*)esp + sizeof(long)*2);
+#elif defined(__clang__)
+intptr_t* esp;
+__asm__ __volatile__ ("mov %%"SPELL_REG_SP", %0":"=r"(esp):);
+return (address) esp;
 #else
 register void *esp __asm__ (SPELL_REG_SP);
 return (address) esp;
@@ -175,6 +179,9 @@ intptr_t* _get_previous_fp() {
 #ifdef SPARC_WORKS
 register intptr_t **ebp;
 __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
+#elif defined(__clang__)
+intptr_t **ebp;
+__asm__ __volatile__ ("mov %%"SPELL_REG_FP", %0":"=r"(ebp):);
 #else
 register intptr_t **ebp __asm__ (SPELL_REG_FP);
 #endif
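The new __clang__ branches exist because the old "register ... __asm__(reg)" local-register idiom used in the default path is not handled reliably by clang, so the register is read with an explicit inline-asm move instead. A standalone x86-64 sketch of that style, not HotSpot code (SPELL_REG_SP is replaced by a hard-coded rsp, and other toolchains are simply left unsupported):

    #include <cstdio>

    // Approximate current stack pointer via an explicit inline-asm move,
    // the approach the clang branch above uses.
    static void* current_stack_pointer() {
    #if defined(__x86_64__) && (defined(__clang__) || defined(__GNUC__))
      void* sp;
      __asm__ __volatile__ ("mov %%rsp, %0" : "=r"(sp) : );
      return sp;
    #else
      return NULL;  // other toolchains/architectures are out of scope for this sketch
    #endif
    }

    int main() {
      std::printf("approximate stack pointer: %p\n", current_stack_pointer());
      return 0;
    }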
@@ -29,8 +29,8 @@
 static FILE *errfile = stderr;
 
 //--------------------------- utility functions -----------------------------
 inline char toUpper(char lower) {
-return (('a' <= lower && lower <= 'z') ? (lower + ('A'-'a')) : lower);
+return (('a' <= lower && lower <= 'z') ? ((char) (lower + ('A'-'a'))) : lower);
 }
 char *toUpper(const char *str) {
 char *upper = new char[strlen(str)+1];
@@ -64,18 +64,18 @@ void Dict::init() {
 int i;
 
 // Precompute table of null character hashes
-if( !initflag ) { // Not initializated yet?
-xsum[0] = (1<<shft[0])+1; // Initialize
+if (!initflag) { // Not initializated yet?
+xsum[0] = (short) ((1 << shft[0]) + 1); // Initialize
 for( i = 1; i < MAXID; i++) {
-xsum[i] = (1<<shft[i])+1+xsum[i-1];
+xsum[i] = (short) ((1 << shft[i]) + 1 + xsum[i-1]);
 }
 initflag = 1; // Never again
 }
 
 _size = 16; // Size is a power of 2
 _cnt = 0; // Dictionary is empty
-_bin = (bucket*)_arena->Amalloc_4(sizeof(bucket)*_size);
-memset(_bin,0,sizeof(bucket)*_size);
+_bin = (bucket*)_arena->Amalloc_4(sizeof(bucket) * _size);
+memset(_bin, 0, sizeof(bucket) * _size);
 }
 
 //------------------------------~Dict------------------------------------------
@@ -287,11 +287,11 @@ int hashstr(const void *t) {
 register int sum = 0;
 register const char *s = (const char *)t;
 
-while( ((c = s[k]) != '\0') && (k < MAXID-1) ) { // Get characters till nul
-c = (c<<1)+1; // Characters are always odd!
-sum += c + (c<<shft[k++]); // Universal hash function
+while (((c = s[k]) != '\0') && (k < MAXID-1)) { // Get characters till nul
+c = (char) ((c << 1) + 1); // Characters are always odd!
+sum += c + (c << shft[k++]); // Universal hash function
 }
-assert( k < (MAXID), "Exceeded maximum name length");
+assert(k < (MAXID), "Exceeded maximum name length");
 return (int)((sum+xsum[k]) >> 1); // Hash key, un-modulo'd table size
 }
 
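For reference, the hash that hashstr() computes: each character is forced odd, mixed in with a per-position shift, and the precomputed xsum[] table accounts for the terminating position. A standalone sketch, not adlc code (the shift table and MAXID below are illustrative values, not necessarily the ones adlc uses):

    #include <cstdio>

    static const int MAXID = 20;                       // assumed cap on key length
    static const char shft[MAXID] = {1,2,3,4,5,6,7,1,2,3,4,5,6,7,1,2,3,4,5,6};
    static short xsum[MAXID];

    // Precompute table of null character hashes, as Dict::init() does above.
    static void init_xsum() {
      xsum[0] = (short)((1 << shft[0]) + 1);
      for (int i = 1; i < MAXID; i++) {
        xsum[i] = (short)((1 << shft[i]) + 1 + xsum[i-1]);
      }
    }

    static int hashstr(const char* s) {
      int k = 0;
      int sum = 0;
      char c;
      while (((c = s[k]) != '\0') && (k < MAXID-1)) {  // stop at nul or the cap
        c = (char)((c << 1) + 1);                      // characters are always odd
        sum += c + (c << shft[k++]);                   // universal hash mixing
      }
      return (int)((sum + xsum[k]) >> 1);              // un-modulo'd hash key
    }

    int main() {
      init_xsum();
      std::printf("hash(\"iRegI\") = %d\n", hashstr("iRegI"));
      return 0;
    }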
@@ -796,11 +796,11 @@ uint InstructForm::num_opnds() {
 return num_opnds;
 }
 
-const char *InstructForm::opnd_ident(int idx) {
+const char* InstructForm::opnd_ident(int idx) {
 return _components.at(idx)->_name;
 }
 
-const char *InstructForm::unique_opnd_ident(int idx) {
+const char* InstructForm::unique_opnd_ident(uint idx) {
 uint i;
 for (i = 1; i < num_opnds(); ++i) {
 if (unique_opnds_idx(i) == idx) {
@@ -1315,36 +1315,36 @@ void InstructForm::rep_var_format(FILE *fp, const char *rep_var) {
 // Seach through operands to determine parameters unique positions.
 void InstructForm::set_unique_opnds() {
 uint* uniq_idx = NULL;
-int nopnds = num_opnds();
+uint nopnds = num_opnds();
 uint num_uniq = nopnds;
-int i;
+uint i;
 _uniq_idx_length = 0;
-if ( nopnds > 0 ) {
+if (nopnds > 0) {
 // Allocate index array. Worst case we're mapping from each
 // component back to an index and any DEF always goes at 0 so the
 // length of the array has to be the number of components + 1.
 _uniq_idx_length = _components.count() + 1;
-uniq_idx = (uint*) malloc(sizeof(uint)*(_uniq_idx_length));
-for( i = 0; i < _uniq_idx_length; i++ ) {
+uniq_idx = (uint*) malloc(sizeof(uint) * _uniq_idx_length);
+for (i = 0; i < _uniq_idx_length; i++) {
 uniq_idx[i] = i;
 }
 }
 // Do it only if there is a match rule and no expand rule. With an
 // expand rule it is done by creating new mach node in Expand()
 // method.
-if ( nopnds > 0 && _matrule != NULL && _exprule == NULL ) {
+if (nopnds > 0 && _matrule != NULL && _exprule == NULL) {
 const char *name;
 uint count;
 bool has_dupl_use = false;
 
 _parameters.reset();
-while( (name = _parameters.iter()) != NULL ) {
+while ((name = _parameters.iter()) != NULL) {
 count = 0;
-int position = 0;
-int uniq_position = 0;
+uint position = 0;
+uint uniq_position = 0;
 _components.reset();
 Component *comp = NULL;
-if( sets_result() ) {
+if (sets_result()) {
 comp = _components.iter();
 position++;
 }
@@ -1352,11 +1352,11 @@ void InstructForm::set_unique_opnds() {
 for (; (comp = _components.iter()) != NULL; ++position) {
 // When the first component is not a DEF,
 // leave space for the result operand!
-if ( position==0 && (! comp->isa(Component::DEF)) ) {
+if (position==0 && (!comp->isa(Component::DEF))) {
 ++position;
 }
-if( strcmp(name, comp->_name)==0 ) {
-if( ++count > 1 ) {
+if (strcmp(name, comp->_name) == 0) {
+if (++count > 1) {
 assert(position < _uniq_idx_length, "out of bounds");
 uniq_idx[position] = uniq_position;
 has_dupl_use = true;
@@ -1364,22 +1364,25 @@ void InstructForm::set_unique_opnds() {
 uniq_position = position;
 }
 }
-if( comp->isa(Component::DEF)
-&& comp->isa(Component::USE) ) {
+if (comp->isa(Component::DEF) && comp->isa(Component::USE)) {
 ++position;
-if( position != 1 )
+if (position != 1)
 --position; // only use two slots for the 1st USE_DEF
 }
 }
 }
-if( has_dupl_use ) {
-for( i = 1; i < nopnds; i++ )
-if( i != uniq_idx[i] )
+if (has_dupl_use) {
+for (i = 1; i < nopnds; i++) {
+if (i != uniq_idx[i]) {
 break;
-int j = i;
-for( ; i < nopnds; i++ )
-if( i == uniq_idx[i] )
+}
+}
+uint j = i;
+for (; i < nopnds; i++) {
+if (i == uniq_idx[i]) {
 uniq_idx[i] = j++;
+}
+}
 num_uniq = j;
 }
 }
@@ -2216,21 +2219,27 @@ RegClass* OperandForm::get_RegClass() const {
 
 
 bool OperandForm::is_bound_register() const {
-RegClass *reg_class = get_RegClass();
-if (reg_class == NULL) return false;
-const char * name = ideal_type(globalAD->globalNames());
-if (name == NULL) return false;
-int size = 0;
-if (strcmp(name,"RegFlags")==0) size = 1;
-if (strcmp(name,"RegI")==0) size = 1;
-if (strcmp(name,"RegF")==0) size = 1;
-if (strcmp(name,"RegD")==0) size = 2;
-if (strcmp(name,"RegL")==0) size = 2;
-if (strcmp(name,"RegN")==0) size = 1;
-if (strcmp(name,"RegP")==0) size = globalAD->get_preproc_def("_LP64") ? 2 : 1;
-if (size == 0) return false;
+RegClass* reg_class = get_RegClass();
+if (reg_class == NULL) {
+return false;
+}
+
+const char* name = ideal_type(globalAD->globalNames());
+if (name == NULL) {
+return false;
+}
+
+uint size = 0;
+if (strcmp(name, "RegFlags") == 0) size = 1;
+if (strcmp(name, "RegI") == 0) size = 1;
+if (strcmp(name, "RegF") == 0) size = 1;
+if (strcmp(name, "RegD") == 0) size = 2;
+if (strcmp(name, "RegL") == 0) size = 2;
+if (strcmp(name, "RegN") == 0) size = 1;
+if (strcmp(name, "RegP") == 0) size = globalAD->get_preproc_def("_LP64") ? 2 : 1;
+if (size == 0) {
+return false;
+}
 return size == reg_class->size();
 }
 
@@ -106,7 +106,7 @@ public:
 const char *_ins_pipe; // Instruction Scheduling description class
 
 uint *_uniq_idx; // Indexes of unique operands
-int _uniq_idx_length; // Length of _uniq_idx array
+uint _uniq_idx_length; // Length of _uniq_idx array
 uint _num_uniq; // Number of unique operands
 ComponentList _components; // List of Components matches MachNode's
 // operand structure
@@ -272,14 +272,14 @@ public:
 void set_unique_opnds();
 uint num_unique_opnds() { return _num_uniq; }
 uint unique_opnds_idx(int idx) {
-if( _uniq_idx != NULL && idx > 0 ) {
-assert(idx < _uniq_idx_length, "out of bounds");
+if (_uniq_idx != NULL && idx > 0) {
+assert((uint)idx < _uniq_idx_length, "out of bounds");
 return _uniq_idx[idx];
 } else {
 return idx;
 }
 }
-const char *unique_opnd_ident(int idx); // Name of operand at unique idx.
+const char *unique_opnd_ident(uint idx); // Name of operand at unique idx.
 
 // Operands which are only KILLs aren't part of the input array and
 // require special handling in some cases. Their position in this
@@ -463,8 +463,9 @@ static int pipeline_res_mask_initializer(
 uint resources_used_exclusively = 0;
 
 for (pipeclass->_resUsage.reset();
-(piperesource = (const PipeClassResourceForm *)pipeclass->_resUsage.iter()) != NULL; )
+(piperesource = (const PipeClassResourceForm*)pipeclass->_resUsage.iter()) != NULL; ) {
 element_count++;
+}
 
 // Pre-compute the string length
 int templen;
@@ -482,8 +483,8 @@ static int pipeline_res_mask_initializer(
 for (i = rescount; i > 0; i /= 10)
 maskdigit++;
 
-static const char * pipeline_use_cycle_mask = "Pipeline_Use_Cycle_Mask";
-static const char * pipeline_use_element = "Pipeline_Use_Element";
+static const char* pipeline_use_cycle_mask = "Pipeline_Use_Cycle_Mask";
+static const char* pipeline_use_element = "Pipeline_Use_Element";
 
 templen = 1 +
 (int)(strlen(pipeline_use_cycle_mask) + (int)strlen(pipeline_use_element) +
@@ -496,11 +497,12 @@ static int pipeline_res_mask_initializer(
 templen = 0;
 
 for (pipeclass->_resUsage.reset();
-(piperesource = (const PipeClassResourceForm *)pipeclass->_resUsage.iter()) != NULL; ) {
+(piperesource = (const PipeClassResourceForm*)pipeclass->_resUsage.iter()) != NULL; ) {
 int used_mask = pipeline->_resdict[piperesource->_resource]->is_resource()->mask();
 
-if (!used_mask)
+if (!used_mask) {
 fprintf(stderr, "*** used_mask is 0 ***\n");
+}
 
 resources_used |= used_mask;
 
@@ -509,8 +511,9 @@ static int pipeline_res_mask_initializer(
 for (lb = 0; (used_mask & (1 << lb)) == 0; lb++);
 for (ub = 31; (used_mask & (1 << ub)) == 0; ub--);
 
-if (lb == ub)
+if (lb == ub) {
 resources_used_exclusively |= used_mask;
+}
 
 int formatlen =
 sprintf(&resource_mask[templen], " %s(0x%0*x, %*d, %*d, %s %s(",
@@ -526,7 +529,7 @@ static int pipeline_res_mask_initializer(
 
 int cycles = piperesource->_cycles;
 uint stage = pipeline->_stages.index(piperesource->_stage);
-if (NameList::Not_in_list == stage) {
+if ((uint)NameList::Not_in_list == stage) {
 fprintf(stderr,
 "pipeline_res_mask_initializer: "
 "semantic error: "
@@ -534,8 +537,8 @@ static int pipeline_res_mask_initializer(
 piperesource->_stage);
 exit(1);
 }
-uint upper_limit = stage+cycles-1;
-uint lower_limit = stage-1;
+uint upper_limit = stage + cycles - 1;
+uint lower_limit = stage - 1;
 uint upper_idx = upper_limit >> 5;
 uint lower_idx = lower_limit >> 5;
 uint upper_position = upper_limit & 0x1f;
@@ -543,7 +546,7 @@ static int pipeline_res_mask_initializer(
 
 uint mask = (((uint)1) << upper_position) - 1;
 
-while ( upper_idx > lower_idx ) {
+while (upper_idx > lower_idx) {
 res_mask[upper_idx--] |= mask;
 mask = (uint)-1;
 }
@@ -565,8 +568,9 @@ static int pipeline_res_mask_initializer(
 }
 
 resource_mask[templen] = 0;
-if (last_comma)
+if (last_comma) {
 last_comma[0] = ' ';
+}
 
 // See if the same string is in the table
 int ndx = pipeline_res_mask.index(resource_mask);
@@ -580,7 +584,7 @@ static int pipeline_res_mask_initializer(
 fprintf(fp_cpp, "static const Pipeline_Use_Element pipeline_res_mask_%03d[%d] = {\n%s};\n\n",
 ndx+1, element_count, resource_mask);
 
-char * args = new char [9 + 2*masklen + maskdigit];
+char* args = new char [9 + 2*masklen + maskdigit];
 
 sprintf(args, "0x%0*x, 0x%0*x, %*d",
 masklen, resources_used,
@@ -589,8 +593,9 @@ static int pipeline_res_mask_initializer(
 
 pipeline_res_args.addName(args);
 }
-else
+else {
 delete [] resource_mask;
+}
 
 delete [] res_mask;
 //delete [] res_masks;
@@ -1787,7 +1792,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
 // Skip first unique operands.
 for( i = 1; i < cur_num_opnds; i++ ) {
 comp = node->_components.iter();
-if( (int)i != node->unique_opnds_idx(i) ) {
+if (i != node->unique_opnds_idx(i)) {
 break;
 }
 new_num_opnds++;
@@ -1795,7 +1800,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
 // Replace not unique operands with next unique operands.
 for( ; i < cur_num_opnds; i++ ) {
 comp = node->_components.iter();
-int j = node->unique_opnds_idx(i);
+uint j = node->unique_opnds_idx(i);
 // unique_opnds_idx(i) is unique if unique_opnds_idx(j) is not unique.
 if( j != node->unique_opnds_idx(j) ) {
 fprintf(fp," set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n",
@@ -2232,6 +2232,7 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
 // We still need to continue with the checks.
 if (src.is_constant()) {
 ciObject* src_con = src.get_jobject_constant();
+guarantee(src_con != NULL, "no source constant");
 
 if (src_con->is_null_object()) {
 // The constant src object is null - We can skip
@@ -444,8 +444,8 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
 break;
 case JVM_REF_invokeStatic:
 case JVM_REF_invokeSpecial:
-check_property(
-tag.is_method() || tag.is_interface_method(),
+check_property(tag.is_method() ||
+((_major_version >= JAVA_8_VERSION) && tag.is_interface_method()),
 "Invalid constant pool index %u in class file %s (not a method)",
 ref_index, CHECK_(nullHandle));
 break;
@@ -3152,7 +3152,6 @@ void ClassFileParser::layout_fields(Handle class_loader,
 }
 }
 }
-int contended_count = nonstatic_contended_count;
 
 
 // Calculate the starting byte offsets
@@ -3177,35 +3176,52 @@ void ClassFileParser::layout_fields(Handle class_loader,
 
 next_nonstatic_field_offset = nonstatic_fields_start;
 
+bool is_contended_class = parsed_annotations->is_contended();
+
 // Class is contended, pad before all the fields
-if (parsed_annotations->is_contended()) {
+if (is_contended_class) {
 next_nonstatic_field_offset += ContendedPaddingWidth;
 }
 
-// Compute the non-contended fields count
+// Compute the non-contended fields count.
+// The packing code below relies on these counts to determine if some field
+// can be squeezed into the alignment gap. Contended fields are obviously
+// exempt from that.
 unsigned int nonstatic_double_count = fac->count[NONSTATIC_DOUBLE] - fac_contended.count[NONSTATIC_DOUBLE];
 unsigned int nonstatic_word_count = fac->count[NONSTATIC_WORD] - fac_contended.count[NONSTATIC_WORD];
 unsigned int nonstatic_short_count = fac->count[NONSTATIC_SHORT] - fac_contended.count[NONSTATIC_SHORT];
 unsigned int nonstatic_byte_count = fac->count[NONSTATIC_BYTE] - fac_contended.count[NONSTATIC_BYTE];
 unsigned int nonstatic_oop_count = fac->count[NONSTATIC_OOP] - fac_contended.count[NONSTATIC_OOP];
 
+// Total non-static fields count, including every contended field
+unsigned int nonstatic_fields_count = fac->count[NONSTATIC_DOUBLE] + fac->count[NONSTATIC_WORD] +
+fac->count[NONSTATIC_SHORT] + fac->count[NONSTATIC_BYTE] +
+fac->count[NONSTATIC_OOP];
+
 bool super_has_nonstatic_fields =
 (_super_klass() != NULL && _super_klass->has_nonstatic_fields());
-bool has_nonstatic_fields = super_has_nonstatic_fields ||
-((nonstatic_double_count + nonstatic_word_count +
-nonstatic_short_count + nonstatic_byte_count +
-nonstatic_oop_count) != 0);
+bool has_nonstatic_fields = super_has_nonstatic_fields || (nonstatic_fields_count != 0);
 
 
 // Prepare list of oops for oop map generation.
+//
+// "offset" and "count" lists are describing the set of contiguous oop
+// regions. offset[i] is the start of the i-th region, which then has
+// count[i] oops following. Before we know how many regions are required,
+// we pessimistically allocate the maps to fit all the oops into the
+// distinct regions.
+//
+// TODO: We add +1 to always allocate non-zero resource arrays; we need
+// to figure out if we still need to do this.
 int* nonstatic_oop_offsets;
 unsigned int* nonstatic_oop_counts;
 unsigned int nonstatic_oop_map_count = 0;
+unsigned int max_nonstatic_oop_maps = fac->count[NONSTATIC_OOP] + 1;
 
 nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD(
-THREAD, int, nonstatic_oop_count + 1);
+THREAD, int, max_nonstatic_oop_maps);
 nonstatic_oop_counts = NEW_RESOURCE_ARRAY_IN_THREAD(
-THREAD, unsigned int, nonstatic_oop_count + 1);
+THREAD, unsigned int, max_nonstatic_oop_maps);
 
 first_nonstatic_oop_offset = 0; // will be set for first oop field
 
@@ -3392,9 +3408,11 @@ void ClassFileParser::layout_fields(Handle class_loader,
 int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) *
 heapOopSize ) {
 // Extend current oop map
+assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check");
 nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1;
 } else {
 // Create new oop map
+assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check");
 nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
 nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
 nonstatic_oop_map_count += 1;
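The asserts added above guard the oop-map arrays whose sizing changed to max_nonstatic_oop_maps: runs of adjacent oop fields are stored as (offset, count) regions, and one region per oop is the worst case, so the index can never overflow. A standalone sketch of that run-length bookkeeping, not HotSpot code (the names and sample offsets are made up):

    #include <cstdio>
    #include <vector>

    struct OopRegion { int offset; unsigned int count; };

    // Collapse ascending oop field offsets (spaced oop_size apart when adjacent)
    // into (start offset, count) regions, reserving one region per oop up front.
    static std::vector<OopRegion> build_oop_maps(const std::vector<int>& offsets, int oop_size) {
      std::vector<OopRegion> maps;
      maps.reserve(offsets.size() + 1);            // pessimistic upper bound
      for (int off : offsets) {
        if (!maps.empty() &&
            off == maps.back().offset + (int)maps.back().count * oop_size) {
          maps.back().count += 1;                  // extend current region
        } else {
          OopRegion r = { off, 1 };                // start a new region
          maps.push_back(r);
        }
      }
      return maps;
    }

    int main() {
      const int oop_size = 8;
      std::vector<int> offsets = { 16, 24, 32, 56, 64 };   // two contiguous runs
      std::vector<OopRegion> maps = build_oop_maps(offsets, oop_size);
      for (const OopRegion& r : maps) {
        std::printf("region at %d, %u oops\n", r.offset, r.count);
      }
      return 0;
    }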
@ -3452,12 +3470,10 @@ void ClassFileParser::layout_fields(Handle class_loader,
|
|||||||
//
|
//
|
||||||
// Additionally, this should not break alignment for the fields, so we round the alignment up
|
// Additionally, this should not break alignment for the fields, so we round the alignment up
|
||||||
// for each field.
|
// for each field.
|
||||||
if (contended_count > 0) {
|
if (nonstatic_contended_count > 0) {
|
||||||
|
|
||||||
// if there is at least one contended field, we need to have pre-padding for them
|
// if there is at least one contended field, we need to have pre-padding for them
|
||||||
if (nonstatic_contended_count > 0) {
|
next_nonstatic_padded_offset += ContendedPaddingWidth;
|
||||||
next_nonstatic_padded_offset += ContendedPaddingWidth;
|
|
||||||
}
|
|
||||||
|
|
||||||
// collect all contended groups
|
// collect all contended groups
|
||||||
BitMap bm(_cp->size());
|
BitMap bm(_cp->size());
|
||||||
@ -3518,6 +3534,7 @@ void ClassFileParser::layout_fields(Handle class_loader,
|
|||||||
next_nonstatic_padded_offset += heapOopSize;
|
next_nonstatic_padded_offset += heapOopSize;
|
||||||
|
|
||||||
// Create new oop map
|
// Create new oop map
|
||||||
|
assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check");
|
||||||
nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
|
nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
|
||||||
nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
|
nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
|
||||||
nonstatic_oop_map_count += 1;
|
nonstatic_oop_map_count += 1;
|
||||||
@ -3554,18 +3571,17 @@ void ClassFileParser::layout_fields(Handle class_loader,
|
|||||||
// handle static fields
|
// handle static fields
|
||||||
}
|
}
|
||||||
|
|
||||||
// Size of instances
|
|
||||||
int notaligned_offset = next_nonstatic_padded_offset;
|
|
||||||
|
|
||||||
// Entire class is contended, pad in the back.
|
// Entire class is contended, pad in the back.
|
||||||
// This helps to alleviate memory contention effects for subclass fields
|
// This helps to alleviate memory contention effects for subclass fields
|
||||||
// and/or adjacent object.
|
// and/or adjacent object.
|
||||||
if (parsed_annotations->is_contended()) {
|
if (is_contended_class) {
|
||||||
notaligned_offset += ContendedPaddingWidth;
|
next_nonstatic_padded_offset += ContendedPaddingWidth;
|
||||||
}
|
}
|
||||||
|
|
||||||
int nonstatic_fields_end = align_size_up(notaligned_offset, heapOopSize);
|
int notaligned_nonstatic_fields_end = next_nonstatic_padded_offset;
|
||||||
int instance_end = align_size_up(notaligned_offset, wordSize);
|
|
||||||
|
int nonstatic_fields_end = align_size_up(notaligned_nonstatic_fields_end, heapOopSize);
|
||||||
|
int instance_end = align_size_up(notaligned_nonstatic_fields_end, wordSize);
|
||||||
int static_fields_end = align_size_up(next_static_byte_offset, wordSize);
|
int static_fields_end = align_size_up(next_static_byte_offset, wordSize);
|
||||||
|
|
||||||
int static_field_size = (static_fields_end -
|
int static_field_size = (static_fields_end -
|
||||||
@ -3579,6 +3595,14 @@ void ClassFileParser::layout_fields(Handle class_loader,
|
|||||||
(instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize),
|
(instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize),
|
||||||
wordSize) / wordSize), "consistent layout helper value");
|
wordSize) / wordSize), "consistent layout helper value");
|
||||||
|
|
||||||
|
// Invariant: nonstatic_field end/start should only change if there are
|
||||||
|
// nonstatic fields in the class, or if the class is contended. We compare
|
||||||
|
// against the non-aligned value, so that end alignment will not fail the
|
||||||
|
// assert without actually having the fields.
|
||||||
|
assert((notaligned_nonstatic_fields_end == nonstatic_fields_start) ||
|
||||||
|
is_contended_class ||
|
||||||
|
(nonstatic_fields_count > 0), "double-check nonstatic start/end");
|
||||||
|
|
||||||
// Number of non-static oop map blocks allocated at end of klass.
|
// Number of non-static oop map blocks allocated at end of klass.
|
||||||
const unsigned int total_oop_map_count =
|
const unsigned int total_oop_map_count =
|
||||||
compute_oop_map_count(_super_klass, nonstatic_oop_map_count,
|
compute_oop_map_count(_super_klass, nonstatic_oop_map_count,
|
||||||
@ -4040,6 +4064,9 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Allocate mirror and initialize static fields
|
||||||
|
java_lang_Class::create_mirror(this_klass, protection_domain, CHECK_(nullHandle));
|
||||||
|
|
||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
if (ParseAllGenericSignatures) {
|
if (ParseAllGenericSignatures) {
|
||||||
@ -4055,17 +4082,6 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
|
|||||||
this_klass(), &all_mirandas, CHECK_(nullHandle));
|
this_klass(), &all_mirandas, CHECK_(nullHandle));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Allocate mirror and initialize static fields
|
|
||||||
java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle));
|
|
||||||
|
|
||||||
// Allocate a simple java object for locking during class initialization.
|
|
||||||
// This needs to be a java object because it can be held across a java call.
|
|
||||||
typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_NULL);
|
|
||||||
this_klass->set_init_lock(r);
|
|
||||||
|
|
||||||
// TODO: Move these oops to the mirror
|
|
||||||
this_klass->set_protection_domain(protection_domain());
|
|
||||||
|
|
||||||
// Update the loader_data graph.
|
// Update the loader_data graph.
|
||||||
record_defined_class_dependencies(this_klass, CHECK_NULL);
|
record_defined_class_dependencies(this_klass, CHECK_NULL);
|
||||||
|
|
||||||
|
@ -1349,6 +1349,7 @@ static void merge_in_new_methods(InstanceKlass* klass,
|
|||||||
|
|
||||||
// Replace klass methods with new merged lists
|
// Replace klass methods with new merged lists
|
||||||
klass->set_methods(merged_methods);
|
klass->set_methods(merged_methods);
|
||||||
|
klass->set_initial_method_idnum(new_size);
|
||||||
|
|
||||||
ClassLoaderData* cld = klass->class_loader_data();
|
ClassLoaderData* cld = klass->class_loader_data();
|
||||||
MetadataFactory::free_array(cld, original_methods);
|
MetadataFactory::free_array(cld, original_methods);
|
||||||
|
@ -512,22 +512,22 @@ void java_lang_Class::fixup_mirror(KlassHandle k, TRAPS) {
|
|||||||
|
|
||||||
// If the offset was read from the shared archive, it was fixed up already
|
// If the offset was read from the shared archive, it was fixed up already
|
||||||
if (!k->is_shared()) {
|
if (!k->is_shared()) {
|
||||||
if (k->oop_is_instance()) {
|
if (k->oop_is_instance()) {
|
||||||
// During bootstrap, java.lang.Class wasn't loaded so static field
|
// During bootstrap, java.lang.Class wasn't loaded so static field
|
||||||
// offsets were computed without the size added it. Go back and
|
// offsets were computed without the size added it. Go back and
|
||||||
// update all the static field offsets to included the size.
|
// update all the static field offsets to included the size.
|
||||||
for (JavaFieldStream fs(InstanceKlass::cast(k())); !fs.done(); fs.next()) {
|
for (JavaFieldStream fs(InstanceKlass::cast(k())); !fs.done(); fs.next()) {
|
||||||
if (fs.access_flags().is_static()) {
|
if (fs.access_flags().is_static()) {
|
||||||
int real_offset = fs.offset() + InstanceMirrorKlass::offset_of_static_fields();
|
int real_offset = fs.offset() + InstanceMirrorKlass::offset_of_static_fields();
|
||||||
fs.set_offset(real_offset);
|
fs.set_offset(real_offset);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
create_mirror(k, Handle(NULL), CHECK);
|
||||||
create_mirror(k, CHECK);
|
|
||||||
}
|
}
|
||||||
-oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
+oop java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) {
   assert(k->java_mirror() == NULL, "should only assign mirror once");
   // Use this moment of initialization to cache modifier_flags also,
   // to support Class.getModifiers(). Instance classes recalculate
@@ -563,6 +563,16 @@ oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
       set_array_klass(comp_mirror(), k());
     } else {
       assert(k->oop_is_instance(), "Must be");
+
+      // Allocate a simple java object for a lock.
+      // This needs to be a java object because during class initialization
+      // it can be held across a java call.
+      typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_NULL);
+      set_init_lock(mirror(), r);
+
+      // Set protection domain also
+      set_protection_domain(mirror(), protection_domain());
+
       // Initialize static fields
       InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, CHECK_NULL);
     }
@@ -597,6 +607,34 @@ void java_lang_Class::set_static_oop_field_count(oop java_class, int size) {
   java_class->int_field_put(_static_oop_field_count_offset, size);
 }
+
+oop java_lang_Class::protection_domain(oop java_class) {
+  assert(_protection_domain_offset != 0, "must be set");
+  return java_class->obj_field(_protection_domain_offset);
+}
+void java_lang_Class::set_protection_domain(oop java_class, oop pd) {
+  assert(_protection_domain_offset != 0, "must be set");
+  java_class->obj_field_put(_protection_domain_offset, pd);
+}
+
+oop java_lang_Class::init_lock(oop java_class) {
+  assert(_init_lock_offset != 0, "must be set");
+  return java_class->obj_field(_init_lock_offset);
+}
+void java_lang_Class::set_init_lock(oop java_class, oop init_lock) {
+  assert(_init_lock_offset != 0, "must be set");
+  java_class->obj_field_put(_init_lock_offset, init_lock);
+}
+
+objArrayOop java_lang_Class::signers(oop java_class) {
+  assert(_signers_offset != 0, "must be set");
+  return (objArrayOop)java_class->obj_field(_signers_offset);
+}
+void java_lang_Class::set_signers(oop java_class, objArrayOop signers) {
+  assert(_signers_offset != 0, "must be set");
+  java_class->obj_field_put(_signers_offset, (oop)signers);
+}
+
+
 oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) {
   // This should be improved by adding a field at the Java level or by
   // introducing a new VM klass (see comment in ClassFileParser)
@@ -2934,6 +2972,9 @@ int java_lang_Class::_klass_offset;
 int java_lang_Class::_array_klass_offset;
 int java_lang_Class::_oop_size_offset;
 int java_lang_Class::_static_oop_field_count_offset;
+int java_lang_Class::_protection_domain_offset;
+int java_lang_Class::_init_lock_offset;
+int java_lang_Class::_signers_offset;
 GrowableArray<Klass*>* java_lang_Class::_fixup_mirror_list = NULL;
 int java_lang_Throwable::backtrace_offset;
 int java_lang_Throwable::detailMessage_offset;
@@ -208,7 +208,10 @@ class java_lang_String : AllStatic {
   macro(java_lang_Class, klass, intptr_signature, false) \
   macro(java_lang_Class, array_klass, intptr_signature, false) \
   macro(java_lang_Class, oop_size, int_signature, false) \
-  macro(java_lang_Class, static_oop_field_count, int_signature, false)
+  macro(java_lang_Class, static_oop_field_count, int_signature, false) \
+  macro(java_lang_Class, protection_domain, object_signature, false) \
+  macro(java_lang_Class, init_lock, object_signature, false) \
+  macro(java_lang_Class, signers, object_signature, false)
 
 class java_lang_Class : AllStatic {
   friend class VMStructs;
@@ -222,15 +225,20 @@ class java_lang_Class : AllStatic {
   static int _oop_size_offset;
   static int _static_oop_field_count_offset;
 
+  static int _protection_domain_offset;
+  static int _init_lock_offset;
+  static int _signers_offset;
 
   static bool offsets_computed;
   static int classRedefinedCount_offset;
   static GrowableArray<Klass*>* _fixup_mirror_list;
+
+  static void set_init_lock(oop java_class, oop init_lock);
  public:
   static void compute_offsets();
 
   // Instance creation
-  static oop create_mirror(KlassHandle k, TRAPS);
+  static oop create_mirror(KlassHandle k, Handle protection_domain, TRAPS);
   static void fixup_mirror(KlassHandle k, TRAPS);
   static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
   // Conversion
@@ -262,6 +270,13 @@ class java_lang_Class : AllStatic {
   static int classRedefinedCount(oop the_class_mirror);
   static void set_classRedefinedCount(oop the_class_mirror, int value);
+
+  // Support for embedded per-class oops
+  static oop protection_domain(oop java_class);
+  static void set_protection_domain(oop java_class, oop protection_domain);
+  static oop init_lock(oop java_class);
+  static objArrayOop signers(oop java_class);
+  static void set_signers(oop java_class, objArrayOop signers);
 
   static int oop_size(oop java_class);
   static void set_oop_size(oop java_class, int size);
   static int static_oop_field_count(oop java_class);
@@ -737,7 +737,7 @@ oop StringTable::intern(const char* utf8_string, TRAPS) {
   return result;
 }
 
-void StringTable::unlink(BoolObjectClosure* is_alive) {
+void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
   // Readers of the table are unlocked, so we should only be removing
   // entries at a safepoint.
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
@@ -745,41 +745,31 @@ void StringTable::unlink(BoolObjectClosure* is_alive) {
     HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);
     HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
     while (entry != NULL) {
-      // Shared entries are normally at the end of the bucket and if we run into
-      // a shared entry, then there is nothing more to remove. However, if we
-      // have rehashed the table, then the shared entries are no longer at the
-      // end of the bucket.
-      if (entry->is_shared() && !use_alternate_hashcode()) {
-        break;
-      }
-      assert(entry->literal() != NULL, "just checking");
-      if (entry->is_shared() || is_alive->do_object_b(entry->literal())) {
+      assert(!entry->is_shared(), "CDS not used for the StringTable");
+
+      if (is_alive->do_object_b(entry->literal())) {
+        if (f != NULL) {
+          f->do_oop((oop*)entry->literal_addr());
+        }
         p = entry->next_addr();
       } else {
         *p = entry->next();
         the_table()->free_entry(entry);
       }
-      entry = (HashtableEntry<oop, mtSymbol>*)HashtableEntry<oop, mtSymbol>::make_ptr(*p);
+      entry = *p;
     }
   }
 }
 
 void StringTable::oops_do(OopClosure* f) {
   for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);
     HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
     while (entry != NULL) {
+      assert(!entry->is_shared(), "CDS not used for the StringTable");
+
       f->do_oop((oop*)entry->literal_addr());
 
-      // Did the closure remove the literal from the table?
-      if (entry->literal() == NULL) {
-        assert(!entry->is_shared(), "immutable hashtable entry?");
-        *p = entry->next();
-        the_table()->free_entry(entry);
-      } else {
-        p = entry->next_addr();
-      }
-      entry = (HashtableEntry<oop, mtSymbol>*)HashtableEntry<oop, mtSymbol>::make_ptr(*p);
+      entry = entry->next();
     }
   }
 }
@@ -272,7 +272,10 @@ public:
 
   // GC support
   //   Delete pointers to otherwise-unreachable objects.
-  static void unlink(BoolObjectClosure* cl);
+  static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f);
+  static void unlink(BoolObjectClosure* cl) {
+    unlink_or_oops_do(cl, NULL);
+  }
 
   // Invoke "f->do_oop" on the locations of all oops in the table.
   static void oops_do(OopClosure* f);
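The StringTable hunks above fold the old unlink() and oops_do() logic into a single unlink_or_oops_do() walk over the buckets: a live entry is kept and, if a closure was supplied, visited; a dead entry is spliced out of the bucket and freed. The following is a minimal standalone C++ sketch of that walk with illustrative types (not the HotSpot Hashtable classes), just to make the pointer-to-link pattern explicit:

// Standalone model of the unlink-or-visit bucket walk (illustrative, not HotSpot code).
#include <cstdio>
#include <functional>

struct Entry {
  int    literal;   // stands in for the interned string oop
  Entry* next;
};

// 'is_alive' decides liveness; 'visit' may be empty, mirroring the optional OopClosure* f.
void unlink_or_visit(Entry** bucket,
                     const std::function<bool(int)>& is_alive,
                     const std::function<void(int*)>& visit) {
  Entry** p = bucket;                        // address of the link we may rewrite
  Entry*  entry = *bucket;
  while (entry != nullptr) {
    if (is_alive(entry->literal)) {
      if (visit) visit(&entry->literal);     // keep the entry and (optionally) visit its payload
      p = &entry->next;                      // advance the link pointer
    } else {
      *p = entry->next;                      // unlink the dead entry ...
      delete entry;                          // ... and free it
    }
    entry = *p;                              // continue from whatever *p now points at
  }
}

int main() {
  Entry* bucket = new Entry{1, new Entry{2, new Entry{3, nullptr}}};
  unlink_or_visit(&bucket,
                  [](int v) { return v != 2; },                       // pretend "2" is dead
                  [](int* v) { std::printf("live %d\n", *v); });
  for (Entry* e = bucket; e != nullptr; e = e->next) {
    std::printf("kept %d\n", e->literal);
  }
  return 0;
}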
@@ -36,8 +36,10 @@
 class Verifier : AllStatic {
  public:
   enum {
+    STRICTER_ACCESS_CTRL_CHECK_VERSION = 49,
     STACKMAP_ATTRIBUTE_MAJOR_VERSION   = 50,
-    INVOKEDYNAMIC_MAJOR_VERSION        = 51
+    INVOKEDYNAMIC_MAJOR_VERSION        = 51,
+    NO_RELAX_ACCESS_CTRL_CHECK_VERSION = 52
   };
   typedef enum { ThrowException, NoException } Mode;
 
@@ -392,6 +392,9 @@
   template(array_klass_name, "array_klass") \
   template(oop_size_name, "oop_size") \
   template(static_oop_field_count_name, "static_oop_field_count") \
+  template(protection_domain_name, "protection_domain") \
+  template(init_lock_name, "init_lock") \
+  template(signers_name, "signers_name") \
   template(loader_data_name, "loader_data") \
   template(dependencies_name, "dependencies") \
   \
@@ -1976,11 +1976,10 @@ void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map
   if (!method()->is_native()) {
     SimpleScopeDesc ssd(this, fr.pc());
     Bytecode_invoke call(ssd.method(), ssd.bci());
-    // compiled invokedynamic call sites have an implicit receiver at
-    // resolution time, so make sure it gets GC'ed.
-    bool has_receiver = !call.is_invokestatic();
+    bool has_receiver = call.has_receiver();
+    bool has_appendix = call.has_appendix();
     Symbol* signature = call.signature();
-    fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
+    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
   }
 #endif // !SHARK
 }
@@ -1642,42 +1642,37 @@ void CompileBroker::compiler_thread_loop() {
 // Set up state required by +LogCompilation.
 void CompileBroker::init_compiler_thread_log() {
     CompilerThread* thread = CompilerThread::current();
-    char fileBuf[4*K];
+    char file_name[4*K];
     FILE* fp = NULL;
-    char* file = NULL;
     intx thread_id = os::current_thread_id();
     for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) {
       const char* dir = (try_temp_dir ? os::get_temp_directory() : NULL);
       if (dir == NULL) {
-        jio_snprintf(fileBuf, sizeof(fileBuf), "hs_c" UINTX_FORMAT "_pid%u.log",
+        jio_snprintf(file_name, sizeof(file_name), "hs_c" UINTX_FORMAT "_pid%u.log",
                      thread_id, os::current_process_id());
       } else {
-        jio_snprintf(fileBuf, sizeof(fileBuf),
+        jio_snprintf(file_name, sizeof(file_name),
                      "%s%shs_c" UINTX_FORMAT "_pid%u.log", dir,
                      os::file_separator(), thread_id, os::current_process_id());
       }
-      fp = fopen(fileBuf, "at");
+
+      fp = fopen(file_name, "at");
       if (fp != NULL) {
-        file = NEW_C_HEAP_ARRAY(char, strlen(fileBuf)+1, mtCompiler);
-        strcpy(file, fileBuf);
-        break;
-      }
-    }
-    if (fp == NULL) {
-      warning("Cannot open log file: %s", fileBuf);
-    } else {
-      if (LogCompilation && Verbose)
-        tty->print_cr("Opening compilation log %s", file);
-      CompileLog* log = new(ResourceObj::C_HEAP, mtCompiler) CompileLog(file, fp, thread_id);
-      thread->init_log(log);
+        if (LogCompilation && Verbose) {
+          tty->print_cr("Opening compilation log %s", file_name);
+        }
+        CompileLog* log = new(ResourceObj::C_HEAP, mtCompiler) CompileLog(file_name, fp, thread_id);
+        thread->init_log(log);
 
-      if (xtty != NULL) {
-        ttyLocker ttyl;
-
-        // Record any per thread log files
-        xtty->elem("thread_logfile thread='%d' filename='%s'", thread_id, file);
+        if (xtty != NULL) {
+          ttyLocker ttyl;
+          // Record any per thread log files
+          xtty->elem("thread_logfile thread='%d' filename='%s'", thread_id, file_name);
+        }
+        return;
       }
     }
+    warning("Cannot open log file: %s", file_name);
   }
 
 // ------------------------------------------------------------------
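The rewritten init_compiler_thread_log() above now opens the per-thread log inside the directory-probing loop and returns as soon as a file could be opened, leaving a single warning path after the loop. A small standalone C++ sketch of that shape (file names and the fixed pid are illustrative; this is not the HotSpot API):

// Standalone sketch of "try the temp directory, then the current directory, else warn".
#include <cstdio>
#include <cstdlib>
#include <string>

static FILE* open_thread_log(long thread_id, std::string* out_name) {
  const char* temp_dir = std::getenv("TMPDIR");   // stand-in for os::get_temp_directory()
  const char* dirs[2] = { temp_dir, nullptr };    // nullptr means "current directory"
  for (int i = 0; i < 2; i++) {
    char name[4096];
    if (dirs[i] == nullptr) {
      std::snprintf(name, sizeof(name), "hs_c%ld_pid%d.log", thread_id, 12345);
    } else {
      std::snprintf(name, sizeof(name), "%s/hs_c%ld_pid%d.log", dirs[i], thread_id, 12345);
    }
    FILE* fp = std::fopen(name, "at");            // append mode, like the original
    if (fp != nullptr) {
      *out_name = name;                           // the log object keeps its own copy of the name
      return fp;                                  // success: stop probing directories
    }
  }
  std::fprintf(stderr, "Cannot open log file for thread %ld\n", thread_id);
  return nullptr;
}

int main() {
  std::string name;
  if (FILE* fp = open_thread_log(1, &name)) {
    std::fprintf(fp, "log opened as %s\n", name.c_str());
    std::fclose(fp);
  }
  return 0;
}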
@@ -34,17 +34,18 @@ CompileLog* CompileLog::_first = NULL;
 
 // ------------------------------------------------------------------
 // CompileLog::CompileLog
-CompileLog::CompileLog(const char* file, FILE* fp, intx thread_id)
+CompileLog::CompileLog(const char* file_name, FILE* fp, intx thread_id)
   : _context(_context_buffer, sizeof(_context_buffer))
 {
-  initialize(new(ResourceObj::C_HEAP, mtCompiler) fileStream(fp));
-  _file = file;
+  initialize(new(ResourceObj::C_HEAP, mtCompiler) fileStream(fp, true));
   _file_end = 0;
   _thread_id = thread_id;
 
   _identities_limit = 0;
   _identities_capacity = 400;
   _identities = NEW_C_HEAP_ARRAY(char, _identities_capacity, mtCompiler);
+  _file = NEW_C_HEAP_ARRAY(char, strlen(file_name)+1, mtCompiler);
+  strcpy((char*)_file, file_name);
 
   // link into the global list
   { MutexLocker locker(CompileTaskAlloc_lock);
@@ -57,6 +58,7 @@ CompileLog::~CompileLog() {
   delete _out;
   _out = NULL;
   FREE_C_HEAP_ARRAY(char, _identities, mtCompiler);
+  FREE_C_HEAP_ARRAY(char, _file, mtCompiler);
 }
 
 
@@ -188,7 +190,8 @@ void CompileLog::finish_log_on_error(outputStream* file, char* buf, int buflen)
   if (called_exit) return;
   called_exit = true;
 
-  for (CompileLog* log = _first; log != NULL; log = log->_next) {
+  CompileLog* log = _first;
+  while (log != NULL) {
     log->flush();
     const char* partial_file = log->file();
     int partial_fd = open(partial_file, O_RDONLY);
@@ -267,7 +270,11 @@ void CompileLog::finish_log_on_error(outputStream* file, char* buf, int buflen)
       close(partial_fd);
       unlink(partial_file);
     }
+    CompileLog* next_log = log->_next;
+    delete log;
+    log = next_log;
   }
+  _first = NULL;
 }
 
 // ------------------------------------------------------------------
@@ -57,7 +57,7 @@ class CompileLog : public xmlStream {
   void va_tag(bool push, const char* format, va_list ap);
 
  public:
-  CompileLog(const char* file, FILE* fp, intx thread_id);
+  CompileLog(const char* file_name, FILE* fp, intx thread_id);
   ~CompileLog();
 
   intx thread_id() { return _thread_id; }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -969,8 +969,8 @@ size_t CMSAdaptiveSizePolicy::promo_increment_aligned_up(size_t cur_promo) {
 }
 
 
-void CMSAdaptiveSizePolicy::compute_young_generation_free_space(size_t cur_eden,
+void CMSAdaptiveSizePolicy::compute_eden_space_size(size_t cur_eden,
                                                     size_t max_eden_size)
 {
   size_t desired_eden_size = cur_eden;
   size_t eden_limit = max_eden_size;
@@ -978,7 +978,7 @@ void CMSAdaptiveSizePolicy::compute_young_generation_free_space(size_t cur_eden,
   // Printout input
   if (PrintGC && PrintAdaptiveSizePolicy) {
     gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::compute_young_generation_free_space: "
+      "CMSAdaptiveSizePolicy::compute_eden_space_size: "
       "cur_eden " SIZE_FORMAT,
       cur_eden);
   }
@@ -1024,7 +1024,7 @@ void CMSAdaptiveSizePolicy::compute_young_generation_free_space(size_t cur_eden,
 
   if (PrintGC && PrintAdaptiveSizePolicy) {
     gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::compute_young_generation_free_space limits:"
+      "CMSAdaptiveSizePolicy::compute_eden_space_size limits:"
       " desired_eden_size: " SIZE_FORMAT
       " old_eden_size: " SIZE_FORMAT,
       desired_eden_size, cur_eden);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -436,8 +436,8 @@ class CMSAdaptiveSizePolicy : public AdaptiveSizePolicy {
 
   size_t generation_alignment() { return _generation_alignment; }
 
-  virtual void compute_young_generation_free_space(size_t cur_eden,
+  virtual void compute_eden_space_size(size_t cur_eden,
                                        size_t max_eden_size);
   // Calculates new survivor space size; returns a new tenuring threshold
   // value. Stores new survivor size in _survivor_size.
   virtual uint compute_survivor_space_size_and_threshold(
@@ -114,6 +114,14 @@ void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
   }
 }
 
+void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) {
+  if (_threads != NULL) {
+    for (int i = 0; i < worker_thread_num(); i++) {
+      tc->do_thread(_threads[i]);
+    }
+  }
+}
+
 int ConcurrentG1Refine::thread_num() {
   int n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
                                                 : ParallelGCThreads;
@@ -126,3 +134,7 @@ void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
     st->cr();
   }
 }
+
+ConcurrentG1RefineThread * ConcurrentG1Refine::sampling_thread() const {
+  return _threads[worker_thread_num()];
+}
@@ -35,6 +35,7 @@ class ConcurrentG1RefineThread;
 class G1CollectedHeap;
 class G1HotCardCache;
 class G1RemSet;
+class DirtyCardQueue;
 
 class ConcurrentG1Refine: public CHeapObj<mtGC> {
   ConcurrentG1RefineThread** _threads;
@@ -78,9 +79,15 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
 
   void reinitialize_threads();
 
-  // Iterate over the conc refine threads
+  // Iterate over all concurrent refinement threads
   void threads_do(ThreadClosure *tc);
+
+  // Iterate over all worker refinement threads
+  void worker_threads_do(ThreadClosure * tc);
+
+  // The RS sampling thread
+  ConcurrentG1RefineThread * sampling_thread() const;
 
   static int thread_num();
 
   void print_worker_threads_on(outputStream* st) const;
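The new worker_threads_do() and sampling_thread() accessors above rely on the layout of the _threads array: the worker refinement threads occupy the first worker_thread_num() slots and the single remembered-set sampling thread sits in the last slot. A tiny standalone C++ model of that indexing (illustrative types, not the HotSpot classes):

// Standalone model of the "workers first, sampler last" thread-array layout.
#include <cstdio>
#include <string>
#include <vector>

struct RefineThread { std::string name; };

struct Refine {
  std::vector<RefineThread> threads;   // workers..., then the sampling thread in the last slot
  int worker_thread_num() const { return (int)threads.size() - 1; }

  template <typename F> void threads_do(F f)        { for (auto& t : threads) f(t); }
  template <typename F> void worker_threads_do(F f) {
    for (int i = 0; i < worker_thread_num(); i++) f(threads[i]);   // skips the sampler
  }
  RefineThread& sampling_thread() { return threads[worker_thread_num()]; }
};

int main() {
  Refine r{{{"worker-0"}, {"worker-1"}, {"sampler"}}};
  r.worker_threads_do([](RefineThread& t) { std::printf("worker: %s\n", t.name.c_str()); });
  std::printf("sampling: %s\n", r.sampling_thread().name.c_str());
  return 0;
}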
@@ -1417,8 +1417,6 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
 
       MemoryService::track_memory_usage();
 
-      verify_after_gc();
-
       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
       ref_processor_stw()->verify_no_references_recorded();
 
@@ -1521,6 +1519,8 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
       _hrs.verify_optional();
       verify_region_sets_optional();
 
+      verify_after_gc();
+
       // Start a new incremental collection set for the next pause
       assert(g1_policy()->collection_set() == NULL, "must be");
       g1_policy()->start_incremental_cset_building();
@@ -3539,6 +3539,14 @@ void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
 }
 
 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
+
+  if (G1SummarizeRSetStats &&
+      (G1SummarizeRSetStatsPeriod > 0) &&
+      // we are at the end of the GC. Total collections has already been increased.
+      ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
+    g1_rem_set()->print_periodic_summary_info();
+  }
+
   // FIXME: what is this about?
   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
   // is set.
@@ -4093,12 +4101,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
         g1mm()->update_sizes();
       }
 
-      if (G1SummarizeRSetStats &&
-          (G1SummarizeRSetStatsPeriod > 0) &&
-          (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
-        g1_rem_set()->print_summary_info();
-      }
-
       // It should now be safe to tell the concurrent mark thread to start
       // without its logging output interfering with the logging output
       // that came from the pause.
@@ -593,11 +593,6 @@ protected:
   // may not be a humongous - it must fit into a single heap region.
   HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
 
-  HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
-                                    HeapRegion* alloc_region,
-                                    bool par,
-                                    size_t word_size);
-
   // Ensure that no further allocations can happen in "r", bearing in mind
   // that parallel threads might be attempting allocations.
   void par_allocate_remaining_space(HeapRegion* r);
@@ -1733,6 +1728,95 @@ public:
     ParGCAllocBuffer::retire(end_of_gc, retain);
     _retired = true;
   }
+
+  bool is_retired() {
+    return _retired;
+  }
+};
+
+class G1ParGCAllocBufferContainer {
+protected:
+  static int const _priority_max = 2;
+  G1ParGCAllocBuffer* _priority_buffer[_priority_max];
+
+public:
+  G1ParGCAllocBufferContainer(size_t gclab_word_size) {
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      _priority_buffer[pr] = new G1ParGCAllocBuffer(gclab_word_size);
+    }
+  }
+
+  ~G1ParGCAllocBufferContainer() {
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      assert(_priority_buffer[pr]->is_retired(), "alloc buffers should all retire at this point.");
+      delete _priority_buffer[pr];
+    }
+  }
+
+  HeapWord* allocate(size_t word_sz) {
+    HeapWord* obj;
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      obj = _priority_buffer[pr]->allocate(word_sz);
+      if (obj != NULL) return obj;
+    }
+    return obj;
+  }
+
+  bool contains(void* addr) {
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      if (_priority_buffer[pr]->contains(addr)) return true;
+    }
+    return false;
+  }
+
+  void undo_allocation(HeapWord* obj, size_t word_sz) {
+    bool finish_undo;
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      if (_priority_buffer[pr]->contains(obj)) {
+        _priority_buffer[pr]->undo_allocation(obj, word_sz);
+        finish_undo = true;
+      }
+    }
+    if (!finish_undo) ShouldNotReachHere();
+  }
+
+  size_t words_remaining() {
+    size_t result = 0;
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      result += _priority_buffer[pr]->words_remaining();
+    }
+    return result;
+  }
+
+  size_t words_remaining_in_retired_buffer() {
+    G1ParGCAllocBuffer* retired = _priority_buffer[0];
+    return retired->words_remaining();
+  }
+
+  void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      _priority_buffer[pr]->flush_stats_and_retire(stats, end_of_gc, retain);
+    }
+  }
+
+  void update(bool end_of_gc, bool retain, HeapWord* buf, size_t word_sz) {
+    G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
+    retired_and_set->retire(end_of_gc, retain);
+    retired_and_set->set_buf(buf);
+    retired_and_set->set_word_size(word_sz);
+    adjust_priority_order();
+  }
+
+private:
+  void adjust_priority_order() {
+    G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
+
+    int last = _priority_max - 1;
+    for (int pr = 0; pr < last; ++pr) {
+      _priority_buffer[pr] = _priority_buffer[pr + 1];
+    }
+    _priority_buffer[last] = retired_and_set;
+  }
 };
 
 class G1ParScanThreadState : public StackObj {
@@ -1743,9 +1827,9 @@ protected:
   CardTableModRefBS* _ct_bs;
   G1RemSet* _g1_rem;
 
-  G1ParGCAllocBuffer  _surviving_alloc_buffer;
-  G1ParGCAllocBuffer  _tenured_alloc_buffer;
-  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+  G1ParGCAllocBufferContainer  _surviving_alloc_buffer;
+  G1ParGCAllocBufferContainer  _tenured_alloc_buffer;
+  G1ParGCAllocBufferContainer* _alloc_buffers[GCAllocPurposeCount];
   ageTable _age_table;
 
   size_t _alloc_buffer_waste;
@@ -1809,7 +1893,7 @@ public:
   RefToScanQueue* refs() { return _refs; }
   ageTable* age_table() { return &_age_table; }
 
-  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
+  G1ParGCAllocBufferContainer* alloc_buffer(GCAllocPurpose purpose) {
     return _alloc_buffers[purpose];
   }
 
@@ -1839,15 +1923,13 @@ public:
     HeapWord* obj = NULL;
     size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
     if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
-      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-      alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+      G1ParGCAllocBufferContainer* alloc_buf = alloc_buffer(purpose);
 
       HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
       if (buf == NULL) return NULL; // Let caller handle allocation failure.
-      // Otherwise.
-      alloc_buf->set_word_size(gclab_word_size);
-      alloc_buf->set_buf(buf);
+
+      add_to_alloc_buffer_waste(alloc_buf->words_remaining_in_retired_buffer());
+      alloc_buf->update(false /* end_of_gc */, false /* retain */, buf, gclab_word_size);
 
       obj = alloc_buf->allocate(word_sz);
       assert(obj != NULL, "buffer was definitely big enough...");
@@ -1959,7 +2041,6 @@ public:
     }
   }
 
-public:
   void trim_queue();
 };
 
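The G1ParGCAllocBufferContainer introduced above keeps two G1ParGCAllocBuffers in priority order: allocate() tries slot 0 first, and update() retires slot 0, installs the freshly acquired chunk in it, then rotates that buffer to the back so the older, partially used buffer keeps absorbing small requests before the fresh one is touched. The following is a standalone C++ sketch of that rotation using simplified bump-pointer buffers (illustrative, not the HotSpot types):

// Standalone model of the two-slot priority PLAB scheme.
#include <cstddef>
#include <cstdio>

struct BumpBuffer {
  size_t top = 0, end = 0;                   // word indices into some backing chunk
  size_t remaining() const { return end - top; }
  bool allocate(size_t words, size_t* out) {
    if (remaining() < words) return false;
    *out = top; top += words; return true;
  }
  void reset(size_t new_words) { top = 0; end = new_words; }
};

class PriorityBufferPair {
  static const int N = 2;
  BumpBuffer buf[N];                         // buf[0] is tried first
public:
  bool allocate(size_t words, size_t* out) {
    for (int pr = 0; pr < N; ++pr)
      if (buf[pr].allocate(words, out)) return true;
    return false;                            // caller refills and retries
  }
  size_t waste_in_retired() const { return buf[0].remaining(); }
  void refill(size_t new_words) {            // mirrors update(): retire slot 0, refill, rotate
    BumpBuffer refilled = buf[0];
    refilled.reset(new_words);
    buf[0] = buf[1];                         // the older buffer moves to the front ...
    buf[1] = refilled;                       // ... and the refilled one becomes the fallback
  }
};

int main() {
  PriorityBufferPair plab;
  plab.refill(8); plab.refill(8);            // both slots start with 8 words
  size_t at;
  for (int i = 0; i < 5; i++)
    if (plab.allocate(3, &at)) std::printf("allocated 3 words at offset %zu\n", at);
  std::printf("words left in next-to-retire buffer: %zu\n", plab.waste_in_retired());
  return 0;
}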
@@ -34,6 +34,7 @@
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/intHisto.hpp"
@@ -73,7 +74,8 @@ G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
     _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
     _cg1r(g1->concurrent_g1_refine()),
     _cset_rs_update_cl(NULL),
-    _cards_scanned(NULL), _total_cards_scanned(0)
+    _cards_scanned(NULL), _total_cards_scanned(0),
+    _prev_period_summary()
 {
   _seq_task = new SubTasksDone(NumSeqTasks);
   guarantee(n_workers() > 0, "There should be some workers");
@@ -81,6 +83,7 @@ G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
   for (uint i = 0; i < n_workers(); i++) {
     _cset_rs_update_cl[i] = NULL;
   }
+  _prev_period_summary.initialize(this, n_workers());
 }
 
 G1RemSet::~G1RemSet() {
@@ -697,47 +700,29 @@ bool G1RemSet::refine_card(jbyte* card_ptr, int worker_i,
   return has_refs_into_cset;
 }
 
-class HRRSStatsIter: public HeapRegionClosure {
-  size_t _occupied;
-  size_t _total_mem_sz;
-  size_t _max_mem_sz;
-  HeapRegion* _max_mem_sz_region;
-public:
-  HRRSStatsIter() :
-    _occupied(0),
-    _total_mem_sz(0),
-    _max_mem_sz(0),
-    _max_mem_sz_region(NULL)
-  {}
-
-  bool doHeapRegion(HeapRegion* r) {
-    if (r->continuesHumongous()) return false;
-    size_t mem_sz = r->rem_set()->mem_size();
-    if (mem_sz > _max_mem_sz) {
-      _max_mem_sz = mem_sz;
-      _max_mem_sz_region = r;
-    }
-    _total_mem_sz += mem_sz;
-    size_t occ = r->rem_set()->occupied();
-    _occupied += occ;
-    return false;
-  }
-  size_t total_mem_sz() { return _total_mem_sz; }
-  size_t max_mem_sz() { return _max_mem_sz; }
-  size_t occupied() { return _occupied; }
-  HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
-};
-
-class PrintRSThreadVTimeClosure : public ThreadClosure {
-public:
-  virtual void do_thread(Thread *t) {
-    ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
-    gclog_or_tty->print(" %5.2f", crt->vtime_accum());
-  }
-};
+void G1RemSet::print_periodic_summary_info() {
+  G1RemSetSummary current;
+  current.initialize(this, n_workers());
+
+  _prev_period_summary.subtract_from(&current);
+  print_summary_info(&_prev_period_summary);
+
+  _prev_period_summary.set(&current);
+}
 
 void G1RemSet::print_summary_info() {
-  G1CollectedHeap* g1 = G1CollectedHeap::heap();
+  G1RemSetSummary current;
+  current.initialize(this, n_workers());
+
+  print_summary_info(&current, " Cumulative RS summary");
+}
+
+void G1RemSet::print_summary_info(G1RemSetSummary * summary, const char * header) {
+  assert(summary != NULL, "just checking");
+
+  if (header != NULL) {
+    gclog_or_tty->print_cr("%s", header);
+  }
 
 #if CARD_REPEAT_HISTO
   gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
@@ -745,52 +730,13 @@ void G1RemSet::print_summary_info() {
   card_repeat_count.print_on(gclog_or_tty);
 #endif
 
-  gclog_or_tty->print_cr("\n Concurrent RS processed %d cards",
-                         _conc_refine_cards);
-  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-  jint tot_processed_buffers =
-    dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread();
-  gclog_or_tty->print_cr(" Of %d completed buffers:", tot_processed_buffers);
-  gclog_or_tty->print_cr(" %8d (%5.1f%%) by conc RS threads.",
-                         dcqs.processed_buffers_rs_thread(),
-                         100.0*(float)dcqs.processed_buffers_rs_thread()/
-                         (float)tot_processed_buffers);
-  gclog_or_tty->print_cr(" %8d (%5.1f%%) by mutator threads.",
-                         dcqs.processed_buffers_mut(),
-                         100.0*(float)dcqs.processed_buffers_mut()/
-                         (float)tot_processed_buffers);
-  gclog_or_tty->print_cr(" Conc RS threads times(s)");
-  PrintRSThreadVTimeClosure p;
-  gclog_or_tty->print(" ");
-  g1->concurrent_g1_refine()->threads_do(&p);
-  gclog_or_tty->print_cr("");
-
-  HRRSStatsIter blk;
-  g1->heap_region_iterate(&blk);
-  gclog_or_tty->print_cr(" Total heap region rem set sizes = "SIZE_FORMAT"K."
-                         " Max = "SIZE_FORMAT"K.",
-                         blk.total_mem_sz()/K, blk.max_mem_sz()/K);
-  gclog_or_tty->print_cr(" Static structures = "SIZE_FORMAT"K,"
-                         " free_lists = "SIZE_FORMAT"K.",
-                         HeapRegionRemSet::static_mem_size() / K,
-                         HeapRegionRemSet::fl_mem_size() / K);
-  gclog_or_tty->print_cr(" "SIZE_FORMAT" occupied cards represented.",
-                         blk.occupied());
-  HeapRegion* max_mem_sz_region = blk.max_mem_sz_region();
-  HeapRegionRemSet* rem_set = max_mem_sz_region->rem_set();
-  gclog_or_tty->print_cr(" Max size region = "HR_FORMAT", "
-                         "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
-                         HR_FORMAT_PARAMS(max_mem_sz_region),
-                         (rem_set->mem_size() + K - 1)/K,
-                         (rem_set->occupied() + K - 1)/K);
-  gclog_or_tty->print_cr(" Did %d coarsenings.",
-                         HeapRegionRemSet::n_coarsenings());
+  summary->print_on(gclog_or_tty);
 }
 
 void G1RemSet::prepare_for_verify() {
   if (G1HRRSFlushLogBuffersOnVerify &&
       (VerifyBeforeGC || VerifyAfterGC)
-      && !_g1->full_collection()) {
+      && (!_g1->full_collection() || G1VerifyRSetsDuringFullGC)) {
     cleanupHRRS();
     _g1->set_refine_cte_cl_concurrency(false);
     if (SafepointSynchronize::is_at_safepoint()) {
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
 
+#include "gc_implementation/g1/g1RemSetSummary.hpp"
+
 // A G1RemSet provides ways of iterating over pointers into a selected
 // collection set.
 
@@ -37,9 +39,11 @@ class ConcurrentG1Refine;
 // so that they can be used to update the individual region remsets.
 
 class G1RemSet: public CHeapObj<mtGC> {
+private:
+  G1RemSetSummary _prev_period_summary;
 protected:
   G1CollectedHeap* _g1;
-  unsigned _conc_refine_cards;
+  size_t _conc_refine_cards;
   uint n_workers();
 
 protected:
@@ -66,6 +70,8 @@ protected:
   // references into the collection set.
   OopsInHeapRegionClosure** _cset_rs_update_cl;
 
+  // Print the given summary info
+  virtual void print_summary_info(G1RemSetSummary * summary, const char * header = NULL);
 public:
   // This is called to reset dual hash tables after the gc pause
   // is finished and the initial hash table is no longer being
@@ -123,11 +129,18 @@ public:
                            int worker_i,
                            bool check_for_refs_into_cset);
 
-  // Print any relevant summary info.
+  // Print accumulated summary info from the start of the VM.
   virtual void print_summary_info();
 
+  // Print accumulated summary info from the last time called.
+  virtual void print_periodic_summary_info();
+
   // Prepare remembered set for verification.
   virtual void prepare_for_verify();
+
+  size_t conc_refine_cards() const {
+    return _conc_refine_cards;
+  }
 };
 
 class CountNonCleanMemRegionClosure: public MemRegionClosure {
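print_periodic_summary_info() above reports per-period deltas by keeping a previous G1RemSetSummary snapshot, subtracting it from a freshly taken one, printing the result, and then storing the fresh snapshot for the next period. The following is a standalone C++ sketch of that snapshot/delta cycle with simplified counters (illustrative values and names, not the HotSpot G1RemSetSummary type):

// Standalone model of the periodic snapshot/delta reporting pattern.
#include <cstddef>
#include <cstdio>

struct RemSetSummary {
  size_t refined_cards = 0;
  size_t processed_buffers = 0;

  // Overwrite this summary with another one (mirrors set()).
  void set(const RemSetSummary& other) { *this = other; }

  // Turn this summary into "other - this" (mirrors subtract_from(other)).
  void subtract_from(const RemSetSummary& other) {
    refined_cards     = other.refined_cards     - refined_cards;
    processed_buffers = other.processed_buffers - processed_buffers;
  }

  void print(const char* header) const {
    std::printf("%s: %zu refined cards, %zu processed buffers\n",
                header, refined_cards, processed_buffers);
  }
};

// Stand-in for sampling the cumulative counters after a GC.
static RemSetSummary sample_current(size_t gc_number) {
  RemSetSummary s;
  s.refined_cards     = 1000 * gc_number;   // illustrative, monotonically growing totals
  s.processed_buffers =   10 * gc_number;
  return s;
}

int main() {
  RemSetSummary prev = sample_current(0);   // mirrors _prev_period_summary.initialize()
  for (size_t gc = 1; gc <= 3; gc++) {
    RemSetSummary current = sample_current(gc);
    prev.subtract_from(current);            // prev now holds the per-period delta
    prev.print("periodic RS summary");
    prev.set(current);                      // remember the cumulative values for next time
  }
  return 0;
}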
hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp (new file, 205 lines)
@@ -0,0 +1,205 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "precompiled.hpp"
|
||||||
|
#include "gc_implementation/g1/concurrentG1Refine.hpp"
|
||||||
|
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
|
||||||
|
#include "gc_implementation/g1/heapRegion.hpp"
|
||||||
|
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||||
|
#include "gc_implementation/g1/g1RemSet.inline.hpp"
|
||||||
|
#include "gc_implementation/g1/g1RemSetSummary.hpp"
|
||||||
|
#include "gc_implementation/g1/heapRegionRemSet.hpp"
|
||||||
|
#include "runtime/thread.inline.hpp"
|
||||||
|
|
||||||
|
class GetRSThreadVTimeClosure : public ThreadClosure {
|
||||||
|
private:
|
||||||
|
G1RemSetSummary* _summary;
|
||||||
|
uint _counter;
|
||||||
|
|
||||||
|
public:
|
||||||
|
GetRSThreadVTimeClosure(G1RemSetSummary * summary) : ThreadClosure(), _summary(summary), _counter(0) {
|
||||||
|
assert(_summary != NULL, "just checking");
|
||||||
|
}
|
||||||
|
|
||||||
|
virtual void do_thread(Thread* t) {
|
||||||
|
ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
|
||||||
|
_summary->set_rs_thread_vtime(_counter, crt->vtime_accum());
|
||||||
|
_counter++;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
void G1RemSetSummary::update() {
|
||||||
|
_num_refined_cards = remset()->conc_refine_cards();
|
||||||
|
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
|
||||||
|
_num_processed_buf_mutator = dcqs.processed_buffers_mut();
|
||||||
|
_num_processed_buf_rs_threads = dcqs.processed_buffers_rs_thread();
|
||||||
|
|
||||||
|
_num_coarsenings = HeapRegionRemSet::n_coarsenings();
|
||||||
|
|
||||||
|
ConcurrentG1Refine * cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
|
||||||
|
if (_rs_threads_vtimes != NULL) {
|
||||||
|
GetRSThreadVTimeClosure p(this);
|
||||||
|
cg1r->worker_threads_do(&p);
|
||||||
|
}
|
||||||
|
set_sampling_thread_vtime(cg1r->sampling_thread()->vtime_accum());
|
||||||
|
}
|
||||||
|
|
||||||
|
void G1RemSetSummary::set_rs_thread_vtime(uint thread, double value) {
|
||||||
|
assert(_rs_threads_vtimes != NULL, "just checking");
|
||||||
|
assert(thread < _num_vtimes, "just checking");
|
||||||
|
_rs_threads_vtimes[thread] = value;
|
||||||
|
}
|
||||||
|
|
||||||
|
double G1RemSetSummary::rs_thread_vtime(uint thread) const {
|
||||||
|
assert(_rs_threads_vtimes != NULL, "just checking");
|
||||||
|
assert(thread < _num_vtimes, "just checking");
|
||||||
|
return _rs_threads_vtimes[thread];
|
||||||
|
}
|
||||||
|
|
||||||
|
void G1RemSetSummary::initialize(G1RemSet* remset, uint num_workers) {
|
||||||
|
assert(_rs_threads_vtimes == NULL, "just checking");
|
||||||
|
assert(remset != NULL, "just checking");
|
||||||
|
|
||||||
|
_remset = remset;
|
||||||
|
_num_vtimes = num_workers;
|
||||||
|
_rs_threads_vtimes = NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC);
|
||||||
|
memset(_rs_threads_vtimes, 0, sizeof(double) * _num_vtimes);
|
||||||
|
|
||||||
|
update();
|
||||||
|
}
|
||||||
|
|
||||||
|
void G1RemSetSummary::set(G1RemSetSummary* other) {
|
||||||
|
assert(other != NULL, "just checking");
|
||||||
|
assert(remset() == other->remset(), "just checking");
|
||||||
|
assert(_num_vtimes == other->_num_vtimes, "just checking");
|
||||||
|
|
||||||
|
_num_refined_cards = other->num_concurrent_refined_cards();
|
||||||
|
|
||||||
|
_num_processed_buf_mutator = other->num_processed_buf_mutator();
|
||||||
|
_num_processed_buf_rs_threads = other->num_processed_buf_rs_threads();
|
||||||
|
|
||||||
|
_num_coarsenings = other->_num_coarsenings;
|
||||||
|
|
||||||
|
memcpy(_rs_threads_vtimes, other->_rs_threads_vtimes, sizeof(double) * _num_vtimes);
|
||||||
|
|
||||||
|
set_sampling_thread_vtime(other->sampling_thread_vtime());
|
||||||
|
}
|
||||||
|
|
||||||
|
void G1RemSetSummary::subtract_from(G1RemSetSummary* other) {
|
||||||
|
assert(other != NULL, "just checking");
|
||||||
|
assert(remset() == other->remset(), "just checking");
|
||||||
|
assert(_num_vtimes == other->_num_vtimes, "just checking");
|
||||||
|
|
||||||
|
_num_refined_cards = other->num_concurrent_refined_cards() - _num_refined_cards;
|
||||||
|
|
||||||
|
_num_processed_buf_mutator = other->num_processed_buf_mutator() - _num_processed_buf_mutator;
|
||||||
|
_num_processed_buf_rs_threads = other->num_processed_buf_rs_threads() - _num_processed_buf_rs_threads;
|
||||||
|
|
||||||
|
_num_coarsenings = other->num_coarsenings() - _num_coarsenings;
|
||||||
|
|
||||||
|
for (uint i = 0; i < _num_vtimes; i++) {
|
||||||
|
set_rs_thread_vtime(i, other->rs_thread_vtime(i) - rs_thread_vtime(i));
|
||||||
|
}
|
||||||
|
|
||||||
|
_sampling_thread_vtime = other->sampling_thread_vtime() - _sampling_thread_vtime;
|
||||||
|
}
|
||||||
|
|
||||||
|
class HRRSStatsIter: public HeapRegionClosure {
|
||||||
|
size_t _occupied;
|
||||||
|
size_t _total_mem_sz;
|
||||||
|
size_t _max_mem_sz;
|
||||||
|
HeapRegion* _max_mem_sz_region;
|
||||||
|
public:
|
||||||
|
HRRSStatsIter() :
|
||||||
|
_occupied(0),
|
||||||
|
_total_mem_sz(0),
|
||||||
|
_max_mem_sz(0),
|
||||||
|
_max_mem_sz_region(NULL)
|
||||||
|
{}
|
||||||
|
|
||||||
|
bool doHeapRegion(HeapRegion* r) {
|
||||||
|
size_t mem_sz = r->rem_set()->mem_size();
|
||||||
|
if (mem_sz > _max_mem_sz) {
|
||||||
|
_max_mem_sz = mem_sz;
|
||||||
|
_max_mem_sz_region = r;
|
||||||
|
}
|
||||||
|
_total_mem_sz += mem_sz;
|
||||||
|
size_t occ = r->rem_set()->occupied();
|
||||||
|
_occupied += occ;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
size_t total_mem_sz() { return _total_mem_sz; }
|
||||||
|
size_t max_mem_sz() { return _max_mem_sz; }
|
||||||
|
size_t occupied() { return _occupied; }
|
||||||
|
HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
|
||||||
|
};
|
||||||
|
|
||||||
|
double calc_percentage(size_t numerator, size_t denominator) {
|
||||||
|
if (denominator != 0) {
|
||||||
|
return (double)numerator / denominator * 100.0;
|
||||||
|
} else {
|
||||||
|
return 0.0f;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void G1RemSetSummary::print_on(outputStream* out) {
|
||||||
|
out->print_cr("\n Concurrent RS processed "SIZE_FORMAT" cards",
|
||||||
|
num_concurrent_refined_cards());
|
||||||
|
out->print_cr(" Of %d completed buffers:", num_processed_buf_total());
|
||||||
|
out->print_cr(" %8d (%5.1f%%) by concurrent RS threads.",
|
||||||
|
num_processed_buf_total(),
|
||||||
|
calc_percentage(num_processed_buf_rs_threads(), num_processed_buf_total()));
|
||||||
|
out->print_cr(" %8d (%5.1f%%) by mutator threads.",
|
||||||
|
num_processed_buf_mutator(),
|
||||||
|
calc_percentage(num_processed_buf_mutator(), num_processed_buf_total()));
|
||||||
|
out->print_cr(" Concurrent RS threads times (s)");
|
||||||
|
out->print(" ");
|
||||||
|
for (uint i = 0; i < _num_vtimes; i++) {
|
||||||
|
out->print(" %5.2f", rs_thread_vtime(i));
|
||||||
|
}
|
||||||
|
out->cr();
|
||||||
|
out->print_cr(" Concurrent sampling threads times (s)");
|
||||||
|
out->print_cr(" %5.2f", sampling_thread_vtime());
|
||||||
|
|
||||||
|
HRRSStatsIter blk;
|
||||||
|
G1CollectedHeap::heap()->heap_region_iterate(&blk);
|
||||||
|
out->print_cr(" Total heap region rem set sizes = "SIZE_FORMAT"K."
|
||||||
|
" Max = "SIZE_FORMAT"K.",
|
||||||
|
blk.total_mem_sz()/K, blk.max_mem_sz()/K);
|
||||||
|
out->print_cr(" Static structures = "SIZE_FORMAT"K,"
|
||||||
|
" free_lists = "SIZE_FORMAT"K.",
|
||||||
|
HeapRegionRemSet::static_mem_size() / K,
|
||||||
|
HeapRegionRemSet::fl_mem_size() / K);
|
||||||
|
out->print_cr(" "SIZE_FORMAT" occupied cards represented.",
|
||||||
|
blk.occupied());
|
||||||
|
HeapRegion* max_mem_sz_region = blk.max_mem_sz_region();
|
||||||
|
HeapRegionRemSet* rem_set = max_mem_sz_region->rem_set();
|
||||||
|
out->print_cr(" Max size region = "HR_FORMAT", "
|
||||||
|
"size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
|
||||||
|
HR_FORMAT_PARAMS(max_mem_sz_region),
|
||||||
|
(rem_set->mem_size() + K - 1)/K,
|
||||||
|
(rem_set->occupied() + K - 1)/K);
|
||||||
|
|
||||||
|
out->print_cr(" Did %d coarsenings.", num_coarsenings());
|
||||||
|
}
|
hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp (new file, 118 lines)
@@ -0,0 +1,118 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSETSUMMARY_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSETSUMMARY_HPP
+
+#include "utilities/ostream.hpp"
+
+class G1RemSet;
+
+// A G1RemSetSummary manages statistical information about the G1RemSet
+
+class G1RemSetSummary VALUE_OBJ_CLASS_SPEC {
+private:
+  friend class GetRSThreadVTimeClosure;
+
+  G1RemSet* _remset;
+
+  G1RemSet* remset() const {
+    return _remset;
+  }
+
+  size_t _num_refined_cards;
+  size_t _num_processed_buf_mutator;
+  size_t _num_processed_buf_rs_threads;
+
+  size_t _num_coarsenings;
+
+  double* _rs_threads_vtimes;
+  size_t _num_vtimes;
+
+  double _sampling_thread_vtime;
+
+  void set_rs_thread_vtime(uint thread, double value);
+  void set_sampling_thread_vtime(double value) {
+    _sampling_thread_vtime = value;
+  }
+
+  void free_and_null() {
+    if (_rs_threads_vtimes) {
+      FREE_C_HEAP_ARRAY(double, _rs_threads_vtimes, mtGC);
+      _rs_threads_vtimes = NULL;
+      _num_vtimes = 0;
+    }
+  }
+
+  // update this summary with current data from various places
+  void update();
+
+public:
+  G1RemSetSummary() : _remset(NULL), _num_refined_cards(0),
+    _num_processed_buf_mutator(0), _num_processed_buf_rs_threads(0), _num_coarsenings(0),
+    _rs_threads_vtimes(NULL), _num_vtimes(0), _sampling_thread_vtime(0.0f) {
+  }
+
+  ~G1RemSetSummary() {
+    free_and_null();
+  }
+
+  // set the counters in this summary to the values of the others
+  void set(G1RemSetSummary* other);
+  // subtract all counters from the other summary, and set them in the current
+  void subtract_from(G1RemSetSummary* other);
+
+  // initialize and get the first sampling
+  void initialize(G1RemSet* remset, uint num_workers);
+
+  void print_on(outputStream* out);
+
+  double rs_thread_vtime(uint thread) const;
+
+  double sampling_thread_vtime() const {
+    return _sampling_thread_vtime;
+  }
+
+  size_t num_concurrent_refined_cards() const {
+    return _num_refined_cards;
+  }
+
+  size_t num_processed_buf_mutator() const {
+    return _num_processed_buf_mutator;
+  }
+
+  size_t num_processed_buf_rs_threads() const {
+    return _num_processed_buf_rs_threads;
+  }
+
+  size_t num_processed_buf_total() const {
+    return num_processed_buf_mutator() + num_processed_buf_rs_threads();
+  }
+
+  size_t num_coarsenings() const {
+    return _num_coarsenings;
+  }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSETSUMMARY_HPP
@@ -329,7 +329,11 @@
                                                                     \
   develop(bool, G1EvacuationFailureALotDuringMixedGC, true,         \
           "Force use of evacuation failure handling during mixed "  \
-          "evacuation pauses")
+          "evacuation pauses")                                       \
+                                                                     \
+  diagnostic(bool, G1VerifyRSetsDuringFullGC, false,                 \
+          "If true, perform verification of each heap region's "     \
+          "remembered set when verifying the heap during a full GC.")

 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)

@@ -139,7 +139,7 @@ public:
         _n_failures++;
       }

-      if (!_g1h->full_collection()) {
+      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
         HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
         HeapRegion* to   = _g1h->heap_region_containing(obj);
         if (from != NULL && to != NULL &&

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -585,8 +585,7 @@ void ASParNewGeneration::compute_new_size() {
     size_policy->avg_young_live()->sample(used());
     size_policy->avg_eden_live()->sample(eden()->used());

-    size_policy->compute_young_generation_free_space(eden()->capacity(),
-                                                     max_gen_size());
+    size_policy->compute_eden_space_size(eden()->capacity(), max_gen_size());

     resize(size_policy->calculated_eden_size_in_bytes(),
            size_policy->calculated_survivor_size_in_bytes());

@@ -116,7 +116,7 @@ class ParallelScavengeHeap : public CollectedHeap {

   // The alignment used for eden and survivors within the young gen
   // and for boundary between young gen and old gen.
-  size_t intra_heap_alignment() const { return 64 * K; }
+  size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; }

   size_t capacity() const;
   size_t used() const;

@@ -120,6 +120,9 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {

     case system_dictionary:
       SystemDictionary::always_strong_oops_do(&mark_and_push_closure);
+      break;
+
+    case class_loader_data:
       ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, &follow_klass_closure, true);
       break;

@@ -98,7 +98,8 @@ class MarkFromRootsTask : public GCTask {
     management        = 6,
     jvmti             = 7,
     system_dictionary = 8,
-    code_cache        = 9
+    class_loader_data = 9,
+    code_cache        = 10
   };
  private:
   RootType _root_type;
@@ -194,7 +194,7 @@ void PSAdaptiveSizePolicy::clear_generation_free_space_flags() {

 // If this is not a full GC, only test and modify the young generation.

-void PSAdaptiveSizePolicy::compute_generation_free_space(
+void PSAdaptiveSizePolicy::compute_generations_free_space(
                                            size_t young_live,
                                            size_t eden_live,
                                            size_t old_live,

@@ -729,7 +729,7 @@ void PSAdaptiveSizePolicy::adjust_promo_for_pause_time(bool is_full_gc,

   if (PrintAdaptiveSizePolicy && Verbose) {
     gclog_or_tty->print_cr(
-      "PSAdaptiveSizePolicy::compute_old_gen_free_space "
+      "PSAdaptiveSizePolicy::adjust_promo_for_pause_time "
       "adjusting gen sizes for major pause (avg %f goal %f). "
       "desired_promo_size " SIZE_FORMAT " promo delta " SIZE_FORMAT,
       _avg_major_pause->average(), gc_pause_goal_sec(),

@@ -786,7 +786,7 @@ void PSAdaptiveSizePolicy::adjust_eden_for_pause_time(bool is_full_gc,

   if (PrintAdaptiveSizePolicy && Verbose) {
     gclog_or_tty->print_cr(
-      "PSAdaptiveSizePolicy::compute_eden_space_size "
+      "PSAdaptiveSizePolicy::adjust_eden_for_pause_time "
       "adjusting gen sizes for major pause (avg %f goal %f). "
       "desired_eden_size " SIZE_FORMAT " eden delta " SIZE_FORMAT,
       _avg_major_pause->average(), gc_pause_goal_sec(),

@@ -1001,7 +1001,7 @@ size_t PSAdaptiveSizePolicy::adjust_promo_for_footprint(

   if (PrintAdaptiveSizePolicy && Verbose) {
     gclog_or_tty->print_cr(
-      "AdaptiveSizePolicy::compute_generation_free_space "
+      "AdaptiveSizePolicy::adjust_promo_for_footprint "
       "adjusting tenured gen for footprint. "
       "starting promo size " SIZE_FORMAT
       " reduced promo size " SIZE_FORMAT,

@@ -1025,7 +1025,7 @@ size_t PSAdaptiveSizePolicy::adjust_eden_for_footprint(

   if (PrintAdaptiveSizePolicy && Verbose) {
     gclog_or_tty->print_cr(
-      "AdaptiveSizePolicy::compute_generation_free_space "
+      "AdaptiveSizePolicy::adjust_eden_for_footprint "
       "adjusting eden for footprint. "
       " starting eden size " SIZE_FORMAT
       " reduced eden size " SIZE_FORMAT

@@ -1280,7 +1280,7 @@ void PSAdaptiveSizePolicy::update_averages(bool is_survivor_overflow,

   if (PrintAdaptiveSizePolicy) {
     gclog_or_tty->print(
-      "AdaptiveSizePolicy::compute_survivor_space_size_and_thresh:"
+      "AdaptiveSizePolicy::update_averages:"
       " survived: " SIZE_FORMAT
       " promoted: " SIZE_FORMAT
       " overflow: %s",
@@ -344,13 +344,13 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
   // Takes current used space in all generations as input, as well
   // as an indication if a full gc has just been performed, for use
   // in deciding if an OOM error should be thrown.
-  void compute_generation_free_space(size_t young_live,
+  void compute_generations_free_space(size_t young_live,
                                      size_t eden_live,
                                      size_t old_live,
                                      size_t cur_eden,  // current eden in bytes
                                      size_t max_old_gen_size,
                                      size_t max_eden_size,
                                      bool   is_full_gc);

   void compute_eden_space_size(size_t young_live,
                                size_t eden_live,

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -119,7 +119,7 @@ class PSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
       ps_size_policy()->change_old_gen_for_min_pauses());
   }

-  // compute_generation_free_space() statistics
+  // compute_generations_free_space() statistics

   inline void update_avg_major_pause() {
     _avg_major_pause->set_value(

@@ -290,13 +290,13 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
     // Used for diagnostics
     size_policy->clear_generation_free_space_flags();

-    size_policy->compute_generation_free_space(young_live,
+    size_policy->compute_generations_free_space(young_live,
                                                eden_live,
                                                old_live,
                                                cur_eden,
                                                max_old_gen_size,
                                                max_eden_size,
                                                true /* full gc*/);

     size_policy->check_gc_overhead_limit(young_live,
                                          eden_live,
@@ -59,13 +59,25 @@
 #include <math.h>

 // All sizes are in HeapWords.
-const size_t ParallelCompactData::Log2RegionSize  = 9; // 512 words
+const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
 const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
 const size_t ParallelCompactData::RegionSizeBytes =
   RegionSize << LogHeapWordSize;
 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
 const size_t ParallelCompactData::RegionAddrMask       = ~RegionAddrOffsetMask;
+
+const size_t ParallelCompactData::Log2BlockSize   = 7; // 128 words
+const size_t ParallelCompactData::BlockSize       = (size_t)1 << Log2BlockSize;
+const size_t ParallelCompactData::BlockSizeBytes  =
+  BlockSize << LogHeapWordSize;
+const size_t ParallelCompactData::BlockSizeOffsetMask = BlockSize - 1;
+const size_t ParallelCompactData::BlockAddrOffsetMask = BlockSizeBytes - 1;
+const size_t ParallelCompactData::BlockAddrMask       = ~BlockAddrOffsetMask;
+
+const size_t ParallelCompactData::BlocksPerRegion = RegionSize / BlockSize;
+const size_t ParallelCompactData::Log2BlocksPerRegion =
+  Log2RegionSize - Log2BlockSize;

 const ParallelCompactData::RegionData::region_sz_t
 ParallelCompactData::RegionData::dc_shift = 27;
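For orientation: with Log2RegionSize raised from 9 to 16, a region grows from 512 to 65536 heap words, and the new 128-word blocks give RegionSize / BlockSize = 512 block entries per region. Below is a minimal standalone sketch of that arithmetic, assuming 8-byte heap words (a 64-bit build); the names mirror the constants above, but this is illustrative code, not HotSpot source.

#include <cstddef>
#include <cstdio>

int main() {
  const size_t LogHeapWordSize = 3;   // assumption: 8-byte HeapWord
  const size_t Log2RegionSize  = 16;  // 64K words per region
  const size_t Log2BlockSize   = 7;   // 128 words per block
  const size_t RegionSize      = (size_t)1 << Log2RegionSize;
  const size_t BlockSize       = (size_t)1 << Log2BlockSize;

  // Derived values, matching the definitions introduced above.
  const size_t RegionSizeBytes = RegionSize << LogHeapWordSize;
  const size_t BlockSizeBytes  = BlockSize << LogHeapWordSize;
  const size_t BlocksPerRegion = RegionSize / BlockSize;

  std::printf("region = %zu words (%zu bytes), block = %zu words (%zu bytes), "
              "blocks/region = %zu\n",
              RegionSize, RegionSizeBytes, BlockSize, BlockSizeBytes, BlocksPerRegion);
  return 0;
}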
@@ -359,6 +371,10 @@ ParallelCompactData::ParallelCompactData()
   _reserved_byte_size = 0;
   _region_data = 0;
   _region_count = 0;
+
+  _block_vspace = 0;
+  _block_data = 0;
+  _block_count = 0;
 }

 bool ParallelCompactData::initialize(MemRegion covered_region)

@@ -372,8 +388,7 @@ bool ParallelCompactData::initialize(MemRegion covered_region)
   assert((region_size & RegionSizeOffsetMask) == 0,
          "region size not a multiple of RegionSize");

-  bool result = initialize_region_data(region_size);
-
+  bool result = initialize_region_data(region_size) && initialize_block_data();
   return result;
 }

@@ -418,17 +433,36 @@ bool ParallelCompactData::initialize_region_data(size_t region_size)
   return false;
 }

+bool ParallelCompactData::initialize_block_data()
+{
+  assert(_region_count != 0, "region data must be initialized first");
+  const size_t count = _region_count << Log2BlocksPerRegion;
+  _block_vspace = create_vspace(count, sizeof(BlockData));
+  if (_block_vspace != 0) {
+    _block_data = (BlockData*)_block_vspace->reserved_low_addr();
+    _block_count = count;
+    return true;
+  }
+  return false;
+}
+
 void ParallelCompactData::clear()
 {
   memset(_region_data, 0, _region_vspace->committed_size());
+  memset(_block_data, 0, _block_vspace->committed_size());
 }

 void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
   assert(beg_region <= _region_count, "beg_region out of range");
   assert(end_region <= _region_count, "end_region out of range");
+  assert(RegionSize % BlockSize == 0, "RegionSize not a multiple of BlockSize");
+
   const size_t region_cnt = end_region - beg_region;
   memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));

+  const size_t beg_block = beg_region * BlocksPerRegion;
+  const size_t block_cnt = region_cnt * BlocksPerRegion;
+  memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
 }

 HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
@@ -707,49 +741,48 @@ bool ParallelCompactData::summarize(SplitInfo& split_info,

 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
   assert(addr != NULL, "Should detect NULL oop earlier");
-  assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
-#ifdef ASSERT
-  if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
-    gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
-  }
-#endif
-  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
+  assert(PSParallelCompact::gc_heap()->is_in(addr), "not in heap");
+  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");

   // Region covering the object.
-  size_t region_index = addr_to_region_idx(addr);
-  const RegionData* const region_ptr = region(region_index);
-  HeapWord* const region_addr = region_align_down(addr);
-
-  assert(addr < region_addr + RegionSize, "Region does not cover object");
-  assert(addr_to_region_ptr(region_addr) == region_ptr, "sanity check");
+  RegionData* const region_ptr = addr_to_region_ptr(addr);

   HeapWord* result = region_ptr->destination();

-  // If all the data in the region is live, then the new location of the object
-  // can be calculated from the destination of the region plus the offset of the
-  // object in the region.
+  // If the entire Region is live, the new location is region->destination + the
+  // offset of the object within in the Region.
+
+  // Run some performance tests to determine if this special case pays off.  It
+  // is worth it for pointers into the dense prefix.  If the optimization to
+  // avoid pointer updates in regions that only point to the dense prefix is
+  // ever implemented, this should be revisited.
   if (region_ptr->data_size() == RegionSize) {
-    result += pointer_delta(addr, region_addr);
-    DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
+    result += region_offset(addr);
     return result;
   }

-  // The new location of the object is
-  //    region destination +
-  //    size of the partial object extending onto the region +
-  //    sizes of the live objects in the Region that are to the left of addr
-  const size_t partial_obj_size = region_ptr->partial_obj_size();
-  HeapWord* const search_start = region_addr + partial_obj_size;
+  // Otherwise, the new location is region->destination + block offset + the
+  // number of live words in the Block that are (a) to the left of addr and (b)
+  // due to objects that start in the Block.
+
+  // Fill in the block table if necessary.  This is unsynchronized, so multiple
+  // threads may fill the block table for a region (harmless, since it is
+  // idempotent).
+  if (!region_ptr->blocks_filled()) {
+    PSParallelCompact::fill_blocks(addr_to_region_idx(addr));
+    region_ptr->set_blocks_filled();
+  }
+
+  HeapWord* const search_start = block_align_down(addr);
+  const size_t block_offset = addr_to_block_ptr(addr)->offset();

   const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
-  size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
-
-  result += partial_obj_size + live_to_left;
-  DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
+  const size_t live = bitmap->live_words_in_range(search_start, oop(addr));
+  result += block_offset + live;
+  DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result));
   return result;
 }

 #ifdef ASSERT
 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
 {
   const size_t* const beg = (const size_t*)vspace->committed_low_addr();
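In the non-dense case above, the forwarding address works out to destination + block_offset + (live words between the block start and addr). The following is a simplified, self-contained sketch of that arithmetic, using word indices and a toy liveness vector in place of the real HeapWord*/ParMarkBitMap types; live_words_in_range below is a stand-in helper, not the HotSpot API.

#include <cstddef>
#include <vector>

// Toy model: word indices instead of HeapWord*, a single region only.
struct ToyRegion {
  size_t destination;                // first destination word of the region
  std::vector<size_t> block_offset;  // live words left of each block's first object
};

// Stand-in for the bitmap query: count live words in [beg, end).
static size_t live_words_in_range(const std::vector<bool>& live, size_t beg, size_t end) {
  size_t n = 0;
  for (size_t i = beg; i < end; ++i) {
    if (live[i]) ++n;
  }
  return n;
}

// New location of the live word at 'addr' (a word index within the region).
static size_t calc_new_pointer_toy(const ToyRegion& r, const std::vector<bool>& live,
                                   size_t addr, size_t block_size) {
  const size_t block       = addr / block_size;   // analogous to block_align_down()
  const size_t block_start = block * block_size;
  return r.destination + r.block_offset[block] +
         live_words_in_range(live, block_start, addr);
}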
@@ -762,16 +795,10 @@ void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
 void ParallelCompactData::verify_clear()
 {
   verify_clear(_region_vspace);
+  verify_clear(_block_vspace);
 }
 #endif  // #ifdef ASSERT

-#ifdef NOT_PRODUCT
-ParallelCompactData::RegionData* debug_region(size_t region_index) {
-  ParallelCompactData& sd = PSParallelCompact::summary_data();
-  return sd.region(region_index);
-}
-#endif
-
 elapsedTimer        PSParallelCompact::_accumulated_time;
 unsigned int        PSParallelCompact::_total_invocations = 0;
 unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;

@@ -1961,11 +1988,6 @@ void PSParallelCompact::invoke(bool maximum_heap_compaction) {
                                       maximum_heap_compaction);
 }

-bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
-  size_t addr_region_index = addr_to_region_idx(addr);
-  return region_index == addr_region_index;
-}
-
 // This method contains no policy. You should probably
 // be calling invoke() instead.
 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {

@@ -2101,13 +2123,13 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
     // Used for diagnostics
     size_policy->clear_generation_free_space_flags();

-    size_policy->compute_generation_free_space(young_live,
+    size_policy->compute_generations_free_space(young_live,
                                                eden_live,
                                                old_live,
                                                cur_eden,
                                                max_old_gen_size,
                                                max_eden_size,
                                                true /* full gc*/);

     size_policy->check_gc_overhead_limit(young_live,
                                          eden_live,

@@ -2338,6 +2360,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
+    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));

@@ -2626,6 +2649,41 @@ void PSParallelCompact::enqueue_region_stealing_tasks(
   }
 }

+#ifdef ASSERT
+// Write a histogram of the number of times the block table was filled for a
+// region.
+void PSParallelCompact::write_block_fill_histogram(outputStream* const out)
+{
+  if (!TraceParallelOldGCCompactionPhase) return;
+
+  typedef ParallelCompactData::RegionData rd_t;
+  ParallelCompactData& sd = summary_data();
+
+  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
+    MutableSpace* const spc = _space_info[id].space();
+    if (spc->bottom() != spc->top()) {
+      const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
+      HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
+      const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
+
+      size_t histo[5] = { 0, 0, 0, 0, 0 };
+      const size_t histo_len = sizeof(histo) / sizeof(size_t);
+      const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
+
+      for (const rd_t* cur = beg; cur < end; ++cur) {
+        ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
+      }
+      out->print("%u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
+      for (size_t i = 0; i < histo_len; ++i) {
+        out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
+                   histo[i], 100.0 * histo[i] / region_cnt);
+      }
+      out->cr();
+    }
+  }
+}
+#endif // #ifdef ASSERT
+
 void PSParallelCompact::compact() {
   // trace("5");
   TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);

@@ -2665,6 +2723,8 @@ void PSParallelCompact::compact() {
       update_deferred_objects(cm, SpaceId(id));
     }
   }
+
+  DEBUG_ONLY(write_block_fill_histogram(gclog_or_tty));
 }

 #ifdef ASSERT
@@ -3129,6 +3189,57 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
   } while (true);
 }

+void PSParallelCompact::fill_blocks(size_t region_idx)
+{
+  // Fill in the block table elements for the specified region.  Each block
+  // table element holds the number of live words in the region that are to the
+  // left of the first object that starts in the block.  Thus only blocks in
+  // which an object starts need to be filled.
+  //
+  // The algorithm scans the section of the bitmap that corresponds to the
+  // region, keeping a running total of the live words.  When an object start is
+  // found, if it's the first to start in the block that contains it, the
+  // current total is written to the block table element.
+  const size_t Log2BlockSize = ParallelCompactData::Log2BlockSize;
+  const size_t Log2RegionSize = ParallelCompactData::Log2RegionSize;
+  const size_t RegionSize = ParallelCompactData::RegionSize;
+
+  ParallelCompactData& sd = summary_data();
+  const size_t partial_obj_size = sd.region(region_idx)->partial_obj_size();
+  if (partial_obj_size >= RegionSize) {
+    return; // No objects start in this region.
+  }
+
+  // Ensure the first loop iteration decides that the block has changed.
+  size_t cur_block = sd.block_count();
+
+  const ParMarkBitMap* const bitmap = mark_bitmap();
+
+  const size_t Log2BitsPerBlock = Log2BlockSize - LogMinObjAlignment;
+  assert((size_t)1 << Log2BitsPerBlock ==
+         bitmap->words_to_bits(ParallelCompactData::BlockSize), "sanity");
+
+  size_t beg_bit = bitmap->words_to_bits(region_idx << Log2RegionSize);
+  const size_t range_end = beg_bit + bitmap->words_to_bits(RegionSize);
+  size_t live_bits = bitmap->words_to_bits(partial_obj_size);
+  beg_bit = bitmap->find_obj_beg(beg_bit + live_bits, range_end);
+  while (beg_bit < range_end) {
+    const size_t new_block = beg_bit >> Log2BitsPerBlock;
+    if (new_block != cur_block) {
+      cur_block = new_block;
+      sd.block(cur_block)->set_offset(bitmap->bits_to_words(live_bits));
+    }
+
+    const size_t end_bit = bitmap->find_obj_end(beg_bit, range_end);
+    if (end_bit < range_end - 1) {
+      live_bits += end_bit - beg_bit + 1;
+      beg_bit = bitmap->find_obj_beg(end_bit + 1, range_end);
+    } else {
+      return;
+    }
+  }
+}
+
 void
 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
   const MutableSpace* sp = space(space_id);
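The comment at the top of fill_blocks() describes a single pass that keeps a running live-word total and records it the first time an object start falls into a new block. The same idea in a compact, self-contained form is sketched below, with a toy per-word object map instead of the real ParMarkBitMap; partial objects spilling in from the previous region are ignored for simplicity.

#include <cstddef>
#include <vector>

// Toy fill_blocks: obj_start[w] marks the first word of a live object at word w,
// and obj_size[w] gives its size in words (>= 1 when obj_start[w] is true).
// blocks[b] receives the number of live words to the left of the first object
// that starts in block b; blocks without an object start are left untouched.
static void fill_blocks_toy(const std::vector<bool>& obj_start,
                            const std::vector<size_t>& obj_size,
                            std::vector<size_t>& blocks, size_t block_size) {
  size_t live = 0;
  size_t cur_block = blocks.size();      // force the first comparison to differ
  for (size_t w = 0; w < obj_start.size(); /* advanced below */) {
    if (obj_start[w]) {
      const size_t b = w / block_size;
      if (b != cur_block) {              // first object starting in block b
        cur_block = b;
        blocks[b] = live;
      }
      live += obj_size[w];               // the whole object is live
      w += obj_size[w];                  // skip to the word after the object
    } else {
      ++w;                               // dead word, keep scanning
    }
  }
}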
@@ -220,6 +220,17 @@ public:
   // Mask for the bits in a pointer to get the address of the start of a region.
   static const size_t RegionAddrMask;

+  static const size_t Log2BlockSize;
+  static const size_t BlockSize;
+  static const size_t BlockSizeBytes;
+
+  static const size_t BlockSizeOffsetMask;
+  static const size_t BlockAddrOffsetMask;
+  static const size_t BlockAddrMask;
+
+  static const size_t BlocksPerRegion;
+  static const size_t Log2BlocksPerRegion;
+
   class RegionData
   {
   public:

@@ -272,6 +283,12 @@ public:
     inline uint destination_count() const;
     inline uint destination_count_raw() const;

+    // Whether the block table for this region has been filled.
+    inline bool blocks_filled() const;
+
+    // Number of times the block table was filled.
+    DEBUG_ONLY(inline size_t blocks_filled_count() const;)
+
     // The location of the java heap data that corresponds to this region.
     inline HeapWord* data_location() const;

@@ -296,6 +313,7 @@ public:
     void set_partial_obj_size(size_t words) {
       _partial_obj_size = (region_sz_t) words;
     }
+    inline void set_blocks_filled();

     inline void set_destination_count(uint count);
     inline void set_live_obj_size(size_t words);

@@ -328,7 +346,11 @@ public:
     HeapWord*            _partial_obj_addr;
     region_sz_t          _partial_obj_size;
     region_sz_t volatile _dc_and_los;
+    bool                 _blocks_filled;
+
 #ifdef ASSERT
+    size_t               _blocks_filled_count;  // Number of block table fills.
+
     // These enable optimizations that are only partially implemented.  Use
     // debug builds to prevent the code fragments from breaking.
     HeapWord*            _data_location;

@@ -337,11 +359,26 @@ public:

 #ifdef ASSERT
    public:
-    uint _pushed;  // 0 until region is pushed onto a worker's stack
+    uint _pushed;  // 0 until region is pushed onto a stack
    private:
 #endif
   };

+  // "Blocks" allow shorter sections of the bitmap to be searched.  Each Block
+  // holds an offset, which is the amount of live data in the Region to the left
+  // of the first live object that starts in the Block.
+  class BlockData
+  {
+  public:
+    typedef unsigned short int blk_ofs_t;
+
+    blk_ofs_t offset() const    { return _offset; }
+    void set_offset(size_t val) { _offset = (blk_ofs_t)val; }
+
+  private:
+    blk_ofs_t _offset;
+  };
+
 public:
   ParallelCompactData();
   bool initialize(MemRegion covered_region);

@@ -353,8 +390,9 @@ public:
   inline RegionData* region(size_t region_idx) const;
   inline size_t      region(const RegionData* const region_ptr) const;

-  // Returns true if the given address is contained within the region
-  bool region_contains(size_t region_index, HeapWord* addr);
+  size_t block_count() const { return _block_count; }
+  inline BlockData* block(size_t block_idx) const;
+  inline size_t     block(const BlockData* block_ptr) const;

   void add_obj(HeapWord* addr, size_t len);
   void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
@@ -394,11 +432,24 @@ public:
   inline HeapWord* region_align_up(HeapWord* addr) const;
   inline bool is_region_aligned(HeapWord* addr) const;

+  // Analogous to region_offset() for blocks.
+  size_t block_offset(const HeapWord* addr) const;
+  size_t addr_to_block_idx(const HeapWord* addr) const;
+  size_t addr_to_block_idx(const oop obj) const {
+    return addr_to_block_idx((HeapWord*) obj);
+  }
+  inline BlockData* addr_to_block_ptr(const HeapWord* addr) const;
+  inline HeapWord*  block_to_addr(size_t block) const;
+  inline size_t     region_to_block_idx(size_t region) const;
+
+  inline HeapWord* block_align_down(HeapWord* addr) const;
+  inline HeapWord* block_align_up(HeapWord* addr) const;
+  inline bool is_block_aligned(HeapWord* addr) const;
+
   // Return the address one past the end of the partial object.
   HeapWord* partial_obj_end(size_t region_idx) const;

-  // Return the new location of the object p after the
-  // the compaction.
+  // Return the location of the object after compaction.
   HeapWord* calc_new_pointer(HeapWord* addr);

   HeapWord* calc_new_pointer(oop p) {

@@ -411,6 +462,7 @@ public:
 #endif  // #ifdef ASSERT

 private:
+  bool initialize_block_data();
   bool initialize_region_data(size_t region_size);
   PSVirtualSpace* create_vspace(size_t count, size_t element_size);

@@ -424,6 +476,10 @@ private:
   size_t          _reserved_byte_size;
   RegionData*     _region_data;
   size_t          _region_count;
+
+  PSVirtualSpace* _block_vspace;
+  BlockData*      _block_data;
+  size_t          _block_count;
 };

 inline uint

@@ -438,6 +494,28 @@ ParallelCompactData::RegionData::destination_count() const
   return destination_count_raw() >> dc_shift;
 }

+inline bool
+ParallelCompactData::RegionData::blocks_filled() const
+{
+  return _blocks_filled;
+}
+
+#ifdef ASSERT
+inline size_t
+ParallelCompactData::RegionData::blocks_filled_count() const
+{
+  return _blocks_filled_count;
+}
+#endif // #ifdef ASSERT
+
+inline void
+ParallelCompactData::RegionData::set_blocks_filled()
+{
+  _blocks_filled = true;
+  // Debug builds count the number of times the table was filled.
+  DEBUG_ONLY(Atomic::inc_ptr(&_blocks_filled_count));
+}
+
 inline void
 ParallelCompactData::RegionData::set_destination_count(uint count)
 {

@@ -532,6 +610,12 @@ ParallelCompactData::region(const RegionData* const region_ptr) const
   return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
 }

+inline ParallelCompactData::BlockData*
+ParallelCompactData::block(size_t n) const {
+  assert(n < block_count(), "bad arg");
+  return _block_data + n;
+}
+
 inline size_t
 ParallelCompactData::region_offset(const HeapWord* addr) const
 {
@@ -598,6 +682,63 @@ ParallelCompactData::is_region_aligned(HeapWord* addr) const
   return region_offset(addr) == 0;
 }

+inline size_t
+ParallelCompactData::block_offset(const HeapWord* addr) const
+{
+  assert(addr >= _region_start, "bad addr");
+  assert(addr <= _region_end, "bad addr");
+  return (size_t(addr) & BlockAddrOffsetMask) >> LogHeapWordSize;
+}
+
+inline size_t
+ParallelCompactData::addr_to_block_idx(const HeapWord* addr) const
+{
+  assert(addr >= _region_start, "bad addr");
+  assert(addr <= _region_end, "bad addr");
+  return pointer_delta(addr, _region_start) >> Log2BlockSize;
+}
+
+inline ParallelCompactData::BlockData*
+ParallelCompactData::addr_to_block_ptr(const HeapWord* addr) const
+{
+  return block(addr_to_block_idx(addr));
+}
+
+inline HeapWord*
+ParallelCompactData::block_to_addr(size_t block) const
+{
+  assert(block < _block_count, "block out of range");
+  return _region_start + (block << Log2BlockSize);
+}
+
+inline size_t
+ParallelCompactData::region_to_block_idx(size_t region) const
+{
+  return region << Log2BlocksPerRegion;
+}
+
+inline HeapWord*
+ParallelCompactData::block_align_down(HeapWord* addr) const
+{
+  assert(addr >= _region_start, "bad addr");
+  assert(addr < _region_end + RegionSize, "bad addr");
+  return (HeapWord*)(size_t(addr) & BlockAddrMask);
+}
+
+inline HeapWord*
+ParallelCompactData::block_align_up(HeapWord* addr) const
+{
+  assert(addr >= _region_start, "bad addr");
+  assert(addr <= _region_end, "bad addr");
+  return block_align_down(addr + BlockSizeOffsetMask);
+}
+
+inline bool
+ParallelCompactData::is_block_aligned(HeapWord* addr) const
+{
+  return block_offset(addr) == 0;
+}
+
 // Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the
 // do_addr() method.
 //

@@ -775,6 +916,7 @@ class PSParallelCompact : AllStatic {
   // Convenient access to type names.
   typedef ParMarkBitMap::idx_t idx_t;
   typedef ParallelCompactData::RegionData RegionData;
+  typedef ParallelCompactData::BlockData BlockData;

   typedef enum {
     old_space_id, eden_space_id,

@@ -962,6 +1104,8 @@ class PSParallelCompact : AllStatic {
   // Adjust addresses in roots.  Does not adjust addresses in heap.
   static void adjust_roots();

+  DEBUG_ONLY(static void write_block_fill_histogram(outputStream* const out);)
+
   // Move objects to new locations.
   static void compact_perm(ParCompactionManager* cm);
   static void compact();

@@ -1128,6 +1272,9 @@ class PSParallelCompact : AllStatic {
     fill_region(cm, region);
   }

+  // Fill in the block table for the specified region.
+  static void fill_blocks(size_t region_idx);
+
   // Update the deferred objects in the space.
   static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);

@@ -42,7 +42,7 @@ inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
     if (o->is_forwarded()) {
       o = o->forwardee();
       // Card mark
-      if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
+      if (PSScavenge::is_obj_in_young(o)) {
         PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
       }
       oopDesc::encode_store_heap_oop_not_null(p, o);

@@ -61,6 +61,7 @@ CardTableExtension* PSScavenge::_card_table = NULL;
 bool                  PSScavenge::_survivor_overflow = false;
 uint                  PSScavenge::_tenuring_threshold = 0;
 HeapWord*             PSScavenge::_young_generation_boundary = NULL;
+uintptr_t             PSScavenge::_young_generation_boundary_compressed = 0;
 elapsedTimer          PSScavenge::_accumulated_time;
 Stack<markOop, mtGC>  PSScavenge::_preserved_mark_stack;
 Stack<oop, mtGC>      PSScavenge::_preserved_oop_stack;

@@ -71,7 +72,7 @@ bool PSScavenge::_promotion_failed = false;
 class PSIsAliveClosure: public BoolObjectClosure {
 public:
   bool do_object_b(oop p) {
-    return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
+    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
   }
 };

@@ -408,6 +409,7 @@ bool PSScavenge::invoke_no_policy() {
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
+      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

@@ -449,11 +451,9 @@ bool PSScavenge::invoke_no_policy() {
       reference_processor()->enqueue_discovered_references(NULL);
     }

-    // Unlink any dead interned Strings
-    StringTable::unlink(&_is_alive_closure);
-    // Process the remaining live ones
-    PSScavengeRootsClosure root_closure(promotion_manager);
-    StringTable::oops_do(&root_closure);
+    // Unlink any dead interned Strings and process the remaining live ones.
+    PSScavengeRootsClosure root_closure(promotion_manager);
+    StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);

     // Finally, flush the promotion_manager's labs, and deallocate its stacks.
     PSPromotionManager::post_scavenge();

@@ -816,7 +816,7 @@ void PSScavenge::initialize() {
   // Set boundary between young_gen and old_gen
   assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
          "old above young");
-  _young_generation_boundary = young_gen->eden_space()->bottom();
+  set_young_generation_boundary(young_gen->eden_space()->bottom());

   // Initialize ref handling object for scavenging.
   MemRegion mr = young_gen->reserved();
@@ -62,19 +62,22 @@ class PSScavenge: AllStatic {

  protected:
   // Flags/counters
   static ReferenceProcessor*  _ref_processor;        // Reference processor for scavenging.
   static PSIsAliveClosure     _is_alive_closure;     // Closure used for reference processing
   static CardTableExtension*  _card_table;           // We cache the card table for fast access.
   static bool                 _survivor_overflow;    // Overflow this collection
   static uint                 _tenuring_threshold;   // tenuring threshold for next scavenge
   static elapsedTimer         _accumulated_time;     // total time spent on scavenge
-  static HeapWord*            _young_generation_boundary; // The lowest address possible for the young_gen.
+  // The lowest address possible for the young_gen.
   // This is used to decide if an oop should be scavenged,
   // cards should be marked, etc.
+  static HeapWord*            _young_generation_boundary;
+  // Used to optimize compressed oops young gen boundary checking.
+  static uintptr_t            _young_generation_boundary_compressed;
   static Stack<markOop, mtGC> _preserved_mark_stack; // List of marks to be restored after failed promotion
   static Stack<oop, mtGC>     _preserved_oop_stack;  // List of oops that need their mark restored.
   static CollectorCounters*   _counters;             // collector performance counters
   static bool                 _promotion_failed;

   static void clean_up_failed_promotion();

@@ -112,6 +115,9 @@ class PSScavenge: AllStatic {
   // boundary moves, _young_generation_boundary must be reset
   static void set_young_generation_boundary(HeapWord* v) {
     _young_generation_boundary = v;
+    if (UseCompressedOops) {
+      _young_generation_boundary_compressed = (uintptr_t)oopDesc::encode_heap_oop((oop)v);
+    }
   }

   // Called by parallelScavengeHeap to init the tenuring threshold

@@ -140,11 +146,19 @@ class PSScavenge: AllStatic {
   static void copy_and_push_safe_barrier_from_klass(PSPromotionManager* pm, oop* p);

   // Is an object in the young generation
-  // This assumes that the HeapWord argument is in the heap,
+  // This assumes that the 'o' is in the heap,
   // so it only checks one side of the complete predicate.
+
+  inline static bool is_obj_in_young(oop o) {
+    return (HeapWord*)o >= _young_generation_boundary;
+  }
+
+  inline static bool is_obj_in_young(narrowOop o) {
+    return (uintptr_t)o >= _young_generation_boundary_compressed;
+  }
+
   inline static bool is_obj_in_young(HeapWord* o) {
-    const bool result = (o >= _young_generation_boundary);
-    return result;
+    return o >= _young_generation_boundary;
   }
 };
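The cached compressed boundary above relies on compressed-oop encoding being order-preserving over the heap range, so a narrowOop can be compared against the encoded boundary without decoding it first. A toy model of that property is sketched below, with a hypothetical fixed-base, shift-by-3 encode() rather than the real oopDesc routines, and assuming a 64-bit host.

#include <cassert>
#include <cstdint>

// Toy compressed-oop model: encode(addr) = (addr - base) >> shift.
// The mapping is monotonic, so '>=' gives the same answer either way.
static const uintptr_t kHeapBase = 0x100000000ULL;
static const unsigned  kShift    = 3;

static uint32_t encode(uintptr_t addr) {
  return (uint32_t)((addr - kHeapBase) >> kShift);
}

static uintptr_t boundary            = 0;  // start of the young generation
static uint32_t  boundary_compressed = 0;  // cached encoded form

static void set_boundary(uintptr_t v) {
  boundary            = v;
  boundary_compressed = encode(v);          // mirrors set_young_generation_boundary()
}

static bool in_young_raw(uintptr_t addr) { return addr >= boundary; }
static bool in_young_narrow(uint32_t n)  { return n >= boundary_compressed; }

int main() {
  set_boundary(kHeapBase + 0x4000);
  const uintptr_t obj = kHeapBase + 0x8000;
  // Comparing the encoded value agrees with comparing the raw address.
  assert(in_young_raw(obj) == in_young_narrow(encode(obj)));
  return 0;
}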
|
@ -39,9 +39,7 @@ inline void PSScavenge::save_to_space_top_before_gc() {
|
|||||||
|
|
||||||
template <class T> inline bool PSScavenge::should_scavenge(T* p) {
|
template <class T> inline bool PSScavenge::should_scavenge(T* p) {
|
||||||
T heap_oop = oopDesc::load_heap_oop(p);
|
T heap_oop = oopDesc::load_heap_oop(p);
|
||||||
if (oopDesc::is_null(heap_oop)) return false;
|
return PSScavenge::is_obj_in_young(heap_oop);
|
||||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
|
||||||
return PSScavenge::is_obj_in_young((HeapWord*)obj);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
template <class T>
|
template <class T>
|
||||||
@ -94,7 +92,7 @@ inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
|
|||||||
// or from metadata.
|
// or from metadata.
|
||||||
if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
|
if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
|
||||||
Universe::heap()->is_in_reserved(p)) {
|
Universe::heap()->is_in_reserved(p)) {
|
||||||
if (PSScavenge::is_obj_in_young((HeapWord*)new_obj)) {
|
if (PSScavenge::is_obj_in_young(new_obj)) {
|
||||||
card_table()->inline_write_ref_field_gc(p, new_obj);
|
card_table()->inline_write_ref_field_gc(p, new_obj);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -147,7 +145,7 @@ class PSScavengeFromKlassClosure: public OopClosure {
|
|||||||
}
|
}
|
||||||
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
|
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
|
||||||
|
|
||||||
if (PSScavenge::is_obj_in_young((HeapWord*)new_obj)) {
|
if (PSScavenge::is_obj_in_young(new_obj)) {
|
||||||
do_klass_barrier();
|
do_klass_barrier();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -79,15 +79,16 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
case system_dictionary:
|
case system_dictionary:
|
||||||
{
|
|
||||||
SystemDictionary::oops_do(&roots_closure);
|
SystemDictionary::oops_do(&roots_closure);
|
||||||
|
|
||||||
// Move this to another root_type?
|
|
||||||
PSScavengeKlassClosure klass_closure(pm);
|
|
||||||
ClassLoaderDataGraph::oops_do(&roots_closure, &klass_closure, false);
|
|
||||||
}
|
|
||||||
break;
|
break;
|
||||||
|
|
||||||
|
case class_loader_data:
|
||||||
|
{
|
||||||
|
PSScavengeKlassClosure klass_closure(pm);
|
||||||
|
ClassLoaderDataGraph::oops_do(&roots_closure, &klass_closure, false);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
|
||||||
case management:
|
case management:
|
||||||
Management::oops_do(&roots_closure);
|
Management::oops_do(&roots_closure);
|
||||||
break;
|
break;
|
||||||
|
@@ -59,9 +59,10 @@ class ScavengeRootsTask : public GCTask {
     object_synchronizer = 4,
     flat_profiler       = 5,
     system_dictionary   = 6,
-    management          = 7,
-    jvmti               = 8,
-    code_cache          = 9
+    class_loader_data   = 7,
+    management          = 8,
+    jvmti               = 9,
+    code_cache          = 10
   };
  private:
   RootType _root_type;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -467,7 +467,7 @@ void AdaptiveSizePolicy::check_gc_overhead_limit(
         (free_in_old_gen < (size_t) mem_free_old_limit &&
          free_in_eden < (size_t) mem_free_eden_limit))) {
       gclog_or_tty->print_cr(
-            "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
+            "PSAdaptiveSizePolicy::check_gc_overhead_limit:"
             " promo_limit: " SIZE_FORMAT
             " max_eden_size: " SIZE_FORMAT
             " total_free_limit: " SIZE_FORMAT

@@ -158,7 +158,7 @@ public:
   // Fills in the unallocated portion of the buffer with a garbage object.
   // If "end_of_gc" is TRUE, is after the last use in the GC.  IF "retain"
   // is true, attempt to re-use the unused portion in the next GC.
-  void retire(bool end_of_gc, bool retain);
+  virtual void retire(bool end_of_gc, bool retain);

   void print() PRODUCT_RETURN;
 };
@ -468,7 +468,25 @@ BytecodeInterpreter::run(interpreterState istate) {
|
|||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
if (istate->_msg != initialize) {
|
if (istate->_msg != initialize) {
|
||||||
assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
|
// We have a problem here if we are running with a pre-hsx24 JDK (for example during bootstrap)
|
||||||
|
// because in that case, EnableInvokeDynamic is true by default but will be later switched off
|
||||||
|
// if java_lang_invoke_MethodHandle::compute_offsets() detects that the JDK only has the classes
|
||||||
|
// for the old JSR292 implementation.
|
||||||
|
// This leads to a situation where 'istate->_stack_limit' always accounts for
|
||||||
|
// methodOopDesc::extra_stack_entries() because it is computed in
|
||||||
|
// CppInterpreterGenerator::generate_compute_interpreter_state() which was generated while
|
||||||
|
// EnableInvokeDynamic was still true. On the other hand, istate->_method->max_stack() doesn't
|
||||||
|
// account for extra_stack_entries() anymore because at the time when it is called
|
||||||
|
// EnableInvokeDynamic was already set to false.
|
||||||
|
// So we have a second version of the assertion which handles the case where EnableInvokeDynamic was
|
||||||
|
// switched off because of the wrong classes.
|
||||||
|
if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) {
|
||||||
|
assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
|
||||||
|
} else {
|
||||||
|
const int extra_stack_entries = Method::extra_stack_entries_for_indy;
|
||||||
|
assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries
|
||||||
|
+ 1), "bad stack limit");
|
||||||
|
}
|
||||||
#ifndef SHARK
|
#ifndef SHARK
|
||||||
IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
|
IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
|
||||||
#endif // !SHARK
|
#endif // !SHARK
|
||||||
|
@ -60,10 +60,11 @@ void* _ValueObj::operator new [](size_t size) { ShouldNotCallThis(); return 0;
|
|||||||
void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); }
|
void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); }
|
||||||
|
|
||||||
void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
|
void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
|
||||||
size_t word_size, bool read_only, TRAPS) {
|
size_t word_size, bool read_only,
|
||||||
|
MetaspaceObj::Type type, TRAPS) {
|
||||||
// Klass has it's own operator new
|
// Klass has it's own operator new
|
||||||
return Metaspace::allocate(loader_data, word_size, read_only,
|
return Metaspace::allocate(loader_data, word_size, read_only,
|
||||||
Metaspace::NonClassType, CHECK_NULL);
|
type, CHECK_NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool MetaspaceObj::is_shared() const {
|
bool MetaspaceObj::is_shared() const {
|
||||||
|
@ -268,8 +268,55 @@ class MetaspaceObj {
|
|||||||
bool is_shared() const;
|
bool is_shared() const;
|
||||||
void print_address_on(outputStream* st) const; // nonvirtual address printing
|
void print_address_on(outputStream* st) const; // nonvirtual address printing
|
||||||
|
|
||||||
|
#define METASPACE_OBJ_TYPES_DO(f) \
|
||||||
|
f(Unknown) \
|
||||||
|
f(Class) \
|
||||||
|
f(Symbol) \
|
||||||
|
f(TypeArrayU1) \
|
||||||
|
f(TypeArrayU2) \
|
||||||
|
f(TypeArrayU4) \
|
||||||
|
f(TypeArrayU8) \
|
||||||
|
f(TypeArrayOther) \
|
||||||
|
f(Method) \
|
||||||
|
f(ConstMethod) \
|
||||||
|
f(MethodData) \
|
||||||
|
f(ConstantPool) \
|
||||||
|
f(ConstantPoolCache) \
|
||||||
|
f(Annotation) \
|
||||||
|
f(MethodCounters)
|
||||||
|
|
||||||
|
#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
|
||||||
|
#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
|
||||||
|
|
||||||
|
enum Type {
|
||||||
|
// Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
|
||||||
|
METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
|
||||||
|
_number_of_types
|
||||||
|
};
|
||||||
|
|
||||||
|
static const char * type_name(Type type) {
|
||||||
|
switch(type) {
|
||||||
|
METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
|
||||||
|
default:
|
||||||
|
ShouldNotReachHere();
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static MetaspaceObj::Type array_type(size_t elem_size) {
|
||||||
|
switch (elem_size) {
|
||||||
|
case 1: return TypeArrayU1Type;
|
||||||
|
case 2: return TypeArrayU2Type;
|
||||||
|
case 4: return TypeArrayU4Type;
|
||||||
|
case 8: return TypeArrayU8Type;
|
||||||
|
default:
|
||||||
|
return TypeArrayOtherType;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
void* operator new(size_t size, ClassLoaderData* loader_data,
|
void* operator new(size_t size, ClassLoaderData* loader_data,
|
||||||
size_t word_size, bool read_only, Thread* thread);
|
size_t word_size, bool read_only,
|
||||||
|
Type type, Thread* thread);
|
||||||
// can't use TRAPS from this header file.
|
// can't use TRAPS from this header file.
|
||||||
void operator delete(void* p) { ShouldNotCallThis(); }
|
void operator delete(void* p) { ShouldNotCallThis(); }
|
||||||
};
|
};
|
||||||
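The METASPACE_OBJ_TYPES_DO list added above is an X-macro: a single list of type names that is expanded twice, once to declare the enum constants and once to generate the cases of the name-lookup switch, so the two can never drift apart. Below is a minimal standalone sketch of the same pattern; the Kind, Widget and Gadget names and the kind_name helper are illustrative only, not HotSpot identifiers.

#include <cstdio>

// One list of names, expanded multiple times with different "f" macros.
#define KIND_TYPES_DO(f) \
  f(Unknown)             \
  f(Widget)              \
  f(Gadget)

#define KIND_DECLARE(name)   name ## Kind,
#define KIND_NAME_CASE(name) case name ## Kind: return #name;

enum Kind {
  KIND_TYPES_DO(KIND_DECLARE)   // expands to UnknownKind, WidgetKind, GadgetKind,
  _number_of_kinds
};

static const char* kind_name(Kind k) {
  switch (k) {
  KIND_TYPES_DO(KIND_NAME_CASE) // one "case ...Kind: return \"...\";" per entry
  default: return "?";
  }
}

int main() {
  for (int k = 0; k < _number_of_kinds; k++) {
    std::printf("%d -> %s\n", k, kind_name(static_cast<Kind>(k)));
  }
  return 0;
}

Adding a new metadata type then only requires touching the single list; both the enum and the printable name pick it up automatically.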
@@ -713,6 +713,23 @@ class SpaceManager : public CHeapObj<mtClass> {
 #ifdef ASSERT
   void verify_allocated_blocks_words();
 #endif
+
+  size_t get_raw_word_size(size_t word_size) {
+    // If only the dictionary is going to be used (i.e., no
+    // indexed free list), then there is a minimum size requirement.
+    // MinChunkSize is a placeholder for the real minimum size JJJ
+    size_t byte_size = word_size * BytesPerWord;
+
+    size_t byte_size_with_overhead = byte_size + Metablock::overhead();
+
+    size_t raw_bytes_size = MAX2(byte_size_with_overhead,
+                                 Metablock::min_block_byte_size());
+    raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
+    size_t raw_word_size = raw_bytes_size / BytesPerWord;
+    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
+
+    return raw_word_size;
+  }
 };

 uint const SpaceManager::_small_chunk_limit = 4;
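The helper introduced above folds the per-block rounding into one place: take the requested size in words, convert to bytes, add the block header overhead, clamp to the minimum block size, align up, and convert back to words. A standalone sketch of that arithmetic follows; the overhead, minimum and alignment constants are made up for the example rather than taken from HotSpot's Metablock.

#include <cassert>
#include <cstddef>
#include <cstdio>

const size_t BytesPerWord   = 8;   // assumption: 64-bit words
const size_t BlockOverhead  = 16;  // made-up per-block header overhead in bytes
const size_t MinBlockBytes  = 32;  // made-up minimum block size
const size_t ArenaAlignment = 16;  // made-up alignment quantum (power of two)

static size_t align_up(size_t n, size_t alignment) {
  return (n + alignment - 1) & ~(alignment - 1);
}

// Mirrors the shape of get_raw_word_size(): bytes plus overhead,
// clamped to the minimum block size, aligned, then back to words.
static size_t raw_word_size(size_t word_size) {
  size_t byte_size     = word_size * BytesPerWord;
  size_t with_overhead = byte_size + BlockOverhead;
  size_t raw_bytes     = with_overhead < MinBlockBytes ? MinBlockBytes : with_overhead;
  raw_bytes            = align_up(raw_bytes, ArenaAlignment);
  size_t raw_words     = raw_bytes / BytesPerWord;
  assert(raw_words * BytesPerWord == raw_bytes && "size must stay word aligned");
  return raw_words;
}

int main() {
  for (size_t w = 1; w <= 8; w++) {
    std::printf("request %zu words -> allocate %zu words\n", w, raw_word_size(w));
  }
  return 0;
}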
@@ -2320,19 +2337,7 @@ Metachunk* SpaceManager::get_new_chunk(size_t word_size,
 MetaWord* SpaceManager::allocate(size_t word_size) {
   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);

-  // If only the dictionary is going to be used (i.e., no
-  // indexed free list), then there is a minimum size requirement.
-  // MinChunkSize is a placeholder for the real minimum size JJJ
-  size_t byte_size = word_size * BytesPerWord;
-
-  size_t byte_size_with_overhead = byte_size + Metablock::overhead();
-
-  size_t raw_bytes_size = MAX2(byte_size_with_overhead,
-                               Metablock::min_block_byte_size());
-  raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
-  size_t raw_word_size = raw_bytes_size / BytesPerWord;
-  assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
-
+  size_t raw_word_size = get_raw_word_size(word_size);
   BlockFreelist* fl = block_freelists();
   MetaWord* p = NULL;
   // Allocation from the dictionary is expensive in the sense that

@@ -2896,6 +2901,9 @@ void Metaspace::initialize(Mutex* lock,
   if (class_chunk != NULL) {
     class_vsm()->add_chunk(class_chunk, true);
   }
+
+  _alloc_record_head = NULL;
+  _alloc_record_tail = NULL;
 }

 size_t Metaspace::align_word_size_up(size_t word_size) {

@@ -3000,12 +3008,14 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
 }

 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
-                               bool read_only, MetadataType mdtype, TRAPS) {
+                               bool read_only, MetaspaceObj::Type type, TRAPS) {
   if (HAS_PENDING_EXCEPTION) {
     assert(false, "Should not allocate with exception pending");
     return NULL; // caller does a CHECK_NULL too
   }

+  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
+
   // SSS: Should we align the allocations and make sure the sizes are aligned.
   MetaWord* result = NULL;

@@ -3015,13 +3025,13 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
   // with the SymbolTable_lock. Dumping is single threaded for now. We'll have
   // to revisit this for application class data sharing.
   if (DumpSharedSpaces) {
-    if (read_only) {
-      result = loader_data->ro_metaspace()->allocate(word_size, NonClassType);
-    } else {
-      result = loader_data->rw_metaspace()->allocate(word_size, NonClassType);
-    }
+    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
+    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
+    result = space->allocate(word_size, NonClassType);
     if (result == NULL) {
       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
+    } else {
+      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
     }
     return Metablock::initialize(result, word_size);
   }

@@ -3056,6 +3066,38 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
   return Metablock::initialize(result, word_size);
 }

+void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
+  assert(DumpSharedSpaces, "sanity");
+
+  AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
+  if (_alloc_record_head == NULL) {
+    _alloc_record_head = _alloc_record_tail = rec;
+  } else {
+    _alloc_record_tail->_next = rec;
+    _alloc_record_tail = rec;
+  }
+}
+
+void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
+  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
+
+  address last_addr = (address)bottom();
+
+  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
+    address ptr = rec->_ptr;
+    if (last_addr < ptr) {
+      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
+    }
+    closure->doit(ptr, rec->_type, rec->_byte_size);
+    last_addr = ptr + rec->_byte_size;
+  }
+
+  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
+  if (last_addr < top) {
+    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
+  }
+}
+
 void Metaspace::purge() {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
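The record_allocation/iterate pair added above is a simple allocation log: every shared-space allocation is appended to a singly linked list of (address, type, size) records, and iterate later replays the list through a closure, reporting any unrecorded gap between records as UnknownType. A self-contained sketch of the same bookkeeping follows; the types, sizes and the Visitor interface are illustrative stand-ins, not the HotSpot API.

#include <cstdio>

enum ObjType { UnknownType, SymbolType, MethodType_ };

struct AllocRecord {
  AllocRecord* next;
  long         addr;       // start offset of the allocation
  ObjType      type;
  int          byte_size;
  AllocRecord(long a, ObjType t, int sz) : next(nullptr), addr(a), type(t), byte_size(sz) {}
};

struct Visitor {                        // stand-in for AllocRecordClosure
  virtual void doit(long addr, ObjType type, int byte_size) = 0;
  virtual ~Visitor() {}
};

struct AllocLog {
  AllocRecord* head = nullptr;
  AllocRecord* tail = nullptr;

  ~AllocLog() {
    for (AllocRecord* r = head; r != nullptr; ) { AllocRecord* n = r->next; delete r; r = n; }
  }

  // Append in allocation order; O(1) thanks to the tail pointer.
  void record(long addr, ObjType type, int byte_size) {
    AllocRecord* rec = new AllocRecord(addr, type, byte_size);
    if (head == nullptr) { head = tail = rec; }
    else                 { tail->next = rec; tail = rec; }
  }

  // Replay the records; report any gap between them as Unknown.
  void iterate(long bottom, long top, Visitor* v) const {
    long last = bottom;
    for (AllocRecord* rec = head; rec != nullptr; rec = rec->next) {
      if (last < rec->addr) v->doit(last, UnknownType, int(rec->addr - last));
      v->doit(rec->addr, rec->type, rec->byte_size);
      last = rec->addr + rec->byte_size;
    }
    if (last < top) v->doit(last, UnknownType, int(top - last));
  }
};

struct Printer : Visitor {
  void doit(long addr, ObjType type, int byte_size) override {
    std::printf("@%ld type=%d bytes=%d\n", addr, type, byte_size);
  }
};

int main() {
  AllocLog log;
  log.record(0, SymbolType, 16);
  log.record(32, MethodType_, 64);   // leaves a 16-byte gap, reported as Unknown
  Printer p;
  log.iterate(0, 128, &p);
  return 0;
}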
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -127,6 +127,23 @@ class Metaspace : public CHeapObj<mtClass> {
   static VirtualSpaceList* space_list() { return _space_list; }
   static VirtualSpaceList* class_space_list() { return _class_space_list; }

+  // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
+  // maintain a single list for now.
+  void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
+
+  class AllocRecord : public CHeapObj<mtClass> {
+  public:
+    AllocRecord(address ptr, MetaspaceObj::Type type, int byte_size)
+      : _next(NULL), _ptr(ptr), _type(type), _byte_size(byte_size) {}
+    AllocRecord *_next;
+    address _ptr;
+    MetaspaceObj::Type _type;
+    int _byte_size;
+  };
+
+  AllocRecord * _alloc_record_head;
+  AllocRecord * _alloc_record_tail;
+
  public:

   Metaspace(Mutex* lock, MetaspaceType type);

@@ -148,8 +165,8 @@ class Metaspace : public CHeapObj<mtClass> {
   size_t used_bytes_slow(MetadataType mdtype) const;
   size_t capacity_bytes_slow(MetadataType mdtype) const;

-  static Metablock* allocate(ClassLoaderData* loader_data, size_t size,
-                             bool read_only, MetadataType mdtype, TRAPS);
+  static Metablock* allocate(ClassLoaderData* loader_data, size_t word_size,
+                             bool read_only, MetaspaceObj::Type type, TRAPS);
   void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);

   MetaWord* expand_and_allocate(size_t size,

@@ -166,6 +183,13 @@ class Metaspace : public CHeapObj<mtClass> {
   void print_on(outputStream* st) const;
   // Debugging support
   void verify();
+
+  class AllocRecordClosure : public StackObj {
+  public:
+    virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) = 0;
+  };
+
+  void iterate(AllocRecordClosure *closure);
 };

 class MetaspaceAux : AllStatic {

@@ -243,6 +243,147 @@ public:
   bool reading() const { return false; }
 };

+// This is for dumping detailed statistics for the allocations
+// in the shared spaces.
+class DumpAllocClosure : public Metaspace::AllocRecordClosure {
+public:
+
+  // Here's poor man's enum inheritance
+#define SHAREDSPACE_OBJ_TYPES_DO(f) \
+  METASPACE_OBJ_TYPES_DO(f) \
+  f(SymbolHashentry) \
+  f(SymbolBuckets) \
+  f(Other)
+
+#define SHAREDSPACE_OBJ_TYPE_DECLARE(name) name ## Type,
+#define SHAREDSPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
+
+  enum Type {
+    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
+    SHAREDSPACE_OBJ_TYPES_DO(SHAREDSPACE_OBJ_TYPE_DECLARE)
+    _number_of_types
+  };
+
+  static const char * type_name(Type type) {
+    switch(type) {
+    SHAREDSPACE_OBJ_TYPES_DO(SHAREDSPACE_OBJ_TYPE_NAME_CASE)
+    default:
+      ShouldNotReachHere();
+      return NULL;
+    }
+  }
+
+public:
+  enum {
+    RO = 0,
+    RW = 1
+  };
+
+  int _counts[2][_number_of_types];
+  int _bytes [2][_number_of_types];
+  int _which;
+
+  DumpAllocClosure() {
+    memset(_counts, 0, sizeof(_counts));
+    memset(_bytes, 0, sizeof(_bytes));
+  };
+
+  void iterate_metaspace(Metaspace* space, int which) {
+    assert(which == RO || which == RW, "sanity");
+    _which = which;
+    space->iterate(this);
+  }
+
+  virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
+    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
+    _counts[_which][type] ++;
+    _bytes [_which][type] += byte_size;
+  }
+
+  void dump_stats(int ro_all, int rw_all, int md_all, int mc_all);
+};
+
+void DumpAllocClosure::dump_stats(int ro_all, int rw_all, int md_all, int mc_all) {
+  rw_all += (md_all + mc_all); // md and mc are all mapped Read/Write
+  int other_bytes = md_all + mc_all;
+
+  // Calculate size of data that was not allocated by Metaspace::allocate()
+  int symbol_count = _counts[RO][MetaspaceObj::SymbolType];
+  int symhash_bytes = symbol_count * sizeof (HashtableEntry<Symbol*, mtSymbol>);
+  int symbuck_count = SymbolTable::the_table()->table_size();
+  int symbuck_bytes = symbuck_count * sizeof(HashtableBucket<mtSymbol>);
+
+  _counts[RW][SymbolHashentryType] = symbol_count;
+  _bytes [RW][SymbolHashentryType] = symhash_bytes;
+  other_bytes -= symhash_bytes;
+
+  _counts[RW][SymbolBucketsType] = symbuck_count;
+  _bytes [RW][SymbolBucketsType] = symbuck_bytes;
+  other_bytes -= symbuck_bytes;
+
+  // TODO: count things like dictionary, vtable, etc
+  _bytes[RW][OtherType] = other_bytes;
+
+  // prevent divide-by-zero
+  if (ro_all < 1) {
+    ro_all = 1;
+  }
+  if (rw_all < 1) {
+    rw_all = 1;
+  }
+
+  int all_ro_count = 0;
+  int all_ro_bytes = 0;
+  int all_rw_count = 0;
+  int all_rw_bytes = 0;
+
+  const char *fmt = "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f";
+  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
+  const char *hdr = " ro_cnt ro_bytes % | rw_cnt rw_bytes % | all_cnt all_bytes %";
+
+  tty->print_cr("Detailed metadata info (rw includes md and mc):");
+  tty->print_cr(hdr);
+  tty->print_cr(sep);
+  for (int type = 0; type < int(_number_of_types); type ++) {
+    const char *name = type_name((Type)type);
+    int ro_count = _counts[RO][type];
+    int ro_bytes = _bytes [RO][type];
+    int rw_count = _counts[RW][type];
+    int rw_bytes = _bytes [RW][type];
+    int count = ro_count + rw_count;
+    int bytes = ro_bytes + rw_bytes;
+
+    double ro_perc = 100.0 * double(ro_bytes) / double(ro_all);
+    double rw_perc = 100.0 * double(rw_bytes) / double(rw_all);
+    double perc = 100.0 * double(bytes) / double(ro_all + rw_all);
+
+    tty->print_cr(fmt, name,
+                  ro_count, ro_bytes, ro_perc,
+                  rw_count, rw_bytes, rw_perc,
+                  count, bytes, perc);

+    all_ro_count += ro_count;
+    all_ro_bytes += ro_bytes;
+    all_rw_count += rw_count;
+    all_rw_bytes += rw_bytes;
+  }
+
+  int all_count = all_ro_count + all_rw_count;
+  int all_bytes = all_ro_bytes + all_rw_bytes;
+
+  double all_ro_perc = 100.0 * double(all_ro_bytes) / double(ro_all);
+  double all_rw_perc = 100.0 * double(all_rw_bytes) / double(rw_all);
+  double all_perc = 100.0 * double(all_bytes) / double(ro_all + rw_all);
+
+  tty->print_cr(sep);
+  tty->print_cr(fmt, "Total",
+                all_ro_count, all_ro_bytes, all_ro_perc,
+                all_rw_count, all_rw_bytes, all_rw_perc,
+                all_count, all_bytes, all_perc);
+
+  assert(all_ro_bytes == ro_all, "everything should have been counted");
+  assert(all_rw_bytes == rw_all, "everything should have been counted");
+}
+
 // Populate the shared space.
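DumpAllocClosure above is the consumer of that allocation log: it buckets each record by read-only versus read-write section and by object type, then prints one table row per type with counts, bytes, and each type's share of its section. A stripped-down sketch of that tallying follows, with two sections, three hypothetical types and plain printf instead of HotSpot's tty stream.

#include <cstdio>
#include <cstring>

enum Section { RO = 0, RW = 1, NUM_SECTIONS = 2 };
enum ObjType { UnknownType, SymbolType, MethodType_, NUM_TYPES };

static const char* type_names[NUM_TYPES] = { "Unknown", "Symbol", "Method" };

struct StatsTally {
  int counts[NUM_SECTIONS][NUM_TYPES];
  int bytes [NUM_SECTIONS][NUM_TYPES];

  StatsTally() {
    std::memset(counts, 0, sizeof(counts));
    std::memset(bytes, 0, sizeof(bytes));
  }

  // Called once per recorded allocation.
  void add(Section s, ObjType t, int byte_size) {
    counts[s][t] += 1;
    bytes [s][t] += byte_size;
  }

  // One row per type: counts, bytes and percentage of each section.
  void dump(int ro_total, int rw_total) const {
    if (ro_total < 1) ro_total = 1;   // prevent divide-by-zero
    if (rw_total < 1) rw_total = 1;
    std::printf("%-10s %8s %10s %6s | %8s %10s %6s\n",
                "type", "ro_cnt", "ro_bytes", "ro_%", "rw_cnt", "rw_bytes", "rw_%");
    for (int t = 0; t < NUM_TYPES; t++) {
      std::printf("%-10s %8d %10d %6.1f | %8d %10d %6.1f\n",
                  type_names[t],
                  counts[RO][t], bytes[RO][t], 100.0 * bytes[RO][t] / ro_total,
                  counts[RW][t], bytes[RW][t], 100.0 * bytes[RW][t] / rw_total);
    }
  }
};

int main() {
  StatsTally tally;
  tally.add(RO, SymbolType, 120);
  tally.add(RO, SymbolType, 80);
  tally.add(RW, MethodType_, 400);
  tally.dump(/*ro_total=*/200, /*rw_total=*/400);
  return 0;
}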
@@ -454,6 +595,14 @@ void VM_PopulateDumpSharedSpace::doit() {
   mapinfo->close();

   memmove(vtbl_list, saved_vtbl, vtbl_list_size * sizeof(void*));
+
+  if (PrintSharedSpaces) {
+    DumpAllocClosure dac;
+    dac.iterate_metaspace(_loader_data->ro_metaspace(), DumpAllocClosure::RO);
+    dac.iterate_metaspace(_loader_data->rw_metaspace(), DumpAllocClosure::RW);
+
+    dac.dump_stats(int(ro_bytes), int(rw_bytes), int(md_bytes), int(mc_bytes));
+  }
 }

 static void link_shared_classes(Klass* obj, TRAPS) {

@@ -45,6 +45,7 @@ enum SH_process_strong_roots_tasks {
   SH_PS_FlatProfiler_oops_do,
   SH_PS_Management_oops_do,
   SH_PS_SystemDictionary_oops_do,
+  SH_PS_ClassLoaderDataGraph_oops_do,
   SH_PS_jvmti_oops_do,
   SH_PS_StringTable_oops_do,
   SH_PS_CodeCache_oops_do,

@@ -173,15 +174,21 @@ void SharedHeap::process_strong_roots(bool activate_scope,
   if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
     if (so & SO_AllClasses) {
       SystemDictionary::oops_do(roots);
-      ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging);
     } else if (so & SO_SystemClasses) {
       SystemDictionary::always_strong_oops_do(roots);
-      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging);
     } else {
       fatal("We should always have selected either SO_AllClasses or SO_SystemClasses");
     }
   }

+  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
+    if (so & SO_AllClasses) {
+      ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging);
+    } else if (so & SO_SystemClasses) {
+      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging);
+    }
+  }
+
   if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
     if (so & SO_Strings) {
       StringTable::oops_do(roots);

@@ -228,11 +228,8 @@ void Universe::serialize(SerializeClosure* f, bool do_all) {

 void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
   if (size < alignment || size % alignment != 0) {
-    ResourceMark rm;
-    stringStream st;
-    st.print("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment);
-    char* error = st.as_string();
-    vm_exit_during_initialization(error);
+    vm_exit_during_initialization(
+      err_msg("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment));
   }
 }

@@ -916,7 +913,7 @@ ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
   }

   if (!total_rs.is_reserved()) {
-    vm_exit_during_initialization(err_msg("Could not reserve enough space for object heap %d bytes", total_reserved));
+    vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
     return total_rs;
   }

@@ -33,7 +33,7 @@

 // Allocate annotations in metadata area
 Annotations* Annotations::allocate(ClassLoaderData* loader_data, TRAPS) {
-  return new (loader_data, size(), true, THREAD) Annotations();
+  return new (loader_data, size(), true, MetaspaceObj::AnnotationType, THREAD) Annotations();
 }

 // helper

@@ -94,7 +94,7 @@ void ArrayKlass::complete_create_array_klass(ArrayKlass* k, KlassHandle super_kl
   ResourceMark rm(THREAD);
   k->initialize_supers(super_klass(), CHECK);
   k->vtable()->initialize_vtable(false, CHECK);
-  java_lang_Class::create_mirror(k, CHECK);
+  java_lang_Class::create_mirror(k, Handle(NULL), CHECK);
 }

 GrowableArray<Klass*>* ArrayKlass::compute_secondary_supers(int num_extra_slots) {

@@ -40,7 +40,7 @@ ConstMethod* ConstMethod::allocate(ClassLoaderData* loader_data,
                                    MethodType method_type,
                                    TRAPS) {
   int size = ConstMethod::size(byte_code_size, sizes);
-  return new (loader_data, size, true, THREAD) ConstMethod(
+  return new (loader_data, size, true, MetaspaceObj::ConstMethodType, THREAD) ConstMethod(
     byte_code_size, sizes, method_type, size);
 }

@@ -55,7 +55,7 @@ ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, T
   // the resolved_references array, which is recreated at startup time.
   // But that could be moved to InstanceKlass (although a pain to access from
   // assembly code). Maybe it could be moved to the cpCache which is RW.
-  return new (loader_data, size, false, THREAD) ConstantPool(tags);
+  return new (loader_data, size, false, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags);
 }

 ConstantPool::ConstantPool(Array<u1>* tags) {

@@ -1063,9 +1063,10 @@ bool ConstantPool::compare_entry_to(int index1, constantPoolHandle cp2,
     int k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2);
     int i1 = invoke_dynamic_bootstrap_specifier_index(index1);
     int i2 = cp2->invoke_dynamic_bootstrap_specifier_index(index2);
-    bool match = compare_entry_to(k1, cp2, k2, CHECK_false) &&
-                 compare_operand_to(i1, cp2, i2, CHECK_false);
-    return match;
+    // separate statements and variables because CHECK_false is used
+    bool match_entry = compare_entry_to(k1, cp2, k2, CHECK_false);
+    bool match_operand = compare_operand_to(i1, cp2, i2, CHECK_false);
+    return (match_entry && match_operand);
   } break;

   case JVM_CONSTANT_String:

@@ -542,7 +542,8 @@ ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data,
                                                const intStack& invokedynamic_map, TRAPS) {
   int size = ConstantPoolCache::size(length);

-  return new (loader_data, size, false, THREAD) ConstantPoolCache(length, index_map, invokedynamic_map);
+  return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD)
+    ConstantPoolCache(length, index_map, invokedynamic_map);
 }

 void ConstantPoolCache::initialize(const intArray& inverse_index_map,

@@ -268,8 +268,6 @@ InstanceKlass::InstanceKlass(int vtable_len,
   set_fields(NULL, 0);
   set_constants(NULL);
   set_class_loader_data(NULL);
-  set_protection_domain(NULL);
-  set_signers(NULL);
   set_source_file_name(NULL);
   set_source_debug_extension(NULL, 0);
   set_array_name(NULL);

@@ -279,7 +277,6 @@ InstanceKlass::InstanceKlass(int vtable_len,
   set_is_marked_dependent(false);
   set_init_state(InstanceKlass::allocated);
   set_init_thread(NULL);
-  set_init_lock(NULL);
   set_reference_type(rt);
   set_oop_map_cache(NULL);
   set_jni_ids(NULL);

@@ -408,12 +405,6 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
   }
   set_inner_classes(NULL);

-  // Null out Java heap objects, although these won't be walked to keep
-  // alive once this InstanceKlass is deallocated.
-  set_protection_domain(NULL);
-  set_signers(NULL);
-  set_init_lock(NULL);
-
   // We should deallocate the Annotations instance
   MetadataFactory::free_metadata(loader_data, annotations());
   set_annotations(NULL);

@@ -451,6 +442,24 @@ void InstanceKlass::eager_initialize(Thread *thread) {
   }
 }

+// JVMTI spec thinks there are signers and protection domain in the
+// instanceKlass. These accessors pretend these fields are there.
+// The hprof specification also thinks these fields are in InstanceKlass.
+oop InstanceKlass::protection_domain() const {
+  // return the protection_domain from the mirror
+  return java_lang_Class::protection_domain(java_mirror());
+}
+
+// To remove these from requires an incompatible change and CCC request.
+objArrayOop InstanceKlass::signers() const {
+  // return the signers from the mirror
+  return java_lang_Class::signers(java_mirror());
+}
+
+volatile oop InstanceKlass::init_lock() const {
+  // return the init lock from the mirror
+  return java_lang_Class::init_lock(java_mirror());
+}

 void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
   EXCEPTION_MARK;
@ -1883,16 +1892,6 @@ bool InstanceKlass::is_dependent_nmethod(nmethod* nm) {
|
|||||||
|
|
||||||
// Garbage collection
|
// Garbage collection
|
||||||
|
|
||||||
void InstanceKlass::oops_do(OopClosure* cl) {
|
|
||||||
Klass::oops_do(cl);
|
|
||||||
|
|
||||||
cl->do_oop(adr_protection_domain());
|
|
||||||
cl->do_oop(adr_signers());
|
|
||||||
cl->do_oop(adr_init_lock());
|
|
||||||
|
|
||||||
// Don't walk the arrays since they are walked from the ClassLoaderData objects.
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
template <class T> void assert_is_in(T *p) {
|
template <class T> void assert_is_in(T *p) {
|
||||||
T heap_oop = oopDesc::load_heap_oop(p);
|
T heap_oop = oopDesc::load_heap_oop(p);
|
||||||
@ -2241,9 +2240,6 @@ void InstanceKlass::remove_unshareable_info() {
|
|||||||
m->remove_unshareable_info();
|
m->remove_unshareable_info();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Need to reinstate when reading back the class.
|
|
||||||
set_init_lock(NULL);
|
|
||||||
|
|
||||||
// do array classes also.
|
// do array classes also.
|
||||||
array_klasses_do(remove_unshareable_in_class);
|
array_klasses_do(remove_unshareable_in_class);
|
||||||
}
|
}
|
||||||
@ -2275,13 +2271,6 @@ void InstanceKlass::restore_unshareable_info(TRAPS) {
|
|||||||
ik->itable()->initialize_itable(false, CHECK);
|
ik->itable()->initialize_itable(false, CHECK);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Allocate a simple java object for a lock.
|
|
||||||
// This needs to be a java object because during class initialization
|
|
||||||
// it can be held across a java call.
|
|
||||||
typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK);
|
|
||||||
Handle h(THREAD, (oop)r);
|
|
||||||
ik->set_init_lock(h());
|
|
||||||
|
|
||||||
// restore constant pool resolved references
|
// restore constant pool resolved references
|
||||||
ik->constants()->restore_unshareable_info(CHECK);
|
ik->constants()->restore_unshareable_info(CHECK);
|
||||||
|
|
||||||
@ -2331,10 +2320,15 @@ void InstanceKlass::release_C_heap_structures() {
|
|||||||
FreeHeap(jmeths);
|
FreeHeap(jmeths);
|
||||||
}
|
}
|
||||||
|
|
||||||
MemberNameTable* mnt = member_names();
|
// Deallocate MemberNameTable
|
||||||
if (mnt != NULL) {
|
{
|
||||||
delete mnt;
|
Mutex* lock_or_null = SafepointSynchronize::is_at_safepoint() ? NULL : MemberNameTable_lock;
|
||||||
set_member_names(NULL);
|
MutexLockerEx ml(lock_or_null, Mutex::_no_safepoint_check_flag);
|
||||||
|
MemberNameTable* mnt = member_names();
|
||||||
|
if (mnt != NULL) {
|
||||||
|
delete mnt;
|
||||||
|
set_member_names(NULL);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int* indices = methods_cached_itable_indices_acquire();
|
int* indices = methods_cached_itable_indices_acquire();
|
||||||
@ -2765,15 +2759,28 @@ nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_le
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
void InstanceKlass::add_member_name(Handle mem_name) {
|
void InstanceKlass::add_member_name(int index, Handle mem_name) {
|
||||||
jweak mem_name_wref = JNIHandles::make_weak_global(mem_name);
|
jweak mem_name_wref = JNIHandles::make_weak_global(mem_name);
|
||||||
MutexLocker ml(MemberNameTable_lock);
|
MutexLocker ml(MemberNameTable_lock);
|
||||||
|
assert(0 <= index && index < idnum_allocated_count(), "index is out of bounds");
|
||||||
DEBUG_ONLY(No_Safepoint_Verifier nsv);
|
DEBUG_ONLY(No_Safepoint_Verifier nsv);
|
||||||
|
|
||||||
if (_member_names == NULL) {
|
if (_member_names == NULL) {
|
||||||
_member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable();
|
_member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable(idnum_allocated_count());
|
||||||
}
|
}
|
||||||
_member_names->add_member_name(mem_name_wref);
|
_member_names->add_member_name(index, mem_name_wref);
|
||||||
|
}
|
||||||
|
|
||||||
|
oop InstanceKlass::get_member_name(int index) {
|
||||||
|
MutexLocker ml(MemberNameTable_lock);
|
||||||
|
assert(0 <= index && index < idnum_allocated_count(), "index is out of bounds");
|
||||||
|
DEBUG_ONLY(No_Safepoint_Verifier nsv);
|
||||||
|
|
||||||
|
if (_member_names == NULL) {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
oop mem_name =_member_names->get_member_name(index);
|
||||||
|
return mem_name;
|
||||||
}
|
}
|
||||||
|
|
||||||
// -----------------------------------------------------------------------------------------------------
|
// -----------------------------------------------------------------------------------------------------
|
||||||
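The MemberNameTable changes above follow a common pattern: a per-class table that is created lazily on first insert, keyed by a pre-validated index, and only ever touched under a lock (including at teardown). A generic standalone sketch of that pattern using the C++ standard library follows; the HotSpot version uses its own MutexLocker, JNI weak handles and MemberNameTable type, none of which appear here.

#include <cassert>
#include <cstdio>
#include <mutex>
#include <string>
#include <vector>

class IndexedTable {
 public:
  explicit IndexedTable(int capacity) : _slots(capacity) {}

  void put(int index, const std::string& value) {
    assert(0 <= index && index < (int)_slots.size() && "index is out of bounds");
    _slots[index] = value;
  }

  const std::string& get(int index) const {
    assert(0 <= index && index < (int)_slots.size() && "index is out of bounds");
    return _slots[index];
  }

 private:
  std::vector<std::string> _slots;
};

class Holder {
 public:
  explicit Holder(int capacity) : _capacity(capacity) {}

  ~Holder() {
    std::lock_guard<std::mutex> guard(_lock);  // deallocation also happens under the lock
    delete _table;
    _table = nullptr;
  }

  // Lazily create the table on first insert; all access is lock-guarded.
  void add(int index, const std::string& value) {
    std::lock_guard<std::mutex> guard(_lock);
    if (_table == nullptr) {
      _table = new IndexedTable(_capacity);
    }
    _table->put(index, value);
  }

  std::string get(int index) {
    std::lock_guard<std::mutex> guard(_lock);
    if (_table == nullptr) {
      return std::string();   // nothing recorded yet
    }
    return _table->get(index);
  }

 private:
  std::mutex    _lock;
  int           _capacity;
  IndexedTable* _table = nullptr;
};

int main() {
  Holder h(4);
  h.add(2, "resolved-member");
  std::printf("slot 2 = %s\n", h.get(2).c_str());
  std::printf("slot 0 = \"%s\"\n", h.get(0).c_str());
  return 0;
}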
@@ -2836,10 +2843,7 @@ void InstanceKlass::print_on(outputStream* st) const {
     class_loader_data()->print_value_on(st);
     st->cr();
   }
-  st->print(BULLET"protection domain: "); ((InstanceKlass*)this)->protection_domain()->print_value_on(st); st->cr();
   st->print(BULLET"host class: "); host_klass()->print_value_on_maybe_null(st); st->cr();
-  st->print(BULLET"signers: "); signers()->print_value_on(st); st->cr();
-  st->print(BULLET"init_lock: "); ((oop)_init_lock)->print_value_on(st); st->cr();
   if (source_file_name() != NULL) {
     st->print(BULLET"source file: ");
     source_file_name()->print_value_on(st);

@@ -3040,7 +3044,6 @@ void InstanceKlass::collect_statistics(KlassSizeStats *sz) const {
   n += (sz->_method_ordering_bytes = sz->count_array(method_ordering()));
   n += (sz->_local_interfaces_bytes = sz->count_array(local_interfaces()));
   n += (sz->_transitive_interfaces_bytes = sz->count_array(transitive_interfaces()));
-  n += (sz->_signers_bytes = sz->count_array(signers()));
   n += (sz->_fields_bytes = sz->count_array(fields()));
   n += (sz->_inner_classes_bytes = sz->count_array(inner_classes()));
   sz->_ro_bytes += n;

@@ -3206,17 +3209,11 @@ void InstanceKlass::verify_on(outputStream* st) {
     guarantee(constants()->is_metadata(), "should be in metaspace");
     guarantee(constants()->is_constantPool(), "should be constant pool");
   }
-  if (protection_domain() != NULL) {
-    guarantee(protection_domain()->is_oop(), "should be oop");
-  }
   const Klass* host = host_klass();
   if (host != NULL) {
     guarantee(host->is_metadata(), "should be in metaspace");
     guarantee(host->is_klass(), "should be klass");
   }
-  if (signers() != NULL) {
-    guarantee(signers()->is_objArray(), "should be obj array");
-  }
 }

 void InstanceKlass::oop_verify_on(oop obj, outputStream* st) {

@@ -58,8 +58,6 @@
 //    [fields ]
 //    [constants ]
 //    [class loader ]
-//    [protection domain ]
-//    [signers ]
 //    [source file name ]
 //    [inner classes ]
 //    [static field size ]

@@ -180,16 +178,6 @@ class InstanceKlass: public Klass {
   static volatile int _total_instanceKlass_count;

  protected:
-  // Protection domain.
-  oop _protection_domain;
-  // Class signers.
-  objArrayOop _signers;
-  // Lock for (1) initialization; (2) access to the ConstantPool of this class.
-  // Must be one per class and it has to be a VM internal object so java code
-  // cannot lock it (like the mirror).
-  // It has to be an object not a Mutex because it's held through java calls.
-  volatile oop _init_lock;
-
   // Annotations for this class
   Annotations* _annotations;
   // Array classes holding elements of this class.

@@ -527,8 +515,10 @@ class InstanceKlass: public Klass {
   void set_constants(ConstantPool* c) { _constants = c; }

   // protection domain
-  oop protection_domain() { return _protection_domain; }
-  void set_protection_domain(oop pd) { klass_oop_store(&_protection_domain, pd); }
+  oop protection_domain() const;
+
+  // signers
+  objArrayOop signers() const;

   // host class
   Klass* host_klass() const {

@@ -575,10 +565,6 @@ class InstanceKlass: public Klass {
     }
   }

-  // signers
-  objArrayOop signers() const { return _signers; }
-  void set_signers(objArrayOop s) { klass_oop_store((oop*)&_signers, s); }
-
   // source file name
   Symbol* source_file_name() const { return _source_file_name; }
   void set_source_file_name(Symbol* n);

@@ -912,8 +898,6 @@ class InstanceKlass: public Klass {
   Method* method_at_itable(Klass* holder, int index, TRAPS);

   // Garbage collection
-  virtual void oops_do(OopClosure* cl);
-
   void oop_follow_contents(oop obj);
   int oop_adjust_pointers(oop obj);

@@ -999,14 +983,12 @@ private:

   // Lock during initialization
 public:
-  volatile oop init_lock() const {return _init_lock; }
+  // Lock for (1) initialization; (2) access to the ConstantPool of this class.
+  // Must be one per class and it has to be a VM internal object so java code
+  // cannot lock it (like the mirror).
+  // It has to be an object not a Mutex because it's held through java calls.
+  volatile oop init_lock() const;
 private:
-  void set_init_lock(oop value) { klass_oop_store(&_init_lock, value); }
-
-  // Offsets for memory management
-  oop* adr_protection_domain() const { return (oop*)&this->_protection_domain;}
-  oop* adr_signers() const { return (oop*)&this->_signers;}
-  oop* adr_init_lock() const { return (oop*)&this->_init_lock;}

   // Static methods that are used to implement member methods where an exposed this pointer
   // is needed due to possible GCs

@@ -1040,7 +1022,8 @@ public:
   // JSR-292 support
   MemberNameTable* member_names() { return _member_names; }
   void set_member_names(MemberNameTable* member_names) { _member_names = member_names; }
-  void add_member_name(Handle member_name);
+  void add_member_name(int index, Handle member_name);
+  oop get_member_name(int index);

 public:
   // JVMTI support

@@ -140,7 +140,7 @@ Method* Klass::uncached_lookup_method(Symbol* name, Symbol* signature) const {

 void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) {
   return Metaspace::allocate(loader_data, word_size, /*read_only*/false,
-                             Metaspace::ClassType, CHECK_NULL);
+                             MetaspaceObj::ClassType, CHECK_NULL);
 }

 Klass::Klass() {

@@ -511,8 +511,9 @@ void Klass::restore_unshareable_info(TRAPS) {
   // (same order as class file parsing)
   loader_data->add_class(this);

-  // Recreate the class mirror
-  java_lang_Class::create_mirror(this, CHECK);
+  // Recreate the class mirror. The protection_domain is always null for
+  // boot loader, for now.
+  java_lang_Class::create_mirror(this, Handle(NULL), CHECK);
 }

 Klass* Klass::array_klass_or_null(int rank) {

@@ -445,7 +445,7 @@ class Klass : public Metadata {
   Klass* array_klass_or_null(int rank);
   Klass* array_klass_or_null();

-  virtual oop protection_domain() { return NULL; }
+  virtual oop protection_domain() const = 0;

   oop class_loader() const;

@@ -74,7 +74,7 @@ Method* Method::allocate(ClassLoaderData* loader_data,

   int size = Method::size(access_flags.is_native());

-  return new (loader_data, size, false, THREAD) Method(cm, access_flags, size);
+  return new (loader_data, size, false, MetaspaceObj::MethodType, THREAD) Method(cm, access_flags, size);
 }

 Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) {

@@ -671,13 +671,15 @@ class Method : public Metadata {
                                          Symbol* signature, //anything at all
                                          TRAPS);
   static Klass* check_non_bcp_klass(Klass* klass);
-  // these operate only on invoke methods:
+
+  // How many extra stack entries for invokedynamic when it's enabled
+  static const int extra_stack_entries_for_jsr292 = 1;
+
+  // this operates only on invoke methods:
   // presize interpreter frames for extra interpreter stack entries, if needed
-  // method handles want to be able to push a few extra values (e.g., a bound receiver), and
-  // invokedynamic sometimes needs to push a bootstrap method, call site, and arglist,
-  // all without checking for a stack overflow
-  static int extra_stack_entries() { return EnableInvokeDynamic ? 2 : 0; }
-  static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize()
+  // Account for the extra appendix argument for invokehandle/invokedynamic
+  static int extra_stack_entries() { return EnableInvokeDynamic ? extra_stack_entries_for_jsr292 : 0; }
+  static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize

   // RedefineClasses() support:
   bool is_old() const { return access_flags().is_old(); }

@@ -26,7 +26,7 @@
 #include "runtime/thread.inline.hpp"

 MethodCounters* MethodCounters::allocate(ClassLoaderData* loader_data, TRAPS) {
-  return new(loader_data, size(), false, THREAD) MethodCounters();
+  return new(loader_data, size(), false, MetaspaceObj::MethodCountersType, THREAD) MethodCounters();
 }

 void MethodCounters::clear_counters() {

@@ -388,7 +388,8 @@ void ArgInfoData::print_data_on(outputStream* st) {
 MethodData* MethodData::allocate(ClassLoaderData* loader_data, methodHandle method, TRAPS) {
   int size = MethodData::compute_allocation_size_in_words(method);

-  return new (loader_data, size, false, THREAD) MethodData(method(), size, CHECK_NULL);
+  return new (loader_data, size, false, MetaspaceObj::MethodDataType, THREAD)
+    MethodData(method(), size, CHECK_NULL);
 }

 int MethodData::bytecode_cell_count(Bytecodes::Code code) {

@@ -75,7 +75,7 @@ class ObjArrayKlass : public ArrayKlass {
   void copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos, int length, TRAPS);

   // Compute protection domain
-  oop protection_domain() { return bottom_klass()->protection_domain(); }
+  oop protection_domain() const { return bottom_klass()->protection_domain(); }

 private:
   // Either oop or narrowOop depending on UseCompressedOops.

@@ -55,7 +55,7 @@ void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRA
   address res;
   int alloc_size = size(len)*HeapWordSize;
   res = (address) Metaspace::allocate(loader_data, size(len), true,
-                                      Metaspace::NonClassType, CHECK_NULL);
+                                      MetaspaceObj::SymbolType, CHECK_NULL);
   return res;
 }
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user