Merge

commit ec330e431d

.hgtags (2 lines changed)
.hgtags (2 lines changed)
@@ -112,3 +112,5 @@ def8e16dd237a47fc067d66d4c616d7baaec6001 jdk7-b134
f75a1efb141210901aabe00a834e0fc32bb8b337 jdk7-b135
46acf76a533954cfd594bb88fdea79938abfbe20 jdk7-b136
d1cf7d4ee16c341f5b8c7e7f1d68a8c412b6c693 jdk7-b137
62b8e328f8c8c66c14b0713222116f2add473f3f jdk7-b138
955488f34ca418f6cdab843d61c20d2c615637d9 jdk7-b139

@@ -112,3 +112,5 @@ ddc2fcb3682ffd27f44354db666128827be7e3c3 jdk7-b134
783bd02b4ab4596059c74b10a1793d7bd2f1c157 jdk7-b135
2fe76e73adaa5133ac559f0b3c2c0707eca04580 jdk7-b136
7654afc6a29e43cb0a1343ce7f1287bf690d5e5f jdk7-b137
fc47c97bbbd91b1f774d855c48a7e285eb1a351a jdk7-b138
7ed6d0b9aaa12320832a7ddadb88d6d8d0dda4c1 jdk7-b139

@@ -112,3 +112,5 @@ d7532bcd3742f1576dd07ff9fbb535c9c9a276e9 jdk7-b126
e0b72ae5dc5e824b342801c8d1d336a55eb54e2c jdk7-b135
48ef0c712e7cbf272f47f9224db92a3c6a9e2612 jdk7-b136
a66c01d8bf895261715955df0b95545c000ed6a8 jdk7-b137
78d8cf04697e9df54f7f11e195b7da29b8e345a2 jdk7-b138
60b074ec6fcf5cdf9efce22fdfb02326ed8fa2d3 jdk7-b139

@@ -5,3 +5,4 @@
^src/share/tools/IdealGraphVisualizer/[a-zA-Z0-9]*/build/
^src/share/tools/IdealGraphVisualizer/build/
^src/share/tools/IdealGraphVisualizer/dist/
^.hgtip

@@ -162,3 +162,7 @@ bd586e392d93b7ed7a1636dcc8da2b6a4203a102 jdk7-b136
bd586e392d93b7ed7a1636dcc8da2b6a4203a102 hs21-b06
2dbcb4a4d8dace5fe78ceb563b134f1fb296cd8f jdk7-b137
2dbcb4a4d8dace5fe78ceb563b134f1fb296cd8f hs21-b07
0930dc920c185afbf40fed9a655290b8e5b16783 jdk7-b138
0930dc920c185afbf40fed9a655290b8e5b16783 hs21-b08
611e19a16519d6fb5deea9ab565336e6e6ee475d jdk7-b139
611e19a16519d6fb5deea9ab565336e6e6ee475d hs21-b09
@ -22,29 +22,23 @@
|
||||
#
|
||||
#
|
||||
|
||||
#
|
||||
# The cscope.out file is made in the current directory and spans the entire
|
||||
# source tree.
|
||||
#
|
||||
# Things to note:
|
||||
# 1. We use relative names for cscope.
|
||||
# 2. We *don't* remove the old cscope.out file, because cscope is smart
|
||||
# enough to only build what has changed. It can be confused, however,
|
||||
# if files are renamed or removed, so it may be necessary to manually
|
||||
# remove cscope.out if a lot of reorganization has occurred.
|
||||
#
|
||||
# The cscope.out file is generated in the current directory. The old cscope.out
|
||||
# file is *not* removed because cscope is smart enough to only build what has
|
||||
# changed. cscope can be confused if files are renamed or removed, so it may be
|
||||
# necessary to remove cscope.out (gmake cscope.clean) if a lot of reorganization
|
||||
# has occurred.
|
||||
|
||||
include $(GAMMADIR)/make/scm.make
|
||||
|
||||
NAWK = /usr/xpg4/bin/awk
|
||||
RM = rm -f
|
||||
HG = hg
|
||||
CS_TOP = ../..
|
||||
CS_TOP = $(GAMMADIR)
|
||||
|
||||
CSDIRS = $(CS_TOP)/src $(CS_TOP)/make
|
||||
CSINCS = $(CSDIRS:%=-I%)
|
||||
|
||||
CSCOPE = cscope
|
||||
CSCOPE_OUT = cscope.out
|
||||
CSCOPE_FLAGS = -b
|
||||
|
||||
# Allow .java files to be added from the environment (CSCLASSES=yes).
|
||||
@ -61,25 +55,22 @@ ifndef CSHEADERS
|
||||
RMCCHEADERS= -o -name CClassHeaders
|
||||
endif
|
||||
|
||||
# Use CS_GENERATED=x to include auto-generated files in the make directories.
|
||||
ifdef CS_GENERATED
|
||||
CS_ADD_GENERATED = -o -name '*.incl'
|
||||
else
|
||||
CS_PRUNE_GENERATED = -o -name '${OS}_*_core' -o -name '${OS}_*_compiler?'
|
||||
# Ignore build products.
|
||||
CS_PRUNE_GENERATED = -o -name '${OSNAME}_*_core' -o \
|
||||
-name '${OSNAME}_*_compiler?'
|
||||
|
||||
# O/S-specific files for all systems are included by default. Set CS_OS to a
|
||||
# space-separated list of identifiers to include only those systems.
|
||||
ifdef CS_OS
|
||||
CS_PRUNE_OS = $(patsubst %,-o -name '*%*',\
|
||||
$(filter-out ${CS_OS},linux macos solaris windows))
|
||||
endif
|
||||
|
||||
# OS-specific files for other systems are excluded by default. Use CS_OS=yes
|
||||
# to include platform-specific files for other platforms.
|
||||
ifndef CS_OS
|
||||
CS_OS = linux macos solaris win32
|
||||
CS_PRUNE_OS = $(patsubst %,-o -name '*%*',$(filter-out ${OS},${CS_OS}))
|
||||
endif
|
||||
|
||||
# Processor-specific files for other processors are excluded by default. Use
|
||||
# CS_CPU=x to include platform-specific files for other platforms.
|
||||
ifndef CS_CPU
|
||||
CS_CPU = i486 sparc amd64 ia64
|
||||
CS_PRUNE_CPU = $(patsubst %,-o -name '*%*',$(filter-out ${SRCARCH},${CS_CPU}))
|
||||
# CPU-specific files for all processors are included by default. Set CS_CPU to
# a space-separated list of identifiers to include only those CPUs.
|
||||
ifdef CS_CPU
|
||||
CS_PRUNE_CPU = $(patsubst %,-o -name '*%*',\
|
||||
$(filter-out ${CS_CPU},arm ppc sparc x86 zero))
|
||||
endif
|
||||
|
||||
# What files should we include? A simple rule might be just those files under
|
||||
@ -95,10 +86,14 @@ CS_PRUNE_STD = $(SCM_DIRS) \
|
||||
-o -name '*demo' \
|
||||
-o -name pkgarchive
|
||||
|
||||
# Placeholder for user-defined excludes.
|
||||
CS_PRUNE_EX =
|
||||
|
||||
CS_PRUNE = $(CS_PRUNE_STD) \
|
||||
$(CS_PRUNE_OS) \
|
||||
$(CS_PRUNE_CPU) \
|
||||
$(CS_PRUNE_GENERATED) \
|
||||
$(CS_PRUNE_EX) \
|
||||
$(RMCCHEADERS)
|
||||
|
||||
# File names to include.
|
||||
@ -114,49 +109,33 @@ CSFILENAMES = -name '*.[ch]pp' \
|
||||
-o -name '*.ad' \
|
||||
$(ADDCLASSES)
|
||||
|
||||
.PHONY: cscope cscope.clean cscope.scratch TAGS.clean FORCE
|
||||
.PRECIOUS: cscope.out
|
||||
|
||||
cscope cscope.out: cscope.files FORCE
|
||||
$(CSCOPE) $(CSCOPE_FLAGS)
|
||||
cscope $(CSCOPE_OUT): cscope.files FORCE
|
||||
$(CSCOPE) -f $(CSCOPE_OUT) $(CSCOPE_FLAGS)
|
||||
|
||||
# The .raw file is reordered here in an attempt to make cscope display the most
|
||||
# relevant files first.
|
||||
cscope.files: .cscope.files.raw
|
||||
echo "$(CSINCS)" > $@
|
||||
-egrep -v "\.java|\/make\/" $< >> $@
|
||||
-fgrep ".java" $< >> $@
|
||||
-fgrep "/make/" $< >> $@
|
||||
cscope.clean:
|
||||
$(QUIETLY) $(RM) $(CSCOPE_OUT) cscope.files
|
||||
|
||||
.cscope.files.raw: .nametable.files
|
||||
-find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o \
|
||||
-type f \( $(CSFILENAMES) \) -print > $@
|
||||
cscope.scratch: cscope.clean cscope
|
||||
|
||||
cscope.clean: nametable.clean
|
||||
-$(RM) cscope.out cscope.files .cscope.files.raw
|
||||
# The raw list is reordered so cscope displays the most relevant files first.
|
||||
cscope.files:
|
||||
$(QUIETLY) \
|
||||
raw=cscope.$$$$; \
|
||||
find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o \
|
||||
-type f \( $(CSFILENAMES) \) -print > $$raw; \
|
||||
{ \
|
||||
echo "$(CSINCS)"; \
|
||||
egrep -v "\.java|/make/" $$raw; \
|
||||
fgrep ".java" $$raw; \
|
||||
fgrep "/make/" $$raw; \
|
||||
} > $@; \
|
||||
rm -f $$raw
|
||||
|
||||
TAGS: cscope.files FORCE
|
||||
egrep -v '^-|^$$' $< | etags --members -
|
||||
|
||||
TAGS.clean: nametable.clean
|
||||
-$(RM) TAGS
|
||||
|
||||
# .nametable.files and .nametable.files.tmp are used to determine if any files
|
||||
# were added to/deleted from/renamed in the workspace. If not, then there's
|
||||
# normally no need to rebuild the cscope database. To force a rebuild of
|
||||
# the cscope database: gmake nametable.clean.
|
||||
.nametable.files: .nametable.files.tmp
|
||||
( cmp -s $@ $< ) || ( cp $< $@ )
|
||||
-$(RM) $<
|
||||
|
||||
# `hg status' is slightly faster than `hg fstatus'. Both are
|
||||
# quite a bit slower on an NFS mounted file system, so this is
|
||||
# really geared towards repos on local file systems.
|
||||
.nametable.files.tmp:
|
||||
-$(HG) fstatus -acmn > $@
|
||||
|
||||
nametable.clean:
|
||||
-$(RM) .nametable.files .nametable.files.tmp
|
||||
|
||||
FORCE:
|
||||
|
||||
.PHONY: cscope cscope.clean TAGS.clean nametable.clean FORCE
|
||||
TAGS.clean:
|
||||
$(RM) TAGS
|
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011

HS_MAJOR_VER=21
HS_MINOR_VER=0
HS_BUILD_NUMBER=08
HS_BUILD_NUMBER=11

JDK_MAJOR_VER=1
JDK_MINOR_VER=7
@ -359,7 +359,7 @@ clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark:
|
||||
|
||||
clean: clean_compiler2 clean_compiler1 clean_core clean_zero clean_shark clean_docs
|
||||
|
||||
include $(GAMMADIR)/make/$(OSNAME)/makefiles/cscope.make
|
||||
include $(GAMMADIR)/make/cscope.make
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
|
@ -1,160 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
#
|
||||
|
||||
#
|
||||
# The cscope.out file is made in the current directory and spans the entire
|
||||
# source tree.
|
||||
#
|
||||
# Things to note:
|
||||
# 1. We use relative names for cscope.
|
||||
# 2. We *don't* remove the old cscope.out file, because cscope is smart
|
||||
# enough to only build what has changed. It can be confused, however,
|
||||
# if files are renamed or removed, so it may be necessary to manually
|
||||
# remove cscope.out if a lot of reorganization has occurred.
|
||||
#
|
||||
|
||||
include $(GAMMADIR)/make/scm.make
|
||||
|
||||
NAWK = awk
|
||||
RM = rm -f
|
||||
HG = hg
|
||||
CS_TOP = ../..
|
||||
|
||||
CSDIRS = $(CS_TOP)/src $(CS_TOP)/build
|
||||
CSINCS = $(CSDIRS:%=-I%)
|
||||
|
||||
CSCOPE = cscope
|
||||
CSCOPE_FLAGS = -b
|
||||
|
||||
# Allow .java files to be added from the environment (CSCLASSES=yes).
|
||||
ifdef CSCLASSES
|
||||
ADDCLASSES= -o -name '*.java'
|
||||
endif
|
||||
|
||||
# Adding CClassHeaders also pushes the file count of a full workspace up about
|
||||
# 200 files (these files also don't exist in a new workspace, and thus will
|
||||
# cause the recreation of the database as they get created, which might seem
|
||||
# a little confusing). Thus allow these files to be added from the environment
|
||||
# (CSHEADERS=yes).
|
||||
ifndef CSHEADERS
|
||||
RMCCHEADERS= -o -name CClassHeaders
|
||||
endif
|
||||
|
||||
# Use CS_GENERATED=x to include auto-generated files in the build directories.
|
||||
ifdef CS_GENERATED
|
||||
CS_ADD_GENERATED = -o -name '*.incl'
|
||||
else
|
||||
CS_PRUNE_GENERATED = -o -name '${OS}_*_core' -o -name '${OS}_*_compiler?'
|
||||
endif
|
||||
|
||||
# OS-specific files for other systems are excluded by default. Use CS_OS=yes
|
||||
# to include platform-specific files for other platforms.
|
||||
ifndef CS_OS
|
||||
CS_OS = linux macos solaris win32
|
||||
CS_PRUNE_OS = $(patsubst %,-o -name '*%*',$(filter-out ${OS},${CS_OS}))
|
||||
endif
|
||||
|
||||
# Processor-specific files for other processors are excluded by default. Use
|
||||
# CS_CPU=x to include platform-specific files for other platforms.
|
||||
ifndef CS_CPU
|
||||
CS_CPU = i486 sparc amd64 ia64
|
||||
CS_PRUNE_CPU = $(patsubst %,-o -name '*%*',$(filter-out ${SRCARCH},${CS_CPU}))
|
||||
endif
|
||||
|
||||
# What files should we include? A simple rule might be just those files under
|
||||
# SCCS control, however this would miss files we create like the opcodes and
|
||||
# CClassHeaders. The following attempts to find everything that is *useful*.
|
||||
# (.del files are created by sccsrm, demo directories contain many .java files
|
||||
# that probably aren't useful for development, and the pkgarchive may contain
|
||||
# duplicates of files within the source hierarchy).
|
||||
|
||||
# Directories to exclude.
|
||||
CS_PRUNE_STD = $(SCM_DIRS) \
|
||||
-o -name '.del-*' \
|
||||
-o -name '*demo' \
|
||||
-o -name pkgarchive
|
||||
|
||||
CS_PRUNE = $(CS_PRUNE_STD) \
|
||||
$(CS_PRUNE_OS) \
|
||||
$(CS_PRUNE_CPU) \
|
||||
$(CS_PRUNE_GENERATED) \
|
||||
$(RMCCHEADERS)
|
||||
|
||||
# File names to include.
|
||||
CSFILENAMES = -name '*.[ch]pp' \
|
||||
-o -name '*.[Ccshlxy]' \
|
||||
$(CS_ADD_GENERATED) \
|
||||
-o -name '*.il' \
|
||||
-o -name '*.cc' \
|
||||
-o -name '*[Mm]akefile*' \
|
||||
-o -name '*.gmk' \
|
||||
-o -name '*.make' \
|
||||
-o -name '*.ad' \
|
||||
$(ADDCLASSES)
|
||||
|
||||
.PRECIOUS: cscope.out
|
||||
|
||||
cscope cscope.out: cscope.files FORCE
|
||||
$(CSCOPE) $(CSCOPE_FLAGS)
|
||||
|
||||
# The .raw file is reordered here in an attempt to make cscope display the most
|
||||
# relevant files first.
|
||||
cscope.files: .cscope.files.raw
|
||||
echo "$(CSINCS)" > $@
|
||||
-egrep -v "\.java|\/make\/" $< >> $@
|
||||
-fgrep ".java" $< >> $@
|
||||
-fgrep "/make/" $< >> $@
|
||||
|
||||
.cscope.files.raw: .nametable.files
|
||||
-find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o \
|
||||
-type f \( $(CSFILENAMES) \) -print > $@
|
||||
|
||||
cscope.clean: nametable.clean
|
||||
-$(RM) cscope.out cscope.files .cscope.files.raw
|
||||
|
||||
TAGS: cscope.files FORCE
|
||||
egrep -v '^-|^$$' $< | etags --members -
|
||||
|
||||
TAGS.clean: nametable.clean
|
||||
-$(RM) TAGS
|
||||
|
||||
# .nametable.files and .nametable.files.tmp are used to determine if any files
|
||||
# were added to/deleted from/renamed in the workspace. If not, then there's
|
||||
# normally no need to rebuild the cscope database. To force a rebuild of
|
||||
# the cscope database: gmake nametable.clean.
|
||||
.nametable.files: .nametable.files.tmp
|
||||
( cmp -s $@ $< ) || ( cp $< $@ )
|
||||
-$(RM) $<
|
||||
|
||||
# `hg status' is slightly faster than `hg fstatus'. Both are
|
||||
# quite a bit slower on an NFS mounted file system, so this is
|
||||
# really geared towards repos on local file systems.
|
||||
.nametable.files.tmp:
|
||||
-$(HG) fstatus -acmn > $@
|
||||
nametable.clean:
|
||||
-$(RM) .nametable.files .nametable.files.tmp
|
||||
|
||||
FORCE:
|
||||
|
||||
.PHONY: cscope cscope.clean TAGS.clean nametable.clean FORCE
|
@ -296,7 +296,7 @@ clean_compiler1 clean_compiler2 clean_core clean_kernel:
|
||||
|
||||
clean: clean_compiler2 clean_compiler1 clean_core clean_docs clean_kernel
|
||||
|
||||
include $(GAMMADIR)/make/$(OSNAME)/makefiles/cscope.make
|
||||
include $(GAMMADIR)/make/cscope.make
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
|
@ -486,7 +486,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
|
||||
if (ek == _invokespecial_mh) {
|
||||
// Must load & check the first argument before entering the target method.
|
||||
__ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
|
||||
__ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
|
||||
__ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
|
||||
__ null_check(G3_method_handle);
|
||||
__ verify_oop(G3_method_handle);
|
||||
}
|
||||
|
@ -3293,8 +3293,6 @@ void TemplateTable::invokedynamic(int byte_no) {
|
||||
/*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
|
||||
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
|
||||
|
||||
__ verify_oop(G5_callsite);
|
||||
|
||||
// profile this call
|
||||
__ profile_call(O4);
|
||||
|
||||
@ -3307,8 +3305,10 @@ void TemplateTable::invokedynamic(int byte_no) {
|
||||
__ sll(Rret, LogBytesPerWord, Rret);
|
||||
__ ld_ptr(Rtemp, Rret, Rret); // get return address
|
||||
|
||||
__ verify_oop(G5_callsite);
|
||||
__ load_heap_oop(G5_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
|
||||
__ null_check(G3_method_handle);
|
||||
__ verify_oop(G3_method_handle);
|
||||
|
||||
// Adjust Rret first so Llast_SP can be same as Rret
|
||||
__ add(Rret, -frame::pc_return_offset, O7);
|
||||
|
@ -422,7 +422,7 @@ void TemplateTable::fast_aldc(bool wide) {
|
||||
|
||||
Label L_done, L_throw_exception;
|
||||
const Register con_klass_temp = rcx; // same as Rcache
|
||||
__ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(con_klass_temp, rax);
|
||||
__ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
|
||||
__ jcc(Assembler::notEqual, L_done);
|
||||
__ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
|
||||
@ -432,7 +432,7 @@ void TemplateTable::fast_aldc(bool wide) {
|
||||
|
||||
// Load the exception from the system-array which wraps it:
|
||||
__ bind(L_throw_exception);
|
||||
__ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
|
||||
__ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
|
||||
__ jump(ExternalAddress(Interpreter::throw_exception_entry()));
|
||||
|
||||
__ bind(L_done);
|
||||
@ -946,9 +946,9 @@ void TemplateTable::aastore() {
|
||||
__ jcc(Assembler::zero, is_null);
|
||||
|
||||
// Move subklass into EBX
|
||||
__ movptr(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rbx, rax);
|
||||
// Move superklass into EAX
|
||||
__ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rax, rdx);
|
||||
__ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
|
||||
// Compress array+index*wordSize+12 into a single register. Frees ECX.
|
||||
__ lea(rdx, element_address);
|
||||
@ -2001,7 +2001,7 @@ void TemplateTable::_return(TosState state) {
|
||||
if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
|
||||
assert(state == vtos, "only valid state");
|
||||
__ movptr(rax, aaddress(0));
|
||||
__ movptr(rdi, Address(rax, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rdi, rax);
|
||||
__ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
|
||||
__ testl(rdi, JVM_ACC_HAS_FINALIZER);
|
||||
Label skip_register_finalizer;
|
||||
@ -2948,7 +2948,7 @@ void TemplateTable::invokevirtual_helper(Register index, Register recv,
|
||||
// get receiver klass
|
||||
__ null_check(recv, oopDesc::klass_offset_in_bytes());
|
||||
// Keep recv in rcx for callee expects it there
|
||||
__ movptr(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rax, recv);
|
||||
__ verify_oop(rax);
|
||||
|
||||
// profile this call
|
||||
@ -3028,7 +3028,7 @@ void TemplateTable::invokeinterface(int byte_no) {
|
||||
|
||||
// Get receiver klass into rdx - also a null check
|
||||
__ restore_locals(); // restore rdi
|
||||
__ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rdx, rcx);
|
||||
__ verify_oop(rdx);
|
||||
|
||||
// profile this call
|
||||
@ -3083,6 +3083,7 @@ void TemplateTable::invokeinterface(int byte_no) {
|
||||
|
||||
void TemplateTable::invokedynamic(int byte_no) {
|
||||
transition(vtos, vtos);
|
||||
assert(byte_no == f1_oop, "use this argument");
|
||||
|
||||
if (!EnableInvokeDynamic) {
|
||||
// We should not encounter this bytecode if !EnableInvokeDynamic.
|
||||
@ -3095,7 +3096,6 @@ void TemplateTable::invokedynamic(int byte_no) {
|
||||
return;
|
||||
}
|
||||
|
||||
assert(byte_no == f1_oop, "use this argument");
|
||||
prepare_invoke(rax, rbx, byte_no);
|
||||
|
||||
// rax: CallSite object (f1)
|
||||
@ -3106,14 +3106,14 @@ void TemplateTable::invokedynamic(int byte_no) {
|
||||
Register rax_callsite = rax;
|
||||
Register rcx_method_handle = rcx;
|
||||
|
||||
if (ProfileInterpreter) {
|
||||
// %%% should make a type profile for any invokedynamic that takes a ref argument
|
||||
// profile this call
|
||||
__ profile_call(rsi);
|
||||
}
|
||||
// %%% should make a type profile for any invokedynamic that takes a ref argument
|
||||
// profile this call
|
||||
__ profile_call(rsi);
|
||||
|
||||
__ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx)));
|
||||
__ verify_oop(rax_callsite);
|
||||
__ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
|
||||
__ null_check(rcx_method_handle);
|
||||
__ verify_oop(rcx_method_handle);
|
||||
__ prepare_to_jump_from_interpreted();
|
||||
__ jump_to_method_handle_entry(rcx_method_handle, rdx);
|
||||
}
|
||||
@ -3258,7 +3258,7 @@ void TemplateTable::_new() {
|
||||
(int32_t)markOopDesc::prototype()); // header
|
||||
__ pop(rcx); // get saved klass back in the register.
|
||||
}
|
||||
__ movptr(Address(rax, oopDesc::klass_offset_in_bytes()), rcx); // klass
|
||||
__ store_klass(rax, rcx); // klass
|
||||
|
||||
{
|
||||
SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
|
||||
@ -3333,7 +3333,7 @@ void TemplateTable::checkcast() {
|
||||
__ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
|
||||
|
||||
__ bind(resolved);
|
||||
__ movptr(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rbx, rdx);
|
||||
|
||||
// Generate subtype check. Blows ECX. Resets EDI. Object in EDX.
|
||||
// Superklass in EAX. Subklass in EBX.
|
||||
@ -3376,12 +3376,12 @@ void TemplateTable::instanceof() {
|
||||
__ push(atos);
|
||||
call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
|
||||
__ pop_ptr(rdx);
|
||||
__ movptr(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rdx, rdx);
|
||||
__ jmp(resolved);
|
||||
|
||||
// Get superklass in EAX and subklass in EDX
|
||||
__ bind(quicked);
|
||||
__ movptr(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rdx, rax);
|
||||
__ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
|
||||
|
||||
__ bind(resolved);
|
||||
|
@ -436,7 +436,7 @@ void TemplateTable::fast_aldc(bool wide) {
|
||||
Label L_done, L_throw_exception;
|
||||
const Register con_klass_temp = rcx; // same as cache
|
||||
const Register array_klass_temp = rdx; // same as index
|
||||
__ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(con_klass_temp, rax);
|
||||
__ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
|
||||
__ cmpptr(con_klass_temp, Address(array_klass_temp, 0));
|
||||
__ jcc(Assembler::notEqual, L_done);
|
||||
@ -447,7 +447,7 @@ void TemplateTable::fast_aldc(bool wide) {
|
||||
|
||||
// Load the exception from the system-array which wraps it:
|
||||
__ bind(L_throw_exception);
|
||||
__ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
|
||||
__ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
|
||||
__ jump(ExternalAddress(Interpreter::throw_exception_entry()));
|
||||
|
||||
__ bind(L_done);
|
||||
@ -3137,7 +3137,6 @@ void TemplateTable::invokedynamic(int byte_no) {
|
||||
return;
|
||||
}
|
||||
|
||||
assert(byte_no == f1_oop, "use this argument");
|
||||
prepare_invoke(rax, rbx, byte_no);
|
||||
|
||||
// rax: CallSite object (f1)
|
||||
@ -3148,14 +3147,14 @@ void TemplateTable::invokedynamic(int byte_no) {
|
||||
Register rax_callsite = rax;
|
||||
Register rcx_method_handle = rcx;
|
||||
|
||||
if (ProfileInterpreter) {
|
||||
// %%% should make a type profile for any invokedynamic that takes a ref argument
|
||||
// profile this call
|
||||
__ profile_call(r13);
|
||||
}
|
||||
// %%% should make a type profile for any invokedynamic that takes a ref argument
|
||||
// profile this call
|
||||
__ profile_call(r13);
|
||||
|
||||
__ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx)));
|
||||
__ verify_oop(rax_callsite);
|
||||
__ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
|
||||
__ null_check(rcx_method_handle);
|
||||
__ verify_oop(rcx_method_handle);
|
||||
__ prepare_to_jump_from_interpreted();
|
||||
__ jump_to_method_handle_entry(rcx_method_handle, rdx);
|
||||
}
|
||||
|
@@ -441,12 +441,25 @@ void VM_Version::get_processor_features() {
    }
  }

  // On family 21 processors default is no sw prefetch
  if ( cpu_family() == 21 ) {
  // some defaults for AMD family 15h
  if ( cpu_family() == 0x15 ) {
    // On family 15h processors default is no sw prefetch
    if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
      AllocatePrefetchStyle = 0;
    }
    // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
    if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
      AllocatePrefetchInstr = 3;
    }
    // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
    if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) {
      UseXMMForArrayCopy = true;
    }
    if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) {
      UseUnalignedLoadStores = true;
    }
  }

  }

  if( is_intel() ) { // Intel cpus specific settings
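The block above follows HotSpot's usual convention of only overriding a flag when the user has not set it explicitly (FLAG_IS_DEFAULT). As a rough, self-contained sketch of that pattern — the flag table and the flag_is_default stand-in below are invented for illustration and are not HotSpot's real globals machinery:

#include <cstdio>
#include <map>
#include <string>

// Hypothetical stand-ins for HotSpot's flag table and the FLAG_IS_DEFAULT macro.
static std::map<std::string, bool> g_flag_was_set_by_user;
static int AllocatePrefetchStyle = 1;   // assumed built-in default
static int AllocatePrefetchInstr = 0;   // assumed built-in default

static bool flag_is_default(const std::string& name) {
  return g_flag_was_set_by_user.count(name) == 0;
}

// Apply CPU-family-specific defaults without clobbering explicit user choices,
// mirroring the structure of the family-0x15 logic in the hunk above.
static void apply_family_15h_defaults(unsigned cpu_family) {
  if (cpu_family != 0x15) return;
  if (flag_is_default("AllocatePrefetchStyle")) AllocatePrefetchStyle = 0;  // no sw prefetch
  if (flag_is_default("AllocatePrefetchInstr")) AllocatePrefetchInstr = 3;  // PREFETCHW
}

int main() {
  g_flag_was_set_by_user["AllocatePrefetchStyle"] = true;  // user set it on the command line
  apply_family_15h_defaults(0x15);
  std::printf("style=%d instr=%d\n", AllocatePrefetchStyle, AllocatePrefetchInstr);  // style=1 instr=3
}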
@@ -29,13 +29,19 @@
// Defines Linux specific flags. They are not available on other platforms.
//
#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
  product(bool, UseOprofile, false,                                    \
          "enable support for Oprofile profiler")                      \
                                                                       \
  product(bool, UseLinuxPosixThreadCPUClocks, true,                    \
          "enable fast Linux Posix clocks where available")
// NB: The default value of UseLinuxPosixThreadCPUClocks may be
// overridden in Arguments::parse_each_vm_init_arg.
  product(bool, UseOprofile, false,                                    \
          "enable support for Oprofile profiler")                      \
                                                                       \
  product(bool, UseLinuxPosixThreadCPUClocks, true,                    \
          "enable fast Linux Posix clocks where available")            \
  /* NB: The default value of UseLinuxPosixThreadCPUClocks may be      \
     overridden in Arguments::parse_each_vm_init_arg. */               \
                                                                       \
  product(bool, UseHugeTLBFS, false,                                   \
          "Use MAP_HUGETLB for large pages")                           \
                                                                       \
  product(bool, UseSHM, false,                                         \
          "Use SYSV shared memory for large pages")

//
// Defines Linux-specific default values. The flags are available on all
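The flag list above is an X-macro: RUNTIME_OS_FLAGS takes the product(...) macro itself as an argument, so the same table can be expanded once to declare the flags, once to define them, once to print their documentation, and so on. A minimal sketch of the technique with made-up flag names and expansion macros (not HotSpot's real ones):

#include <cstdio>

// A tiny X-macro flag table in the spirit of RUNTIME_OS_FLAGS above.
// The flag names and expansions here are invented for illustration only.
#define DEMO_OS_FLAGS(product)                                  \
  product(bool, DemoUseHugePages, false,                        \
          "use huge pages for the demo heap")                   \
  product(bool, DemoUseFastClock, true,                         \
          "use a fast monotonic clock if available")

// Expansion 1: define one global variable per flag.
#define DEFINE_FLAG(type, name, value, doc) type name = value;
DEMO_OS_FLAGS(DEFINE_FLAG)
#undef DEFINE_FLAG

// Expansion 2: print every flag together with its documentation string.
#define PRINT_FLAG(type, name, value, doc) std::printf("%-18s = %d  (%s)\n", #name, (int)name, doc);

int main() {
  DEMO_OS_FLAGS(PRINT_FLAG)
}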
@@ -2465,16 +2465,40 @@ bool os::commit_memory(char* addr, size_t size, bool exec) {
  return res != (uintptr_t) MAP_FAILED;
}

// Define MAP_HUGETLB here so we can build HotSpot on old systems.
#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000
#endif

// Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif

bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
                       bool exec) {
  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
    uintptr_t res =
      (uintptr_t) ::mmap(addr, size, prot,
                         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
                         -1, 0);
    return res != (uintptr_t) MAP_FAILED;
  }

  return commit_memory(addr, size, exec);
}

void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
    // We don't check the return value: madvise(MADV_HUGEPAGE) may not
    // be supported or the memory may already be backed by huge pages.
    ::madvise(addr, bytes, MADV_HUGEPAGE);
  }
}

void os::free_memory(char *addr, size_t bytes) {
  ::mmap(addr, bytes, PROT_READ | PROT_WRITE,
         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
  ::madvise(addr, bytes, MADV_DONTNEED);
}

void os::numa_make_global(char *addr, size_t bytes) {
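Outside HotSpot, the same "try explicit huge pages, fall back to ordinary pages, and hint with madvise" shape looks roughly like the sketch below. MAP_HUGETLB and MADV_HUGEPAGE are the real Linux interfaces used in the hunk above; the fallback policy, the size, and the helper name are illustrative assumptions.

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000
#endif
#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif

// Try to map `size` bytes backed by explicit huge pages; fall back to regular
// pages. Mirrors the structure of os::commit_memory above, minus HotSpot specifics.
static void* reserve_with_huge_page_fallback(size_t size) {
  void* p = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
  if (p != MAP_FAILED) return p;

  // No huge pages available or configured: use normal pages and ask the
  // kernel for transparent huge pages as a best-effort hint.
  p = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return nullptr;
  ::madvise(p, size, MADV_HUGEPAGE);  // return value deliberately ignored, as in the hunk above
  return p;
}

int main() {
  const size_t size = 4 * 1024 * 1024;  // assumed to be a multiple of the huge page size
  void* p = reserve_with_huge_page_fallback(size);
  std::printf("mapped at %p\n", p);
  if (p) ::munmap(p, size);
}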
@ -2812,6 +2836,43 @@ bool os::unguard_memory(char* addr, size_t size) {
|
||||
return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
|
||||
}
|
||||
|
||||
bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
|
||||
bool result = false;
|
||||
void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
|
||||
MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
|
||||
-1, 0);
|
||||
|
||||
if (p != (void *) -1) {
|
||||
// We don't know if this really is a huge page or not.
|
||||
FILE *fp = fopen("/proc/self/maps", "r");
|
||||
if (fp) {
|
||||
while (!feof(fp)) {
|
||||
char chars[257];
|
||||
long x = 0;
|
||||
if (fgets(chars, sizeof(chars), fp)) {
|
||||
if (sscanf(chars, "%lx-%*lx", &x) == 1
|
||||
&& x == (long)p) {
|
||||
if (strstr (chars, "hugepage")) {
|
||||
result = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
fclose(fp);
|
||||
}
|
||||
munmap (p, page_size);
|
||||
if (result)
|
||||
return true;
|
||||
}
|
||||
|
||||
if (warn) {
|
||||
warning("HugeTLBFS is not supported by the operating system.");
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the coredump_filter bits to include largepages in core dump (bit 6)
|
||||
*
|
||||
@ -2854,7 +2915,16 @@ static void set_coredump_filter(void) {
|
||||
static size_t _large_page_size = 0;
|
||||
|
||||
bool os::large_page_init() {
|
||||
if (!UseLargePages) return false;
|
||||
if (!UseLargePages) {
|
||||
UseHugeTLBFS = false;
|
||||
UseSHM = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
|
||||
// Our user has not expressed a preference, so we'll try both.
|
||||
UseHugeTLBFS = UseSHM = true;
|
||||
}
|
||||
|
||||
if (LargePageSizeInBytes) {
|
||||
_large_page_size = LargePageSizeInBytes;
|
||||
@ -2899,6 +2969,9 @@ bool os::large_page_init() {
|
||||
}
|
||||
}
|
||||
|
||||
// print a warning if any large page related flag is specified on command line
|
||||
bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
|
||||
|
||||
const size_t default_page_size = (size_t)Linux::page_size();
|
||||
if (_large_page_size > default_page_size) {
|
||||
_page_sizes[0] = _large_page_size;
|
||||
@ -2906,6 +2979,14 @@ bool os::large_page_init() {
|
||||
_page_sizes[2] = 0;
|
||||
}
|
||||
|
||||
UseHugeTLBFS = UseHugeTLBFS &&
|
||||
Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
|
||||
|
||||
if (UseHugeTLBFS)
|
||||
UseSHM = false;
|
||||
|
||||
UseLargePages = UseHugeTLBFS || UseSHM;
|
||||
|
||||
set_coredump_filter();
|
||||
|
||||
// Large page support is available on 2.6 or newer kernel, some vendors
|
||||
@ -2922,7 +3003,7 @@ bool os::large_page_init() {
|
||||
char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
|
||||
// "exec" is passed in but not used. Creating the shared image for
|
||||
// the code cache doesn't have an SHM_X executable permission to check.
|
||||
assert(UseLargePages, "only for large pages");
|
||||
assert(UseLargePages && UseSHM, "only for SHM large pages");
|
||||
|
||||
key_t key = IPC_PRIVATE;
|
||||
char *addr;
|
||||
@@ -2989,16 +3070,15 @@ size_t os::large_page_size() {
  return _large_page_size;
}

// Linux does not support anonymous mmap with large page memory. The only way
// to reserve large page memory without file backing is through SysV shared
// memory API. The entire memory region is committed and pinned upfront.
// Hopefully this will change in the future...
// HugeTLBFS allows application to commit large page memory on demand;
// with SysV SHM the entire memory region must be allocated as shared
// memory.
bool os::can_commit_large_page_memory() {
  return false;
  return UseHugeTLBFS;
}

bool os::can_execute_large_page_memory() {
  return false;
  return UseHugeTLBFS;
}

// Reserve memory at an arbitrary address, only if that area is
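The rewritten comment is the behavioural point of this change: a SysV SHM segment exists at its full size the moment it is created, while an mmap-style reservation can be committed piecemeal as pages are touched. The sketch below contrasts the two reservation models using ordinary pages so it stays runnable anywhere; the large-page variants layer SHM_HUGETLB / MAP_HUGETLB on top of the same structure, and the sizes here are arbitrary.

#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>
#include <cstdio>

int main() {
  const size_t size = 8 * 1024 * 1024;  // illustrative size

  // SysV SHM style: the whole segment is created (and accounted) up front.
  int id = ::shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
  if (id != -1) {
    void* p = ::shmat(id, nullptr, 0);
    ::shmctl(id, IPC_RMID, nullptr);        // segment goes away after the last detach
    if (p != (void*)-1) ::shmdt(p);
  }

  // mmap style: reserve address space now, commit pages only as they are used.
  void* q = ::mmap(nullptr, size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (q != MAP_FAILED) {
    ::mprotect(q, 4096, PROT_READ | PROT_WRITE);  // make just the first page usable
    static_cast<char*>(q)[0] = 1;                 // first touch allocates backing for that page only
    ::munmap(q, size);
  }
  std::printf("done\n");
}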
@@ -4090,6 +4170,23 @@ jint os::init_2(void)
      UseNUMA = false;
    }
  }
  // With SHM large pages we cannot uncommit a page, so there's no way
  // we can make the adaptive lgrp chunk resizing work. If the user specified
  // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
  // disable adaptive resizing.
  if (UseNUMA && UseLargePages && UseSHM) {
    if (!FLAG_IS_DEFAULT(UseNUMA)) {
      if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
        UseLargePages = false;
      } else {
        warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
        UseAdaptiveSizePolicy = false;
        UseAdaptiveNUMAChunkSizing = false;
      }
    } else {
      UseNUMA = false;
    }
  }
  if (!UseNUMA && ForceNUMA) {
    UseNUMA = true;
  }
@ -86,6 +86,9 @@ class Linux {
|
||||
|
||||
static void rebuild_cpu_to_node_map();
|
||||
static GrowableArray<int>* cpu_to_node() { return _cpu_to_node; }
|
||||
|
||||
static bool hugetlbfs_sanity_check(bool warn, size_t page_size);
|
||||
|
||||
public:
|
||||
static void init_thread_fpu_state();
|
||||
static int get_fpu_control_word();
|
||||
|
@ -2826,7 +2826,9 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
|
||||
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
|
||||
assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
|
||||
assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
|
||||
Solaris::set_mpss_range(addr, bytes, alignment_hint);
|
||||
if (UseLargePages && UseMPSS) {
|
||||
Solaris::set_mpss_range(addr, bytes, alignment_hint);
|
||||
}
|
||||
}
|
||||
|
||||
// Tell the OS to make the range local to the first-touching LWP
|
||||
@ -5044,6 +5046,20 @@ jint os::init_2(void) {
|
||||
UseNUMA = false;
|
||||
}
|
||||
}
|
||||
// ISM is not compatible with the NUMA allocator - it always allocates
|
||||
// pages round-robin across the lgroups.
|
||||
if (UseNUMA && UseLargePages && UseISM) {
|
||||
if (!FLAG_IS_DEFAULT(UseNUMA)) {
|
||||
if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseISM)) {
|
||||
UseLargePages = false;
|
||||
} else {
|
||||
warning("UseNUMA is not compatible with ISM large pages, disabling NUMA allocator");
|
||||
UseNUMA = false;
|
||||
}
|
||||
} else {
|
||||
UseNUMA = false;
|
||||
}
|
||||
}
|
||||
if (!UseNUMA && ForceNUMA) {
|
||||
UseNUMA = true;
|
||||
}
|
||||
|
@ -232,14 +232,7 @@ void BCEscapeAnalyzer::invoke(StateInfo &state, Bytecodes::Code code, ciMethod*
|
||||
}
|
||||
|
||||
// compute size of arguments
|
||||
int arg_size = target->arg_size();
|
||||
if (code == Bytecodes::_invokedynamic) {
|
||||
assert(!target->is_static(), "receiver explicit in method");
|
||||
arg_size--; // implicit, not really on stack
|
||||
}
|
||||
if (!target->is_loaded() && code == Bytecodes::_invokestatic) {
|
||||
arg_size--;
|
||||
}
|
||||
int arg_size = target->invoke_arg_size(code);
|
||||
int arg_base = MAX2(state._stack_height - arg_size, 0);
|
||||
|
||||
// direct recursive calls are skipped if they can be bound statically without introducing
|
||||
|
@ -756,7 +756,7 @@ ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
|
||||
assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic");
|
||||
|
||||
bool is_resolved = cpool->cache()->main_entry_at(index)->is_resolved(bc);
|
||||
if (is_resolved && (oop) cpool->cache()->secondary_entry_at(index)->f1() == NULL)
|
||||
if (is_resolved && cpool->cache()->secondary_entry_at(index)->is_f1_null())
|
||||
// FIXME: code generation could allow for null (unlinked) call site
|
||||
is_resolved = false;
|
||||
|
||||
@ -770,7 +770,7 @@ ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
|
||||
|
||||
// Get the invoker methodOop from the constant pool.
|
||||
oop f1_value = cpool->cache()->main_entry_at(index)->f1();
|
||||
methodOop signature_invoker = methodOop(f1_value);
|
||||
methodOop signature_invoker = (methodOop) f1_value;
|
||||
assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
|
||||
"correct result from LinkResolver::resolve_invokedynamic");
|
||||
|
||||
|
@@ -127,7 +127,24 @@ class ciMethod : public ciObject {
  ciSignature* signature() const { return _signature; }
  ciType* return_type() const { return _signature->return_type(); }
  int arg_size_no_receiver() const { return _signature->size(); }
  int arg_size() const { return _signature->size() + (_flags.is_static() ? 0 : 1); }
  // Can only be used on loaded ciMethods
  int arg_size() const {
    check_is_loaded();
    return _signature->size() + (_flags.is_static() ? 0 : 1);
  }
  // Report the number of elements on stack when invoking this method.
  // This is different than the regular arg_size because invokedynamic
  // has an implicit receiver.
  int invoke_arg_size(Bytecodes::Code code) const {
    int arg_size = _signature->size();
    // Add a receiver argument, maybe:
    if (code != Bytecodes::_invokestatic &&
        code != Bytecodes::_invokedynamic) {
      arg_size++;
    }
    return arg_size;
  }


  // Method code and related information.
  address code() { if (_code == NULL) load_code(); return _code; }

@@ -276,9 +293,9 @@ class ciMethod : public ciObject {
  void print_short_name(outputStream* st = tty);

  methodOop get_method_handle_target() {
    klassOop receiver_limit_oop = NULL;
    int flags = 0;
    return MethodHandles::decode_method(get_oop(), receiver_limit_oop, flags);
    KlassHandle receiver_limit; int flags = 0;
    methodHandle m = MethodHandles::decode_method(get_oop(), receiver_limit, flags);
    return m();
  }
};

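A worked example of the counting rule above, as a self-contained sketch (the enum and the slot count below are simplified stand-ins for HotSpot's Bytecodes and ciSignature): for a descriptor such as (IJ)V the declared parameters occupy 3 slots (int = 1, long = 2); invokevirtual adds one more slot for the receiver, while invokestatic and invokedynamic add nothing.

#include <cstdio>

enum class Bytecode { invokestatic, invokevirtual, invokespecial, invokeinterface, invokedynamic };

// Simplified version of ciMethod::invoke_arg_size(): `signature_slots` plays the
// role of _signature->size() (type slots of the declared parameters only).
static int invoke_arg_size(Bytecode code, int signature_slots) {
  int arg_size = signature_slots;
  // Add a receiver slot, except for invokestatic and invokedynamic.
  if (code != Bytecode::invokestatic && code != Bytecode::invokedynamic) {
    arg_size++;
  }
  return arg_size;
}

int main() {
  const int slots = 3;  // e.g. a descriptor (IJ)V: int = 1 slot, long = 2 slots
  std::printf("invokevirtual: %d\n", invoke_arg_size(Bytecode::invokevirtual, slots));  // 4
  std::printf("invokestatic:  %d\n", invoke_arg_size(Bytecode::invokestatic, slots));   // 3
  std::printf("invokedynamic: %d\n", invoke_arg_size(Bytecode::invokedynamic, slots));  // 3
}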
@@ -194,6 +194,16 @@ bool ciObject::can_be_constant() {
// ciObject::should_be_constant()
bool ciObject::should_be_constant() {
  if (ScavengeRootsInCode >= 2) return true;  // force everybody to be a constant
  if (!JavaObjectsInPerm && !is_null_object()) {
    // We want Strings and Classes to be embeddable by default since
    // they used to be in the perm world. Not all Strings used to be
    // embeddable but there's no easy way to distinguish the interned
    // from the regular ones so just treat them all that way.
    ciEnv* env = CURRENT_ENV;
    if (klass() == env->String_klass() || klass() == env->Class_klass()) {
      return true;
    }
  }
  return handle() == NULL || !is_scavengable();
}

@@ -976,6 +976,15 @@ void CompileBroker::compile_method_base(methodHandle method,
    return;
  }

  // If the requesting thread is holding the pending list lock
  // then we just return. We can't risk blocking while holding
  // the pending list lock or a 3-way deadlock may occur
  // between the reference handler thread, a GC (instigated
  // by a compiler thread), and compiled method registration.
  if (instanceRefKlass::owns_pending_list_lock(JavaThread::current())) {
    return;
  }

  // Outputs from the following MutexLocker block:
  CompileTask* task = NULL;
  bool blocking = false;

@@ -1304,17 +1313,8 @@ uint CompileBroker::assign_compile_id(methodHandle method, int osr_bci) {
// Should the current thread be blocked until this compilation request
// has been fulfilled?
bool CompileBroker::is_compile_blocking(methodHandle method, int osr_bci) {
  if (!BackgroundCompilation) {
    Symbol* class_name = method->method_holder()->klass_part()->name();
    if (class_name->starts_with("java/lang/ref/Reference", 23)) {
      // The reference handler thread can deadlock with the GC if compilation is blocking,
      // so we avoid blocking compiles for anything in the java.lang.ref.Reference class,
      // including inner classes such as ReferenceHandler.
      return false;
    }
    return true;
  }
  return false;
  assert(!instanceRefKlass::owns_pending_list_lock(JavaThread::current()), "possible deadlock");
  return !BackgroundCompilation;
}


@ -1963,10 +1963,21 @@ CompactibleFreeListSpace::gc_epilogue() {
|
||||
// Iteration support, mostly delegated from a CMS generation
|
||||
|
||||
void CompactibleFreeListSpace::save_marks() {
|
||||
// mark the "end" of the used space at the time of this call;
|
||||
assert(Thread::current()->is_VM_thread(),
|
||||
"Global variable should only be set when single-threaded");
|
||||
// Mark the "end" of the used space at the time of this call;
|
||||
// note, however, that promoted objects from this point
|
||||
// on are tracked in the _promoInfo below.
|
||||
set_saved_mark_word(unallocated_block());
|
||||
#ifdef ASSERT
|
||||
// Check the sanity of save_marks() etc.
|
||||
MemRegion ur = used_region();
|
||||
MemRegion urasm = used_region_at_save_marks();
|
||||
assert(ur.contains(urasm),
|
||||
err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
|
||||
" should contain [" PTR_FORMAT "," PTR_FORMAT ")",
|
||||
ur.start(), ur.end(), urasm.start(), urasm.end()));
|
||||
#endif
|
||||
// inform allocator that promotions should be tracked.
|
||||
assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
|
||||
_promoInfo.startTrackingPromotions();
|
||||
|
@ -3189,10 +3189,9 @@ bool ConcurrentMarkSweepGeneration::is_too_full() const {
|
||||
}
|
||||
|
||||
void CMSCollector::setup_cms_unloading_and_verification_state() {
|
||||
const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
|
||||
const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
|
||||
|| VerifyBeforeExit;
|
||||
const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings
|
||||
| SharedHeap::SO_CodeCache;
|
||||
const int rso = SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
|
||||
|
||||
if (should_unload_classes()) { // Should unload classes this cycle
|
||||
remove_root_scanning_option(rso); // Shrink the root set appropriately
|
||||
|
@ -1161,6 +1161,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
|
||||
TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
|
||||
PrintGC, true, gclog_or_tty);
|
||||
|
||||
TraceCollectorStats tcs(g1mm()->full_collection_counters());
|
||||
TraceMemoryManagerStats tms(true /* fullGC */);
|
||||
|
||||
double start = os::elapsedTime();
|
||||
@ -1339,6 +1340,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
|
||||
if (PrintHeapAtGC) {
|
||||
Universe::print_heap_after_gc();
|
||||
}
|
||||
g1mm()->update_counters();
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -1971,6 +1973,10 @@ jint G1CollectedHeap::initialize() {
|
||||
|
||||
init_mutator_alloc_region();
|
||||
|
||||
// Create the monitoring and management support so that
// values in the heap have been properly initialized.
|
||||
_g1mm = new G1MonitoringSupport(this, &_g1_storage);
|
||||
|
||||
return JNI_OK;
|
||||
}
|
||||
|
||||
@ -2113,6 +2119,28 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
|
||||
(cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
void G1CollectedHeap::allocate_dummy_regions() {
|
||||
// Let's fill up most of the region
|
||||
size_t word_size = HeapRegion::GrainWords - 1024;
|
||||
// And as a result the region we'll allocate will be humongous.
|
||||
guarantee(isHumongous(word_size), "sanity");
|
||||
|
||||
for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
|
||||
// Let's use the existing mechanism for the allocation
|
||||
HeapWord* dummy_obj = humongous_obj_allocate(word_size);
|
||||
if (dummy_obj != NULL) {
|
||||
MemRegion mr(dummy_obj, word_size);
|
||||
CollectedHeap::fill_with_object(mr);
|
||||
} else {
|
||||
// If we can't allocate once, we probably cannot allocate
|
||||
// again. Let's get out of the loop.
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif // !PRODUCT
|
||||
|
||||
void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
|
||||
MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
|
||||
|
||||
@ -2777,17 +2805,26 @@ void G1CollectedHeap::verify(bool allow_dirty,
|
||||
bool silent,
|
||||
bool use_prev_marking) {
|
||||
if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
|
||||
if (!silent) { gclog_or_tty->print("roots "); }
|
||||
if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
|
||||
VerifyRootsClosure rootsCl(use_prev_marking);
|
||||
CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
|
||||
process_strong_roots(true, // activate StrongRootsScope
|
||||
false,
|
||||
SharedHeap::SO_AllClasses,
|
||||
// We apply the relevant closures to all the oops in the
|
||||
// system dictionary, the string table and the code cache.
|
||||
const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
|
||||
process_strong_roots(true, // activate StrongRootsScope
|
||||
true, // we set "collecting perm gen" to true,
|
||||
// so we don't reset the dirty cards in the perm gen.
|
||||
SharedHeap::ScanningOption(so), // roots scanning options
|
||||
&rootsCl,
|
||||
&blobsCl,
|
||||
&rootsCl);
|
||||
// Since we used "collecting_perm_gen" == true above, we will not have
|
||||
// checked the refs from perm into the G1-collected heap. We check those
|
||||
// references explicitly below. Whether the relevant cards are dirty
|
||||
// is checked further below in the rem set verification.
|
||||
if (!silent) { gclog_or_tty->print("Permgen roots "); }
|
||||
perm_gen()->oop_iterate(&rootsCl);
|
||||
bool failures = rootsCl.failures();
|
||||
rem_set()->invalidate(perm_gen()->used_region(), false);
|
||||
if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
|
||||
verify_region_sets();
|
||||
if (!silent) { gclog_or_tty->print("HeapRegions "); }
|
||||
@ -3164,6 +3201,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
|
||||
TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
|
||||
|
||||
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
|
||||
TraceMemoryManagerStats tms(false /* fullGC */);
|
||||
|
||||
// If the secondary_free_list is not empty, append it to the
|
||||
@ -3338,6 +3376,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
doConcurrentMark();
|
||||
}
|
||||
|
||||
allocate_dummy_regions();
|
||||
|
||||
#if YOUNG_LIST_VERBOSE
|
||||
gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
|
||||
_young_list->print();
|
||||
@ -3401,6 +3441,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
if (PrintHeapAtGC) {
|
||||
Universe::print_heap_after_gc();
|
||||
}
|
||||
g1mm()->update_counters();
|
||||
|
||||
if (G1SummarizeRSetStats &&
|
||||
(G1SummarizeRSetStatsPeriod > 0) &&
|
||||
(total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
|
||||
@ -5314,6 +5356,7 @@ HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
|
||||
if (new_alloc_region != NULL) {
|
||||
g1_policy()->update_region_num(true /* next_is_young */);
|
||||
set_region_short_lived_locked(new_alloc_region);
|
||||
g1mm()->update_eden_counters();
|
||||
return new_alloc_region;
|
||||
}
|
||||
}
|
||||
|
@ -28,7 +28,9 @@
|
||||
#include "gc_implementation/g1/concurrentMark.hpp"
|
||||
#include "gc_implementation/g1/g1AllocRegion.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.hpp"
|
||||
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSets.hpp"
|
||||
#include "gc_implementation/shared/hSpaceCounters.hpp"
|
||||
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
|
||||
#include "memory/barrierSet.hpp"
|
||||
#include "memory/memRegion.hpp"
|
||||
@ -57,6 +59,7 @@ class HeapRegionRemSetIterator;
|
||||
class ConcurrentMark;
|
||||
class ConcurrentMarkThread;
|
||||
class ConcurrentG1Refine;
|
||||
class GenerationCounters;
|
||||
|
||||
typedef OverflowTaskQueue<StarTask> RefToScanQueue;
|
||||
typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
|
||||
@ -236,6 +239,9 @@ private:
|
||||
// current collection.
|
||||
HeapRegion* _gc_alloc_region_list;
|
||||
|
||||
// Helper for monitoring and management support.
|
||||
G1MonitoringSupport* _g1mm;
|
||||
|
||||
// Determines PLAB size for a particular allocation purpose.
|
||||
static size_t desired_plab_sz(GCAllocPurpose purpose);
|
||||
|
||||
@ -298,6 +304,14 @@ private:
|
||||
// started is maintained in _total_full_collections in CollectedHeap.
|
||||
volatile unsigned int _full_collections_completed;
|
||||
|
||||
// This is a non-product method that is helpful for testing. It is
|
||||
// called at the end of a GC and artificially expands the heap by
|
||||
// allocating a number of dead regions. This way we can induce very
|
||||
// frequent marking cycles and stress the cleanup / concurrent
|
||||
// cleanup code more (as all the regions that will be allocated by
|
||||
// this method will be found dead by the marking cycle).
|
||||
void allocate_dummy_regions() PRODUCT_RETURN;
|
||||
|
||||
// These are macros so that, if the assert fires, we get the correct
|
||||
// line number, file, etc.
|
||||
|
||||
@ -542,6 +556,9 @@ protected:
|
||||
HeapWord* expand_and_allocate(size_t word_size);
|
||||
|
||||
public:
|
||||
|
||||
G1MonitoringSupport* g1mm() { return _g1mm; }
|
||||
|
||||
// Expand the garbage-first heap by at least the given size (in bytes!).
|
||||
// Returns true if the heap was expanded by the requested amount;
|
||||
// false otherwise.
|
||||
|
@ -0,0 +1,178 @@
|
||||
/*
|
||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
|
||||
|
||||
G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h,
|
||||
VirtualSpace* g1_storage_addr) :
|
||||
_g1h(g1h),
|
||||
_incremental_collection_counters(NULL),
|
||||
_full_collection_counters(NULL),
|
||||
_non_young_collection_counters(NULL),
|
||||
_old_space_counters(NULL),
|
||||
_young_collection_counters(NULL),
|
||||
_eden_counters(NULL),
|
||||
_from_counters(NULL),
|
||||
_to_counters(NULL),
|
||||
_g1_storage_addr(g1_storage_addr)
|
||||
{
|
||||
// Counters for GC collections
|
||||
//
|
||||
// name "collector.0". In a generational collector this would be the
|
||||
// young generation collection.
|
||||
_incremental_collection_counters =
|
||||
new CollectorCounters("G1 incremental collections", 0);
|
||||
// name "collector.1". In a generational collector this would be the
|
||||
// old generation collection.
|
||||
_full_collection_counters =
|
||||
new CollectorCounters("G1 stop-the-world full collections", 1);
|
||||
|
||||
// timer sampling for all counters supporting sampling only update the
|
||||
// used value. See the take_sample() method. G1 requires both used and
|
||||
// capacity updated so sampling is not currently used. It might
|
||||
// be sufficient to update all counters in take_sample() even though
|
||||
// take_sample() only returns "used". When sampling was used, there
|
||||
// were some anomalous values emitted which may have been the consequence
// of not updating all values simultaneously (i.e., see the calculation done
// in eden_space_used(), is it possible that the values used to
|
||||
// calculate either eden_used or survivor_used are being updated by
|
||||
// the collector when the sample is being done?).
|
||||
const bool sampled = false;
|
||||
|
||||
// "Generation" and "Space" counters.
|
||||
//
|
||||
// name "generation.1" This is logically the old generation in
|
||||
// generational GC terms. The "1, 1" parameters are for
|
||||
// the n-th generation (=1) with 1 space.
|
||||
// Counters are created from minCapacity, maxCapacity, and capacity
|
||||
_non_young_collection_counters =
|
||||
new GenerationCounters("whole heap", 1, 1, _g1_storage_addr);
|
||||
|
||||
// name "generation.1.space.0"
|
||||
// Counters are created from maxCapacity, capacity, initCapacity,
|
||||
// and used.
|
||||
_old_space_counters = new HSpaceCounters("space", 0,
|
||||
_g1h->max_capacity(), _g1h->capacity(), _non_young_collection_counters);
|
||||
|
||||
// Young collection set
|
||||
// name "generation.0". This is logically the young generation.
|
||||
// The "0, 3" are paremeters for the n-th genertaion (=0) with 3 spaces.
|
||||
// See _non_young_collection_counters for additional counters
|
||||
_young_collection_counters = new GenerationCounters("young", 0, 3, NULL);
|
||||
|
||||
// Replace "max_heap_byte_size() with maximum young gen size for
|
||||
// g1Collectedheap
|
||||
// name "generation.0.space.0"
|
||||
// See _old_space_counters for additional counters
|
||||
_eden_counters = new HSpaceCounters("eden", 0,
|
||||
_g1h->max_capacity(), eden_space_committed(),
|
||||
_young_collection_counters);
|
||||
|
||||
// name "generation.0.space.1"
|
||||
// See _old_space_counters for additional counters
|
||||
// Set the arguments to indicate that this survivor space is not used.
|
||||
_from_counters = new HSpaceCounters("s0", 1, (long) 0, (long) 0,
|
||||
_young_collection_counters);
|
||||
|
||||
// name "generation.0.space.2"
|
||||
// See _old_space_counters for additional counters
|
||||
_to_counters = new HSpaceCounters("s1", 2,
|
||||
_g1h->max_capacity(),
|
||||
survivor_space_committed(),
|
||||
_young_collection_counters);
|
||||
}
|
||||
|
||||
size_t G1MonitoringSupport::overall_committed() {
|
||||
return g1h()->capacity();
|
||||
}
|
||||
|
||||
size_t G1MonitoringSupport::overall_used() {
|
||||
return g1h()->used_unlocked();
|
||||
}
|
||||
|
||||
size_t G1MonitoringSupport::eden_space_committed() {
|
||||
return MAX2(eden_space_used(), (size_t) HeapRegion::GrainBytes);
|
||||
}
|
||||
|
||||
size_t G1MonitoringSupport::eden_space_used() {
|
||||
size_t young_list_length = g1h()->young_list()->length();
|
||||
size_t eden_used = young_list_length * HeapRegion::GrainBytes;
|
||||
size_t survivor_used = survivor_space_used();
|
||||
eden_used = subtract_up_to_zero(eden_used, survivor_used);
|
||||
return eden_used;
|
||||
}
|
||||
|
||||
size_t G1MonitoringSupport::survivor_space_committed() {
|
||||
return MAX2(survivor_space_used(),
|
||||
(size_t) HeapRegion::GrainBytes);
|
||||
}
|
||||
|
||||
size_t G1MonitoringSupport::survivor_space_used() {
|
||||
size_t survivor_num = g1h()->g1_policy()->recorded_survivor_regions();
|
||||
size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
|
||||
return survivor_used;
|
||||
}
|
||||
|
||||
size_t G1MonitoringSupport::old_space_committed() {
|
||||
size_t committed = overall_committed();
|
||||
size_t eden_committed = eden_space_committed();
|
||||
size_t survivor_committed = survivor_space_committed();
|
||||
committed = subtract_up_to_zero(committed, eden_committed);
|
||||
committed = subtract_up_to_zero(committed, survivor_committed);
|
||||
committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
|
||||
return committed;
|
||||
}
|
||||
|
||||
// See the comment near the top of g1MonitoringSupport.hpp for
|
||||
// an explanation of these calculations for "used" and "capacity".
|
||||
size_t G1MonitoringSupport::old_space_used() {
|
||||
size_t used = overall_used();
|
||||
size_t eden_used = eden_space_used();
|
||||
size_t survivor_used = survivor_space_used();
|
||||
used = subtract_up_to_zero(used, eden_used);
|
||||
used = subtract_up_to_zero(used, survivor_used);
|
||||
return used;
|
||||
}
|
||||
|
||||
void G1MonitoringSupport::update_counters() {
|
||||
if (UsePerfData) {
|
||||
eden_counters()->update_capacity(eden_space_committed());
|
||||
eden_counters()->update_used(eden_space_used());
|
||||
to_counters()->update_capacity(survivor_space_committed());
|
||||
to_counters()->update_used(survivor_space_used());
|
||||
old_space_counters()->update_capacity(old_space_committed());
|
||||
old_space_counters()->update_used(old_space_used());
|
||||
non_young_collection_counters()->update_all();
|
||||
}
|
||||
}
|
||||
|
||||
void G1MonitoringSupport::update_eden_counters() {
|
||||
if (UsePerfData) {
|
||||
eden_counters()->update_capacity(eden_space_committed());
|
||||
eden_counters()->update_used(eden_space_used());
|
||||
}
|
||||
}
|
@ -0,0 +1,203 @@
|
||||
/*
|
||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
|
||||
|
||||
#include "gc_implementation/shared/hSpaceCounters.hpp"
|
||||
|
||||
class G1CollectedHeap;
|
||||
class G1SpaceMonitoringSupport;
|
||||
|
||||
// Class for monitoring logical spaces in G1.
|
||||
// G1 defines a set of regions as a young
|
||||
// collection (analogous to a young generation).
|
||||
// The young collection is a logical generation
|
||||
// with no fixed chunk (see space.hpp) reflecting
|
||||
// the address space for the generation. In addition
|
||||
// to the young collection there is its complement
|
||||
// the non-young collection that is simply the regions
|
||||
// not in the young collection. The non-young collection
|
||||
// is treated here as a logical old generation only
|
||||
// because the monitoring tools expect a generational
|
||||
// heap. The monitoring tools expect that a Space
|
||||
// (see space.hpp) exists that describes the
// address space of the young collection and the non-young
// collection, and such a view is provided here.
|
||||
//
|
||||
// This class provides interfaces to access
|
||||
// the value of variables for the young collection
|
||||
// that include the "capacity" and "used" of the
|
||||
// young collection along with constant values
|
||||
// for the minimum and maximum capacities for
|
||||
// the logical spaces. Similarly for the non-young
|
||||
// collection.
|
||||
//
|
||||
// Also provided are counters for G1 concurrent collections
// and stop-the-world full heap collections.
//
// Below is a description of how "used" and "capacity"
// (or committed) are calculated for the logical spaces.
|
||||
//
|
||||
// 1) The used space calculation for a pool is not necessarily
|
||||
// independent of the others. We can easily get from G1 the overall
|
||||
// used space in the entire heap, the number of regions in the young
|
||||
// generation (includes both eden and survivors), and the number of
|
||||
// survivor regions. So, from that we calculate:
|
||||
//
|
||||
// survivor_used = survivor_num * region_size
|
||||
// eden_used = young_region_num * region_size - survivor_used
|
||||
// old_gen_used = overall_used - eden_used - survivor_used
|
||||
//
|
||||
// Note that survivor_used and eden_used are upper bounds. To get the
|
||||
// actual value we would have to iterate over the regions and add up
|
||||
// ->used(). But that'd be expensive. So, we'll accept some lack of
|
||||
// accuracy for those two. But, we have to be careful when calculating
// old_gen_used, in case we subtract from overall_used more than the
// actual number and our result goes negative.
|
||||
//
|
||||
// 2) Calculating the used space is straightforward, as described
|
||||
// above. However, how do we calculate the committed space, given that
|
||||
// we allocate space for the eden, survivor, and old gen out of the
|
||||
// same pool of regions? One way to do this is to use the used value
|
||||
// as also the committed value for the eden and survivor spaces and
|
||||
// then calculate the old gen committed space as follows:
|
||||
//
|
||||
// old_gen_committed = overall_committed - eden_committed - survivor_committed
|
||||
//
|
||||
// Maybe a better way to do that would be to calculate used for eden
|
||||
// and survivor as a sum of ->used() over their regions and then
|
||||
// calculate committed as region_num * region_size (i.e., what we use
|
||||
// to calculate the used space now). This is something to consider
|
||||
// in the future.
|
||||
//
|
||||
// 3) Another decision that is again not straightforward is what is
|
||||
// the max size that each memory pool can grow to. One way to do this
|
||||
// would be to use the committed size for the max for the eden and
|
||||
// survivors and calculate the old gen max as follows (basically, it's
|
||||
// a similar pattern to what we use for the committed space, as
|
||||
// described above):
|
||||
//
|
||||
// old_gen_max = overall_max - eden_max - survivor_max
|
||||
//
|
||||
// Unfortunately, the above makes the max of each pool fluctuate over
|
||||
// time and, even though this is allowed according to the spec, it
|
||||
// broke several assumptions in the M&M framework (there were cases
|
||||
// where used would reach a value greater than max). So, for max we
|
||||
// use -1, which means "undefined" according to the spec.
|
||||
//
|
||||
// 4) Now, there is a very subtle issue with all the above. The
|
||||
// framework will call get_memory_usage() on the three pools
|
||||
// asynchronously. As a result, each call might get a different value
|
||||
// for, say, survivor_num which will yield inconsistent values for
|
||||
// eden_used, survivor_used, and old_gen_used (as survivor_num is used
|
||||
// in the calculation of all three). This would normally be
|
||||
// ok. However, it's possible that this might cause the sum of
|
||||
// eden_used, survivor_used, and old_gen_used to go over the max heap
|
||||
// size and this seems to sometimes cause JConsole (and maybe other
// clients) to get confused. There's not really an easy / clean
// solution to this problem, due to the asynchronous nature of the
// framework.
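
// Illustrative sketch (editor's example, not part of this changeset): the
// per-pool "used" arithmetic described in point 1 above, written as a
// stand-alone helper. The inputs are assumed to come from the racy sources
// discussed above, so every subtraction is clamped at zero in the same
// spirit as subtract_up_to_zero() in the class below.

#include <cstddef>  // size_t; the VM normally gets this via its own headers

struct PoolUsedSketch {
  size_t eden;
  size_t survivor;
  size_t old_gen;
};

static inline size_t clamped_sub_sketch(size_t x, size_t y) {
  return (x > y) ? x - y : 0;  // never wrap below zero
}

static inline PoolUsedSketch split_used_sketch(size_t overall_used,
                                               size_t young_region_num,
                                               size_t survivor_num,
                                               size_t region_size) {
  PoolUsedSketch r;
  r.survivor = survivor_num * region_size;                         // upper bound
  r.eden     = clamped_sub_sketch(young_region_num * region_size,
                                  r.survivor);                     // upper bound
  r.old_gen  = clamped_sub_sketch(
                   clamped_sub_sketch(overall_used, r.eden), r.survivor);
  return r;
}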
|
||||
|
||||
class G1MonitoringSupport : public CHeapObj {
|
||||
G1CollectedHeap* _g1h;
|
||||
VirtualSpace* _g1_storage_addr;
|
||||
|
||||
// jstat performance counters
|
||||
// incremental collections both fully and partially young
|
||||
CollectorCounters* _incremental_collection_counters;
|
||||
// full stop-the-world collections
|
||||
CollectorCounters* _full_collection_counters;
|
||||
// young collection set counters. The _eden_counters,
|
||||
// _from_counters, and _to_counters are associated with
|
||||
// this "generational" counter.
|
||||
GenerationCounters* _young_collection_counters;
|
||||
// non-young collection set counters. The _old_space_counters
|
||||
// below are associated with this "generational" counter.
|
||||
GenerationCounters* _non_young_collection_counters;
|
||||
// Counters for the capacity and used for
|
||||
// the whole heap
|
||||
HSpaceCounters* _old_space_counters;
|
||||
// the young collection
|
||||
HSpaceCounters* _eden_counters;
|
||||
// the survivor collection (only one, _to_counters, is actively used)
|
||||
HSpaceCounters* _from_counters;
|
||||
HSpaceCounters* _to_counters;
|
||||
|
||||
// It returns x - y if x > y, 0 otherwise.
|
||||
// As described in the comment above, some of the inputs to the
|
||||
// calculations we have to do are obtained concurrently and hence
|
||||
// may be inconsistent with each other. So, this provides a
|
||||
// defensive way of performing the subtraction and avoids the value
|
||||
// going negative (which would mean a very large result, given that
// the parameters are size_t).
|
||||
static size_t subtract_up_to_zero(size_t x, size_t y) {
|
||||
if (x > y) {
|
||||
return x - y;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
G1MonitoringSupport(G1CollectedHeap* g1h, VirtualSpace* g1_storage_addr);
|
||||
|
||||
G1CollectedHeap* g1h() { return _g1h; }
|
||||
VirtualSpace* g1_storage_addr() { return _g1_storage_addr; }
|
||||
|
||||
// Performance Counter accessors
|
||||
void update_counters();
|
||||
void update_eden_counters();
|
||||
|
||||
CollectorCounters* incremental_collection_counters() {
|
||||
return _incremental_collection_counters;
|
||||
}
|
||||
CollectorCounters* full_collection_counters() {
|
||||
return _full_collection_counters;
|
||||
}
|
||||
GenerationCounters* non_young_collection_counters() {
|
||||
return _non_young_collection_counters;
|
||||
}
|
||||
HSpaceCounters* old_space_counters() { return _old_space_counters; }
|
||||
HSpaceCounters* eden_counters() { return _eden_counters; }
|
||||
HSpaceCounters* from_counters() { return _from_counters; }
|
||||
HSpaceCounters* to_counters() { return _to_counters; }
|
||||
|
||||
// Monitoring support used by
|
||||
// MemoryService
|
||||
// jstat counters
|
||||
size_t overall_committed();
|
||||
size_t overall_used();
|
||||
|
||||
size_t eden_space_committed();
|
||||
size_t eden_space_used();
|
||||
|
||||
size_t survivor_space_committed();
|
||||
size_t survivor_space_used();
|
||||
|
||||
size_t old_space_committed();
|
||||
size_t old_space_used();
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
|
@ -300,6 +300,11 @@
|
||||
develop(uintx, G1StressConcRegionFreeingDelayMillis, 0, \
|
||||
"Artificial delay during concurrent region freeing") \
|
||||
\
|
||||
develop(uintx, G1DummyRegionsPerGC, 0, \
|
||||
"The number of dummy regions G1 will allocate at the end of " \
|
||||
"each evacuation pause in order to artificially fill up the " \
|
||||
"heap and stress the marking implementation.") \
|
||||
\
|
||||
develop(bool, ReduceInitialCardMarksForG1, false, \
|
||||
"When ReduceInitialCardMarks is true, this flag setting " \
|
||||
" controls whether G1 allows the RICM optimization") \
|
||||
|
@ -33,44 +33,43 @@
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/virtualspace.hpp"
|
||||
|
||||
void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
MemRegionClosure* cl,
|
||||
int n_threads) {
|
||||
if (n_threads > 0) {
|
||||
assert((n_threads == 1 && ParallelGCThreads == 0) ||
|
||||
n_threads <= (int)ParallelGCThreads,
|
||||
"# worker threads != # requested!");
|
||||
// Make sure the LNC array is valid for the space.
|
||||
jbyte** lowest_non_clean;
|
||||
uintptr_t lowest_non_clean_base_chunk_index;
|
||||
size_t lowest_non_clean_chunk_size;
|
||||
get_LNC_array_for_space(sp, lowest_non_clean,
|
||||
lowest_non_clean_base_chunk_index,
|
||||
lowest_non_clean_chunk_size);
|
||||
void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
ClearNoncleanCardWrapper* cl,
|
||||
int n_threads) {
|
||||
assert(n_threads > 0, "Error: expected n_threads > 0");
|
||||
assert((n_threads == 1 && ParallelGCThreads == 0) ||
|
||||
n_threads <= (int)ParallelGCThreads,
|
||||
"# worker threads != # requested!");
|
||||
// Make sure the LNC array is valid for the space.
|
||||
jbyte** lowest_non_clean;
|
||||
uintptr_t lowest_non_clean_base_chunk_index;
|
||||
size_t lowest_non_clean_chunk_size;
|
||||
get_LNC_array_for_space(sp, lowest_non_clean,
|
||||
lowest_non_clean_base_chunk_index,
|
||||
lowest_non_clean_chunk_size);
|
||||
|
||||
int n_strides = n_threads * StridesPerThread;
|
||||
SequentialSubTasksDone* pst = sp->par_seq_tasks();
|
||||
pst->set_n_threads(n_threads);
|
||||
pst->set_n_tasks(n_strides);
|
||||
int n_strides = n_threads * StridesPerThread;
|
||||
SequentialSubTasksDone* pst = sp->par_seq_tasks();
|
||||
pst->set_n_threads(n_threads);
|
||||
pst->set_n_tasks(n_strides);
|
||||
|
||||
int stride = 0;
|
||||
while (!pst->is_task_claimed(/* reference */ stride)) {
|
||||
process_stride(sp, mr, stride, n_strides, dcto_cl, cl,
|
||||
lowest_non_clean,
|
||||
lowest_non_clean_base_chunk_index,
|
||||
lowest_non_clean_chunk_size);
|
||||
}
|
||||
if (pst->all_tasks_completed()) {
|
||||
// Clear lowest_non_clean array for next time.
|
||||
intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
|
||||
uintptr_t last_chunk_index = addr_to_chunk_index(mr.last());
|
||||
for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
|
||||
intptr_t ind = ch - lowest_non_clean_base_chunk_index;
|
||||
assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
|
||||
"Bounds error");
|
||||
lowest_non_clean[ind] = NULL;
|
||||
}
|
||||
int stride = 0;
|
||||
while (!pst->is_task_claimed(/* reference */ stride)) {
|
||||
process_stride(sp, mr, stride, n_strides, dcto_cl, cl,
|
||||
lowest_non_clean,
|
||||
lowest_non_clean_base_chunk_index,
|
||||
lowest_non_clean_chunk_size);
|
||||
}
|
||||
if (pst->all_tasks_completed()) {
|
||||
// Clear lowest_non_clean array for next time.
|
||||
intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
|
||||
uintptr_t last_chunk_index = addr_to_chunk_index(mr.last());
|
||||
for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
|
||||
intptr_t ind = ch - lowest_non_clean_base_chunk_index;
|
||||
assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
|
||||
"Bounds error");
|
||||
lowest_non_clean[ind] = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -81,7 +80,7 @@ process_stride(Space* sp,
|
||||
MemRegion used,
|
||||
jint stride, int n_strides,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
MemRegionClosure* cl,
|
||||
ClearNoncleanCardWrapper* cl,
|
||||
jbyte** lowest_non_clean,
|
||||
uintptr_t lowest_non_clean_base_chunk_index,
|
||||
size_t lowest_non_clean_chunk_size) {
|
||||
@ -127,7 +126,11 @@ process_stride(Space* sp,
|
||||
lowest_non_clean_base_chunk_index,
|
||||
lowest_non_clean_chunk_size);
|
||||
|
||||
non_clean_card_iterate_work(chunk_mr, cl);
|
||||
// We do not call the non_clean_card_iterate_serial() version because
|
||||
// we want to clear the cards, and the ClearNoncleanCardWrapper closure
|
||||
// itself does the work of finding contiguous dirty ranges of cards to
|
||||
// process (and clear).
|
||||
cl->do_MemRegion(chunk_mr);
|
||||
|
||||
// Find the next chunk of the stride.
|
||||
chunk_card_start += CardsPerStrideChunk * n_strides;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -51,15 +51,18 @@ GenerationCounters::GenerationCounters(const char* name,
|
||||
|
||||
cname = PerfDataManager::counter_name(_name_space, "minCapacity");
|
||||
PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
|
||||
_virtual_space == NULL ? 0 :
|
||||
_virtual_space->committed_size(), CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
|
||||
PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
|
||||
_virtual_space == NULL ? 0 :
|
||||
_virtual_space->reserved_size(), CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(_name_space, "capacity");
|
||||
_current_size = PerfDataManager::create_variable(SUN_GC, cname,
|
||||
PerfData::U_Bytes,
|
||||
PerfData::U_Bytes,
|
||||
_virtual_space == NULL ? 0 :
|
||||
_virtual_space->committed_size(), CHECK);
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -61,10 +61,11 @@ class GenerationCounters: public CHeapObj {
|
||||
}
|
||||
|
||||
virtual void update_all() {
|
||||
_current_size->set_value(_virtual_space->committed_size());
|
||||
_current_size->set_value(_virtual_space == NULL ? 0 :
|
||||
_virtual_space->committed_size());
|
||||
}
|
||||
|
||||
const char* name_space() const { return _name_space; }
|
||||
};
|
||||
|
||||
};
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GENERATIONCOUNTERS_HPP
|
||||
|
@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/shared/hSpaceCounters.hpp"
|
||||
#include "memory/generation.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
|
||||
HSpaceCounters::HSpaceCounters(const char* name,
|
||||
int ordinal,
|
||||
size_t max_size,
|
||||
size_t initial_capacity,
|
||||
GenerationCounters* gc) {
|
||||
|
||||
if (UsePerfData) {
|
||||
EXCEPTION_MARK;
|
||||
ResourceMark rm;
|
||||
|
||||
const char* cns =
|
||||
PerfDataManager::name_space(gc->name_space(), "space", ordinal);
|
||||
|
||||
_name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1);
|
||||
strcpy(_name_space, cns);
|
||||
|
||||
const char* cname = PerfDataManager::counter_name(_name_space, "name");
|
||||
PerfDataManager::create_string_constant(SUN_GC, cname, name, CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
|
||||
PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
|
||||
(jlong)max_size, CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(_name_space, "capacity");
|
||||
_capacity = PerfDataManager::create_variable(SUN_GC, cname,
|
||||
PerfData::U_Bytes,
|
||||
initial_capacity, CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(_name_space, "used");
|
||||
_used = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
|
||||
(jlong) 0, CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(_name_space, "initCapacity");
|
||||
PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
|
||||
initial_capacity, CHECK);
|
||||
}
|
||||
}
|
@ -0,0 +1,87 @@
|
||||
/*
|
||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
|
||||
|
||||
#ifndef SERIALGC
|
||||
#include "gc_implementation/shared/generationCounters.hpp"
|
||||
#include "memory/generation.hpp"
|
||||
#include "runtime/perfData.hpp"
|
||||
#endif
|
||||
|
||||
// An HSpaceCounters is a holder class for performance counters
// that track collections (logical spaces) in a heap.
|
||||
|
||||
class HeapSpaceUsedHelper;
|
||||
class G1SpaceMonitoringSupport;
|
||||
|
||||
class HSpaceCounters: public CHeapObj {
|
||||
friend class VMStructs;
|
||||
|
||||
private:
|
||||
PerfVariable* _capacity;
|
||||
PerfVariable* _used;
|
||||
|
||||
// Constant PerfData types don't need to retain a reference.
|
||||
// However, it's a good idea to document them here.
|
||||
|
||||
char* _name_space;
|
||||
|
||||
public:
|
||||
|
||||
HSpaceCounters(const char* name, int ordinal, size_t max_size,
|
||||
size_t initial_capacity, GenerationCounters* gc);
|
||||
|
||||
~HSpaceCounters() {
|
||||
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
|
||||
}
|
||||
|
||||
inline void update_capacity(size_t v) {
|
||||
_capacity->set_value(v);
|
||||
}
|
||||
|
||||
inline void update_used(size_t v) {
|
||||
_used->set_value(v);
|
||||
}
|
||||
|
||||
debug_only(
|
||||
// for security reasons, we do not allow arbitrary reads from
|
||||
// the counters as they may live in shared memory.
|
||||
jlong used() {
|
||||
return _used->get_value();
|
||||
}
|
||||
jlong capacity() {
return _capacity->get_value();
|
||||
}
|
||||
)
|
||||
|
||||
inline void update_all(size_t capacity, size_t used) {
|
||||
update_capacity(capacity);
|
||||
update_used(used);
|
||||
}
|
||||
|
||||
const char* name_space() const { return _name_space; }
|
||||
};
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
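
// Illustrative usage sketch (editor's example, not part of this changeset):
// a minimal stand-in for the PerfVariable pair that HSpaceCounters wraps,
// showing the update_capacity / update_used / update_all protocol a client
// such as G1MonitoringSupport follows. All names here are hypothetical.

#include <cstddef>
#include <cstdio>

struct SpaceCountersSketch {
  size_t capacity;
  size_t used;
  void update_capacity(size_t v) { capacity = v; }
  void update_used(size_t v)     { used = v; }
  void update_all(size_t c, size_t u) { update_capacity(c); update_used(u); }
};

int main() {
  SpaceCountersSketch eden = { 0, 0 };
  // committed first, then used, mirroring update_counters() in the .cpp above
  eden.update_all(8u * 1024 * 1024, 3u * 1024 * 1024);
  std::printf("eden capacity=%zu used=%zu\n", eden.capacity, eden.used);
  return 0;
}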
|
@ -456,31 +456,35 @@ bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
|
||||
}
|
||||
|
||||
|
||||
void CardTableModRefBS::non_clean_card_iterate(Space* sp,
|
||||
MemRegion mr,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
MemRegionClosure* cl) {
|
||||
void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
|
||||
MemRegion mr,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
ClearNoncleanCardWrapper* cl) {
|
||||
if (!mr.is_empty()) {
|
||||
int n_threads = SharedHeap::heap()->n_par_threads();
|
||||
if (n_threads > 0) {
|
||||
#ifndef SERIALGC
|
||||
par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, n_threads);
|
||||
non_clean_card_iterate_parallel_work(sp, mr, dcto_cl, cl, n_threads);
|
||||
#else // SERIALGC
|
||||
fatal("Parallel gc not supported here.");
|
||||
#endif // SERIALGC
|
||||
} else {
|
||||
non_clean_card_iterate_work(mr, cl);
|
||||
// We do not call the non_clean_card_iterate_serial() version below because
|
||||
// we want to clear the cards (which non_clean_card_iterate_serial() does not
|
||||
// do for us), and the ClearNoncleanCardWrapper closure itself does the work
|
||||
// of finding contiguous dirty ranges of cards to process (and clear).
|
||||
cl->do_MemRegion(mr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: For this to work correctly, it is important that
|
||||
// we look for non-clean cards below (so as to catch those
|
||||
// marked precleaned), rather than look explicitly for dirty
|
||||
// cards (and miss those marked precleaned). In that sense,
|
||||
// the name precleaned is currently somewhat of a misnomer.
|
||||
void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
|
||||
MemRegionClosure* cl) {
|
||||
// The iterator itself is not MT-aware, but
|
||||
// MT-aware callers and closures can use this to
|
||||
// accomplish dirty card iteration in parallel. The
|
||||
// iterator itself does not clear the dirty cards, or
|
||||
// change their values in any manner.
|
||||
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
|
||||
MemRegionClosure* cl) {
|
||||
for (int i = 0; i < _cur_covered_regions; i++) {
|
||||
MemRegion mri = mr.intersection(_covered[i]);
|
||||
if (mri.word_size() > 0) {
|
||||
@ -661,7 +665,7 @@ public:
|
||||
|
||||
void CardTableModRefBS::verify_clean_region(MemRegion mr) {
|
||||
GuaranteeNotModClosure blk(this);
|
||||
non_clean_card_iterate_work(mr, &blk);
|
||||
non_clean_card_iterate_serial(mr, &blk);
|
||||
}
|
||||
|
||||
// To verify a MemRegion is entirely dirty this closure is passed to
|
||||
|
@ -44,6 +44,7 @@
|
||||
class Generation;
|
||||
class OopsInGenClosure;
|
||||
class DirtyCardToOopClosure;
|
||||
class ClearNoncleanCardWrapper;
|
||||
|
||||
class CardTableModRefBS: public ModRefBarrierSet {
|
||||
// Some classes get to look at some private stuff.
|
||||
@ -165,22 +166,28 @@ class CardTableModRefBS: public ModRefBarrierSet {
|
||||
|
||||
// Iterate over the portion of the card-table which covers the given
|
||||
// region mr in the given space and apply cl to any dirty sub-regions
|
||||
// of mr. cl and dcto_cl must either be the same closure or cl must
|
||||
// wrap dcto_cl. Both are required - neither may be NULL. Also, dcto_cl
|
||||
// may be modified. Note that this function will operate in a parallel
|
||||
// mode if worker threads are available.
|
||||
void non_clean_card_iterate(Space* sp, MemRegion mr,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
MemRegionClosure* cl);
|
||||
// of mr. Dirty cards are _not_ cleared by the iterator method itself,
|
||||
// but closures may arrange to do so on their own should they so wish.
|
||||
void non_clean_card_iterate_serial(MemRegion mr, MemRegionClosure* cl);
|
||||
|
||||
// Utility function used to implement the other versions below.
|
||||
void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl);
|
||||
// A variant of the above that will operate in a parallel mode if
|
||||
// worker threads are available, and clear the dirty cards as it
|
||||
// processes them.
|
||||
// ClearNoncleanCardWrapper cl must wrap the DirtyCardToOopClosure dcto_cl,
|
||||
// which may itself be modified by the method.
|
||||
void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
ClearNoncleanCardWrapper* cl);
|
||||
|
||||
void par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
MemRegionClosure* cl,
|
||||
int n_threads);
|
||||
private:
|
||||
// Work method used to implement non_clean_card_iterate_possibly_parallel()
|
||||
// above in the parallel case.
|
||||
void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
ClearNoncleanCardWrapper* cl,
|
||||
int n_threads);
|
||||
|
||||
protected:
|
||||
// Dirty the bytes corresponding to "mr" (not all of which must be
|
||||
// covered.)
|
||||
void dirty_MemRegion(MemRegion mr);
|
||||
@ -237,7 +244,7 @@ class CardTableModRefBS: public ModRefBarrierSet {
|
||||
MemRegion used,
|
||||
jint stride, int n_strides,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
MemRegionClosure* cl,
|
||||
ClearNoncleanCardWrapper* cl,
|
||||
jbyte** lowest_non_clean,
|
||||
uintptr_t lowest_non_clean_base_chunk_index,
|
||||
size_t lowest_non_clean_chunk_size);
|
||||
@ -409,14 +416,14 @@ public:
|
||||
// marking, where a dirty card may cause scanning, and summarization
|
||||
// marking, of objects that extend onto subsequent cards.)
|
||||
void mod_card_iterate(MemRegionClosure* cl) {
|
||||
non_clean_card_iterate_work(_whole_heap, cl);
|
||||
non_clean_card_iterate_serial(_whole_heap, cl);
|
||||
}
|
||||
|
||||
// Like the "mod_cards_iterate" above, except only invokes the closure
|
||||
// for cards within the MemRegion "mr" (which is required to be
|
||||
// card-aligned and sized.)
|
||||
void mod_card_iterate(MemRegion mr, MemRegionClosure* cl) {
|
||||
non_clean_card_iterate_work(mr, cl);
|
||||
non_clean_card_iterate_serial(mr, cl);
|
||||
}
|
||||
|
||||
static uintx ct_max_alignment_constraint();
|
||||
@ -493,4 +500,5 @@ public:
|
||||
void set_CTRS(CardTableRS* rs) { _rs = rs; }
|
||||
};
|
||||
|
||||
|
||||
#endif // SHARE_VM_MEMORY_CARDTABLEMODREFBS_HPP
|
||||
|
@ -105,107 +105,111 @@ void CardTableRS::younger_refs_iterate(Generation* g,
|
||||
g->younger_refs_iterate(blk);
|
||||
}
|
||||
|
||||
class ClearNoncleanCardWrapper: public MemRegionClosure {
|
||||
MemRegionClosure* _dirty_card_closure;
|
||||
CardTableRS* _ct;
|
||||
bool _is_par;
|
||||
private:
|
||||
// Clears the given card, return true if the corresponding card should be
|
||||
// processed.
|
||||
bool clear_card(jbyte* entry) {
|
||||
if (_is_par) {
|
||||
while (true) {
|
||||
// In the parallel case, we may have to do this several times.
|
||||
jbyte entry_val = *entry;
|
||||
assert(entry_val != CardTableRS::clean_card_val(),
|
||||
"We shouldn't be looking at clean cards, and this should "
|
||||
"be the only place they get cleaned.");
|
||||
if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
|
||||
|| _ct->is_prev_youngergen_card_val(entry_val)) {
|
||||
jbyte res =
|
||||
Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
|
||||
if (res == entry_val) {
|
||||
break;
|
||||
} else {
|
||||
assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
|
||||
"The CAS above should only fail if another thread did "
|
||||
"a GC write barrier.");
|
||||
}
|
||||
} else if (entry_val ==
|
||||
CardTableRS::cur_youngergen_and_prev_nonclean_card) {
|
||||
// Parallelism shouldn't matter in this case. Only the thread
|
||||
// assigned to scan the card should change this value.
|
||||
*entry = _ct->cur_youngergen_card_val();
|
||||
break;
|
||||
} else {
|
||||
assert(entry_val == _ct->cur_youngergen_card_val(),
|
||||
"Should be the only possibility.");
|
||||
// In this case, the card was clean before, and become
|
||||
// cur_youngergen only because of processing of a promoted object.
|
||||
// We don't have to look at the card.
|
||||
return false;
|
||||
}
|
||||
inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
|
||||
if (_is_par) {
|
||||
return clear_card_parallel(entry);
|
||||
} else {
|
||||
return clear_card_serial(entry);
|
||||
}
|
||||
}
|
||||
|
||||
inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
|
||||
while (true) {
|
||||
// In the parallel case, we may have to do this several times.
|
||||
jbyte entry_val = *entry;
|
||||
assert(entry_val != CardTableRS::clean_card_val(),
|
||||
"We shouldn't be looking at clean cards, and this should "
|
||||
"be the only place they get cleaned.");
|
||||
if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
|
||||
|| _ct->is_prev_youngergen_card_val(entry_val)) {
|
||||
jbyte res =
|
||||
Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
|
||||
if (res == entry_val) {
|
||||
break;
|
||||
} else {
|
||||
assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
|
||||
"The CAS above should only fail if another thread did "
|
||||
"a GC write barrier.");
|
||||
}
|
||||
return true;
|
||||
} else if (entry_val ==
|
||||
CardTableRS::cur_youngergen_and_prev_nonclean_card) {
|
||||
// Parallelism shouldn't matter in this case. Only the thread
|
||||
// assigned to scan the card should change this value.
|
||||
*entry = _ct->cur_youngergen_card_val();
|
||||
break;
|
||||
} else {
|
||||
jbyte entry_val = *entry;
|
||||
assert(entry_val != CardTableRS::clean_card_val(),
|
||||
"We shouldn't be looking at clean cards, and this should "
|
||||
"be the only place they get cleaned.");
|
||||
assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
|
||||
"This should be possible in the sequential case.");
|
||||
*entry = CardTableRS::clean_card_val();
|
||||
return true;
|
||||
assert(entry_val == _ct->cur_youngergen_card_val(),
|
||||
"Should be the only possibility.");
|
||||
// In this case, the card was clean before, and become
|
||||
// cur_youngergen only because of processing of a promoted object.
|
||||
// We don't have to look at the card.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
public:
|
||||
ClearNoncleanCardWrapper(MemRegionClosure* dirty_card_closure,
|
||||
CardTableRS* ct) :
|
||||
|
||||
inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
|
||||
jbyte entry_val = *entry;
|
||||
assert(entry_val != CardTableRS::clean_card_val(),
|
||||
"We shouldn't be looking at clean cards, and this should "
|
||||
"be the only place they get cleaned.");
|
||||
assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
|
||||
"This should be possible in the sequential case.");
|
||||
*entry = CardTableRS::clean_card_val();
|
||||
return true;
|
||||
}
|
||||
|
||||
ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
|
||||
MemRegionClosure* dirty_card_closure, CardTableRS* ct) :
|
||||
_dirty_card_closure(dirty_card_closure), _ct(ct) {
|
||||
_is_par = (SharedHeap::heap()->n_par_threads() > 0);
|
||||
}
|
||||
void do_MemRegion(MemRegion mr) {
|
||||
// We start at the high end of "mr", walking backwards
|
||||
// while accumulating a contiguous dirty range of cards in
|
||||
// [start_of_non_clean, end_of_non_clean) which we then
|
||||
// process en masse.
|
||||
HeapWord* end_of_non_clean = mr.end();
|
||||
HeapWord* start_of_non_clean = end_of_non_clean;
|
||||
jbyte* entry = _ct->byte_for(mr.last());
|
||||
const jbyte* first_entry = _ct->byte_for(mr.start());
|
||||
while (entry >= first_entry) {
|
||||
HeapWord* cur = _ct->addr_for(entry);
|
||||
if (!clear_card(entry)) {
|
||||
// We hit a clean card; process any non-empty
|
||||
// dirty range accumulated so far.
|
||||
if (start_of_non_clean < end_of_non_clean) {
|
||||
MemRegion mr2(start_of_non_clean, end_of_non_clean);
|
||||
_dirty_card_closure->do_MemRegion(mr2);
|
||||
}
|
||||
// Reset the dirty window while continuing to
|
||||
// look for the next dirty window to process.
|
||||
end_of_non_clean = cur;
|
||||
start_of_non_clean = end_of_non_clean;
|
||||
}
|
||||
|
||||
void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
|
||||
assert(mr.word_size() > 0, "Error");
|
||||
assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
|
||||
// mr.end() may not necessarily be card aligned.
|
||||
jbyte* cur_entry = _ct->byte_for(mr.last());
|
||||
const jbyte* limit = _ct->byte_for(mr.start());
|
||||
HeapWord* end_of_non_clean = mr.end();
|
||||
HeapWord* start_of_non_clean = end_of_non_clean;
|
||||
while (cur_entry >= limit) {
|
||||
HeapWord* cur_hw = _ct->addr_for(cur_entry);
|
||||
if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
|
||||
// Continue the dirty range by opening the
|
||||
// dirty window one card to the left.
|
||||
start_of_non_clean = cur_hw;
|
||||
} else {
|
||||
// We hit a "clean" card; process any non-empty
|
||||
// "dirty" range accumulated so far.
|
||||
if (start_of_non_clean < end_of_non_clean) {
|
||||
const MemRegion mrd(start_of_non_clean, end_of_non_clean);
|
||||
_dirty_card_closure->do_MemRegion(mrd);
|
||||
}
|
||||
// Open the left end of the window one card to the left.
|
||||
start_of_non_clean = cur;
|
||||
// Note that "entry" leads "start_of_non_clean" in
|
||||
// its leftward excursion after this point
|
||||
// in the loop and, when we hit the left end of "mr",
|
||||
// will point off of the left end of the card-table
|
||||
// for "mr".
|
||||
entry--;
|
||||
}
|
||||
// If the first card of "mr" was dirty, we will have
|
||||
// been left with a dirty window, co-initial with "mr",
|
||||
// which we now process.
|
||||
if (start_of_non_clean < end_of_non_clean) {
|
||||
MemRegion mr2(start_of_non_clean, end_of_non_clean);
|
||||
_dirty_card_closure->do_MemRegion(mr2);
|
||||
// Reset the dirty window, while continuing to look
|
||||
// for the next dirty card that will start a
|
||||
// new dirty window.
|
||||
end_of_non_clean = cur_hw;
|
||||
start_of_non_clean = cur_hw;
|
||||
}
|
||||
// Note that "cur_entry" leads "start_of_non_clean" in
|
||||
// its leftward excursion after this point
|
||||
// in the loop and, when we hit the left end of "mr",
|
||||
// will point off of the left end of the card-table
|
||||
// for "mr".
|
||||
cur_entry--;
|
||||
}
|
||||
};
|
||||
// If the first card of "mr" was dirty, we will have
|
||||
// been left with a dirty window, co-initial with "mr",
|
||||
// which we now process.
|
||||
if (start_of_non_clean < end_of_non_clean) {
|
||||
const MemRegion mrd(start_of_non_clean, end_of_non_clean);
|
||||
_dirty_card_closure->do_MemRegion(mrd);
|
||||
}
|
||||
}
|
||||
|
||||
// clean (by dirty->clean before) ==> cur_younger_gen
|
||||
// dirty ==> cur_youngergen_and_prev_nonclean_card
|
||||
// precleaned ==> cur_youngergen_and_prev_nonclean_card
|
||||
@ -246,8 +250,35 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp,
|
||||
cl->gen_boundary());
|
||||
ClearNoncleanCardWrapper clear_cl(dcto_cl, this);
|
||||
|
||||
_ct_bs->non_clean_card_iterate(sp, sp->used_region_at_save_marks(),
|
||||
dcto_cl, &clear_cl);
|
||||
const MemRegion urasm = sp->used_region_at_save_marks();
|
||||
#ifdef ASSERT
|
||||
// Convert the assertion check to a warning if we are running
|
||||
// CMS+ParNew until related bug is fixed.
|
||||
MemRegion ur = sp->used_region();
|
||||
assert(ur.contains(urasm) || (UseConcMarkSweepGC && UseParNewGC),
|
||||
err_msg("Did you forget to call save_marks()? "
|
||||
"[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
|
||||
"[" PTR_FORMAT ", " PTR_FORMAT ")",
|
||||
urasm.start(), urasm.end(), ur.start(), ur.end()));
|
||||
// In the case of CMS+ParNew, issue a warning
|
||||
if (!ur.contains(urasm)) {
|
||||
assert(UseConcMarkSweepGC && UseParNewGC, "Tautology: see assert above");
|
||||
warning("CMS+ParNew: Did you forget to call save_marks()? "
|
||||
"[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
|
||||
"[" PTR_FORMAT ", " PTR_FORMAT ")",
|
||||
urasm.start(), urasm.end(), ur.start(), ur.end());
|
||||
MemRegion ur2 = sp->used_region();
|
||||
MemRegion urasm2 = sp->used_region_at_save_marks();
|
||||
if (!ur.equals(ur2)) {
|
||||
warning("CMS+ParNew: Flickering used_region()!!");
|
||||
}
|
||||
if (!urasm.equals(urasm2)) {
|
||||
warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
|
||||
}
|
||||
}
|
||||
#endif
|
||||
_ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm,
|
||||
dcto_cl, &clear_cl);
|
||||
}
|
||||
|
||||
void CardTableRS::clear_into_younger(Generation* gen, bool clear_perm) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -166,4 +166,21 @@ public:
|
||||
|
||||
};
|
||||
|
||||
class ClearNoncleanCardWrapper: public MemRegionClosure {
|
||||
MemRegionClosure* _dirty_card_closure;
|
||||
CardTableRS* _ct;
|
||||
bool _is_par;
|
||||
private:
|
||||
// Clears the given card, return true if the corresponding card should be
|
||||
// processed.
|
||||
inline bool clear_card(jbyte* entry);
|
||||
// Work methods called by the clear_card()
|
||||
inline bool clear_card_serial(jbyte* entry);
|
||||
inline bool clear_card_parallel(jbyte* entry);
|
||||
|
||||
public:
|
||||
ClearNoncleanCardWrapper(MemRegionClosure* dirty_card_closure, CardTableRS* ct);
|
||||
void do_MemRegion(MemRegion mr);
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_MEMORY_CARDTABLERS_HPP
|
||||
|
@ -427,13 +427,13 @@ public:
|
||||
// explicitly mark reachable objects in younger generations, to avoid
|
||||
// excess storage retention.) If "collecting_perm_gen" is false, then
|
||||
// roots that may only contain references to permGen objects are not
|
||||
// scanned. The "so" argument determines which of the roots
|
||||
// scanned; instead, the older_gens closure is applied to all outgoing
|
||||
// references in the perm gen. The "so" argument determines which of the roots
|
||||
// the closure is applied to:
|
||||
// "SO_None" does none;
|
||||
// "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
|
||||
// "SO_SystemClasses" to all the "system" classes and loaders;
|
||||
// "SO_Symbols_and_Strings" applies the closure to all entries in
|
||||
// SymbolsTable and StringTable.
|
||||
// "SO_Strings" applies the closure to all entries in the StringTable.
|
||||
void gen_process_strong_roots(int level,
|
||||
bool younger_gens_as_roots,
|
||||
// The remaining arguments are in an order
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -175,7 +175,7 @@ class VerifyOopClosure: public OopClosure {
|
||||
protected:
|
||||
template <class T> inline void do_oop_work(T* p) {
|
||||
oop obj = oopDesc::load_decode_heap_oop(p);
|
||||
guarantee(obj->is_oop_or_null(), "invalid oop");
|
||||
guarantee(obj->is_oop_or_null(), err_msg("invalid oop: " INTPTR_FORMAT, obj));
|
||||
}
|
||||
public:
|
||||
virtual void do_oop(oop* p);
|
||||
|
@ -46,7 +46,6 @@ enum SH_process_strong_roots_tasks {
|
||||
SH_PS_Management_oops_do,
|
||||
SH_PS_SystemDictionary_oops_do,
|
||||
SH_PS_jvmti_oops_do,
|
||||
SH_PS_SymbolTable_oops_do,
|
||||
SH_PS_StringTable_oops_do,
|
||||
SH_PS_CodeCache_oops_do,
|
||||
// Leave this one last.
|
||||
@ -161,13 +160,9 @@ void SharedHeap::process_strong_roots(bool activate_scope,
|
||||
if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
|
||||
if (so & SO_AllClasses) {
|
||||
SystemDictionary::oops_do(roots);
|
||||
} else
|
||||
if (so & SO_SystemClasses) {
|
||||
SystemDictionary::always_strong_oops_do(roots);
|
||||
}
|
||||
}
|
||||
|
||||
if (!_process_strong_tasks->is_task_claimed(SH_PS_SymbolTable_oops_do)) {
|
||||
} else if (so & SO_SystemClasses) {
|
||||
SystemDictionary::always_strong_oops_do(roots);
|
||||
}
|
||||
}
|
||||
|
||||
if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
|
||||
|
@ -192,9 +192,8 @@ public:
|
||||
SO_None = 0x0,
|
||||
SO_AllClasses = 0x1,
|
||||
SO_SystemClasses = 0x2,
|
||||
SO_Symbols = 0x4,
|
||||
SO_Strings = 0x8,
|
||||
SO_CodeCache = 0x10
|
||||
SO_Strings = 0x4,
|
||||
SO_CodeCache = 0x8
|
||||
};
|
||||
|
||||
FlexibleWorkGang* workers() const { return _workers; }
|
||||
@ -208,14 +207,13 @@ public:
|
||||
|
||||
// Invoke the "do_oop" method the closure "roots" on all root locations.
|
||||
// If "collecting_perm_gen" is false, then roots that may only contain
|
||||
// references to permGen objects are not scanned. If true, the
|
||||
// "perm_gen" closure is applied to all older-to-younger refs in the
|
||||
// references to permGen objects are not scanned; instead, in that case,
|
||||
// the "perm_blk" closure is applied to all outgoing refs in the
|
||||
// permanent generation. The "so" argument determines which of roots
|
||||
// the closure is applied to:
|
||||
// "SO_None" does none;
|
||||
// "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
|
||||
// "SO_SystemClasses" to all the "system" classes and loaders;
|
||||
// "SO_Symbols" applies the closure to all entries in SymbolsTable;
|
||||
// "SO_Strings" applies the closure to all entries in StringTable;
|
||||
// "SO_CodeCache" applies the closure to all elements of the CodeCache.
|
||||
void process_strong_roots(bool activate_scope,
|
||||
|
@ -104,7 +104,7 @@ void ConstantPoolCacheEntry::set_f1_if_null_atomic(oop f1) {
|
||||
void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL);
|
||||
bool success = (result == NULL);
|
||||
if (success) {
|
||||
update_barrier_set(f1_addr, f1);
|
||||
update_barrier_set((void*) f1_addr, f1);
|
||||
}
|
||||
}
|
||||
|
||||
@ -275,21 +275,23 @@ int ConstantPoolCacheEntry::bootstrap_method_index_in_cache() {
|
||||
return (int) bsm_cache_index;
|
||||
}
|
||||
|
||||
void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site,
|
||||
methodHandle signature_invoker) {
|
||||
void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, methodHandle signature_invoker) {
|
||||
assert(is_secondary_entry(), "");
|
||||
// NOTE: it's important that all other values are set before f1 is
|
||||
// set since some users short circuit on f1 being set
|
||||
// (i.e. non-null) and that may result in uninitialized values for
|
||||
// other racing threads (e.g. flags).
|
||||
int param_size = signature_invoker->size_of_parameters();
|
||||
assert(param_size >= 1, "method argument size must include MH.this");
|
||||
param_size -= 1; // do not count MH.this; it is not stacked for invokedynamic
|
||||
if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
|
||||
// racing threads might be trying to install their own favorites
|
||||
set_f1(call_site());
|
||||
}
|
||||
param_size -= 1; // do not count MH.this; it is not stacked for invokedynamic
|
||||
bool is_final = true;
|
||||
assert(signature_invoker->is_final_method(), "is_final");
|
||||
set_flags(as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size);
|
||||
int flags = as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size;
|
||||
assert(_flags == 0 || _flags == flags, "flags should be the same");
|
||||
set_flags(flags);
|
||||
// do not do set_bytecode on a secondary CP cache entry
|
||||
//set_bytecode_1(Bytecodes::_invokedynamic);
|
||||
set_f1_if_null_atomic(call_site()); // This must be the last one to set (see NOTE above)!
|
||||
}
|
||||
|
||||
|
||||
|
@ -1437,7 +1437,10 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
|
||||
|
||||
// Update the memory inputs of MemNodes with the value we computed
|
||||
// in Phase 2 and move stores memory users to corresponding memory slices.
|
||||
#ifdef ASSERT
|
||||
|
||||
// Disable memory split verification code until the fix for 6984348.
|
||||
// Currently it produces false negative results since it does not cover all cases.
|
||||
#if 0 // ifdef ASSERT
|
||||
visited.Reset();
|
||||
Node_Stack old_mems(arena, _compile->unique() >> 2);
|
||||
#endif
|
||||
@ -1447,7 +1450,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
|
||||
Node *n = ptnode_adr(i)->_node;
|
||||
assert(n != NULL, "sanity");
|
||||
if (n->is_Mem()) {
|
||||
#ifdef ASSERT
|
||||
#if 0 // ifdef ASSERT
|
||||
Node* old_mem = n->in(MemNode::Memory);
|
||||
if (!visited.test_set(old_mem->_idx)) {
|
||||
old_mems.push(old_mem, old_mem->outcnt());
|
||||
@ -1469,13 +1472,13 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
|
||||
}
|
||||
}
|
||||
}
|
||||
#ifdef ASSERT
|
||||
#if 0 // ifdef ASSERT
|
||||
// Verify that memory was split correctly
|
||||
while (old_mems.is_nonempty()) {
|
||||
Node* old_mem = old_mems.node();
|
||||
uint old_cnt = old_mems.index();
|
||||
old_mems.pop();
|
||||
assert(old_cnt = old_mem->outcnt(), "old mem could be lost");
|
||||
assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
@ -1033,14 +1033,10 @@ bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
|
||||
iter.reset_to_bci(bci());
|
||||
iter.next();
|
||||
ciMethod* method = iter.get_method(ignore);
|
||||
inputs = method->arg_size_no_receiver();
|
||||
// Add a receiver argument, maybe:
|
||||
if (code != Bytecodes::_invokestatic &&
|
||||
code != Bytecodes::_invokedynamic)
|
||||
inputs += 1;
|
||||
// (Do not use ciMethod::arg_size(), because
|
||||
// it might be an unloaded method, which doesn't
|
||||
// know whether it is static or not.)
|
||||
inputs = method->invoke_arg_size(code);
|
||||
int size = method->return_type()->size();
|
||||
depth = size - inputs;
|
||||
}
|
||||
@ -2957,8 +2953,7 @@ static void hook_memory_on_init(GraphKit& kit, int alias_idx,
|
||||
|
||||
//---------------------------set_output_for_allocation-------------------------
|
||||
Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
|
||||
const TypeOopPtr* oop_type,
|
||||
bool raw_mem_only) {
|
||||
const TypeOopPtr* oop_type) {
|
||||
int rawidx = Compile::AliasIdxRaw;
|
||||
alloc->set_req( TypeFunc::FramePtr, frameptr() );
|
||||
add_safepoint_edges(alloc);
|
||||
@ -2982,7 +2977,7 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
|
||||
rawoop)->as_Initialize();
|
||||
assert(alloc->initialization() == init, "2-way macro link must work");
|
||||
assert(init ->allocation() == alloc, "2-way macro link must work");
|
||||
if (ReduceFieldZeroing && !raw_mem_only) {
|
||||
{
|
||||
// Extract memory strands which may participate in the new object's
|
||||
// initialization, and source them from the new InitializeNode.
|
||||
// This will allow us to observe initializations when they occur,
|
||||
@ -3043,11 +3038,9 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
|
||||
// the type to a constant.
|
||||
// The optional arguments are for specialized use by intrinsics:
|
||||
// - If 'extra_slow_test' if not null is an extra condition for the slow-path.
|
||||
// - If 'raw_mem_only', do not cast the result to an oop.
|
||||
// - If 'return_size_val', report the total object size to the caller.
|
||||
Node* GraphKit::new_instance(Node* klass_node,
|
||||
Node* extra_slow_test,
|
||||
bool raw_mem_only, // affect only raw memory
|
||||
Node* *return_size_val) {
|
||||
// Compute size in doublewords
|
||||
// The size is always an integral number of doublewords, represented
|
||||
@ -3118,7 +3111,7 @@ Node* GraphKit::new_instance(Node* klass_node,
|
||||
size, klass_node,
|
||||
initial_slow_test);
|
||||
|
||||
return set_output_for_allocation(alloc, oop_type, raw_mem_only);
|
||||
return set_output_for_allocation(alloc, oop_type);
|
||||
}
|
||||
|
||||
//-------------------------------new_array-------------------------------------
|
||||
@ -3128,7 +3121,6 @@ Node* GraphKit::new_instance(Node* klass_node,
|
||||
Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
|
||||
Node* length, // number of array elements
|
||||
int nargs, // number of arguments to push back for uncommon trap
|
||||
bool raw_mem_only, // affect only raw memory
|
||||
Node* *return_size_val) {
|
||||
jint layout_con = Klass::_lh_neutral_value;
|
||||
Node* layout_val = get_layout_helper(klass_node, layout_con);
|
||||
@ -3273,7 +3265,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
|
||||
ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
|
||||
}
|
||||
|
||||
Node* javaoop = set_output_for_allocation(alloc, ary_type, raw_mem_only);
|
||||
Node* javaoop = set_output_for_allocation(alloc, ary_type);
|
||||
|
||||
// Cast length on remaining path to be as narrow as possible
|
||||
if (map()->find_edge(length) >= 0) {
|
||||
@ -3462,9 +3454,22 @@ void GraphKit::write_barrier_post(Node* oop_store,
|
||||
|
||||
// Get the alias_index for raw card-mark memory
|
||||
int adr_type = Compile::AliasIdxRaw;
|
||||
// Smash zero into card
|
||||
Node* zero = __ ConI(0);
|
||||
Node* zero = __ ConI(0); // Dirty card value
|
||||
BasicType bt = T_BYTE;
|
||||
|
||||
if (UseCondCardMark) {
|
||||
// The classic GC reference write barrier is typically implemented
|
||||
// as a store into the global card mark table. Unfortunately
|
||||
// unconditional stores can result in false sharing and excessive
|
||||
// coherence traffic as well as false transactional aborts.
|
||||
// UseCondCardMark enables MP "polite" conditional card mark
|
||||
// stores. In theory we could relax the load from ctrl() to
|
||||
// no_ctrl, but that doesn't buy much latitude.
|
||||
Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
|
||||
__ if_then(card_val, BoolTest::ne, zero);
|
||||
}
|
||||
|
||||
// Smash zero into card
|
||||
if( !UseConcMarkSweepGC ) {
|
||||
__ store(__ ctrl(), card_adr, zero, bt, adr_type);
|
||||
} else {
|
||||
@ -3472,6 +3477,10 @@ void GraphKit::write_barrier_post(Node* oop_store,
|
||||
__ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
|
||||
}
|
||||
|
||||
if (UseCondCardMark) {
|
||||
__ end_if();
|
||||
}
|
||||
|
||||
// Final sync IdealKit and GraphKit.
|
||||
final_sync(ideal);
|
||||
}
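
// Illustrative sketch (editor's example, not part of this changeset): the
// UseCondCardMark idea above, written out for a flat card table. The table,
// its size and the card shift are assumed values; 0 is the dirty-card value,
// matching the zero stored by the IdealKit code above, and field_addr is
// assumed to fall within the covered range.

#include <cstdint>
#include <cstddef>

static const int kCardShiftSketch = 9;               // 512-byte cards (assumed)
static uint8_t   g_card_table_sketch[1u << 20];      // assumed flat table; non-zero means clean

static inline void post_write_barrier_sketch(uintptr_t field_addr,
                                             bool use_cond_card_mark) {
  uint8_t* card = &g_card_table_sketch[field_addr >> kCardShiftSketch];
  if (use_cond_card_mark && *card == 0) {
    // Card is already dirty: skip the store to avoid false sharing and
    // unnecessary coherence traffic (the "polite" conditional card mark).
    return;
  }
  *card = 0;  // dirty the card
}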
|
||||
|
@ -773,15 +773,13 @@ class GraphKit : public Phase {
|
||||
|
||||
// implementation of object creation
|
||||
Node* set_output_for_allocation(AllocateNode* alloc,
|
||||
const TypeOopPtr* oop_type,
|
||||
bool raw_mem_only);
|
||||
const TypeOopPtr* oop_type);
|
||||
Node* get_layout_helper(Node* klass_node, jint& constant_value);
|
||||
Node* new_instance(Node* klass_node,
|
||||
Node* slow_test = NULL,
|
||||
bool raw_mem_only = false,
|
||||
Node* *return_size_val = NULL);
|
||||
Node* new_array(Node* klass_node, Node* count_val, int nargs,
|
||||
bool raw_mem_only = false, Node* *return_size_val = NULL);
|
||||
Node* *return_size_val = NULL);
|
||||
|
||||
// Handy for making control flow
|
||||
IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
|
||||
|
@ -3527,8 +3527,7 @@ bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
|
||||
Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) );
|
||||
Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
|
||||
|
||||
const bool raw_mem_only = true;
|
||||
newcopy = new_array(klass_node, length, 0, raw_mem_only);
|
||||
newcopy = new_array(klass_node, length, 0);
|
||||
|
||||
// Generate a direct call to the right arraycopy function(s).
|
||||
// We know the copy is disjoint but we might not know if the
|
||||
@ -4325,8 +4324,6 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
|
||||
|
||||
const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
|
||||
int raw_adr_idx = Compile::AliasIdxRaw;
|
||||
const bool raw_mem_only = true;
|
||||
|
||||
|
||||
Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
|
||||
if (array_ctl != NULL) {
|
||||
@ -4335,8 +4332,7 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
|
||||
set_control(array_ctl);
|
||||
Node* obj_length = load_array_length(obj);
|
||||
Node* obj_size = NULL;
|
||||
Node* alloc_obj = new_array(obj_klass, obj_length, 0,
|
||||
raw_mem_only, &obj_size);
|
||||
Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);
|
||||
|
||||
if (!use_ReduceInitialCardMarks()) {
|
||||
// If it is an oop array, it requires very special treatment,
|
||||
@ -4408,7 +4404,7 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
|
||||
// It's an instance, and it passed the slow-path tests.
|
||||
PreserveJVMState pjvms(this);
|
||||
Node* obj_size = NULL;
|
||||
Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size);
|
||||
Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size);
|
||||
|
||||
copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());
|
||||
|
||||
|
@ -2262,6 +2262,9 @@ bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& p
|
||||
// stmt1
|
||||
// |
|
||||
// v
|
||||
// loop predicate
|
||||
// |
|
||||
// v
|
||||
// stmt2 clone
|
||||
// |
|
||||
// v
|
||||
@ -2272,9 +2275,6 @@ bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& p
|
||||
// : false true
|
||||
// : | |
|
||||
// : | v
|
||||
// : | loop predicate
|
||||
// : | |
|
||||
// : | v
|
||||
// : | newloop<-----+
|
||||
// : | | |
|
||||
// : | stmt3 clone |
|
||||
@ -2330,7 +2330,6 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
|
||||
}
|
||||
}
|
||||
|
||||
Node* entry = head->in(LoopNode::EntryControl);
|
||||
int dd = dom_depth(head);
|
||||
|
||||
// Step 1: find cut point
|
||||
@ -2627,8 +2626,6 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
|
||||
|
||||
// Backedge of the surviving new_head (the clone) is original last_peel
|
||||
_igvn.hash_delete(new_head_clone);
|
||||
Node* new_entry = move_loop_predicates(entry, new_head_clone->in(LoopNode::EntryControl));
|
||||
new_head_clone->set_req(LoopNode::EntryControl, new_entry);
|
||||
new_head_clone->set_req(LoopNode::LoopBackControl, last_peel);
|
||||
_igvn._worklist.push(new_head_clone);
|
||||
|
||||
|
@ -221,9 +221,16 @@ void PhaseMacroExpand::eliminate_card_mark(Node* p2x) {
|
||||
Node *shift = p2x->unique_out();
|
||||
Node *addp = shift->unique_out();
|
||||
for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
|
||||
Node *st = addp->last_out(j);
|
||||
assert(st->is_Store(), "store required");
|
||||
_igvn.replace_node(st, st->in(MemNode::Memory));
|
||||
Node *mem = addp->last_out(j);
|
||||
if (UseCondCardMark && mem->is_Load()) {
|
||||
assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
|
||||
// The load is checking if the card has been written so
|
||||
// replace it with zero to fold the test.
|
||||
_igvn.replace_node(mem, intcon(0));
|
||||
continue;
|
||||
}
|
||||
assert(mem->is_Store(), "store required");
|
||||
_igvn.replace_node(mem, mem->in(MemNode::Memory));
|
||||
}
|
||||
} else {
|
||||
// G1 pre/post barriers
|
||||
|
@ -1259,15 +1259,18 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
|
||||
return NULL; // Wait stable graph
|
||||
}
|
||||
uint cnt = mem->req();
|
||||
for( uint i = 1; i < cnt; i++ ) {
|
||||
for (uint i = 1; i < cnt; i++) {
|
||||
Node* rc = region->in(i);
|
||||
if (rc == NULL || phase->type(rc) == Type::TOP)
|
||||
return NULL; // Wait stable graph
|
||||
Node *in = mem->in(i);
|
||||
if( in == NULL ) {
|
||||
if (in == NULL) {
|
||||
return NULL; // Wait stable graph
|
||||
}
|
||||
}
|
||||
// Check for loop invariant.
|
||||
if (cnt == 3) {
|
||||
for( uint i = 1; i < cnt; i++ ) {
|
||||
for (uint i = 1; i < cnt; i++) {
|
||||
Node *in = mem->in(i);
|
||||
Node* m = MemNode::optimize_memory_chain(in, addr_t, phase);
|
||||
if (m == mem) {
|
||||
@ -1281,38 +1284,37 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
|
||||
|
||||
// Do nothing here if Identity will find a value
|
||||
// (to avoid infinite chain of value phis generation).
|
||||
if ( !phase->eqv(this, this->Identity(phase)) )
|
||||
if (!phase->eqv(this, this->Identity(phase)))
|
||||
return NULL;
|
||||
|
||||
// Skip the split if the region dominates some control edge of the address.
|
||||
if (cnt == 3 && !MemNode::all_controls_dominate(address, region))
|
||||
if (!MemNode::all_controls_dominate(address, region))
|
||||
return NULL;
|
||||
|
||||
const Type* this_type = this->bottom_type();
|
||||
int this_index = phase->C->get_alias_index(addr_t);
|
||||
int this_offset = addr_t->offset();
|
||||
int this_iid = addr_t->is_oopptr()->instance_id();
|
||||
int wins = 0;
|
||||
PhaseIterGVN *igvn = phase->is_IterGVN();
|
||||
Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
|
||||
for( uint i = 1; i < region->req(); i++ ) {
|
||||
for (uint i = 1; i < region->req(); i++) {
|
||||
Node *x;
|
||||
Node* the_clone = NULL;
|
||||
if( region->in(i) == phase->C->top() ) {
|
||||
if (region->in(i) == phase->C->top()) {
|
||||
x = phase->C->top(); // Dead path? Use a dead data op
|
||||
} else {
|
||||
x = this->clone(); // Else clone up the data op
|
||||
the_clone = x; // Remember for possible deletion.
|
||||
// Alter data node to use pre-phi inputs
|
||||
if( this->in(0) == region ) {
|
||||
x->set_req( 0, region->in(i) );
|
||||
if (this->in(0) == region) {
|
||||
x->set_req(0, region->in(i));
|
||||
} else {
|
||||
x->set_req( 0, NULL );
|
||||
x->set_req(0, NULL);
|
||||
}
|
||||
for( uint j = 1; j < this->req(); j++ ) {
|
||||
for (uint j = 1; j < this->req(); j++) {
|
||||
Node *in = this->in(j);
|
||||
if( in->is_Phi() && in->in(0) == region )
|
||||
x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone
|
||||
if (in->is_Phi() && in->in(0) == region)
|
||||
x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
|
||||
}
|
||||
}
|
||||
// Check for a 'win' on some paths
|
||||
@ -1321,12 +1323,11 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
|
||||
bool singleton = t->singleton();
|
||||
|
||||
// See comments in PhaseIdealLoop::split_thru_phi().
|
||||
if( singleton && t == Type::TOP ) {
|
||||
if (singleton && t == Type::TOP) {
|
||||
singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
|
||||
}
|
||||
|
||||
if( singleton ) {
|
||||
wins++;
|
||||
if (singleton) {
|
||||
x = igvn->makecon(t);
|
||||
} else {
|
||||
// We now call Identity to try to simplify the cloned node.
|
||||
@ -1340,13 +1341,11 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
|
||||
// igvn->type(x) is set to x->Value() already.
|
||||
x->raise_bottom_type(t);
|
||||
Node *y = x->Identity(igvn);
|
||||
if( y != x ) {
|
||||
wins++;
|
||||
if (y != x) {
|
||||
x = y;
|
||||
} else {
|
||||
y = igvn->hash_find(x);
|
||||
if( y ) {
|
||||
wins++;
|
||||
if (y) {
|
||||
x = y;
|
||||
} else {
|
||||
// Else x is a new node we are keeping
|
||||
@ -1360,13 +1359,9 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
|
||||
igvn->remove_dead_node(the_clone);
|
||||
phi->set_req(i, x);
|
||||
}
|
||||
if( wins > 0 ) {
|
||||
// Record Phi
|
||||
igvn->register_new_node_with_optimizer(phi);
|
||||
return phi;
|
||||
}
|
||||
igvn->remove_dead_node(phi);
|
||||
return NULL;
|
||||
// Record Phi
|
||||
igvn->register_new_node_with_optimizer(phi);
|
||||
return phi;
|
||||
}
|
||||
|
||||
//------------------------------Ideal------------------------------------------
|
||||
@ -1677,14 +1672,15 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
|
||||
// If we are loading from a freshly-allocated object, produce a zero,
|
||||
// if the load is provably beyond the header of the object.
|
||||
// (Also allow a variable load from a fresh array to produce zero.)
|
||||
if (ReduceFieldZeroing) {
|
||||
const TypeOopPtr *tinst = tp->isa_oopptr();
|
||||
bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
|
||||
if (ReduceFieldZeroing || is_instance) {
|
||||
Node* value = can_see_stored_value(mem,phase);
|
||||
if (value != NULL && value->is_Con())
|
||||
return value->bottom_type();
|
||||
}
|
||||
|
||||
const TypeOopPtr *tinst = tp->isa_oopptr();
|
||||
if (tinst != NULL && tinst->is_known_instance_field()) {
|
||||
if (is_instance) {
|
||||
// If we have an instance type and our memory input is the
|
||||
// programs's initial memory state, there is no matching store,
|
||||
// so just return a zero of the appropriate type
|
||||
|
@ -1172,16 +1172,16 @@ void PhaseStringOpts::int_getChars(GraphKit& kit, Node* arg, Node* char_array, N
|
||||
|
||||
Node* PhaseStringOpts::copy_string(GraphKit& kit, Node* str, Node* char_array, Node* start) {
|
||||
Node* string = str;
|
||||
Node* offset = kit.make_load(NULL,
|
||||
Node* offset = kit.make_load(kit.control(),
|
||||
kit.basic_plus_adr(string, string, java_lang_String::offset_offset_in_bytes()),
|
||||
TypeInt::INT, T_INT, offset_field_idx);
|
||||
Node* count = kit.make_load(NULL,
|
||||
Node* count = kit.make_load(kit.control(),
|
||||
kit.basic_plus_adr(string, string, java_lang_String::count_offset_in_bytes()),
|
||||
TypeInt::INT, T_INT, count_field_idx);
|
||||
const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
|
||||
TypeAry::make(TypeInt::CHAR,TypeInt::POS),
|
||||
ciTypeArrayKlass::make(T_CHAR), true, 0);
|
||||
Node* value = kit.make_load(NULL,
|
||||
Node* value = kit.make_load(kit.control(),
|
||||
kit.basic_plus_adr(string, string, java_lang_String::value_offset_in_bytes()),
|
||||
value_type, T_OBJECT, value_field_idx);
|
||||
|
||||
@ -1342,7 +1342,7 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
|
||||
}
|
||||
// Node* offset = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, offset_offset),
|
||||
// TypeInt::INT, T_INT, offset_field_idx);
|
||||
Node* count = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, java_lang_String::count_offset_in_bytes()),
|
||||
Node* count = kit.make_load(kit.control(), kit.basic_plus_adr(arg, arg, java_lang_String::count_offset_in_bytes()),
|
||||
TypeInt::INT, T_INT, count_field_idx);
|
||||
length = __ AddI(length, count);
|
||||
string_sizes->init_req(argi, NULL);
|
||||
|
@ -82,10 +82,8 @@ void MethodHandleChain::set_method_handle(Handle mh, TRAPS) {
|
||||
|
||||
void MethodHandleChain::set_last_method(oop target, TRAPS) {
|
||||
_is_last = true;
|
||||
klassOop receiver_limit_oop = NULL;
|
||||
int flags = 0;
|
||||
methodOop m = MethodHandles::decode_method(target, receiver_limit_oop, flags);
|
||||
_last_method = methodHandle(THREAD, m);
|
||||
KlassHandle receiver_limit; int flags = 0;
|
||||
_last_method = MethodHandles::decode_method(target, receiver_limit, flags);
|
||||
if ((flags & MethodHandles::_dmf_has_receiver) == 0)
|
||||
_last_invoke = Bytecodes::_invokestatic;
|
||||
else if ((flags & MethodHandles::_dmf_does_dispatch) == 0)
|
||||
|
@ -153,9 +153,9 @@ void MethodHandles::set_enabled(bool z) {
|
||||
// and local, like parse a data structure. For speed, such methods work on plain
|
||||
// oops, not handles. Trapping methods uniformly operate on handles.
|
||||
|
||||
methodOop MethodHandles::decode_vmtarget(oop vmtarget, int vmindex, oop mtype,
|
||||
klassOop& receiver_limit_result, int& decode_flags_result) {
|
||||
if (vmtarget == NULL) return NULL;
|
||||
methodHandle MethodHandles::decode_vmtarget(oop vmtarget, int vmindex, oop mtype,
|
||||
KlassHandle& receiver_limit_result, int& decode_flags_result) {
|
||||
if (vmtarget == NULL) return methodHandle();
|
||||
assert(methodOopDesc::nonvirtual_vtable_index < 0, "encoding");
|
||||
if (vmindex < 0) {
|
||||
// this DMH performs no dispatch; it is directly bound to a methodOop
|
||||
@ -198,20 +198,20 @@ methodOop MethodHandles::decode_vmtarget(oop vmtarget, int vmindex, oop mtype,
|
||||
// MemberName and DirectMethodHandle have the same linkage to the JVM internals.
|
||||
// (MemberName is the non-operational name used for queries and setup.)
|
||||
|
||||
methodOop MethodHandles::decode_DirectMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
|
||||
methodHandle MethodHandles::decode_DirectMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
|
||||
oop vmtarget = java_lang_invoke_DirectMethodHandle::vmtarget(mh);
|
||||
int vmindex = java_lang_invoke_DirectMethodHandle::vmindex(mh);
|
||||
oop mtype = java_lang_invoke_DirectMethodHandle::type(mh);
|
||||
return decode_vmtarget(vmtarget, vmindex, mtype, receiver_limit_result, decode_flags_result);
|
||||
}
|
||||
|
||||
methodOop MethodHandles::decode_BoundMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
|
||||
methodHandle MethodHandles::decode_BoundMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
|
||||
assert(java_lang_invoke_BoundMethodHandle::is_instance(mh), "");
|
||||
assert(mh->klass() != SystemDictionary::AdapterMethodHandle_klass(), "");
|
||||
for (oop bmh = mh;;) {
|
||||
// Bound MHs can be stacked to bind several arguments.
|
||||
oop target = java_lang_invoke_MethodHandle::vmtarget(bmh);
|
||||
if (target == NULL) return NULL;
|
||||
if (target == NULL) return methodHandle();
|
||||
decode_flags_result |= MethodHandles::_dmf_binds_argument;
|
||||
klassOop tk = target->klass();
|
||||
if (tk == SystemDictionary::BoundMethodHandle_klass()) {
|
||||
@ -236,14 +236,14 @@ methodOop MethodHandles::decode_BoundMethodHandle(oop mh, klassOop& receiver_lim
|
||||
}
|
||||
}
|
||||
|
||||
methodOop MethodHandles::decode_AdapterMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
|
||||
methodHandle MethodHandles::decode_AdapterMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
|
||||
assert(mh->klass() == SystemDictionary::AdapterMethodHandle_klass(), "");
|
||||
for (oop amh = mh;;) {
|
||||
// Adapter MHs can be stacked to convert several arguments.
|
||||
int conv_op = adapter_conversion_op(java_lang_invoke_AdapterMethodHandle::conversion(amh));
|
||||
decode_flags_result |= (_dmf_adapter_lsb << conv_op) & _DMF_ADAPTER_MASK;
|
||||
oop target = java_lang_invoke_MethodHandle::vmtarget(amh);
|
||||
if (target == NULL) return NULL;
|
||||
if (target == NULL) return methodHandle();
|
||||
klassOop tk = target->klass();
|
||||
if (tk == SystemDictionary::AdapterMethodHandle_klass()) {
|
||||
amh = target;
|
||||
@ -255,8 +255,8 @@ methodOop MethodHandles::decode_AdapterMethodHandle(oop mh, klassOop& receiver_l
|
||||
}
|
||||
}
|
||||
|
||||
methodOop MethodHandles::decode_MethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
|
||||
if (mh == NULL) return NULL;
|
||||
methodHandle MethodHandles::decode_MethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
|
||||
if (mh == NULL) return methodHandle();
|
||||
klassOop mhk = mh->klass();
|
||||
assert(java_lang_invoke_MethodHandle::is_subclass(mhk), "must be a MethodHandle");
|
||||
if (mhk == SystemDictionary::DirectMethodHandle_klass()) {
|
||||
@ -270,7 +270,7 @@ methodOop MethodHandles::decode_MethodHandle(oop mh, klassOop& receiver_limit_re
|
||||
return decode_BoundMethodHandle(mh, receiver_limit_result, decode_flags_result);
|
||||
} else {
|
||||
assert(false, "cannot parse this MH");
|
||||
return NULL; // random MH?
|
||||
return methodHandle(); // random MH?
|
||||
}
|
||||
}
|
||||
|
||||
@ -299,9 +299,9 @@ methodOop MethodHandles::decode_methodOop(methodOop m, int& decode_flags_result)
|
||||
|
||||
// A trusted party is handing us a cookie to determine a method.
|
||||
// Let's boil it down to the method oop they really want.
|
||||
methodOop MethodHandles::decode_method(oop x, klassOop& receiver_limit_result, int& decode_flags_result) {
|
||||
methodHandle MethodHandles::decode_method(oop x, KlassHandle& receiver_limit_result, int& decode_flags_result) {
|
||||
decode_flags_result = 0;
|
||||
receiver_limit_result = NULL;
|
||||
receiver_limit_result = KlassHandle();
|
||||
klassOop xk = x->klass();
|
||||
if (xk == Universe::methodKlassObj()) {
|
||||
return decode_methodOop((methodOop) x, decode_flags_result);
|
||||
@ -329,7 +329,7 @@ methodOop MethodHandles::decode_method(oop x, klassOop& receiver_limit_result, i
|
||||
assert(!x->is_method(), "already checked");
|
||||
assert(!java_lang_invoke_MemberName::is_instance(x), "already checked");
|
||||
}
|
||||
return NULL;
|
||||
return methodHandle();
|
||||
}
|
||||
|
||||
|
||||
@ -389,11 +389,10 @@ void MethodHandles::init_MemberName(oop mname_oop, oop target_oop) {
|
||||
int offset = instanceKlass::cast(k)->offset_from_fields(slot);
|
||||
init_MemberName(mname_oop, k, accessFlags_from(mods), offset);
|
||||
} else {
|
||||
int decode_flags = 0; klassOop receiver_limit = NULL;
|
||||
methodOop m = MethodHandles::decode_method(target_oop,
|
||||
receiver_limit, decode_flags);
|
||||
KlassHandle receiver_limit; int decode_flags = 0;
|
||||
methodHandle m = MethodHandles::decode_method(target_oop, receiver_limit, decode_flags);
|
||||
bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0);
|
||||
init_MemberName(mname_oop, m, do_dispatch);
|
||||
init_MemberName(mname_oop, m(), do_dispatch);
|
||||
}
|
||||
}
|
||||
|
||||
@ -423,13 +422,14 @@ void MethodHandles::init_MemberName(oop mname_oop, klassOop field_holder, Access
|
||||
}
|
||||
|
||||
|
||||
methodOop MethodHandles::decode_MemberName(oop mname, klassOop& receiver_limit_result, int& decode_flags_result) {
|
||||
methodHandle MethodHandles::decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result) {
|
||||
methodHandle empty;
|
||||
int flags = java_lang_invoke_MemberName::flags(mname);
|
||||
if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0) return NULL; // not invocable
|
||||
if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0) return empty; // not invocable
|
||||
oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname);
|
||||
int vmindex = java_lang_invoke_MemberName::vmindex(mname);
|
||||
if (vmindex == VM_INDEX_UNINITIALIZED) return NULL; // not resolved
|
||||
methodOop m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result);
|
||||
if (vmindex == VM_INDEX_UNINITIALIZED) return empty; // not resolved
|
||||
methodHandle m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result);
|
||||
oop clazz = java_lang_invoke_MemberName::clazz(mname);
|
||||
if (clazz != NULL && java_lang_Class::is_instance(clazz)) {
|
||||
klassOop klass = java_lang_Class::as_klassOop(clazz);
|
||||
@ -439,9 +439,7 @@ methodOop MethodHandles::decode_MemberName(oop mname, klassOop& receiver_limit_r
|
||||
}
|
||||
|
||||
// convert the external string or reflective type to an internal signature
|
||||
Symbol* MethodHandles::convert_to_signature(oop type_str,
|
||||
bool polymorphic,
|
||||
TRAPS) {
|
||||
Symbol* MethodHandles::convert_to_signature(oop type_str, bool polymorphic, TRAPS) {
|
||||
if (java_lang_invoke_MethodType::is_instance(type_str)) {
|
||||
return java_lang_invoke_MethodType::as_signature(type_str, polymorphic, CHECK_NULL);
|
||||
} else if (java_lang_Class::is_instance(type_str)) {
|
||||
@ -474,48 +472,48 @@ void MethodHandles::resolve_MemberName(Handle mname, TRAPS) {
|
||||
#endif
|
||||
if (java_lang_invoke_MemberName::vmindex(mname()) != VM_INDEX_UNINITIALIZED)
|
||||
return; // already resolved
|
||||
oop defc_oop = java_lang_invoke_MemberName::clazz(mname());
|
||||
oop name_str = java_lang_invoke_MemberName::name(mname());
|
||||
oop type_str = java_lang_invoke_MemberName::type(mname());
|
||||
int flags = java_lang_invoke_MemberName::flags(mname());
|
||||
Handle defc_oop(THREAD, java_lang_invoke_MemberName::clazz(mname()));
|
||||
Handle name_str(THREAD, java_lang_invoke_MemberName::name( mname()));
|
||||
Handle type_str(THREAD, java_lang_invoke_MemberName::type( mname()));
|
||||
int flags = java_lang_invoke_MemberName::flags(mname());
|
||||
|
||||
if (defc_oop == NULL || name_str == NULL || type_str == NULL) {
|
||||
if (defc_oop.is_null() || name_str.is_null() || type_str.is_null()) {
|
||||
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "nothing to resolve");
|
||||
}
|
||||
klassOop defc_klassOop = java_lang_Class::as_klassOop(defc_oop);
|
||||
defc_oop = NULL; // safety
|
||||
if (defc_klassOop == NULL) return; // a primitive; no resolution possible
|
||||
if (!Klass::cast(defc_klassOop)->oop_is_instance()) {
|
||||
if (!Klass::cast(defc_klassOop)->oop_is_array()) return;
|
||||
defc_klassOop = SystemDictionary::Object_klass();
|
||||
|
||||
instanceKlassHandle defc;
|
||||
{
|
||||
klassOop defc_klassOop = java_lang_Class::as_klassOop(defc_oop());
|
||||
if (defc_klassOop == NULL) return; // a primitive; no resolution possible
|
||||
if (!Klass::cast(defc_klassOop)->oop_is_instance()) {
|
||||
if (!Klass::cast(defc_klassOop)->oop_is_array()) return;
|
||||
defc_klassOop = SystemDictionary::Object_klass();
|
||||
}
|
||||
defc = instanceKlassHandle(THREAD, defc_klassOop);
|
||||
}
|
||||
instanceKlassHandle defc(THREAD, defc_klassOop);
|
||||
defc_klassOop = NULL; // safety
|
||||
if (defc.is_null()) {
|
||||
THROW_MSG(vmSymbols::java_lang_InternalError(), "primitive class");
|
||||
}
|
||||
defc->link_class(CHECK);
|
||||
defc->link_class(CHECK); // possible safepoint
|
||||
|
||||
// convert the external string name to an internal symbol
|
||||
TempNewSymbol name = java_lang_String::as_symbol_or_null(name_str);
|
||||
TempNewSymbol name = java_lang_String::as_symbol_or_null(name_str());
|
||||
if (name == NULL) return; // no such name
|
||||
name_str = NULL; // safety
|
||||
|
||||
Handle polymorphic_method_type;
|
||||
bool polymorphic_signature = false;
|
||||
if ((flags & ALL_KINDS) == IS_METHOD &&
|
||||
(defc() == SystemDictionary::MethodHandle_klass() &&
|
||||
methodOopDesc::is_method_handle_invoke_name(name)))
|
||||
methodOopDesc::is_method_handle_invoke_name(name))) {
|
||||
polymorphic_signature = true;
|
||||
|
||||
// convert the external string or reflective type to an internal signature
|
||||
TempNewSymbol type = convert_to_signature(type_str, polymorphic_signature, CHECK);
|
||||
if (java_lang_invoke_MethodType::is_instance(type_str) && polymorphic_signature) {
|
||||
polymorphic_method_type = Handle(THREAD, type_str); //preserve exactly
|
||||
}
|
||||
|
||||
// convert the external string or reflective type to an internal signature
|
||||
TempNewSymbol type = convert_to_signature(type_str(), polymorphic_signature, CHECK);
|
||||
if (java_lang_invoke_MethodType::is_instance(type_str()) && polymorphic_signature) {
|
||||
polymorphic_method_type = type_str; // preserve exactly
|
||||
}
|
||||
if (type == NULL) return; // no such signature exists in the VM
|
||||
type_str = NULL; // safety
|
||||
|
||||
// Time to do the lookup.
|
||||
switch (flags & ALL_KINDS) {
|
||||
@ -560,8 +558,8 @@ void MethodHandles::resolve_MemberName(Handle mname, TRAPS) {
|
||||
java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget);
|
||||
java_lang_invoke_MemberName::set_vmindex(mname(), vmindex);
|
||||
java_lang_invoke_MemberName::set_modifiers(mname(), mods);
|
||||
DEBUG_ONLY(int junk; klassOop junk2);
|
||||
assert(decode_MemberName(mname(), junk2, junk) == result.resolved_method()(),
|
||||
DEBUG_ONLY(KlassHandle junk1; int junk2);
|
||||
assert(decode_MemberName(mname(), junk1, junk2) == result.resolved_method(),
|
||||
"properly stored for later decoding");
|
||||
return;
|
||||
}
|
||||
@ -589,8 +587,8 @@ void MethodHandles::resolve_MemberName(Handle mname, TRAPS) {
|
||||
java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget);
|
||||
java_lang_invoke_MemberName::set_vmindex(mname(), vmindex);
|
||||
java_lang_invoke_MemberName::set_modifiers(mname(), mods);
|
||||
DEBUG_ONLY(int junk; klassOop junk2);
|
||||
assert(decode_MemberName(mname(), junk2, junk) == result.resolved_method()(),
|
||||
DEBUG_ONLY(KlassHandle junk1; int junk2);
|
||||
assert(decode_MemberName(mname(), junk1, junk2) == result.resolved_method(),
|
||||
"properly stored for later decoding");
|
||||
return;
|
||||
}
|
||||
@ -677,16 +675,14 @@ void MethodHandles::expand_MemberName(Handle mname, int suppress, TRAPS) {
|
||||
case IS_METHOD:
|
||||
case IS_CONSTRUCTOR:
|
||||
{
|
||||
klassOop receiver_limit = NULL;
|
||||
int decode_flags = 0;
|
||||
methodHandle m(THREAD, decode_vmtarget(vmtarget, vmindex, NULL,
|
||||
receiver_limit, decode_flags));
|
||||
KlassHandle receiver_limit; int decode_flags = 0;
|
||||
methodHandle m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit, decode_flags);
|
||||
if (m.is_null()) break;
|
||||
if (!have_defc) {
|
||||
klassOop defc = m->method_holder();
|
||||
if (receiver_limit != NULL && receiver_limit != defc
|
||||
&& Klass::cast(receiver_limit)->is_subtype_of(defc))
|
||||
defc = receiver_limit;
|
||||
if (receiver_limit.not_null() && receiver_limit() != defc
|
||||
&& Klass::cast(receiver_limit())->is_subtype_of(defc))
|
||||
defc = receiver_limit();
|
||||
java_lang_invoke_MemberName::set_clazz(mname(), Klass::cast(defc)->java_mirror());
|
||||
}
|
||||
if (!have_name) {
|
||||
@ -884,10 +880,9 @@ oop MethodHandles::encode_target(Handle mh, int format, TRAPS) {
|
||||
// - AMH can have methodOop for static invoke with bound receiver
|
||||
// - DMH can have methodOop for static invoke (on variable receiver)
|
||||
// - DMH can have klassOop for dispatched (non-static) invoke
|
||||
klassOop receiver_limit = NULL;
|
||||
int decode_flags = 0;
|
||||
methodOop m = decode_MethodHandle(mh(), receiver_limit, decode_flags);
|
||||
if (m == NULL) return NULL;
|
||||
KlassHandle receiver_limit; int decode_flags = 0;
|
||||
methodHandle m = decode_MethodHandle(mh(), receiver_limit, decode_flags);
|
||||
if (m.is_null()) return NULL;
|
||||
switch (format) {
|
||||
case ETF_REFLECT_METHOD:
|
||||
// same as jni_ToReflectedMethod:
|
||||
@ -903,10 +898,10 @@ oop MethodHandles::encode_target(Handle mh, int format, TRAPS) {
|
||||
if (SystemDictionary::MemberName_klass() == NULL) break;
|
||||
instanceKlassHandle mname_klass(THREAD, SystemDictionary::MemberName_klass());
|
||||
mname_klass->initialize(CHECK_NULL);
|
||||
Handle mname = mname_klass->allocate_instance_handle(CHECK_NULL);
|
||||
Handle mname = mname_klass->allocate_instance_handle(CHECK_NULL); // possible safepoint
|
||||
java_lang_invoke_MemberName::set_vmindex(mname(), VM_INDEX_UNINITIALIZED);
|
||||
bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0);
|
||||
init_MemberName(mname(), m, do_dispatch);
|
||||
init_MemberName(mname(), m(), do_dispatch);
|
||||
expand_MemberName(mname, 0, CHECK_NULL);
|
||||
return mname();
|
||||
}
|
||||
@ -1459,8 +1454,8 @@ void MethodHandles::init_DirectMethodHandle(Handle mh, methodHandle m, bool do_d
|
||||
// that links the interpreter calls to the method. We need the same
|
||||
// bits, and will use the same calling sequence code.
|
||||
|
||||
int vmindex = methodOopDesc::garbage_vtable_index;
|
||||
oop vmtarget = NULL;
|
||||
int vmindex = methodOopDesc::garbage_vtable_index;
|
||||
Handle vmtarget;
|
||||
|
||||
instanceKlass::cast(m->method_holder())->link_class(CHECK);
|
||||
|
||||
@ -1478,7 +1473,7 @@ void MethodHandles::init_DirectMethodHandle(Handle mh, methodHandle m, bool do_d
|
||||
} else if (!do_dispatch || m->can_be_statically_bound()) {
|
||||
// We are simulating an invokestatic or invokespecial instruction.
|
||||
// Set up the method pointer, just like ConstantPoolCacheEntry::set_method().
|
||||
vmtarget = m();
|
||||
vmtarget = m;
|
||||
// this does not help dispatch, but it will make it possible to parse this MH:
|
||||
vmindex = methodOopDesc::nonvirtual_vtable_index;
|
||||
assert(vmindex < 0, "(>=0) == do_dispatch");
|
||||
@ -1490,7 +1485,7 @@ void MethodHandles::init_DirectMethodHandle(Handle mh, methodHandle m, bool do_d
|
||||
// For a DMH, it is done now, when the handle is created.
|
||||
Klass* k = Klass::cast(m->method_holder());
|
||||
if (k->should_be_initialized()) {
|
||||
k->initialize(CHECK);
|
||||
k->initialize(CHECK); // possible safepoint
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@ -1504,10 +1499,10 @@ void MethodHandles::init_DirectMethodHandle(Handle mh, methodHandle m, bool do_d
|
||||
|
||||
if (me == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
|
||||
|
||||
java_lang_invoke_DirectMethodHandle::set_vmtarget(mh(), vmtarget);
|
||||
java_lang_invoke_DirectMethodHandle::set_vmindex(mh(), vmindex);
|
||||
DEBUG_ONLY(int flags; klassOop rlimit);
|
||||
assert(MethodHandles::decode_method(mh(), rlimit, flags) == m(),
|
||||
java_lang_invoke_DirectMethodHandle::set_vmtarget(mh(), vmtarget());
|
||||
java_lang_invoke_DirectMethodHandle::set_vmindex( mh(), vmindex);
|
||||
DEBUG_ONLY(KlassHandle rlimit; int flags);
|
||||
assert(MethodHandles::decode_method(mh(), rlimit, flags) == m,
|
||||
"properly stored for later decoding");
|
||||
DEBUG_ONLY(bool actual_do_dispatch = ((flags & _dmf_does_dispatch) != 0));
|
||||
assert(!(actual_do_dispatch && !do_dispatch),
|
||||
@ -1523,10 +1518,13 @@ void MethodHandles::verify_BoundMethodHandle_with_receiver(Handle mh,
|
||||
methodHandle m,
|
||||
TRAPS) {
|
||||
// Verify type.
|
||||
oop receiver = java_lang_invoke_BoundMethodHandle::argument(mh());
|
||||
Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(mh()));
|
||||
KlassHandle bound_recv_type;
|
||||
if (receiver != NULL) bound_recv_type = KlassHandle(THREAD, receiver->klass());
|
||||
{
|
||||
oop receiver = java_lang_invoke_BoundMethodHandle::argument(mh());
|
||||
if (receiver != NULL)
|
||||
bound_recv_type = KlassHandle(THREAD, receiver->klass());
|
||||
}
|
||||
Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(mh()));
|
||||
verify_method_type(m, mtype, true, bound_recv_type, CHECK);
|
||||
|
||||
int receiver_pos = m->size_of_parameters() - 1;
|
||||
@ -1573,8 +1571,8 @@ void MethodHandles::init_BoundMethodHandle_with_receiver(Handle mh,
|
||||
|
||||
java_lang_invoke_BoundMethodHandle::set_vmtarget(mh(), m());
|
||||
|
||||
DEBUG_ONLY(int junk; klassOop junk2);
|
||||
assert(MethodHandles::decode_method(mh(), junk2, junk) == m(), "properly stored for later decoding");
|
||||
DEBUG_ONLY(KlassHandle junk1; int junk2);
|
||||
assert(MethodHandles::decode_method(mh(), junk1, junk2) == m, "properly stored for later decoding");
|
||||
assert(decode_MethodHandle_stack_pushes(mh()) == 1, "BMH pushes one stack slot");
|
||||
|
||||
// Done!
|
||||
@ -1682,8 +1680,11 @@ void MethodHandles::init_BoundMethodHandle(Handle mh, Handle target, int argnum,
|
||||
}
|
||||
|
||||
// Get bound type and required slots.
|
||||
oop ptype_oop = java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum);
|
||||
BasicType ptype = java_lang_Class::as_BasicType(ptype_oop);
|
||||
BasicType ptype;
|
||||
{
|
||||
oop ptype_oop = java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum);
|
||||
ptype = java_lang_Class::as_BasicType(ptype_oop);
|
||||
}
|
||||
int slots_pushed = type2size[ptype];
|
||||
|
||||
// If (a) the target is a direct non-dispatched method handle,
|
||||
@ -1694,13 +1695,12 @@ void MethodHandles::init_BoundMethodHandle(Handle mh, Handle target, int argnum,
|
||||
if (OptimizeMethodHandles &&
|
||||
target->klass() == SystemDictionary::DirectMethodHandle_klass() &&
|
||||
(argnum == 0 || java_lang_invoke_DirectMethodHandle::vmindex(target()) < 0)) {
|
||||
int decode_flags = 0; klassOop receiver_limit_oop = NULL;
|
||||
methodHandle m(THREAD, decode_method(target(), receiver_limit_oop, decode_flags));
|
||||
KlassHandle receiver_limit; int decode_flags = 0;
|
||||
methodHandle m = decode_method(target(), receiver_limit, decode_flags);
|
||||
if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "DMH failed to decode"); }
|
||||
DEBUG_ONLY(int m_vmslots = m->size_of_parameters() - slots_pushed); // pos. of 1st arg.
|
||||
assert(java_lang_invoke_BoundMethodHandle::vmslots(mh()) == m_vmslots, "type w/ m sig");
|
||||
if (argnum == 0 && (decode_flags & _dmf_has_receiver) != 0) {
|
||||
KlassHandle receiver_limit(THREAD, receiver_limit_oop);
|
||||
init_BoundMethodHandle_with_receiver(mh, m,
|
||||
receiver_limit, decode_flags,
|
||||
CHECK);
|
||||
@ -2019,7 +2019,6 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) {
|
||||
}
|
||||
|
||||
void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS) {
|
||||
oop argument = java_lang_invoke_AdapterMethodHandle::argument(mh());
|
||||
int argslot = java_lang_invoke_AdapterMethodHandle::vmargslot(mh());
|
||||
jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh());
|
||||
jint conv_op = adapter_conversion_op(conversion);
|
||||
@ -2215,18 +2214,14 @@ JVM_ENTRY(void, MHN_init_DMH(JNIEnv *env, jobject igcls, jobject mh_jh,
|
||||
|
||||
// which method are we really talking about?
|
||||
if (target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
|
||||
oop target_oop = JNIHandles::resolve_non_null(target_jh);
|
||||
if (java_lang_invoke_MemberName::is_instance(target_oop) &&
|
||||
java_lang_invoke_MemberName::vmindex(target_oop) == VM_INDEX_UNINITIALIZED) {
|
||||
Handle mname(THREAD, target_oop);
|
||||
MethodHandles::resolve_MemberName(mname, CHECK);
|
||||
target_oop = mname(); // in case of GC
|
||||
Handle target(THREAD, JNIHandles::resolve_non_null(target_jh));
|
||||
if (java_lang_invoke_MemberName::is_instance(target()) &&
|
||||
java_lang_invoke_MemberName::vmindex(target()) == VM_INDEX_UNINITIALIZED) {
|
||||
MethodHandles::resolve_MemberName(target, CHECK);
|
||||
}
|
||||
|
||||
int decode_flags = 0; klassOop receiver_limit = NULL;
|
||||
methodHandle m(THREAD,
|
||||
MethodHandles::decode_method(target_oop,
|
||||
receiver_limit, decode_flags));
|
||||
KlassHandle receiver_limit; int decode_flags = 0;
|
||||
methodHandle m = MethodHandles::decode_method(target(), receiver_limit, decode_flags);
|
||||
if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "no such method"); }
|
||||
|
||||
// The trusted Java code that calls this method should already have performed
|
||||
@ -2284,12 +2279,8 @@ JVM_ENTRY(void, MHN_init_BMH(JNIEnv *env, jobject igcls, jobject mh_jh,
|
||||
// Target object is a reflective method. (%%% Do we need this alternate path?)
|
||||
Untested("init_BMH of non-MH");
|
||||
if (argnum != 0) { THROW(vmSymbols::java_lang_InternalError()); }
|
||||
int decode_flags = 0; klassOop receiver_limit_oop = NULL;
|
||||
methodHandle m(THREAD,
|
||||
MethodHandles::decode_method(target(),
|
||||
receiver_limit_oop,
|
||||
decode_flags));
|
||||
KlassHandle receiver_limit(THREAD, receiver_limit_oop);
|
||||
KlassHandle receiver_limit; int decode_flags = 0;
|
||||
methodHandle m = MethodHandles::decode_method(target(), receiver_limit, decode_flags);
|
||||
MethodHandles::init_BoundMethodHandle_with_receiver(mh, m,
|
||||
receiver_limit,
|
||||
decode_flags,
|
||||
@ -2424,12 +2415,12 @@ JVM_ENTRY(jint, MHN_getNamedCon(JNIEnv *env, jobject igcls, jint which, jobjectA
|
||||
#ifndef PRODUCT
|
||||
if (which >= 0 && which < con_value_count) {
|
||||
int con = con_values[which];
|
||||
objArrayOop box = (objArrayOop) JNIHandles::resolve(box_jh);
|
||||
if (box != NULL && box->klass() == Universe::objectArrayKlassObj() && box->length() > 0) {
|
||||
objArrayHandle box(THREAD, (objArrayOop) JNIHandles::resolve(box_jh));
|
||||
if (box.not_null() && box->klass() == Universe::objectArrayKlassObj() && box->length() > 0) {
|
||||
const char* str = &con_names[0];
|
||||
for (int i = 0; i < which; i++)
|
||||
str += strlen(str) + 1; // skip name and null
|
||||
oop name = java_lang_String::create_oop_from_str(str, CHECK_0);
|
||||
oop name = java_lang_String::create_oop_from_str(str, CHECK_0); // possible safepoint
|
||||
box->obj_at_put(0, name);
|
||||
}
|
||||
return con;
|
||||
@ -2486,10 +2477,10 @@ JVM_ENTRY(jint, MHN_getMembers(JNIEnv *env, jobject igcls,
|
||||
jclass clazz_jh, jstring name_jh, jstring sig_jh,
|
||||
int mflags, jclass caller_jh, jint skip, jobjectArray results_jh)) {
|
||||
if (clazz_jh == NULL || results_jh == NULL) return -1;
|
||||
klassOop k_oop = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz_jh));
|
||||
KlassHandle k(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz_jh)));
|
||||
|
||||
objArrayOop results = (objArrayOop) JNIHandles::resolve(results_jh);
|
||||
if (results == NULL || !results->is_objArray()) return -1;
|
||||
objArrayHandle results(THREAD, (objArrayOop) JNIHandles::resolve(results_jh));
|
||||
if (results.is_null() || !results->is_objArray()) return -1;
|
||||
|
||||
TempNewSymbol name = NULL;
|
||||
TempNewSymbol sig = NULL;
|
||||
@ -2502,20 +2493,20 @@ JVM_ENTRY(jint, MHN_getMembers(JNIEnv *env, jobject igcls,
|
||||
if (sig == NULL) return 0; // a match is not possible
|
||||
}
|
||||
|
||||
klassOop caller = NULL;
|
||||
KlassHandle caller;
|
||||
if (caller_jh != NULL) {
|
||||
oop caller_oop = JNIHandles::resolve_non_null(caller_jh);
|
||||
if (!java_lang_Class::is_instance(caller_oop)) return -1;
|
||||
caller = java_lang_Class::as_klassOop(caller_oop);
|
||||
caller = KlassHandle(THREAD, java_lang_Class::as_klassOop(caller_oop));
|
||||
}
|
||||
|
||||
if (name != NULL && sig != NULL && results != NULL) {
|
||||
if (name != NULL && sig != NULL && results.not_null()) {
|
||||
// try a direct resolve
|
||||
// %%% TO DO
|
||||
}
|
||||
|
||||
int res = MethodHandles::find_MemberNames(k_oop, name, sig, mflags,
|
||||
caller, skip, results);
|
||||
int res = MethodHandles::find_MemberNames(k(), name, sig, mflags,
|
||||
caller(), skip, results());
|
||||
// TO DO: expand at least some of the MemberNames, to avoid massive callbacks
|
||||
return res;
|
||||
}
|
||||
|
@ -265,13 +265,13 @@ class MethodHandles: AllStatic {
|
||||
static inline address from_interpreted_entry(EntryKind ek);
|
||||
|
||||
// helpers for decode_method.
|
||||
static methodOop decode_methodOop(methodOop m, int& decode_flags_result);
|
||||
static methodOop decode_vmtarget(oop vmtarget, int vmindex, oop mtype, klassOop& receiver_limit_result, int& decode_flags_result);
|
||||
static methodOop decode_MemberName(oop mname, klassOop& receiver_limit_result, int& decode_flags_result);
|
||||
static methodOop decode_MethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
|
||||
static methodOop decode_DirectMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
|
||||
static methodOop decode_BoundMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
|
||||
static methodOop decode_AdapterMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
|
||||
static methodOop decode_methodOop(methodOop m, int& decode_flags_result);
|
||||
static methodHandle decode_vmtarget(oop vmtarget, int vmindex, oop mtype, KlassHandle& receiver_limit_result, int& decode_flags_result);
|
||||
static methodHandle decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result);
|
||||
static methodHandle decode_MethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
|
||||
static methodHandle decode_DirectMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
|
||||
static methodHandle decode_BoundMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
|
||||
static methodHandle decode_AdapterMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
|
||||
|
||||
// Find out how many stack slots an mh pushes or pops.
|
||||
// The result is *not* reported as a multiple of stack_move_unit();
|
||||
@ -317,7 +317,7 @@ class MethodHandles: AllStatic {
|
||||
_dmf_adapter_lsb = 0x20,
|
||||
_DMF_ADAPTER_MASK = (_dmf_adapter_lsb << CONV_OP_LIMIT) - _dmf_adapter_lsb
|
||||
};
|
||||
static methodOop decode_method(oop x, klassOop& receiver_limit_result, int& decode_flags_result);
|
||||
static methodHandle decode_method(oop x, KlassHandle& receiver_limit_result, int& decode_flags_result);
|
||||
enum {
|
||||
// format of query to getConstant:
|
||||
GC_JVM_PUSH_LIMIT = 0,
|
||||
|
@ -620,6 +620,9 @@ class CommandLineFlags {
|
||||
product(bool, UseSSE42Intrinsics, false, \
|
||||
"SSE4.2 versions of intrinsics") \
|
||||
\
|
||||
product(bool, UseCondCardMark, false, \
|
||||
"Check for already marked card before updating card table") \
|
||||
\
|
||||
develop(bool, TraceCallFixup, false, \
|
||||
"traces all call fixups") \
|
||||
\
|
||||
|
@ -1721,14 +1721,14 @@ char* SharedRuntime::generate_wrong_method_type_message(JavaThread* thread,
|
||||
targetArity = ArgumentCount(target->signature()).size();
|
||||
}
|
||||
}
|
||||
klassOop kignore; int dmf_flags = 0;
|
||||
methodOop actual_method = MethodHandles::decode_method(actual, kignore, dmf_flags);
|
||||
KlassHandle kignore; int dmf_flags = 0;
|
||||
methodHandle actual_method = MethodHandles::decode_method(actual, kignore, dmf_flags);
|
||||
if ((dmf_flags & ~(MethodHandles::_dmf_has_receiver |
|
||||
MethodHandles::_dmf_does_dispatch |
|
||||
MethodHandles::_dmf_from_interface)) != 0)
|
||||
actual_method = NULL; // MH does extra binds, drops, etc.
|
||||
actual_method = methodHandle(); // MH does extra binds, drops, etc.
|
||||
bool has_receiver = ((dmf_flags & MethodHandles::_dmf_has_receiver) != 0);
|
||||
if (actual_method != NULL) {
|
||||
if (actual_method.not_null()) {
|
||||
mhName = actual_method->signature()->as_C_string();
|
||||
mhArity = ArgumentCount(actual_method->signature()).size();
|
||||
if (!actual_method->is_static()) mhArity += 1;
|
||||
|
@ -291,7 +291,9 @@ void VMThread::run() {
|
||||
// Among other things, this ensures that Eden top is correct.
|
||||
Universe::heap()->prepare_for_verify();
|
||||
os::check_heap();
|
||||
Universe::verify(true, true); // Silent verification to not polute normal output
|
||||
// Silent verification so as not to pollute normal output,
|
||||
// unless we really asked for it.
|
||||
Universe::verify(true, !(PrintGCDetails || Verbose));
|
||||
}
|
||||
|
||||
CompileBroker::set_should_block();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -34,10 +34,10 @@ G1MemoryPoolSuper::G1MemoryPoolSuper(G1CollectedHeap* g1h,
|
||||
size_t init_size,
|
||||
bool support_usage_threshold) :
|
||||
_g1h(g1h), CollectedMemoryPool(name,
|
||||
MemoryPool::Heap,
|
||||
init_size,
|
||||
undefined_max(),
|
||||
support_usage_threshold) {
|
||||
MemoryPool::Heap,
|
||||
init_size,
|
||||
undefined_max(),
|
||||
support_usage_threshold) {
|
||||
assert(UseG1GC, "sanity");
|
||||
}
|
||||
|
||||
@ -48,44 +48,27 @@ size_t G1MemoryPoolSuper::eden_space_committed(G1CollectedHeap* g1h) {
|
||||
|
||||
// See the comment at the top of g1MemoryPool.hpp
|
||||
size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
|
||||
size_t young_list_length = g1h->young_list()->length();
|
||||
size_t eden_used = young_list_length * HeapRegion::GrainBytes;
|
||||
size_t survivor_used = survivor_space_used(g1h);
|
||||
eden_used = subtract_up_to_zero(eden_used, survivor_used);
|
||||
return eden_used;
|
||||
return g1h->g1mm()->eden_space_used();
|
||||
}
|
||||
|
||||
// See the comment at the top of g1MemoryPool.hpp
|
||||
size_t G1MemoryPoolSuper::survivor_space_committed(G1CollectedHeap* g1h) {
|
||||
return MAX2(survivor_space_used(g1h), (size_t) HeapRegion::GrainBytes);
|
||||
return g1h->g1mm()->survivor_space_committed();
|
||||
}
|
||||
|
||||
// See the comment at the top of g1MemoryPool.hpp
|
||||
size_t G1MemoryPoolSuper::survivor_space_used(G1CollectedHeap* g1h) {
|
||||
size_t survivor_num = g1h->g1_policy()->recorded_survivor_regions();
|
||||
size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
|
||||
return survivor_used;
|
||||
return g1h->g1mm()->survivor_space_used();
|
||||
}
|
||||
|
||||
// See the comment at the top of g1MemoryPool.hpp
|
||||
size_t G1MemoryPoolSuper::old_space_committed(G1CollectedHeap* g1h) {
|
||||
size_t committed = overall_committed(g1h);
|
||||
size_t eden_committed = eden_space_committed(g1h);
|
||||
size_t survivor_committed = survivor_space_committed(g1h);
|
||||
committed = subtract_up_to_zero(committed, eden_committed);
|
||||
committed = subtract_up_to_zero(committed, survivor_committed);
|
||||
committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
|
||||
return committed;
|
||||
return g1h->g1mm()->old_space_committed();
|
||||
}
|
||||
|
||||
// See the comment at the top of g1MemoryPool.hpp
|
||||
size_t G1MemoryPoolSuper::old_space_used(G1CollectedHeap* g1h) {
|
||||
size_t used = overall_used(g1h);
|
||||
size_t eden_used = eden_space_used(g1h);
|
||||
size_t survivor_used = survivor_space_used(g1h);
|
||||
used = subtract_up_to_zero(used, eden_used);
|
||||
used = subtract_up_to_zero(used, survivor_used);
|
||||
return used;
|
||||
return g1h->g1mm()->old_space_used();
|
||||
}
|
||||
|
||||
G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -46,68 +46,9 @@ class G1CollectedHeap;
|
||||
// get, as this does affect the performance and behavior of G1. Which
|
||||
// is why we introduce the three memory pools implemented here.
|
||||
//
|
||||
// The above approach inroduces a couple of challenging issues in the
|
||||
// implementation of the three memory pools:
|
||||
// See comments in g1MonitoringSupport.hpp for additional details
|
||||
// on this model.
|
||||
//
|
||||
// 1) The used space calculation for a pool is not necessarily
|
||||
// independent of the others. We can easily get from G1 the overall
|
||||
// used space in the entire heap, the number of regions in the young
|
||||
// generation (includes both eden and survivors), and the number of
|
||||
// survivor regions. So, from that we calculate:
|
||||
//
|
||||
// survivor_used = survivor_num * region_size
|
||||
// eden_used = young_region_num * region_size - survivor_used
|
||||
// old_gen_used = overall_used - eden_used - survivor_used
|
||||
//
|
||||
// Note that survivor_used and eden_used are upper bounds. To get the
|
||||
// actual value we would have to iterate over the regions and add up
|
||||
// ->used(). But that'd be expensive. So, we'll accept some lack of
|
||||
// accuracy for those two. But, we have to be careful when calculating
|
||||
// old_gen_used, in case we subtract from overall_used more then the
|
||||
// actual number and our result goes negative.
|
||||
//
|
||||
// 2) Calculating the used space is straightforward, as described
|
||||
// above. However, how do we calculate the committed space, given that
|
||||
// we allocate space for the eden, survivor, and old gen out of the
|
||||
// same pool of regions? One way to do this is to use the used value
|
||||
// as also the committed value for the eden and survivor spaces and
|
||||
// then calculate the old gen committed space as follows:
|
||||
//
|
||||
// old_gen_committed = overall_committed - eden_committed - survivor_committed
|
||||
//
|
||||
// Maybe a better way to do that would be to calculate used for eden
|
||||
// and survivor as a sum of ->used() over their regions and then
|
||||
// calculate committed as region_num * region_size (i.e., what we use
|
||||
// to calculate the used space now). This is something to consider
|
||||
// in the future.
|
||||
//
|
||||
// 3) Another decision that is again not straightforward is what is
|
||||
// the max size that each memory pool can grow to. One way to do this
|
||||
// would be to use the committed size for the max for the eden and
|
||||
// survivors and calculate the old gen max as follows (basically, it's
|
||||
// a similar pattern to what we use for the committed space, as
|
||||
// described above):
|
||||
//
|
||||
// old_gen_max = overall_max - eden_max - survivor_max
|
||||
//
|
||||
// Unfortunately, the above makes the max of each pool fluctuate over
|
||||
// time and, even though this is allowed according to the spec, it
|
||||
// broke several assumptions in the M&M framework (there were cases
|
||||
// where used would reach a value greater than max). So, for max we
|
||||
// use -1, which means "undefined" according to the spec.
|
||||
//
|
||||
// 4) Now, there is a very subtle issue with all the above. The
|
||||
// framework will call get_memory_usage() on the three pools
|
||||
// asynchronously. As a result, each call might get a different value
|
||||
// for, say, survivor_num which will yield inconsistent values for
|
||||
// eden_used, survivor_used, and old_gen_used (as survivor_num is used
|
||||
// in the calculation of all three). This would normally be
|
||||
// ok. However, it's possible that this might cause the sum of
|
||||
// eden_used, survivor_used, and old_gen_used to go over the max heap
|
||||
// size and this seems to sometimes cause JConsole (and maybe other
|
||||
// clients) to get confused. There's not a really an easy / clean
|
||||
// solution to this problem, due to the asynchrounous nature of the
|
||||
// framework.
|
||||
|
||||
|
||||
// This class is shared by the three G1 memory pool classes
|
||||
@ -116,22 +57,6 @@ class G1CollectedHeap;
|
||||
// (see comment above), we put the calculations in this class so that
|
||||
// we can easily share them among the subclasses.
|
||||
class G1MemoryPoolSuper : public CollectedMemoryPool {
|
||||
private:
|
||||
// It returns x - y if x > y, 0 otherwise.
|
||||
// As described in the comment above, some of the inputs to the
|
||||
// calculations we have to do are obtained concurrently and hence
|
||||
// may be inconsistent with each other. So, this provides a
|
||||
// defensive way of performing the subtraction and avoids the value
|
||||
// going negative (which would mean a very large result, given that
|
||||
// the parameter are size_t).
|
||||
static size_t subtract_up_to_zero(size_t x, size_t y) {
|
||||
if (x > y) {
|
||||
return x - y;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
protected:
|
||||
G1CollectedHeap* _g1h;
|
||||
|
||||
@ -148,13 +73,6 @@ protected:
|
||||
return (size_t) -1;
|
||||
}
|
||||
|
||||
static size_t overall_committed(G1CollectedHeap* g1h) {
|
||||
return g1h->capacity();
|
||||
}
|
||||
static size_t overall_used(G1CollectedHeap* g1h) {
|
||||
return g1h->used_unlocked();
|
||||
}
|
||||
|
||||
static size_t eden_space_committed(G1CollectedHeap* g1h);
|
||||
static size_t eden_space_used(G1CollectedHeap* g1h);
|
||||
|
||||
|
@ -112,3 +112,5 @@ d56b326ae0544fc16c3e0d0285876f3c82054db2 jdk7-b134
|
||||
4aa9916693dc1078580c1865e6f2584046851e5a jdk7-b135
|
||||
1759daa85d33800bd578853f9531f9de73f70fc7 jdk7-b136
|
||||
1d87f7460cde7f8f30af668490f82b52b879bfd8 jdk7-b137
|
||||
be3758943770a0a3dd4be6a1cb4063507c4d7062 jdk7-b138
|
||||
28c7c0ed2444607829ba11ad827f8d52197a2830 jdk7-b139
|
||||
|
@ -112,3 +112,5 @@ ba1fac1c2083196422a12130db174334179a4d44 jdk7-b130
|
||||
d5fc61f18043765705ef22b57a68c924ab2f1a5b jdk7-b135
|
||||
c81d289c9a532d6e94af3c09d856a2a20529040f jdk7-b136
|
||||
ccea3282991ce8b678e188cf32a8239f76ff3bfa jdk7-b137
|
||||
cc956c8a8255583535597e9a63db23c510e9a063 jdk7-b138
|
||||
c025078c8362076503bb83b8e4da14ba7b347940 jdk7-b139
|
||||
|
@ -25,8 +25,8 @@
|
||||
|
||||
drops.master.copy.base=${drops.dir}
|
||||
|
||||
jaxws_src.bundle.name=jdk7-jaxws2_2_2-2010_12_14.zip
|
||||
jaxws_src.bundle.md5.checksum=fee9ac72fabc96719eefc66ecaff4bc3
|
||||
jaxws_src.bundle.name=jdk7-jaxws2_2_4-b01-2011_04_08.zip
|
||||
jaxws_src.bundle.md5.checksum=9f35dd731c99ddb62db650aaf20e5bf4
|
||||
jaxws_src.master.bundle.dir=${drops.master.copy.base}
|
||||
jaxws_src.master.bundle.url.base=http://download.java.net/glassfish/components/jax-ws/openjdk/jdk7
|
||||
|
||||
|
@ -112,3 +112,5 @@ bdc069d3f9101f89ec3f81c2950ee2d68fa846d3 jdk7-b130
|
||||
d8ced728159fbb2caa8b6adb477fd8efdbbdf179 jdk7-b135
|
||||
aa13e7702cd9d8aca9aa38f1227f966990866944 jdk7-b136
|
||||
29296ea6529a418037ccce95903249665ef31c11 jdk7-b137
|
||||
60d3d55dcc9c31a30ced9caa6ef5c0dcd7db031d jdk7-b138
|
||||
d80954a89b49fda47c0c5cace65a17f5a758b8bd jdk7-b139
|
||||
|
@ -56,10 +56,6 @@ build: unpacker
|
||||
|
||||
vpath %.cpp $(SHARE_SRC)/native/$(PKGDIR)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
ifeq ($(STANDALONE),true)
|
||||
ZIPOBJDIR = $(OUTPUTDIR)/tmp/sun/java.util.zip/zip/$(OBJDIRNAME)
|
||||
|
||||
@ -131,8 +127,9 @@ prop:
|
||||
pack200-tool:
|
||||
$(call make-launcher, pack200, com.sun.java.util.jar.pack.Driver, , --pack)
|
||||
|
||||
# ignore mapfile for non-product binary
|
||||
unpacker:
|
||||
$(MAKE) $(UNPACK_EXE) STANDALONE=true LDMAPFLAGS_OPT= LDMAPFLAGS_DBG=
|
||||
$(MAKE) $(UNPACK_EXE) STANDALONE=true LDMAPFLAGS_DBG=
|
||||
|
||||
ifeq ($(PLATFORM), windows)
|
||||
IMVERSIONVALUE=$(JDK_MINOR_VERSION).$(JDK_MICRO_VERSION).$(JDK_UPDATE_VER).$(COOKED_BUILD_NUMBER)
|
||||
@ -147,8 +144,14 @@ winres::
|
||||
$(ECHO) "Resource files not required for Unix"
|
||||
endif
|
||||
|
||||
# Mapfile-vers.gmk, does not copy over the mapfile-vers-unpack200, when
|
||||
# the make utiliy is re-invoked, as in this case. In order to workaround
|
||||
# this special case, the mapfile required for the unpack200 command, is
|
||||
# explicitly copied over to the expected location.
|
||||
$(UNPACK_EXE): $(UNPACK_EXE_FILES_o) updatefiles winres
|
||||
$(prep-target)
|
||||
$(RM) $(TEMPDIR)/mapfile-vers
|
||||
$(CP) mapfile-vers-unpack200 $(TEMPDIR)/mapfile-vers
|
||||
$(LINKER) $(LDDFLAGS) $(UNPACK_EXE_FILES_o) $(RES) $(LIBCXX) $(LDOUTPUT)$(TEMPDIR)/unpack200$(EXE_SUFFIX)
|
||||
ifdef MT
|
||||
$(MT) /manifest $(OBJDIR)/unpack200$(EXE_SUFFIX).manifest /outputresource:$(TEMPDIR)/unpack200$(EXE_SUFFIX);#1
|
||||
|
31
jdk/make/com/sun/java/pack/mapfile-vers-unpack200
Normal file
31
jdk/make/com/sun/java/pack/mapfile-vers-unpack200
Normal file
@ -0,0 +1,31 @@
|
||||
#
|
||||
# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation. Oracle designates this
|
||||
# particular file as subject to the "Classpath" exception as provided
|
||||
# by Oracle in the LICENSE file that accompanied this code.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
|
||||
# Define library interface.
|
||||
|
||||
SUNWprivate_1.1 {
|
||||
local:
|
||||
*;
|
||||
};
|
@ -52,8 +52,8 @@ ifeq ($(VARIANT), OPT)
endif

# If we are re-ordering functions in this solaris library, we need to make
# sure that -xF is added to the compile lines. This option is critical and
# enables the functions to be reordered.
# sure that -xF is added to the compile lines. This option is critical and
# enables the functions to be reordered.
ifdef FILES_reorder
CFLAGS_OPT += -xF
CXXFLAGS_OPT += -xF
@ -76,7 +76,6 @@ endif
endif # PLATFORM

ifeq ($(PLATFORM), linux)
ifeq ($(VARIANT), OPT)
@ -55,6 +55,11 @@ program_default_rule: all
program: $(ACTUAL_PROGRAM)

# reuse the mapfiles in the launcher's directory, the same should
# be applicable to the tool launchers as well.
FILES_m = $(BUILDDIR)/java/main/java/mapfile-$(ARCH)
include $(BUILDDIR)/common/Mapfile-vers.gmk

include $(JDK_TOPDIR)/make/common/Rules.gmk

ifdef NEVER_ACT_AS_SERVER_CLASS_MACHINE
@ -885,12 +885,18 @@ else
ABS_DB_PATH :=$(call FullPath,$(CLOSED_SHARE_SRC)/db)
DB_ZIP_LIST = $(shell $(LS) $(ABS_DB_PATH)/*.zip 2>/dev/null)

# Java DB image. Move the Java DB demo directory into the JDK's demo
# dir and in the process, rename it to db. Also remove index.html,
# since it presumes docs are co-located. Also remove register.html (no
# longer relevant).
initial-image-jdk-db: $(DB_ZIP_LIST)
$(MKDIR) -p $(JDK_IMAGE_DIR)/db
for d in $(DB_ZIP_LIST); do \
($(CD) $(JDK_IMAGE_DIR)/db && $(UNZIP) -o $$d); \
done
$(RM) -rf $(DEMODIR)/db
$(MV) $(JDK_IMAGE_DIR)/db/demo $(DEMODIR)/db
$(RM) $(JDK_IMAGE_DIR)/db/index.html $(JDK_IMAGE_DIR)/db/register.html
endif

# Standard jdk image
@ -218,11 +218,7 @@ ifdef OPENJDK
else
LAUNCHER_NAME = java
PRODUCT_NAME = Java(TM)
ifeq ($(J4B), true)
PRODUCT_SUFFIX = SE Runtime Environment for Business
else
PRODUCT_SUFFIX = SE Runtime Environment
endif
PRODUCT_SUFFIX = SE Runtime Environment
JDK_RC_PLATFORM_NAME = Platform SE
COMPANY_NAME = Oracle Corporation
endif
@ -53,7 +53,7 @@ DEV_DOCS_URL-7 = http://download.oracle.com/javase/7/docs/index.html
DEV_DOCS_URL = $(DEV_DOCS_URL-$(JDK_MINOR_VERSION))

# Url to Java Language Spec
JLS3_URL = http://java.sun.com/docs/books/jls/
#JLS3_URL = http://java.sun.com/docs/books/jls/

# Common Java trademark line
JAVA_TRADEMARK_LINE = Java is a trademark or registered trademark of \
@ -293,8 +293,8 @@ COREAPI_HEADER = \
<strong>Java$(TRADEMARK) Platform<br>Standard Ed. $(JDK_MINOR_VERSION)</strong>

# Java language specification cite
TAG_JLS3 = jls3:a:See <cite><a href="$(JLS3_URL)"> \
The Java Language Specification, Third Edition</a></cite>:
TAG_JLS = jls:a:See <cite> \
The Java™ Language Specification</cite>:

# Overview file for core apis
COREAPI_OVERVIEW = $(SHARE_SRC)/classes/overview-core.html
@ -329,7 +329,7 @@ $(COREAPI_OPTIONS_FILE): $(COREAPI_OVERVIEW)
$(call OptionPair,-tag,specdefault:X) ; \
$(call OptionPair,-tag,Note:X) ; \
$(call OptionPair,-tag,ToDo:X) ; \
$(call OptionPair,-tag,$(TAG_JLS3)) ; \
$(call OptionPair,-tag,$(TAG_JLS)) ; \
$(call OptionOnly,-splitIndex) ; \
$(call OptionPair,-overview,$(COREAPI_OVERVIEW)) ; \
$(call OptionPair,-doctitle,$(COREAPI_DOCTITLE)) ; \
@ -1081,6 +1081,7 @@ $(TREEAPI_OPTIONS_FILE):
$(call OptionPair,-doctitle,$(TREEAPI_DOCTITLE)) ; \
$(call OptionPair,-windowtitle,$(TREEAPI_WINDOWTITLE) $(DRAFT_WINTITLE));\
$(call OptionPair,-header,$(TREEAPI_HEADER)$(DRAFT_HEADER)) ; \
$(call OptionPair,-tag,$(TAG_JLS)) ; \
$(call OptionPair,-bottom,$(TREEAPI_BOTTOM)$(DRAFT_BOTTOM)) ; \
$(call OptionTrip,-group,$(TREEAPI_GROUPNAME),$(TREEAPI_REGEXP)); \
$(call OptionTrip,-linkoffline,$(TREEAPI2COREAPI),$(COREAPI_DOCSDIR)/); \
@ -189,7 +189,6 @@ JAVA_JAVA_java = \
java/util/ListResourceBundle.java \
sun/util/EmptyListResourceBundle.java \
java/util/Locale.java \
sun/util/locale/AsciiUtil.java \
sun/util/locale/BaseLocale.java \
sun/util/locale/Extension.java \
sun/util/locale/InternalLocaleBuilder.java \
@ -197,6 +196,7 @@ JAVA_JAVA_java = \
sun/util/locale/LocaleExtensions.java \
sun/util/locale/LocaleObjectCache.java \
sun/util/locale/LocaleSyntaxException.java \
sun/util/locale/LocaleUtils.java \
sun/util/locale/ParseStatus.java \
sun/util/locale/StringTokenIterator.java \
sun/util/locale/UnicodeLocaleExtension.java \
@ -61,5 +61,4 @@ OTHER_CPPFLAGS += -DLAUNCHER_NAME='"$(LAUNCHER_NAME)"'
ifeq ($(PLATFORM), solaris)
LDFLAGS += -R$(OPENWIN_LIB)
LDFLAGS += -M mapfile-$(ARCH)
endif
@ -1,5 +1,5 @@
#
# Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
# interested in declaring a version, simply scoping the file is sufficient.
#

{
SUNWprivate_1.1 {
global:
main; # Provides basic adb symbol offsets
environ; # Public symbols and required by Java run time
@ -1,5 +1,5 @@
#
# Copyright (c) 2004, 2005, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
# interested in declaring a version, simply scoping the file is sufficient.
#

{
SUNWprivate_1.1 {
global:
main; # Provides basic adb symbol offsets
environ; # Public symbols and required by Java run time
@ -1,5 +1,5 @@
#
# Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
# interested in declaring a version, simply scoping the file is sufficient.
#

{
SUNWprivate_1.1 {
global:
main; # Provides basic adb symbol offsets
environ; # Public symbols and required by Java run time
@ -1,5 +1,5 @@
#
# Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
# interested in declaring a version, simply scoping the file is sufficient.
#

{
SUNWprivate_1.1 {
global:
main; # Provides basic adb symbol offsets
environ; # Public symbols and required by Java run time
@ -58,7 +58,7 @@ endef
SIGNING_KEY_DIR = /security/ws/JCE-signing/src
SIGNING_KEYSTORE = $(SIGNING_KEY_DIR)/KeyStore.jks
SIGNING_PASSPHRASE = $(SIGNING_KEY_DIR)/passphrase.txt
SIGNING_ALIAS = jce_rsa
SIGNING_ALIAS = oracle_jce_rsa

#
# Defines for signing the various jar files.
@ -519,9 +519,8 @@ JDWP "Java(tm) Debug Wire Protocol"
"Both the JNI signature and the generic signature are "
"returned for each class. "
"Generic signatures are described in the signature attribute "
"section in the "
"<a href=\"http://java.sun.com/docs/books/vmspec\">
"Java Virtual Machine Specification, 3rd Edition.</a> "
"section in "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"Since JDWP version 1.5."
(Out
)
@ -623,8 +622,8 @@ JDWP "Java(tm) Debug Wire Protocol"
(referenceType refType "The reference type ID.")
)
(Reply
(int modBits "Modifier bits as defined in the "
"<a href=\"http://java.sun.com/docs/books/vmspec/html/ClassFile.doc.html\">VM Specification</a>")
(int modBits "Modifier bits as defined in Chapter 4 of "
"<cite>The Java™ Virtual Machine Specification</cite>")
)
(ErrorSet
(Error INVALID_CLASS "refType is not the ID of a reference "
@ -651,8 +650,8 @@ JDWP "Java(tm) Debug Wire Protocol"
(int modBits "The modifier bit flags (also known as access flags) "
"which provide additional information on the "
"field declaration. Individual flag values are "
"defined in the "
"<a href=\"http://java.sun.com/docs/books/vmspec/html/ClassFile.doc.html\">VM Specification</a>."
"defined in Chapter 4 of "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"In addition, The <code>0xf0000000</code> bit identifies "
"the field as synthetic, if the synthetic attribute "
"<a href=\"#JDWP_VirtualMachine_Capabilities\">capability</a> is available.")
@ -686,8 +685,8 @@ JDWP "Java(tm) Debug Wire Protocol"
(int modBits "The modifier bit flags (also known as access flags) "
"which provide additional information on the "
"method declaration. Individual flag values are "
"defined in the "
"<a href=\"http://java.sun.com/docs/books/vmspec/html/ClassFile.doc.html\">VM Specification</a>."
"defined in Chapter 4 of "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"In addition, The <code>0xf0000000</code> bit identifies "
"the method as synthetic, if the synthetic attribute "
"<a href=\"#JDWP_VirtualMachine_Capabilities\">capability</a> is available.")
@ -773,8 +772,8 @@ JDWP "Java(tm) Debug Wire Protocol"
(Command Status=9
"Returns the current status of the reference type. The status "
"indicates the extent to which the reference type has been "
"initialized, as described in the "
"<a href=\"http://java.sun.com/docs/books/vmspec/html/Concepts.doc.html#16491\">VM specification</a>. "
"initialized, as described in section 2.1.6 of "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"If the class is linked the PREPARED and VERIFIED bits in the returned status bits "
"will be set. If the class is initialized the INITIALIZED bit in the returned "
"status bits will be set. If an error occurred during initialization then the "
@ -852,9 +851,8 @@ JDWP "Java(tm) Debug Wire Protocol"
"Returns the JNI signature of a reference type along with the "
"generic signature if there is one. "
"Generic signatures are described in the signature attribute "
"section in the "
"<a href=\"http://java.sun.com/docs/books/vmspec\">
"Java Virtual Machine Specification, 3rd Edition.</a> "
"section in "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"Since JDWP version 1.5."
"<p>
(Out
@ -882,9 +880,8 @@ JDWP "Java(tm) Debug Wire Protocol"
"by the compiler. "
"Fields are returned in the order they occur in the class file. "
"Generic signatures are described in the signature attribute "
"section in the "
"<a href=\"http://java.sun.com/docs/books/vmspec\">
"Java Virtual Machine Specification, 3rd Edition.</a> "
"section in "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"Since JDWP version 1.5."
(Out
(referenceType refType "The reference type ID.")
@ -900,8 +897,8 @@ JDWP "Java(tm) Debug Wire Protocol"
(int modBits "The modifier bit flags (also known as access flags) "
"which provide additional information on the "
"field declaration. Individual flag values are "
"defined in the "
"<a href=\"http://java.sun.com/docs/books/vmspec/html/ClassFile.doc.html\">VM Specification</a>."
"defined in Chapter 4 of "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"In addition, The <code>0xf0000000</code> bit identifies "
"the field as synthetic, if the synthetic attribute "
"<a href=\"#JDWP_VirtualMachine_Capabilities\">capability</a> is available.")
@ -925,9 +922,8 @@ JDWP "Java(tm) Debug Wire Protocol"
"if present, and any synthetic methods created by the compiler. "
"Methods are returned in the order they occur in the class file. "
"Generic signatures are described in the signature attribute "
"section in the "
"<a href=\"http://java.sun.com/docs/books/vmspec\">
"Java Virtual Machine Specification, 3rd Edition.</a> "
"section in "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"Since JDWP version 1.5."
(Out
(referenceType refType "The reference type ID.")
@ -943,8 +939,8 @@ JDWP "Java(tm) Debug Wire Protocol"
(int modBits "The modifier bit flags (also known as access flags) "
"which provide additional information on the "
"method declaration. Individual flag values are "
"defined in the "
"<a href=\"http://java.sun.com/docs/books/vmspec/html/ClassFile.doc.html\">VM Specification</a>."
"defined in Chapter 4 of "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"In addition, The <code>0xf0000000</code> bit identifies "
"the method as synthetic, if the synthetic attribute "
"<a href=\"#JDWP_VirtualMachine_Capabilities\">capability</a> is available.")
@ -1006,8 +1002,8 @@ JDWP "Java(tm) Debug Wire Protocol"
)
(Command ConstantPool=18
"Return the raw bytes of the constant pool in the format of the "
"constant_pool item of the Class File Format in the "
"Java Virtual Machine Specification. "
"constant_pool item of the Class File Format in "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"<p>Since JDWP version 1.6. Requires canGetConstantPool capability - see "
"<a href=\"#JDWP_VirtualMachine_CapabilitiesNew\">CapabilitiesNew</a>.""
(Out
@ -1016,7 +1012,8 @@ JDWP "Java(tm) Debug Wire Protocol"
(Reply
(int count "Total number of constant pool entries plus one. This "
"corresponds to the constant_pool_count item of the "
"Class File Format in the Java Virtual Machine Specification. ")
"Class File Format in "
"<cite>The Java™ Virtual Machine Specification</cite>. ")
(Repeat bytes
(byte cpbytes "Raw bytes of constant pool")
)
@ -1324,7 +1321,8 @@ JDWP "Java(tm) Debug Wire Protocol"
)
)
(Command Bytecodes=3
"Retrieve the method's bytecodes as defined in the JVM Specification."
"Retrieve the method's bytecodes as defined in "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"Requires canGetBytecodes capability - see "
"<a href=\"#JDWP_VirtualMachine_CapabilitiesNew\">CapabilitiesNew</a>."
(Out
@ -1379,9 +1377,8 @@ JDWP "Java(tm) Debug Wire Protocol"
"instance methods, the \"this\" reference is included in the "
"table. Also, synthetic variables may be present. "
"Generic signatures are described in the signature attribute "
"section in the "
"<a href=\"http://java.sun.com/docs/books/vmspec\">
"Java Virtual Machine Specification, 3rd Edition.</a> "
"section in "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"Since JDWP version 1.5."
(Out
(referenceType refType "The class.")
@ -1970,8 +1967,9 @@ JDWP "Java(tm) Debug Wire Protocol"
"<p>"
"The method which will return early is referred to as the "
"called method. The called method is the current method (as "
"defined by the Frames section in the Java Virtual Machine "
"Specification) for the specified thread at the time this command "
"defined by the Frames section in "
"<cite>The Java™ Virtual Machine Specification</cite>) "
"for the specified thread at the time this command "
"is received. "
"<p>"
"The specified thread must be suspended. "
@ -1,5 +1,5 @@
#
# Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -38,7 +38,7 @@ else
endif

SUBDIRS =
SUBDIRS_misc = nio scripting nbproject
SUBDIRS_misc = nio scripting nbproject forkjoin
SUBDIRS_enterprise = $(WEBSERVICES_SUBDIR)
SUBDIRS_management = jmx
41
jdk/make/mksample/forkjoin/Makefile
Normal file
@ -0,0 +1,41 @@
#
# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

#
# Makefile for building all the samples under the forkjoin subdirectory.
#

BUILDDIR = ../..
PRODUCT = java
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = mergesort
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

clobber clean ::
$(RM) -r $(SAMPLEDIR)/forkjoin
51
jdk/make/mksample/forkjoin/mergesort/Makefile
Normal file
@ -0,0 +1,51 @@
#
# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

#
# Makefile for the forkjoin/mergesort sample code
#

BUILDDIR = ../../..

PRODUCT = java

include $(BUILDDIR)/common/Defs.gmk

SAMPLE_SRC_DIR = $(SHARE_SRC)/sample/forkjoin/mergesort
SAMPLE_DST_DIR = $(SAMPLEDIR)/forkjoin/mergesort

SAMPLE_FILES = \
$(SAMPLE_DST_DIR)/MergeDemo.java \
$(SAMPLE_DST_DIR)/MergeSort.java

all build: $(SAMPLE_FILES)

$(SAMPLE_DST_DIR)/%: $(SAMPLE_SRC_DIR)/%
$(install-file)

clean clobber:
$(RM) -r $(SAMPLE_DST_DIR)

.PHONY: all build clean clobber
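Editor's note: the makefile above only installs MergeDemo.java and MergeSort.java from $(SHARE_SRC)/sample/forkjoin/mergesort; the sample sources themselves are not shown in this diff. As a rough, hypothetical sketch of the JDK 7 fork/join pattern such a sample demonstrates (the class name, threshold, and array size below are illustrative choices, not the sample's actual code), a RecursiveAction can split the range, sort the halves in parallel, and merge them:

import java.util.Arrays;
import java.util.Random;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveAction;

// Illustrative only: split the range, sort both halves in parallel, then merge.
public class MergeSortSketch extends RecursiveAction {

    private static final int THRESHOLD = 1 << 10; // below this, just sort sequentially

    private final int[] data;
    private final int lo;   // inclusive
    private final int hi;   // exclusive

    MergeSortSketch(int[] data, int lo, int hi) {
        this.data = data;
        this.lo = lo;
        this.hi = hi;
    }

    @Override
    protected void compute() {
        if (hi - lo <= THRESHOLD) {
            Arrays.sort(data, lo, hi);
            return;
        }
        int mid = (lo + hi) >>> 1;
        invokeAll(new MergeSortSketch(data, lo, mid),
                  new MergeSortSketch(data, mid, hi));
        merge(mid);
    }

    // Standard merge that copies only the left half into a temporary array.
    private void merge(int mid) {
        int[] left = Arrays.copyOfRange(data, lo, mid);
        int i = 0, j = mid, k = lo;
        while (i < left.length && j < hi) {
            data[k++] = (left[i] <= data[j]) ? left[i++] : data[j++];
        }
        while (i < left.length) {
            data[k++] = left[i++];
        }
    }

    public static void main(String[] args) {
        Random rnd = new Random(42);
        int[] a = new int[1 << 16];
        for (int i = 0; i < a.length; i++) {
            a[i] = rnd.nextInt();
        }
        new ForkJoinPool().invoke(new MergeSortSketch(a, 0, a.length));
        System.out.println("sorted = " + isSorted(a));
    }

    private static boolean isSorted(int[] a) {
        for (int i = 1; i < a.length; i++) {
            if (a[i - 1] > a[i]) {
                return false;
            }
        }
        return true;
    }
}

The threshold trades task-creation overhead against parallelism; the real MergeDemo/MergeSort sources may structure this differently.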
@ -1,5 +1,5 @@
#
# Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,7 @@ BUILDDIR = ../..
PRODUCT = java
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = file multicast server
SUBDIRS = chatserver file multicast server
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
56
jdk/make/mksample/nio/chatserver/Makefile
Normal file
@ -0,0 +1,56 @@
#
# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

#
# Makefile for the nio/chatserver sample code
#

BUILDDIR = ../../..

PRODUCT = java

include $(BUILDDIR)/common/Defs.gmk

SAMPLE_SRC_DIR = $(SHARE_SRC)/sample/nio/chatserver
SAMPLE_DST_DIR = $(SAMPLEDIR)/nio/chatserver

SAMPLE_FILES = \
$(SAMPLE_DST_DIR)/ChatServer.java \
$(SAMPLE_DST_DIR)/Client.java \
$(SAMPLE_DST_DIR)/ClientReader.java \
$(SAMPLE_DST_DIR)/DataReader.java \
$(SAMPLE_DST_DIR)/MessageReader.java \
$(SAMPLE_DST_DIR)/NameReader.java \
$(SAMPLE_DST_DIR)/README.txt

all build: $(SAMPLE_FILES)

$(SAMPLE_DST_DIR)/%: $(SAMPLE_SRC_DIR)/%
$(install-file)

clean clobber:
$(RM) -r $(SAMPLE_DST_DIR)

.PHONY: all build clean clobber
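Editor's note: the chatserver sample installed above (ChatServer.java, Client.java, and the *Reader.java helpers) is built around the asynchronous channel API added in JDK 7; its sources are not part of this diff. A minimal, hypothetical accept loop with java.nio.channels.AsynchronousServerSocketChannel, assuming port 5000 and a plain ASCII prompt (not the sample's actual code), looks roughly like this:

import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousServerSocketChannel;
import java.nio.channels.AsynchronousSocketChannel;
import java.nio.channels.CompletionHandler;
import java.nio.charset.Charset;

// Illustrative only: accept clients asynchronously, send a one-off prompt, keep accepting.
public class AsyncAcceptSketch {
    public static void main(String[] args) throws Exception {
        final AsynchronousServerSocketChannel server =
                AsynchronousServerSocketChannel.open().bind(new InetSocketAddress(5000));

        server.accept(null, new CompletionHandler<AsynchronousSocketChannel, Void>() {
            @Override
            public void completed(AsynchronousSocketChannel client, Void attachment) {
                server.accept(null, this); // queue the next accept right away
                ByteBuffer prompt = Charset.forName("US-ASCII").encode("Name: ");
                client.write(prompt);      // fire-and-forget write, enough for a sketch
            }

            @Override
            public void failed(Throwable exc, Void attachment) {
                exc.printStackTrace();
            }
        });

        Thread.sleep(Long.MAX_VALUE);      // keep the default channel group's threads alive
    }
}

Each accepted AsynchronousSocketChannel would then be handed to per-client read/write handlers, which is presumably the role of the Client and *Reader classes listed in the makefile above.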
@ -83,6 +83,11 @@ SUBDIRS_desktop = audio $(RENDER_SUBDIR) image \
SUBDIRS_management = management
SUBDIRS_misc = $(ORG_SUBDIR) rmi $(JDBC_SUBDIR) tracing
SUBDIRS_tools = native2ascii serialver tools jconsole

ifndef OPENJDK
SUBDIRS += usagetracker
endif

include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
40
jdk/make/sun/usagetracker/Makefile
Normal file
@ -0,0 +1,40 @@
#
# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

BUILDDIR = ../..
PACKAGE = sun.usagetracker
PRODUCT = sun
include $(BUILDDIR)/common/Defs.gmk

#
# Files
#
AUTO_FILES_JAVA_DIRS = sun/usagetracker

#
# Rules
#
include $(BUILDDIR)/common/Classes.gmk
@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,6 @@ char *JLI_StringDup(const char *s1);
void JLI_MemFree(void *ptr);
int JLI_StrCCmp(const char *s1, const char* s2);

#define JLI_StrLen(p1) strlen((p1))
#define JLI_StrChr(p1, p2) strchr((p1), (p2))
#define JLI_StrRChr(p1, p2) strrchr((p1), (p2))
@ -48,6 +47,7 @@ int JLI_StrCCmp(const char *s1, const char* s2);
#define JLI_StrSpn(p1, p2) strspn((p1), (p2))
#define JLI_StrCSpn(p1, p2) strcspn((p1), (p2))
#define JLI_StrPBrk(p1, p2) strpbrk((p1), (p2))
#define JLI_StrTok(p1, p2) strtok((p1), (p2))

/* On Windows lseek() is in io.h rather than the location dictated by POSIX. */
#ifdef _WIN32
@ -175,8 +175,8 @@ public final class TypeResolver {
/**
* Converts the given {@code type} to the corresponding class.
* This method implements the concept of type erasure,
* that is described in <a href="http://jscstage.sfbay.sun.com/docs/books/jls/third_edition/html/typesValues.html#4.6">section 4.6</a>
* of Java Language Specification.
* that is described in section 4.6 of
* <cite>The Java™ Language Specification</cite>.
*
* @param type the array of types to convert
* @return a corresponding class
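Editor's note: the reworded TypeResolver javadoc above cites JLS section 4.6, type erasure. As a tiny illustrative example of what erasure means at runtime (not part of this changeset), two differently parameterized lists share one erased class:

import java.util.ArrayList;
import java.util.List;

// Illustrative only: after erasure both lists are plain ArrayLists at runtime.
public class ErasureSketch {
    public static void main(String[] args) {
        List<String> strings = new ArrayList<String>();
        List<Integer> numbers = new ArrayList<Integer>();
        System.out.println(strings.getClass() == numbers.getClass()); // true
        System.out.println(strings.getClass().getName());             // java.util.ArrayList
    }
}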
@ -88,8 +88,8 @@ Jar File Specification :<a href="http://java.sun.com/j2se/1.3/docs/guide/jar/jar
http://java.sun.com/j2se/1.3/docs/guide/jar/jar.html</a></li>

<li>
Java Virtual Machine Specification : <a href="http://java.sun.com/docs/books/vmspec/2nd-edition/html/ClassFile.doc.html">
http://java.sun.com/docs/books/vmspec/2nd-edition/html/ClassFile.doc.html</a></li>
Class File Specification: Chapter 4 of
<em>The Java™ Virtual Machine Specification</em>

<li>
Hypertext Transfer Protocol -- HTTP/1.1 : <a href="http://www.ietf.org/rfc/rfc2616.txt">
@ -42,12 +42,9 @@ public interface Accessible {
* Returns the Java<sup><font size=-2>TM</font></sup>
* programming language modifiers, encoded in an integer.
* <p>
* The modifier encodings are defined in the
* <a href="http://java.sun.com/docs/books/vmspec/">Java Virtual Machine
* Specification</a>, in the <code>access_flag</code> tables for
* <a href="http://java.sun.com/docs/books/vmspec/2nd-edition/html/ClassFile.doc.html#75734">classes</a>,
* <a href="http://java.sun.com/docs/books/vmspec/2nd-edition/html/ClassFile.doc.html#88358">fields</a>, and
* <a href="http://java.sun.com/docs/books/vmspec/2nd-edition/html/ClassFile.doc.html#75568">methods</a>.
* The modifier encodings are defined in
* <cite>The Java™ Virtual Machine Specification</cite>
* in the <code>access_flag</code> tables for classes(section 4.1), fields(section 4.5), and methods(section 4.6).
*/
public int modifiers();
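Editor's note: for the common flags, the access_flag bit positions referenced in the Accessible javadoc above match the constants exposed by java.lang.reflect.Modifier, so a modifiers() value can be inspected with Modifier. A small hypothetical snippet (not part of this changeset):

import java.lang.reflect.Modifier;

// Illustrative only: decode an access_flags-style bit mask with java.lang.reflect.Modifier.
public class ModifierSketch {
    public static void main(String[] args) {
        int modBits = Modifier.PUBLIC | Modifier.STATIC | Modifier.FINAL; // 0x0019
        System.out.println("0x" + Integer.toHexString(modBits)); // 0x19
        System.out.println(Modifier.toString(modBits));          // public static final
        System.out.println(Modifier.isPublic(modBits));          // true
    }
}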
@ -77,11 +77,9 @@ public interface ArrayType extends ReferenceType {
* as specified in the array declaration.
* <P>
* Note: The component type of a array will always be
* created or loaded before the array - see the
* <a href="http://java.sun.com/docs/books/vmspec/">Java Virtual
* Machine Specification</a>, section
* <a href="http://java.sun.com/docs/books/vmspec/2nd-edition/html/ConstantPool.doc.html#79473">5.3.3
* Creating Array Classes</a>.
* created or loaded before the array - see
* <cite>The Java™ Virtual Machine Specification</cite>,
* section 5.3.3 - Creating Array Classes.
* However, although the component type will be loaded it may
* not yet be prepared, in which case the type will be returned
* but attempts to perform some operations on the returned type
@ -71,11 +71,9 @@ public interface ClassLoaderReference extends ObjectReference {
* <p>
* No ordering of the returned list is guaranteed.
* <p>
* See the revised
* <a href="http://java.sun.com/docs/books/vmspec/">Java
* Virtual Machine Specification</a> section
* <a href="http://java.sun.com/docs/books/vmspec/2nd-edition/html/ConstantPool.doc.html#72007">5.3
* Creation and Loading</a>
* See
* <cite>The Java™ Virtual Machine Specification</cite>,
* section 5.3 - Creation and Loading
* for more information on the initiating classloader.
* <p>
* Note that unlike {@link #definedClasses()}
@ -60,9 +60,9 @@ package com.sun.jdi;
* is visible to the class loader of enclosing class. (That is, the
* class loader of the enclosing class must be an <i>initiating</i> class
* loader for the class in question.)
* See the <a href="http://java.sun.com/docs/books/vmspec/">Java
* Virtual Machine Specification</a> for
* more details.
* See
* <cite>The Java™ Virtual Machine Specification</cite>
* for more details.
*
* @author Gordon Hirsch
* @since 1.3
@ -164,10 +164,8 @@ public interface ClassType extends ReferenceType {
* component type is passed. The component type can be a primitive type.
* Autoboxing is not supported.
*
* See the <a href="http://java.sun.com/docs/books/jls/">
* Java Language Specification</a>.
* section
* <a href="http://java.sun.com/docs/books/jls/second_edition/html/conversions.doc.html#184206">5.2</a>
* See Section 5.2 of
* <cite>The Java™ Language Specification</cite>
* for more information on assignment compatibility.
* <p>
* By default, all threads in the target VM are resumed while
@ -280,10 +278,8 @@ public interface ClassType extends ReferenceType {
* component type is passed. The component type can be a primitive type.
* Autoboxing is not supported.
*
* See the <a href="http://java.sun.com/docs/books/jls/">
* Java Language Specification</a>.
* section
* <a href="http://java.sun.com/docs/books/jls/second_edition/html/conversions.doc.html#184206">5.2</a>
* See section 5.2 of
* <cite>The Java™ Language Specification</cite>
* for more information on assignment compatibility.
* <p>
* By default, all threads in the target VM are resumed while
@ -102,9 +102,7 @@ public interface LocalVariable extends Mirror, Comparable<LocalVariable> {
/**
* Gets the generic signature for this variable if there is one.
* Generic signatures are described in the
* <a href="http://java.sun.com/docs/books/vmspec">
* "Java<sup><font size=-2>TM</font></sup>
* Virtual Machine Specification, 3rd Edition.</a>
* <cite>The Java™ Virtual Machine Specification</cite>.
*
* @return a string containing the generic signature, or <code>null</code>
* if there is no generic signature.
@ -164,10 +164,8 @@ public interface Method extends TypeComponent, Locatable, Comparable<Method> {

/**
* Determine if this method is a bridge method. Bridge
* methods are defined in the
* <a href="http://java.sun.com/docs/books/jls">
* "Java<sup><font size=-2>TM</font></sup>
* Language Specification, 3rd Edition.</a>
* methods are defined in
* <cite>The Java™ Language Specification</cite>.
*
* @return <code>true</code> if the method is a bridge method,
* false otherwise.
@ -118,10 +118,8 @@ public interface ObjectReference extends Value
* enclosing class's class loader). Primitive values must be
* either assignment compatible with the field type or must be
* convertible to the field type without loss of information.
* See the <a href="http://java.sun.com/docs/books/jls/">
* Java<sup><font size=-2>TM</font></sup> Language Specification</a>.
* section
* <a href="http://java.sun.com/docs/books/jls/second_edition/html/conversions.doc.html#184206">5.2</a>
* See section 5.2 of
* <cite>The Java™ Language Specification</cite>
* for more information on assignment
* compatibility.
*
@ -182,18 +180,13 @@ public interface ObjectReference extends Value
* component type is passed. The component type can be a primitive type.
* Autoboxing is not supported.
*
* See the <a href="http://java.sun.com/docs/books/jls/">
* Java Language Specification</a>.
* section
* <a href="http://java.sun.com/docs/books/jls/second_edition/html/conversions.doc.html#184206">5.2</a>
* See section 5.2 of
* <cite>The Java™ Language Specification</cite>
* for more information on assignment compatibility.
* <p>
* By default, the method is invoked using dynamic lookup as
* documented in the
* <a href="http://java.sun.com/docs/books/jls/">
* Java Language Specification</a>
* second edition, section
* <a href="http://java.sun.com/docs/books/jls/second_edition/html/expressions.doc.html#45606">15.12.4.4</a>;
* documented in section 15.12.4.4 of
* <cite>The Java™ Language Specification</cite>
* in particular, overriding based on the runtime type of the object
* mirrored by this {@link ObjectReference} will occur. This
* behavior can be changed by specifying the
@ -30,9 +30,8 @@ import java.util.Map;

/**
* The type of an object in a target VM. ReferenceType encompasses
* classes, interfaces, and array types as defined in the
* <a href="http://java.sun.com/docs/books/jls/">
* Java<sup><font size=-2>TM</font></sup> Language Specification</a>.
* classes, interfaces, and array types as defined in
* <cite>The Java™ Language Specification</cite>.
* All ReferenceType objects belong to one of the following
* subinterfaces:
* {@link ClassType} for classes,
@ -98,9 +97,7 @@ public interface ReferenceType
/**
* Gets the generic signature for this type if there is one.
* Generic signatures are described in the
* <a href="http://java.sun.com/docs/books/vmspec">
* "Java<sup><font size=-2>TM</font></sup>
* Virtual Machine Specification, 3rd Edition.</a>
* <cite>The Java™ Virtual Machine Specification</cite>.
*
* @return a string containing the generic signature, or <code>null</code>
* if there is no generic signature.
@ -74,9 +74,7 @@ public interface TypeComponent extends Mirror, Accessible {
/**
* Gets the generic signature for this TypeComponent if there is one.
* Generic signatures are described in the
* <a href="http://java.sun.com/docs/books/vmspec">
* "Java<sup><font size=-2>TM</font></sup>
* Virtual Machine Specification, 3rd Edition.</a>
* <cite>The Java™ Virtual Machine Specification</cite>.
*
* @return a string containing the generic signature, or <code>null</code>
* if there is no generic signature.
Some files were not shown because too many files have changed in this diff.