This commit is contained in:
Tim Bell 2008-04-25 15:18:45 -07:00
commit 25f3229c2b
418 changed files with 15406 additions and 10548 deletions

View File

@@ -1 +1,2 @@
 1cc8dd79fd1cd13d36b385196271a29632c67c3b jdk7-b24
+bf2517e15f0c0f950e5b3143c4ca11e2df73dcc1 jdk7-b25

View File

@@ -1 +1,2 @@
 cfeea66a3fa8ca3686a7cfa2d0ce8ab0169f168d jdk7-b24
+cbc8ad9dd0e085a607427ea35411990982f19a36 jdk7-b25

View File

@@ -1 +1,2 @@
 55540e827aef970ecc010b7e06b912d991c8e3ce jdk7-b24
+5e61d5df62586474414d1058e9186441aa908f51 jdk7-b25

View File

@@ -1 +1,2 @@
 a61af66fc99eb5ec9d50c05b0c599757b1289ceb jdk7-b24
+7836be3e92d0a4f9ee7566f602c91f5609534e66 jdk7-b25

View File

@@ -518,10 +518,10 @@ static bool core_get_lwp_regs(struct ps_prochandle* ph, lwpid_t lwp_id,
 }
 
 static ps_prochandle_ops core_ops = {
-release: core_release,
-p_pread: core_read_data,
-p_pwrite: core_write_data,
-get_lwp_regs: core_get_lwp_regs
+.release= core_release,
+.p_pread= core_read_data,
+.p_pwrite= core_write_data,
+.get_lwp_regs= core_get_lwp_regs
 };
 
 // read regs and create thread from NT_PRSTATUS entries from core file

View File

@@ -291,10 +291,10 @@ static void process_cleanup(struct ps_prochandle* ph) {
 }
 
 static ps_prochandle_ops process_ops = {
-release: process_cleanup,
-p_pread: process_read_data,
-p_pwrite: process_write_data,
-get_lwp_regs: process_get_lwp_regs
+.release= process_cleanup,
+.p_pread= process_read_data,
+.p_pwrite= process_write_data,
+.get_lwp_regs= process_get_lwp_regs
 };
 
 // attach to the process. One and only one exposed stuff
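
For context: the two hunks above replace GCC's legacy "label:" struct initializer syntax with C99 designated initializers (".member ="), which both gcc and Sun Studio accept -- consistent with the USE_SUNCC build support added below. A minimal C sketch of the two spellings (the struct and functions here are hypothetical stand-ins for ps_prochandle_ops):

    #include <stdio.h>

    struct ops {
        void (*release)(void);
    };

    static void my_release(void) { puts("release"); }

    /* GCC-only legacy form:  struct ops o = { release: my_release };  */
    /* Standard C99 designated initializer, accepted by both compilers: */
    static struct ops o = { .release = my_release };

    int main(void) { o.release(); return 0; }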

View File

@@ -80,6 +80,11 @@ ifneq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
 MFLAGS += " LP64=1 "
 endif
 
+# pass USE_SUNCC further, through MFLAGS
+ifdef USE_SUNCC
+MFLAGS += " USE_SUNCC=1 "
+endif
+
 # The following renders pathnames in generated Makefiles valid on
 # machines other than the machine containing the build tree.
 #

View File

@@ -35,6 +35,8 @@ CFLAGS += -DVM_LITTLE_ENDIAN
 CFLAGS += -D_LP64=1
 
 # The serviceability agent relies on frame pointer (%rbp) to walk thread stack
-CFLAGS += -fno-omit-frame-pointer
+ifndef USE_SUNCC
+CFLAGS += -fno-omit-frame-pointer
+endif
 
 OPT_CFLAGS/compactingPermGenGen.o = -O1

View File

@@ -63,7 +63,11 @@ QUIETLY$(MAKE_VERBOSE) = @
 # For now, until the compiler is less wobbly:
 TESTFLAGS = -Xbatch -showversion
 
+ifdef USE_SUNCC
+PLATFORM_FILE = $(GAMMADIR)/build/$(OS_FAMILY)/platform_$(BUILDARCH).suncc
+else
 PLATFORM_FILE = $(GAMMADIR)/build/$(OS_FAMILY)/platform_$(BUILDARCH)
+endif
 
 ifdef FORCE_TIERED
 ifeq ($(VARIANT),tiered)

View File

@@ -0,0 +1,93 @@
#
# Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
# CA 95054 USA or visit www.sun.com if you need additional information or
# have any questions.
#
#
#------------------------------------------------------------------------
# CC, CPP & AS
CPP = CC
CC = cc
AS = $(CC) -c
ARCHFLAG = $(ARCHFLAG/$(BUILDARCH))
ARCHFLAG/i486 = -m32
ARCHFLAG/amd64 = -m64
CFLAGS += $(ARCHFLAG)
AOUT_FLAGS += $(ARCHFLAG)
LFLAGS += $(ARCHFLAG)
ASFLAGS += $(ARCHFLAG)
#------------------------------------------------------------------------
# Compiler flags
# position-independent code
PICFLAG = -KPIC
CFLAGS += $(PICFLAG)
# no more exceptions
CFLAGS += -features=no%except
# Reduce code bloat by reverting back to 5.0 behavior for static initializers
CFLAGS += -features=no%split_init
# allow zero sized arrays
CFLAGS += -features=zla
# Use C++ Interpreter
ifdef CC_INTERP
CFLAGS += -DCC_INTERP
endif
# We don't need libCstd.so and librwtools7.so, only libCrun.so
CFLAGS += -library=Crun
LIBS += -lCrun
CFLAGS += -mt
LFLAGS += -mt
# Compiler warnings are treated as errors
#WARNINGS_ARE_ERRORS = -errwarn=%all
CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS)
# Special cases
CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))
# The flags to use for an Optimized build
OPT_CFLAGS+=-xO4
OPT_CFLAGS/NOOPT=-xO0
#------------------------------------------------------------------------
# Linker flags
# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file.
MAPFLAG = -Wl,--version-script=FILENAME
# Use $(SONAMEFLAG:SONAME=soname) to specify the intrinsic name of a shared obj
SONAMEFLAG = -h SONAME
# Build shared library
SHARED_FLAG = -G
#------------------------------------------------------------------------
# Debug flags
DEBUG_CFLAGS += -g
FASTDEBUG_CFLAGS = -g0

View File

@@ -0,0 +1,17 @@
os_family = linux
arch = x86
arch_model = x86_64
os_arch = linux_x86
os_arch_model = linux_x86_64
lib_arch = amd64
compiler = sparcWorks
gnu_dis_arch = amd64
sysdefs = -DLINUX -DSPARC_WORKS -D_GNU_SOURCE -DAMD64

View File

@@ -0,0 +1,17 @@
os_family = linux
arch = x86
arch_model = x86_32
os_arch = linux_x86
os_arch_model = linux_x86_32
lib_arch = i386
compiler = sparcWorks
gnu_dis_arch = i386
sysdefs = -DLINUX -DSPARC_WORKS -D_GNU_SOURCE -DIA32

View File

@@ -19,7 +19,7 @@
 # Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 # CA 95054 USA or visit www.sun.com if you need additional information or
 # have any questions.
 #
 #
 # Must also specify if CPU is little endian
@@ -45,6 +45,10 @@ OPT_CFLAGS/os_solaris_x86_64.o = -xO1
 OPT_CFLAGS/generateOptoStub.o = -xO2
 OPT_CFLAGS/thread.o = -xO2
 
+# Work around for 6624782
+OPT_CFLAGS/instanceKlass.o = -Qoption ube -no_a2lf
+OPT_CFLAGS/objArrayKlass.o = -Qoption ube -no_a2lf
+
 else
 
 ifeq ("${Platform_compiler}", "gcc")
@@ -58,6 +62,6 @@ else
 # error
 _JUNK2_ := $(shell echo >&2 \
 "*** ERROR: this compiler is not yet supported by this code base!")
 @exit 1
 endif
 endif

View File

@@ -44,6 +44,10 @@ CPP=cl.exe
 # /Od Disable all optimizations
 #
 # NOTE: Normally following any of the above with a '-' will turn off that flag
+#
+# 6655385: For VS2003/2005 we now specify /Oy- (disable frame pointer
+# omission.) This has little to no effect on performance while vastly
+# improving the quality of crash log stack traces involving jvm.dll.
 
 # These are always used in all compiles
 CPP_FLAGS=/nologo /W3 /WX
@@ -141,14 +145,14 @@ DEBUG_OPT_OPTION = /Od
 !endif
 
 !if "$(COMPILER_NAME)" == "VS2003"
-PRODUCT_OPT_OPTION = /O2
-FASTDEBUG_OPT_OPTION = /O2
+PRODUCT_OPT_OPTION = /O2 /Oy-
+FASTDEBUG_OPT_OPTION = /O2 /Oy-
 DEBUG_OPT_OPTION = /Od
 !endif
 
 !if "$(COMPILER_NAME)" == "VS2005"
-PRODUCT_OPT_OPTION = /O2
-FASTDEBUG_OPT_OPTION = /O2
+PRODUCT_OPT_OPTION = /O2 /Oy-
+FASTDEBUG_OPT_OPTION = /O2 /Oy-
 DEBUG_OPT_OPTION = /Od
 GX_OPTION = /EHsc
 # This VS2005 compiler has /GS as a default and requires bufferoverflowU.lib
@@ -165,8 +169,8 @@ CPP_FLAGS=$(CPP_FLAGS) /D _CRT_SECURE_NO_DEPRECATE
 # Compile for space above time.
 !if "$(Variant)" == "kernel"
-PRODUCT_OPT_OPTION = /O1
-FASTDEBUG_OPT_OPTION = /O1
+PRODUCT_OPT_OPTION = /O1 /Oy-
+FASTDEBUG_OPT_OPTION = /O1 /Oy-
 DEBUG_OPT_OPTION = /Od
 !endif

View File

@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2007
 HS_MAJOR_VER=12
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=01
+HS_BUILD_NUMBER=03
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7

View File

@@ -2037,7 +2037,7 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
 int LIR_Assembler::shift_amount(BasicType t) {
-int elem_size = type2aelembytes[t];
+int elem_size = type2aelembytes(t);
 switch (elem_size) {
 case 1 : return 0;
 case 2 : return 1;
@@ -2360,7 +2360,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
 op->tmp2()->as_register(),
 op->tmp3()->as_register(),
 arrayOopDesc::header_size(op->type()),
-type2aelembytes[op->type()],
+type2aelembytes(op->type()),
 op->klass()->as_register(),
 *op->stub()->entry());
 }
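
These hunks (and the matching ones in the LIR generator, x86 LIR assembler, and c1_LIR files below) are part of a tree-wide change turning type2aelembytes from a bare lookup array into a function call. One plausible motivation -- an assumption, the diff itself does not say -- is that an accessor can validate its index where an array subscript cannot. A C sketch with an illustrative table, not HotSpot's actual BasicType numbering:

    #include <assert.h>

    static const int elem_bytes_table[] = { 1, 2, 2, 4, 4, 8, 8 };

    /* elem_bytes_table[t] silently accepts any t; the accessor can assert. */
    static int elem_bytes(int t) {
        assert(t >= 0 &&
               t < (int)(sizeof elem_bytes_table / sizeof elem_bytes_table[0]));
        return elem_bytes_table[t];
    }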

View File

@@ -179,7 +179,7 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
 BasicType type, bool needs_card_mark) {
-int elem_size = type2aelembytes[type];
+int elem_size = type2aelembytes(type);
 int shift = exact_log2(elem_size);
 LIR_Opr base_opr;

View File

@@ -6023,7 +6023,7 @@ instruct cmovII_imm(cmpOp cmp, flagsReg icc, iRegI dst, immI11 src) %{
 ins_pipe(ialu_imm);
 %}
 
-instruct cmovII_U_reg(cmpOp cmp, flagsRegU icc, iRegI dst, iRegI src) %{
+instruct cmovII_U_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
 ins_cost(150);
 size(4);
@@ -6032,7 +6032,7 @@ instruct cmovII_U_reg(cmpOp cmp, flagsRegU icc, iRegI dst, iRegI src) %{
 ins_pipe(ialu_reg);
 %}
 
-instruct cmovII_U_imm(cmpOp cmp, flagsRegU icc, iRegI dst, immI11 src) %{
+instruct cmovII_U_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
 ins_cost(140);
 size(4);

View File

@@ -2911,6 +2911,7 @@ class StubGenerator: public StubCodeGenerator {
 // These entry points require SharedInfo::stack0 to be set up in non-core builds
 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
+StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
 StubRoutines::_throw_ArithmeticException_entry = generate_throw_exception("ArithmeticException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_ArithmeticException), true);
 StubRoutines::_throw_NullPointerException_entry = generate_throw_exception("NullPointerException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException), true);
 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);

View File

@@ -28,6 +28,12 @@
 int VM_Version::_features = VM_Version::unknown_m;
 const char* VM_Version::_features_str = "";
 
+bool VM_Version::is_niagara1_plus() {
+// This is a placeholder until the real test is determined.
+return is_niagara1() &&
+(os::processor_count() > maximum_niagara1_processor_count());
+}
+
 void VM_Version::initialize() {
 _features = determine_features();
 PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
@@ -160,3 +166,13 @@ void VM_Version::allow_all() {
 void VM_Version::revert() {
 _features = saved_features;
 }
+
+unsigned int VM_Version::calc_parallel_worker_threads() {
+unsigned int result;
+if (is_niagara1_plus()) {
+result = nof_parallel_worker_threads(5, 16, 8);
+} else {
+result = nof_parallel_worker_threads(5, 8, 8);
+}
+return result;
+}

View File

@@ -64,6 +64,11 @@ protected:
 static bool is_niagara1(int features) { return (features & niagara1_m) == niagara1_m; }
 
+static int maximum_niagara1_processor_count() { return 32; }
+// Returns true if the platform is in the niagara line and
+// newer than the niagara1.
+static bool is_niagara1_plus();
+
 public:
 // Initialization
 static void initialize();
@@ -129,4 +134,7 @@ public:
 // Override the Abstract_VM_Version implementation.
 static uint page_size_count() { return is_sun4v() ? 4 : 2; }
+
+// Calculates the number of parallel threads
+static unsigned int calc_parallel_worker_threads();
 };

View File

@@ -175,17 +175,12 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
 // %%%% Could load both offset and interface in one ldx, if they were
 // in the opposite order. This would save a load.
 __ ld_ptr(L0, base + itableOffsetEntry::interface_offset_in_bytes(), L1);
-#ifdef ASSERT
-Label ok;
-// Check that entry is non-null and an Oop
-__ bpr(Assembler::rc_nz, false, Assembler::pt, L1, ok);
-__ delayed()->nop();
-__ stop("null entry point found in itable's offset table");
-__ bind(ok);
-__ verify_oop(L1);
-#endif // ASSERT
-__ cmp(G5_interface, L1);
+
+// If the entry is NULL then we've reached the end of the table
+// without finding the expected interface, so throw an exception
+Label throw_icce;
+__ bpr(Assembler::rc_z, false, Assembler::pn, L1, throw_icce);
+__ delayed()->cmp(G5_interface, L1);
 __ brx(Assembler::notEqual, true, Assembler::pn, search);
 __ delayed()->add(L0, itableOffsetEntry::size() * wordSize, L0);
@@ -223,24 +218,30 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
 __ JMP(G3_scratch, 0);
 __ delayed()->nop();
 
+__ bind(throw_icce);
+Address icce(G3_scratch, StubRoutines::throw_IncompatibleClassChangeError_entry());
+__ jump_to(icce, 0);
+__ delayed()->restore();
+
 masm->flush();
+
+guarantee(__ pc() <= s->code_end(), "overflowed buffer");
+
 s->set_exception_points(npe_addr, ame_addr);
 return s;
 }
 
 int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
-if (TraceJumps || DebugVtables || CountCompiledCalls || VerifyOops) return 999;
+if (TraceJumps || DebugVtables || CountCompiledCalls || VerifyOops) return 1000;
 else {
 const int slop = 2*BytesPerInstWord; // sethi;add (needed for long offsets)
 if (is_vtable_stub) {
 const int basic = 5*BytesPerInstWord; // ld;ld;ld,jmp,nop
 return basic + slop;
 } else {
-#ifdef ASSERT
-return 999;
-#endif // ASSERT
-const int basic = 17*BytesPerInstWord; // save, ld, ld, sll, and, add, add, ld, cmp, br, add, ld, add, ld, ld, jmp, restore
+// save, ld, ld, sll, and, add, add, ld, cmp, br, add, ld, add, ld, ld, jmp, restore, sethi, jmpl, restore
+const int basic = (20 LP64_ONLY(+ 6)) * BytesPerInstWord;
 return (basic + slop);
 }
 }
@@ -252,29 +253,3 @@ int VtableStub::pd_code_alignment() {
 const unsigned int icache_line_size = 32;
 return icache_line_size;
 }
-
-//Reconciliation History
-// 1.2 97/12/09 17:13:31 vtableStubs_i486.cpp
-// 1.4 98/01/21 19:18:37 vtableStubs_i486.cpp
-// 1.5 98/02/13 16:33:55 vtableStubs_i486.cpp
-// 1.7 98/03/05 17:17:28 vtableStubs_i486.cpp
-// 1.9 98/05/18 09:26:17 vtableStubs_i486.cpp
-// 1.10 98/05/26 16:28:13 vtableStubs_i486.cpp
-// 1.11 98/05/27 08:51:35 vtableStubs_i486.cpp
-// 1.12 98/06/15 15:04:12 vtableStubs_i486.cpp
-// 1.13 98/07/28 18:44:22 vtableStubs_i486.cpp
-// 1.15 98/08/28 11:31:19 vtableStubs_i486.cpp
-// 1.16 98/09/02 12:58:31 vtableStubs_i486.cpp
-// 1.17 98/09/04 12:15:52 vtableStubs_i486.cpp
-// 1.18 98/11/19 11:55:24 vtableStubs_i486.cpp
-// 1.19 99/01/12 14:57:56 vtableStubs_i486.cpp
-// 1.20 99/01/19 17:42:52 vtableStubs_i486.cpp
-// 1.22 99/01/21 10:29:25 vtableStubs_i486.cpp
-// 1.30 99/06/02 15:27:39 vtableStubs_i486.cpp
-// 1.26 99/06/24 14:25:07 vtableStubs_i486.cpp
-// 1.23 99/02/22 14:37:52 vtableStubs_i486.cpp
-// 1.28 99/06/29 18:06:17 vtableStubs_i486.cpp
-// 1.29 99/07/22 17:03:44 vtableStubs_i486.cpp
-// 1.30 99/08/11 09:33:27 vtableStubs_i486.cpp
-//End
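
The rewritten itable scan (here and in the x86 vtable stubs below) folds the debug-only NULL check into the product path: a NULL interface entry now means the scan ran off the end of the table, and the stub branches to a new entry point that throws IncompatibleClassChangeError. A hedged C sketch of the lookup shape, with a hypothetical entry layout standing in for itableOffsetEntry:

    #include <stddef.h>

    struct itable_entry {          /* hypothetical layout, for illustration */
        const void *interface;     /* NULL terminates the table             */
        int         offset;
    };

    /* Mirrors the new stub's control flow: scan until a match or a NULL
     * entry; NULL means the receiver's class does not implement the
     * interface, so the caller must throw ICCE (signalled as -1 here). */
    static int itable_lookup(const struct itable_entry *e, const void *iface) {
        for (;; e++) {
            if (e->interface == NULL)  return -1;        /* throw ICCE */
            if (e->interface == iface) return e->offset; /* hit        */
        }
    }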

View File

@@ -2672,6 +2672,22 @@ void Assembler::movlpd(XMMRegister dst, Address src) {
 emit_sse_operand(dst, src);
 }
 
+void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
+assert(VM_Version::supports_sse2(), "");
+emit_byte(0xF3);
+emit_byte(0x0F);
+emit_byte(0xE6);
+emit_sse_operand(dst, src);
+}
+
+void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
+assert(VM_Version::supports_sse2(), "");
+emit_byte(0x0F);
+emit_byte(0x5B);
+emit_sse_operand(dst, src);
+}
+
 emit_sse_instruction(andps, sse, 0, 0x54, XMMRegister, XMMRegister);
 emit_sse_instruction(andpd, sse2, 0x66, 0x54, XMMRegister, XMMRegister);
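
The new assembler methods emit the SSE2 packed conversions cvtdq2pd (encoding F3 0F E6) and cvtdq2ps (encoding 0F 5B). They back the UseXmmI2D/UseXmmI2F instruction patterns added in the .ad hunks below (movd followed by cvtdq2pd/cvtdq2ps), which the diff enables only when supports_sse4a() holds; a plausible reason -- an assumption, not stated in the diff -- is avoiding cvtsi2sd/ss's dependence on the destination register's previous contents. What the two instructions compute, via the corresponding SSE2 intrinsics:

    #include <emmintrin.h>   /* SSE2 */

    /* cvtdq2pd: converts the low two packed int32 lanes to two doubles. */
    static __m128d i32x2_to_f64x2(__m128i v) { return _mm_cvtepi32_pd(v); }

    /* cvtdq2ps: converts four packed int32 lanes to four floats. */
    static __m128  i32x4_to_f32x4(__m128i v) { return _mm_cvtepi32_ps(v); }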

View File

@@ -901,6 +901,8 @@ class Assembler : public AbstractAssembler {
 void cvtss2sd(XMMRegister dst, XMMRegister src);
 void cvtsd2ss(XMMRegister dst, Address src); // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
 void cvtsd2ss(XMMRegister dst, XMMRegister src);
+void cvtdq2pd(XMMRegister dst, XMMRegister src);
+void cvtdq2ps(XMMRegister dst, XMMRegister src);
 void cvtsi2ss(XMMRegister dst, Address src); // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
 void cvtsi2ss(XMMRegister dst, Register src);

View File

@@ -1304,7 +1304,7 @@ void Assembler::movl(Address dst, Register src) {
 emit_operand(src, dst);
 }
 
-void Assembler::mov64(Register dst, int64_t imm64) {
+void Assembler::mov64(Register dst, intptr_t imm64) {
 InstructionMark im(this);
 int encode = prefixq_and_encode(dst->encoding());
 emit_byte(0xB8 | encode);
@@ -1331,7 +1331,7 @@ void Assembler::movq(Register dst, Address src) {
 emit_operand(dst, src);
 }
 
-void Assembler::mov64(Address dst, int64_t imm32) {
+void Assembler::mov64(Address dst, intptr_t imm32) {
 assert(is_simm32(imm32), "lost bits");
 InstructionMark im(this);
 prefixq(dst);
@@ -3372,6 +3372,21 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
 emit_byte(0xC0 | encode);
 }
 
+void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
+emit_byte(0xF3);
+int encode = prefix_and_encode(dst->encoding(), src->encoding());
+emit_byte(0x0F);
+emit_byte(0xE6);
+emit_byte(0xC0 | encode);
+}
+
+void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
+int encode = prefix_and_encode(dst->encoding(), src->encoding());
+emit_byte(0x0F);
+emit_byte(0x5B);
+emit_byte(0xC0 | encode);
+}
+
 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
 emit_byte(0xF2);
 int encode = prefix_and_encode(dst->encoding(), src->encoding());

View File

@@ -922,6 +922,8 @@ class Assembler : public AbstractAssembler {
 void cvttsd2siq(Register dst, XMMRegister src); // truncates
 void cvtss2sd(XMMRegister dst, XMMRegister src);
 void cvtsd2ss(XMMRegister dst, XMMRegister src);
+void cvtdq2pd(XMMRegister dst, XMMRegister src);
+void cvtdq2ps(XMMRegister dst, XMMRegister src);
 
 void pxor(XMMRegister dst, Address src); // Xor Packed Byte Integer Values
 void pxor(XMMRegister dst, XMMRegister src); // Xor Packed Byte Integer Values

View File

@@ -546,8 +546,8 @@ void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst,
 // set rsi.edi to the end of the arrays (arrays have same length)
 // negate the index
-__ leal(rsi, Address(rsi, rax, Address::times_2, type2aelembytes[T_CHAR]));
-__ leal(rdi, Address(rdi, rax, Address::times_2, type2aelembytes[T_CHAR]));
+__ leal(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));
+__ leal(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));
 __ negl(rax);
 
 // compare the strings in a loop
@@ -1232,7 +1232,7 @@ void LIR_Assembler::prefetchw(LIR_Opr src) {
 NEEDS_CLEANUP; // This could be static?
 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
-int elem_size = type2aelembytes[type];
+int elem_size = type2aelembytes(type);
 switch (elem_size) {
 case 1: return Address::times_1;
 case 2: return Address::times_2;
@@ -2739,7 +2739,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
 
-int elem_size = type2aelembytes[basic_type];
+int elem_size = type2aelembytes(basic_type);
 int shift_amount;
 Address::ScaleFactor scale;

View File

@@ -151,7 +151,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
 LIR_Address* addr;
 if (index_opr->is_constant()) {
-int elem_size = type2aelembytes[type];
+int elem_size = type2aelembytes(type);
 addr = new LIR_Address(array_opr,
 offset_in_bytes + index_opr->as_jint() * elem_size, type);
 } else {

View File

@@ -1416,8 +1416,8 @@ class StubGenerator: public StubCodeGenerator {
 // ======== end loop ========
 
 // It was a real error; we must depend on the caller to finish the job.
-// Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
-// Emit GC store barriers for the oops we have copied (r14 + rdx),
+// Register "count" = -1 * number of *remaining* oops, length_arg = *total* oops.
+// Emit GC store barriers for the oops we have copied (length_arg + count),
 // and report their number to the caller.
 __ addl(count, length_arg); // transfers = (length - remaining)
 __ movl(rax, count); // save the value
@@ -1430,6 +1430,7 @@ class StubGenerator: public StubCodeGenerator {
 // Come here on success only.
 __ BIND(L_do_card_marks);
 __ movl(count, length_arg);
+__ movl(to, to_arg); // reload
 gen_write_ref_array_post_barrier(to, count);
 __ xorl(rax, rax); // return 0 on success
@@ -2151,6 +2152,7 @@ class StubGenerator: public StubCodeGenerator {
 // These entry points require SharedInfo::stack0 to be set up in non-core builds
 // and need to be relocatable, so they each fabricate a RuntimeStub internally.
 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
+StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
 StubRoutines::_throw_ArithmeticException_entry = generate_throw_exception("ArithmeticException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_ArithmeticException), true);
 StubRoutines::_throw_NullPointerException_entry = generate_throw_exception("NullPointerException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException), true);
 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
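
The added movl(to, to_arg) reloads the destination pointer before the card-marking pass, apparently because the register holding it was reused earlier in the stub (hence the "// reload" comment). For context, a minimal C sketch of what a write-ref-array post barrier such as gen_write_ref_array_post_barrier does -- the 512-byte card size and byte-per-card table are illustrative assumptions, not HotSpot's exact layout:

    #include <stdint.h>
    #include <stddef.h>

    #define CARD_SHIFT 9   /* 512-byte cards (assumed) */

    /* Dirty every card spanned by the oops copied into [start, start+count).
     * `table` is indexed from `heap_base`; 0 marks a card dirty, as in
     * HotSpot's card-table convention. */
    static void post_barrier(volatile uint8_t *table, uintptr_t heap_base,
                             void **start, size_t count) {
        uintptr_t first = ((uintptr_t)start - heap_base) >> CARD_SHIFT;
        uintptr_t last  = ((uintptr_t)(start + count) - 1 - heap_base) >> CARD_SHIFT;
        for (uintptr_t c = first; c <= last; c++)
            table[c] = 0;
    }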

View File

@@ -2832,6 +2832,13 @@ class StubGenerator: public StubCodeGenerator {
 throw_AbstractMethodError),
 false);
 
+StubRoutines::_throw_IncompatibleClassChangeError_entry =
+generate_throw_exception("IncompatibleClassChangeError throw_exception",
+CAST_FROM_FN_PTR(address,
+SharedRuntime::
+throw_IncompatibleClassChangeError),
+false);
+
 StubRoutines::_throw_ArithmeticException_entry =
 generate_throw_exception("ArithmeticException throw_exception",
 CAST_FROM_FN_PTR(address,

View File

@@ -321,6 +321,20 @@ void VM_Version::get_processor_features() {
 UseXmmRegToRegMoveAll = false;
 }
 }
+if( FLAG_IS_DEFAULT(UseXmmI2F) ) {
+if( supports_sse4a() ) {
+UseXmmI2F = true;
+} else {
+UseXmmI2F = false;
+}
+}
+if( FLAG_IS_DEFAULT(UseXmmI2D) ) {
+if( supports_sse4a() ) {
+UseXmmI2D = true;
+} else {
+UseXmmI2D = false;
+}
+}
 }
 
 if( is_intel() ) { // Intel cpus specific settings

View File

@@ -265,6 +265,20 @@ void VM_Version::get_processor_features() {
 UseXmmRegToRegMoveAll = false;
 }
 }
+if( FLAG_IS_DEFAULT(UseXmmI2F) ) {
+if( supports_sse4a() ) {
+UseXmmI2F = true;
+} else {
+UseXmmI2F = false;
+}
+}
+if( FLAG_IS_DEFAULT(UseXmmI2D) ) {
+if( supports_sse4a() ) {
+UseXmmI2D = true;
+} else {
+UseXmmI2D = false;
+}
+}
 }
 
 if( is_intel() ) { // Intel cpus specific settings

View File

@@ -138,29 +138,21 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
 __ round_to(rbx, BytesPerLong);
 }
 
-Label hit, next, entry;
-__ jmp(entry);
+Label hit, next, entry, throw_icce;
+__ jmpb(entry);
 
 __ bind(next);
 __ addl(rbx, itableOffsetEntry::size() * wordSize);
 
 __ bind(entry);
 
-#ifdef ASSERT
-// Check that the entry is non-null
-if (DebugVtables) {
-Label L;
-__ pushl(rbx);
-__ movl(rbx, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
-__ testl(rbx, rbx);
-__ jcc(Assembler::notZero, L);
-__ stop("null entry point found in itable's offset table");
-__ bind(L);
-__ popl(rbx);
-}
-#endif
-__ cmpl(rax, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
+// If the entry is NULL then we've reached the end of the table
+// without finding the expected interface, so throw an exception
+__ movl(rdx, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
+__ testl(rdx, rdx);
+__ jcc(Assembler::zero, throw_icce);
+__ cmpl(rax, rdx);
 __ jcc(Assembler::notEqual, next);
 
 // We found a hit, move offset into rbx,
@@ -194,7 +186,15 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
 address ame_addr = __ pc();
 __ jmp(Address(method, methodOopDesc::from_compiled_offset()));
 
+__ bind(throw_icce);
+// Restore saved register
+__ popl(rdx);
+__ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
+
 masm->flush();
+
+guarantee(__ pc() <= s->code_end(), "overflowed buffer");
+
 s->set_exception_points(npe_addr, ame_addr);
 return s;
 }
@@ -207,7 +207,7 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
 return (DebugVtables ? 210 : 16) + (CountCompiledCalls ? 6 : 0);
 } else {
 // Itable stub size
-return (DebugVtables ? 140 : 55) + (CountCompiledCalls ? 6 : 0);
+return (DebugVtables ? 144 : 64) + (CountCompiledCalls ? 6 : 0);
 }
 }

View File

@@ -153,7 +153,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
 // Round up to align_object_offset boundary
 __ round_to_q(rbx, BytesPerLong);
 }
 
-Label hit, next, entry;
+Label hit, next, entry, throw_icce;
 
 __ jmpb(entry);
 
@@ -162,22 +162,13 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
 __ bind(entry);
 
-#ifdef ASSERT
-// Check that the entry is non-null
-if (DebugVtables) {
-Label L;
-__ pushq(rbx);
-__ movq(rbx, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
-__ testq(rbx, rbx);
-__ jcc(Assembler::notZero, L);
-__ stop("null entry point found in itable's offset table");
-__ bind(L);
-__ popq(rbx);
-}
-#endif
-__ cmpq(rax, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
-__ jcc(Assembler::notEqual, next);
+// If the entry is NULL then we've reached the end of the table
+// without finding the expected interface, so throw an exception
+__ movq(j_rarg1, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
+__ testq(j_rarg1, j_rarg1);
+__ jcc(Assembler::zero, throw_icce);
+__ cmpq(rax, j_rarg1);
+__ jccb(Assembler::notEqual, next);
 
 // We found a hit, move offset into j_rarg1
 __ movl(j_rarg1, Address(rbx, itableOffsetEntry::offset_offset_in_bytes()));
@@ -203,23 +194,31 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
 #ifdef ASSERT
 if (DebugVtables) {
 Label L2;
 __ cmpq(method, (int)NULL);
 __ jcc(Assembler::equal, L2);
 __ cmpq(Address(method, methodOopDesc::from_compiled_offset()), (int)NULL_WORD);
 __ jcc(Assembler::notZero, L2);
 __ stop("compiler entrypoint is null");
 __ bind(L2);
 }
 #endif // ASSERT
 
 // rbx: methodOop
 // j_rarg0: receiver
 address ame_addr = __ pc();
 __ jmp(Address(method, methodOopDesc::from_compiled_offset()));
 
+__ bind(throw_icce);
+// Restore saved register
+__ popq(j_rarg1);
+__ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
+
 __ flush();
+
+guarantee(__ pc() <= s->code_end(), "overflowed buffer");
+
 s->set_exception_points(npe_addr, ame_addr);
 return s;
 }
@@ -230,7 +229,7 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
 return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0);
 } else {
 // Itable stub size
-return (DebugVtables ? 636 : 64) + (CountCompiledCalls ? 13 : 0);
+return (DebugVtables ? 636 : 72) + (CountCompiledCalls ? 13 : 0);
 }
 }

View File

@@ -10970,7 +10970,7 @@ instruct convI2D_reg(regD dst, stackSlotI src) %{
 %}
 
 instruct convI2XD_reg(regXD dst, eRegI src) %{
-predicate( UseSSE>=2 );
+predicate( UseSSE>=2 && !UseXmmI2D );
 match(Set dst (ConvI2D src));
 format %{ "CVTSI2SD $dst,$src" %}
 opcode(0xF2, 0x0F, 0x2A);
@@ -10987,6 +10987,20 @@ instruct convI2XD_mem(regXD dst, memory mem) %{
 ins_pipe( pipe_slow );
 %}
 
+instruct convXI2XD_reg(regXD dst, eRegI src)
+%{
+predicate( UseSSE>=2 && UseXmmI2D );
+match(Set dst (ConvI2D src));
+format %{ "MOVD $dst,$src\n\t"
+"CVTDQ2PD $dst,$dst\t# i2d" %}
+ins_encode %{
+__ movd($dst$$XMMRegister, $src$$Register);
+__ cvtdq2pd($dst$$XMMRegister, $dst$$XMMRegister);
+%}
+ins_pipe(pipe_slow); // XXX
+%}
+
 instruct convI2D_mem(regD dst, memory mem) %{
 predicate( UseSSE<=1 && !Compile::current()->select_24_bit_instr());
 match(Set dst (ConvI2D (LoadI mem)));
@@ -11062,7 +11076,7 @@ instruct convI2F_mem(regF dst, memory mem) %{
 // Convert an int to a float in xmm; no rounding step needed.
 instruct convI2X_reg(regX dst, eRegI src) %{
-predicate(UseSSE>=1);
+predicate( UseSSE==1 || UseSSE>=2 && !UseXmmI2F );
 match(Set dst (ConvI2F src));
 
 format %{ "CVTSI2SS $dst, $src" %}
@@ -11071,6 +11085,20 @@ instruct convI2X_reg(regX dst, eRegI src) %{
 ins_pipe( pipe_slow );
 %}
 
+instruct convXI2X_reg(regX dst, eRegI src)
+%{
+predicate( UseSSE>=2 && UseXmmI2F );
+match(Set dst (ConvI2F src));
+format %{ "MOVD $dst,$src\n\t"
+"CVTDQ2PS $dst,$dst\t# i2f" %}
+ins_encode %{
+__ movd($dst$$XMMRegister, $src$$Register);
+__ cvtdq2ps($dst$$XMMRegister, $dst$$XMMRegister);
+%}
+ins_pipe(pipe_slow); // XXX
+%}
+
 instruct convI2L_reg( eRegL dst, eRegI src, eFlagsReg cr) %{
 match(Set dst (ConvI2L src));
 effect(KILL cr);

View File

@@ -10098,6 +10098,7 @@ instruct convD2L_reg_reg(rRegL dst, regD src, rFlagsReg cr)
 instruct convI2F_reg_reg(regF dst, rRegI src)
 %{
+predicate(!UseXmmI2F);
 match(Set dst (ConvI2F src));
 
 format %{ "cvtsi2ssl $dst, $src\t# i2f" %}
@@ -10118,6 +10119,7 @@ instruct convI2F_reg_mem(regF dst, memory src)
 instruct convI2D_reg_reg(regD dst, rRegI src)
 %{
+predicate(!UseXmmI2D);
 match(Set dst (ConvI2D src));
 
 format %{ "cvtsi2sdl $dst, $src\t# i2d" %}
@@ -10136,6 +10138,34 @@ instruct convI2D_reg_mem(regD dst, memory src)
 ins_pipe(pipe_slow); // XXX
 %}
 
+instruct convXI2F_reg(regF dst, rRegI src)
+%{
+predicate(UseXmmI2F);
+match(Set dst (ConvI2F src));
+
+format %{ "movdl $dst, $src\n\t"
+"cvtdq2psl $dst, $dst\t# i2f" %}
+ins_encode %{
+__ movdl($dst$$XMMRegister, $src$$Register);
+__ cvtdq2ps($dst$$XMMRegister, $dst$$XMMRegister);
+%}
+ins_pipe(pipe_slow); // XXX
+%}
+
+instruct convXI2D_reg(regD dst, rRegI src)
+%{
+predicate(UseXmmI2D);
+match(Set dst (ConvI2D src));
+
+format %{ "movdl $dst, $src\n\t"
+"cvtdq2pdl $dst, $dst\t# i2d" %}
+ins_encode %{
+__ movdl($dst$$XMMRegister, $src$$Register);
+__ cvtdq2pd($dst$$XMMRegister, $dst$$XMMRegister);
+%}
+ins_pipe(pipe_slow); // XXX
+%}
+
 instruct convL2F_reg_reg(regF dst, rRegL src)
 %{
 match(Set dst (ConvL2F src));

View File

@@ -232,7 +232,7 @@ LinuxAttachOperation* LinuxAttachListener::read_request(int s) {
 // where <ver> is the protocol version (1), <cmd> is the command
 // name ("load", "datadump", ...), and <arg> is an argument
 int expected_str_count = 2 + AttachOperation::arg_count_max;
-int max_len = (strlen(ver_str) + 1) + (AttachOperation::name_length_max + 1) +
+const int max_len = (sizeof(ver_str) + 1) + (AttachOperation::name_length_max + 1) +
 AttachOperation::arg_count_max*(AttachOperation::arg_length_max + 1);
 
 char buf[max_len];
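
Switching from strlen(ver_str) to sizeof(ver_str) makes max_len a compile-time constant, so char buf[max_len] becomes an ordinary fixed-size array instead of a variable-length array -- a GCC/C99 feature the Sun Studio toolchain targeted by this change does not accept here. This presumes ver_str is declared as an array (sizeof of a pointer would be wrong). A sketch of the difference, with a hypothetical version string:

    #include <stdio.h>
    #include <string.h>

    int main(void) {
        static const char ver[] = "1";   /* hypothetical stand-in for ver_str */
        /* strlen(ver) is a run-time value; sizeof(ver) is a constant (2: the
         * digit plus the trailing NUL).  sizeof already counts the NUL, so
         * the patch's "+ 1" over-reserves by one byte, which is harmless. */
        char buf[sizeof(ver) + 1];
        snprintf(buf, sizeof buf, "%s", ver);
        printf("%s: strlen=%zu sizeof=%zu\n", buf, strlen(ver), sizeof ver);
        return 0;
    }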

View File

@@ -116,6 +116,20 @@ julong os::physical_memory() {
 return Linux::physical_memory();
 }
 
+julong os::allocatable_physical_memory(julong size) {
+#ifdef _LP64
+return size;
+#else
+julong result = MIN2(size, (julong)3800*M);
+if (!is_allocatable(result)) {
+// See comments under solaris for alignment considerations
+julong reasonable_size = (julong)2*G - 2 * os::vm_page_size();
+result = MIN2(size, reasonable_size);
+}
+return result;
+#endif // _LP64
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 // environment support
@@ -1247,19 +1261,13 @@ jlong os::elapsed_frequency() {
 return (1000 * 1000);
 }
 
-jlong os::timeofday() {
+jlong os::javaTimeMillis() {
 timeval time;
 int status = gettimeofday(&time, NULL);
 assert(status != -1, "linux error");
 return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
 }
 
-// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
-// _use_global_time is only set if CacheTimeMillis is true
-jlong os::javaTimeMillis() {
-return (_use_global_time ? read_global_time() : timeofday());
-}
-
 #ifndef CLOCK_MONOTONIC
 #define CLOCK_MONOTONIC (1)
 #endif
@@ -2472,6 +2480,10 @@ bool os::can_commit_large_page_memory() {
 return false;
 }
 
+bool os::can_execute_large_page_memory() {
+return false;
+}
+
 // Reserve memory at an arbitrary address, only if that area is
 // available (and not reserved for something else).
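
With the cached-global-time indirection removed (the _use_global_time / read_global_time path set by CacheTimeMillis), os::javaTimeMillis() now reads the clock directly on Linux, Solaris, and Windows alike. The equivalent computation in plain C:

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/time.h>

    /* Milliseconds since the Unix epoch straight from gettimeofday(),
     * matching the simplified os::javaTimeMillis(). */
    static int64_t java_time_millis(void) {
        struct timeval tv;
        if (gettimeofday(&tv, NULL) == -1)
            return -1;                       /* the VM asserts here instead */
        return (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
    }

    int main(void) { printf("%lld\n", (long long)java_time_millis()); return 0; }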

View File

@@ -1691,19 +1691,14 @@ jlong getTimeMillis() {
 return (jlong)(nanotime / NANOSECS_PER_MILLISECS);
 }
 
-jlong os::timeofday() {
+// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
+jlong os::javaTimeMillis() {
 timeval t;
 if (gettimeofday( &t, NULL) == -1)
-fatal1("timeofday: gettimeofday (%s)", strerror(errno));
+fatal1("os::javaTimeMillis: gettimeofday (%s)", strerror(errno));
 return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000;
 }
 
-// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
-// _use_global_time is only set if CacheTimeMillis is true
-jlong os::javaTimeMillis() {
-return (_use_global_time ? read_global_time() : timeofday());
-}
-
 jlong os::javaTimeNanos() {
 return (jlong)getTimeNanos();
 }
@@ -2785,16 +2780,15 @@ char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
 return b;
 }
 
-char*
-os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
-char* addr = NULL;
-int flags;
+char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
+char* addr = requested_addr;
+int flags = MAP_PRIVATE | MAP_NORESERVE;
 
-flags = MAP_PRIVATE | MAP_NORESERVE;
-if (requested_addr != NULL) {
-flags |= MAP_FIXED;
-addr = requested_addr;
-} else if (has_map_align && alignment_hint > (size_t) vm_page_size()) {
+assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
+
+if (fixed) {
+flags |= MAP_FIXED;
+} else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
 flags |= MAP_ALIGN;
 addr = (char*) alignment_hint;
 }
@@ -2802,11 +2796,14 @@ os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
 // Map uncommitted pages PROT_NONE so we fail early if we touch an
 // uncommitted page. Otherwise, the read/write might succeed if we
 // have enough swap space to back the physical page.
-addr = Solaris::mmap_chunk(addr, bytes, flags, PROT_NONE);
+return mmap_chunk(addr, bytes, flags, PROT_NONE);
+}
+
+char* os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
+char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
 
 guarantee(requested_addr == NULL || requested_addr == addr,
 "OS failed to return requested mmap address.");
 return addr;
 }
@@ -2832,6 +2829,31 @@ char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
 // in one of the methods further up the call chain. See bug 5044738.
 assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
 
+// Since snv_84, Solaris attempts to honor the address hint - see 5003415.
+// Give it a try, if the kernel honors the hint we can return immediately.
+char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
+volatile int err = errno;
+if (addr == requested_addr) {
+return addr;
+} else if (addr != NULL) {
+unmap_memory(addr, bytes);
+}
+
+if (PrintMiscellaneous && Verbose) {
+char buf[256];
+buf[0] = '\0';
+if (addr == NULL) {
+jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
+}
+warning("attempt_reserve_memory_at: couldn't reserve %d bytes at "
+PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
+"%s", bytes, requested_addr, addr, buf);
+}
+
+// Address hint method didn't work. Fall back to the old method.
+// In theory, once SNV becomes our oldest supported platform, this
+// code will no longer be needed.
+//
 // Repeatedly allocate blocks until the block is allocated at the
 // right spot. Give up after max_tries.
 int i;
@@ -3067,6 +3089,8 @@ bool os::large_page_init() {
 if (UseISM) {
 // ISM disables MPSS to be compatible with old JDK behavior
 UseMPSS = false;
+_page_sizes[0] = _large_page_size;
+_page_sizes[1] = vm_page_size();
 }
 
 UseMPSS = UseMPSS &&
@@ -3156,6 +3180,10 @@ bool os::can_commit_large_page_memory() {
 return UseISM ? false : true;
 }
 
+bool os::can_execute_large_page_memory() {
+return UseISM ? false : true;
+}
+
 static int os_sleep(jlong millis, bool interruptible) {
 const jlong limit = INT_MAX;
 jlong prevtime;
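
The new fast path in attempt_reserve_memory_at() relies on Solaris honoring the mmap address hint (since snv_84, per 5003415): map without MAP_FIXED, keep the result only if it landed at the requested address, otherwise unmap and fall back to the old retry loop. A C sketch of that probe, with anonymous mmap standing in for Solaris::anon_mmap's /dev/zero mapping:

    #include <sys/mman.h>
    #include <stddef.h>

    /* Returns `requested` on success, NULL if the caller must fall back. */
    static void *try_reserve_at(void *requested, size_t bytes) {
        void *addr = mmap(requested, bytes, PROT_NONE,
                          MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
        if (addr == MAP_FAILED)
            return NULL;
        if (addr == requested)
            return addr;          /* kernel honored the hint: done */
        munmap(addr, bytes);      /* hint ignored: retry loop takes over */
        return NULL;
    }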

View File

@@ -156,6 +156,7 @@ class Solaris {
 static int get_dev_zero_fd() { return _dev_zero_fd; }
 static void set_dev_zero_fd(int fd) { _dev_zero_fd = fd; }
 static char* mmap_chunk(char *addr, size_t size, int flags, int prot);
+static char* anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed);
 static bool mpss_sanity_check(bool warn, size_t * page_size);
 static bool ism_sanity_check (bool warn, size_t * page_size);

View File

@@ -621,7 +621,12 @@
 }
 
 julong os::allocatable_physical_memory(julong size) {
+#ifdef _LP64
+return size;
+#else
+// Limit to 1400m because of the 2gb address space wall
 return MIN2(size, (julong)1400*M);
+#endif
 }
 
 // VC6 lacks DWORD_PTR
@@ -732,20 +737,13 @@ FILETIME java_to_windows_time(jlong l) {
 return result;
 }
 
-jlong os::timeofday() {
-FILETIME wt;
-GetSystemTimeAsFileTime(&wt);
-return windows_to_java_time(wt);
-}
-
 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
-// _use_global_time is only set if CacheTimeMillis is true
 jlong os::javaTimeMillis() {
 if (UseFakeTimers) {
 return fake_time++;
 } else {
-return (_use_global_time ? read_global_time() : timeofday());
+FILETIME wt;
+GetSystemTimeAsFileTime(&wt);
+return windows_to_java_time(wt);
 }
 }
@@ -2518,9 +2516,13 @@ bool os::can_commit_large_page_memory() {
 return false;
 }
 
+bool os::can_execute_large_page_memory() {
+return true;
+}
+
 char* os::reserve_memory_special(size_t bytes) {
 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
-char * res = (char *)VirtualAlloc(NULL, bytes, flag, PAGE_READWRITE);
+char * res = (char *)VirtualAlloc(NULL, bytes, flag, PAGE_EXECUTE_READWRITE);
 return res;
 }
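
Large-page memory on Windows is now mapped PAGE_EXECUTE_READWRITE, matching the new can_execute_large_page_memory() returning true, so executable code can live in large pages. The call shape (a sketch: error handling omitted, and MEM_LARGE_PAGES additionally requires the SeLockMemoryPrivilege and a size that is a multiple of GetLargePageMinimum()):

    #include <windows.h>

    static char *reserve_special(SIZE_T bytes) {
        DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
        /* PAGE_EXECUTE_READWRITE instead of PAGE_READWRITE, as in the patch */
        return (char *)VirtualAlloc(NULL, bytes, flag, PAGE_EXECUTE_READWRITE);
    }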

View File

@@ -60,7 +60,18 @@ inline u4 Bytes::swap_u4(u4 x) {
 #ifdef AMD64
 inline u8 Bytes::swap_u8(u8 x) {
+#ifdef SPARC_WORKS
+// workaround for SunStudio12 CR6615391
+__asm__ __volatile__ (
+"bswapq %0"
+:"=r" (x)    // output : register 0 => x
+:"0" (x)     // input  : x => register 0
+:"0"         // clobbered register
+);
+return x;
+#else
 return bswap_64(x);
+#endif
 }
 #else
 // Helper function for swap_u8
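
The hand-written bswapq exists because SunStudio 12 miscompiles bswap_64 (CR 6615391, cited in the hunk). A self-contained version of the same 64-bit byte reversal, with a portable shift-and-mask fallback for compilers where neither bswap_64 nor the asm form is usable:

    #include <stdint.h>

    static uint64_t swap_u8(uint64_t x) {
    #if defined(__GNUC__) && defined(__x86_64__)
        __asm__("bswapq %0" : "+r"(x));   /* same instruction as the workaround */
        return x;
    #else
        /* swap adjacent bytes, then 16-bit halves, then 32-bit halves */
        x = ((x & 0x00ff00ff00ff00ffULL) << 8)  | ((x >> 8)  & 0x00ff00ff00ff00ffULL);
        x = ((x & 0x0000ffff0000ffffULL) << 16) | ((x >> 16) & 0x0000ffff0000ffffULL);
        return (x << 32) | (x >> 32);
    #endif
    }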

View File

@@ -62,8 +62,14 @@
 #endif // AMD64
 
 address os::current_stack_pointer() {
+#ifdef SPARC_WORKS
+register void *esp;
+__asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
+return (address) ((char*)esp + sizeof(long)*2);
+#else
 register void *esp __asm__ (SPELL_REG_SP);
 return (address) esp;
+#endif
 }
 
 char* os::non_memory_address_word() {
@@ -139,7 +145,12 @@ frame os::get_sender_for_C_frame(frame* fr) {
 }
 
 intptr_t* _get_previous_fp() {
+#ifdef SPARC_WORKS
+register intptr_t **ebp;
+__asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
+#else
 register intptr_t **ebp __asm__ (SPELL_REG_FP);
+#endif
 return (intptr_t*) *ebp; // we want what it points to.
 }
@@ -157,23 +168,8 @@ frame os::current_frame() {
 }
 }
 
 // Utility functions
 
-julong os::allocatable_physical_memory(julong size) {
-#ifdef AMD64
-return size;
-#else
-julong result = MIN2(size, (julong)3800*M);
-if (!is_allocatable(result)) {
-// See comments under solaris for alignment considerations
-julong reasonable_size = (julong)2*G - 2 * os::vm_page_size();
-result = MIN2(size, reasonable_size);
-}
-return result;
-#endif // AMD64
-}
-
 // From IA32 System Programming Guide
 enum {
 trap_page_fault = 0xE
@@ -575,7 +571,9 @@ bool os::Linux::supports_variable_stack_size() { return true; }
 #else
 size_t os::Linux::min_stack_allowed = (48 DEBUG_ONLY(+4))*K;
 
+#ifdef __GNUC__
 #define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
+#endif
 
 // Test if pthread library can support variable thread stack size. LinuxThreads
 // in fixed stack mode allocates 2M fixed slot for each thread. LinuxThreads
@@ -606,7 +604,11 @@ bool os::Linux::supports_variable_stack_size() {
 // return true and skip _thread_safety_check(), so we may not be able to
 // detect stack-heap collisions. But otherwise it's harmless.
 //
+#ifdef __GNUC__
 return (GET_GS() != 0);
+#else
+return false;
+#endif
 }
 }
 #endif // AMD64
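
The SPARC_WORKS branches exist because Sun Studio's GNU-asm support lacks GCC's register-pinned locals (the `register void *esp __asm__(SPELL_REG_SP)` form), so the patch reads the register through an explicit mov into an output operand; note the Sun Studio variant of current_stack_pointer() also adds `sizeof(long)*2`, apparently compensating for words that compiler pushes. A minimal x86-64 sketch of the two forms:

    /* Reads the stack pointer; the #ifdef mirrors the patch's structure. */
    static void *current_sp(void) {
    #ifdef SPARC_WORKS
        void *sp;
        __asm__("mov %%rsp, %0" : "=r"(sp));   /* explicit mov, any compiler
                                                  with GNU-asm output operands */
        return sp;
    #else
        register void *sp __asm__("rsp");      /* GCC-only register pinning */
        return sp;
    #endif
    }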

View File

@@ -105,7 +105,7 @@ LIR_Opr LIR_OprFact::dummy_value_type(ValueType* type) {
 LIR_Address::Scale LIR_Address::scale(BasicType type) {
-int elem_size = type2aelembytes[type];
+int elem_size = type2aelembytes(type);
 switch (elem_size) {
 case 1: return LIR_Address::times_1;
 case 2: return LIR_Address::times_2;

View File

@@ -104,7 +104,7 @@ public:
 };
 void BCEscapeAnalyzer::set_returned(ArgumentMap vars) {
-  for (int i = 0; i <= _arg_size; i++) {
+  for (int i = 0; i < _arg_size; i++) {
     if (vars.contains(i))
       _arg_returned.set_bit(i);
   }
@@ -112,10 +112,9 @@ void BCEscapeAnalyzer::set_returned(ArgumentMap vars) {
   _return_allocated = _return_allocated && vars.contains_allocated() && !(vars.contains_unknown() || vars.contains_vars());
 }
 // return true if any element of vars is an argument
 bool BCEscapeAnalyzer::is_argument(ArgumentMap vars) {
-  for (int i = 0; i <= _arg_size; i++) {
+  for (int i = 0; i < _arg_size; i++) {
     if (vars.contains(i))
       return true;
   }
@@ -126,7 +125,7 @@ bool BCEscapeAnalyzer::is_argument(ArgumentMap vars) {
 bool BCEscapeAnalyzer::is_arg_stack(ArgumentMap vars){
   if (_conservative)
     return true;
-  for (int i = 0; i <= _arg_size; i++) {
+  for (int i = 0; i < _arg_size; i++) {
     if (vars.contains(i) && _arg_stack.at(i))
       return true;
   }
@@ -134,12 +133,13 @@ bool BCEscapeAnalyzer::is_arg_stack(ArgumentMap vars){
 }
 void BCEscapeAnalyzer::clear_bits(ArgumentMap vars, BitMap &bm) {
-  for (int i = 0; i <= _arg_size; i++) {
+  for (int i = 0; i < _arg_size; i++) {
     if (vars.contains(i)) {
       bm.clear_bit(i);
     }
   }
 }
 void BCEscapeAnalyzer::set_method_escape(ArgumentMap vars) {
   clear_bits(vars, _arg_local);
 }
@@ -155,6 +155,17 @@ void BCEscapeAnalyzer::set_dirty(ArgumentMap vars) {
   clear_bits(vars, _dirty);
 }
+void BCEscapeAnalyzer::set_modified(ArgumentMap vars, int offs, int size) {
+  for (int i = 0; i < _arg_size; i++) {
+    if (vars.contains(i)) {
+      set_arg_modified(i, offs, size);
+    }
+  }
+  if (vars.contains_unknown())
+    _unknown_modified = true;
+}
 bool BCEscapeAnalyzer::is_recursive_call(ciMethod* callee) {
   for (BCEscapeAnalyzer* scope = this; scope != NULL; scope = scope->_parent) {
     if (scope->method() == callee) {
@@ -164,6 +175,40 @@ bool BCEscapeAnalyzer::is_recursive_call(ciMethod* callee) {
   return false;
 }
+bool BCEscapeAnalyzer::is_arg_modified(int arg, int offset, int size_in_bytes) {
+  if (offset == OFFSET_ANY)
+    return _arg_modified[arg] != 0;
+  assert(arg >= 0 && arg < _arg_size, "must be an argument.");
+  bool modified = false;
+  int l = offset / HeapWordSize;
+  int h = round_to(offset + size_in_bytes, HeapWordSize) / HeapWordSize;
+  if (l > ARG_OFFSET_MAX)
+    l = ARG_OFFSET_MAX;
+  if (h > ARG_OFFSET_MAX+1)
+    h = ARG_OFFSET_MAX + 1;
+  for (int i = l; i < h; i++) {
+    modified = modified || (_arg_modified[arg] & (1 << i)) != 0;
+  }
+  return modified;
+}
+
+void BCEscapeAnalyzer::set_arg_modified(int arg, int offset, int size_in_bytes) {
+  if (offset == OFFSET_ANY) {
+    _arg_modified[arg] = (uint) -1;
+    return;
+  }
+  assert(arg >= 0 && arg < _arg_size, "must be an argument.");
+  int l = offset / HeapWordSize;
+  int h = round_to(offset + size_in_bytes, HeapWordSize) / HeapWordSize;
+  if (l > ARG_OFFSET_MAX)
+    l = ARG_OFFSET_MAX;
+  if (h > ARG_OFFSET_MAX+1)
+    h = ARG_OFFSET_MAX + 1;
+  for (int i = l; i < h; i++) {
+    _arg_modified[arg] |= (1 << i);
+  }
+}
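The mask semantics in the two functions above: each argument carries one 32-bit word where bit i means "the i-th HeapWord of the referenced object may be written", saturating at ARG_OFFSET_MAX so offsets past 31 words all fold into the last bit. A self-contained sketch of the computation (round_to and the constants are reimplemented here for the demo, assuming HeapWordSize == 8):

#include <cstdio>

static const int HeapWordSize = 8;      // assumed for this demo
static const int ARG_OFFSET_MAX = 31;   // as in the patch
static int round_to(int x, int unit) { return (x + unit - 1) / unit * unit; }

int main() {
  unsigned mask = 0;
  int offset = 12, size = 4;            // a 4-byte store at offset 12
  int l = offset / HeapWordSize;        // first word touched: 1
  int h = round_to(offset + size, HeapWordSize) / HeapWordSize; // one past last: 2
  if (l > ARG_OFFSET_MAX) l = ARG_OFFSET_MAX;
  if (h > ARG_OFFSET_MAX + 1) h = ARG_OFFSET_MAX + 1;
  for (int i = l; i < h; i++) mask |= 1u << i;
  printf("mask = 0x%x\n", mask);        // prints 0x2: only word 1 is dirty
  return 0;
}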
 void BCEscapeAnalyzer::invoke(StateInfo &state, Bytecodes::Code code, ciMethod* target, ciKlass* holder) {
   int i;
@@ -197,6 +242,7 @@ void BCEscapeAnalyzer::invoke(StateInfo &state, Bytecodes::Code code, ciMethod*
     for (i = 0; i < arg_size; i++) {
       set_method_escape(state.raw_pop());
     }
+    _unknown_modified = true; // assume the worst since we don't analyze the called method
     return;
   }
@@ -224,6 +270,11 @@ void BCEscapeAnalyzer::invoke(StateInfo &state, Bytecodes::Code code, ciMethod*
       ArgumentMap arg = state.raw_pop();
       if (!is_argument(arg))
         continue;
+      for (int j = 0; j < _arg_size; j++) {
+        if (arg.contains(j)) {
+          _arg_modified[j] |= analyzer._arg_modified[i];
+        }
+      }
       if (!is_arg_stack(arg)) {
         // arguments have already been recognized as escaping
       } else if (analyzer.is_arg_stack(i) && !analyzer.is_arg_returned(i)) {
@@ -233,6 +284,7 @@ void BCEscapeAnalyzer::invoke(StateInfo &state, Bytecodes::Code code, ciMethod*
         set_global_escape(arg);
       }
     }
+    _unknown_modified = _unknown_modified || analyzer.has_non_arg_side_affects();
     // record dependencies if at least one parameter retained stack-allocatable
     if (must_record_dependencies) {
@@ -250,8 +302,10 @@ void BCEscapeAnalyzer::invoke(StateInfo &state, Bytecodes::Code code, ciMethod*
       ArgumentMap arg = state.raw_pop();
       if (!is_argument(arg))
         continue;
+      set_modified(arg, OFFSET_ANY, type2size[T_INT]*HeapWordSize);
       set_global_escape(arg);
     }
+    _unknown_modified = true; // assume the worst since we don't know the called method
   }
 }
@@ -421,6 +475,7 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
       state.spop();
       ArgumentMap arr = state.apop();
       set_method_escape(arr);
+      set_modified(arr, OFFSET_ANY, type2size[T_INT]*HeapWordSize);
       break;
     }
     case Bytecodes::_lastore:
@@ -430,6 +485,7 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
       state.spop();
       ArgumentMap arr = state.apop();
       set_method_escape(arr);
+      set_modified(arr, OFFSET_ANY, type2size[T_LONG]*HeapWordSize);
       break;
     }
     case Bytecodes::_aastore:
@@ -437,6 +493,7 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
       set_global_escape(state.apop());
       state.spop();
       ArgumentMap arr = state.apop();
+      set_modified(arr, OFFSET_ANY, type2size[T_OBJECT]*HeapWordSize);
       break;
     }
     case Bytecodes::_pop:
@@ -762,6 +819,7 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
       if (s.cur_bc() != Bytecodes::_putstatic) {
         ArgumentMap p = state.apop();
         set_method_escape(p);
+        set_modified(p, will_link ? field->offset() : OFFSET_ANY, type2size[field_type]*HeapWordSize);
       }
     }
     break;
@@ -872,7 +930,7 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
 }
 void BCEscapeAnalyzer::merge_block_states(StateInfo *blockstates, ciBlock *dest, StateInfo *s_state) {
-  StateInfo *d_state = blockstates+dest->index();
+  StateInfo *d_state = blockstates + dest->index();
   int nlocals = _method->max_locals();
   // exceptions may cause transfer of control to handlers in the middle of a
@@ -916,6 +974,7 @@ void BCEscapeAnalyzer::merge_block_states(StateInfo *blockstates, ciBlock *dest,
   }
   for (int i = 0; i < s_state->_stack_height; i++) {
     ArgumentMap t;
+    //extra_vars |= !d_state->_vars[i] & s_state->_vars[i];
     t.clear();
     t = s_state->_stack[i];
     t.set_difference(d_state->_stack[i]);
@@ -933,7 +992,7 @@ void BCEscapeAnalyzer::iterate_blocks(Arena *arena) {
   int datacount = (numblocks + 1) * (stkSize + numLocals);
   int datasize = datacount * sizeof(ArgumentMap);
-  StateInfo *blockstates = (StateInfo *) arena->Amalloc(_methodBlocks->num_blocks() * sizeof(StateInfo));
+  StateInfo *blockstates = (StateInfo *) arena->Amalloc(numblocks * sizeof(StateInfo));
   ArgumentMap *statedata = (ArgumentMap *) arena->Amalloc(datasize);
   for (int i = 0; i < datacount; i++) ::new ((void*)&statedata[i]) ArgumentMap();
   ArgumentMap *dp = statedata;
@@ -961,33 +1020,35 @@ void BCEscapeAnalyzer::iterate_blocks(Arena *arena) {
   ArgumentMap allVars; // all oop arguments to method
   ciSignature* sig = method()->signature();
   int j = 0;
+  ciBlock* first_blk = _methodBlocks->block_containing(0);
+  int fb_i = first_blk->index();
   if (!method()->is_static()) {
     // record information for "this"
-    blockstates[0]._vars[j].set(j);
+    blockstates[fb_i]._vars[j].set(j);
     allVars.add(j);
     j++;
   }
   for (int i = 0; i < sig->count(); i++) {
     ciType* t = sig->type_at(i);
     if (!t->is_primitive_type()) {
-      blockstates[0]._vars[j].set(j);
+      blockstates[fb_i]._vars[j].set(j);
       allVars.add(j);
     }
     j += t->size();
   }
-  blockstates[0]._initialized = true;
+  blockstates[fb_i]._initialized = true;
   assert(j == _arg_size, "just checking");
   ArgumentMap unknown_map;
   unknown_map.add_unknown();
-  worklist.push(_methodBlocks->block_containing(0));
+  worklist.push(first_blk);
   while(worklist.length() > 0) {
     ciBlock *blk = worklist.pop();
-    StateInfo *blkState = blockstates+blk->index();
+    StateInfo *blkState = blockstates + blk->index();
     if (blk->is_handler() || blk->is_ret_target()) {
       // for an exception handler or a target of a ret instruction, we assume the worst case,
-      // that any variable or stack slot could contain any argument
+      // that any variable could contain any argument
       for (int i = 0; i < numLocals; i++) {
         state._vars[i] = allVars;
       }
@@ -997,6 +1058,7 @@ void BCEscapeAnalyzer::iterate_blocks(Arena *arena) {
         state._stack_height = blkState->_stack_height;
       }
       for (int i = 0; i < state._stack_height; i++) {
+        // ??? should this be unknown_map ???
         state._stack[i] = allVars;
       }
     } else {
@@ -1053,6 +1115,7 @@ vmIntrinsics::ID BCEscapeAnalyzer::known_intrinsic() {
   vmIntrinsics::ID iid = method()->intrinsic_id();
   if (iid == vmIntrinsics::_getClass ||
+      iid == vmIntrinsics::_fillInStackTrace ||
       iid == vmIntrinsics::_hashCode)
     return iid;
   else
@@ -1060,12 +1123,16 @@ vmIntrinsics::ID BCEscapeAnalyzer::known_intrinsic() {
 }
 bool BCEscapeAnalyzer::compute_escape_for_intrinsic(vmIntrinsics::ID iid) {
-  ArgumentMap empty;
-  empty.clear();
+  ArgumentMap arg;
+  arg.clear();
   switch (iid) {
   case vmIntrinsics::_getClass:
     _return_local = false;
     break;
+  case vmIntrinsics::_fillInStackTrace:
+    arg.set(0); // 'this'
+    set_returned(arg);
+    break;
   case vmIntrinsics::_hashCode:
     // initialized state is correct
     break;
@@ -1109,15 +1176,21 @@ void BCEscapeAnalyzer::initialize() {
     _return_allocated = true;
   }
   _allocated_escapes = false;
+  _unknown_modified = false;
 }
 void BCEscapeAnalyzer::clear_escape_info() {
   ciSignature* sig = method()->signature();
   int arg_count = sig->count();
   ArgumentMap var;
+  if (!method()->is_static()) {
+    arg_count++; // allow for "this"
+  }
   for (int i = 0; i < arg_count; i++) {
+    set_arg_modified(i, OFFSET_ANY, 4);
     var.clear();
     var.set(i);
+    set_modified(var, OFFSET_ANY, 4);
     set_global_escape(var);
   }
   _arg_local.clear();
@@ -1126,6 +1199,7 @@ void BCEscapeAnalyzer::clear_escape_info() {
   _return_local = false;
   _return_allocated = false;
   _allocated_escapes = true;
+  _unknown_modified = true;
 }
@@ -1173,8 +1247,14 @@ void BCEscapeAnalyzer::compute_escape_info() {
   initialize();
-  // do not scan method if it has no object parameters
-  if (_arg_local.is_empty()) {
+  // Do not scan method if it has no object parameters and
+  // does not returns an object (_return_allocated is set in initialize()).
+  if (_arg_local.is_empty() && !_return_allocated) {
+    // Clear all info since method's bytecode was not analysed and
+    // set pessimistic escape information.
+    clear_escape_info();
+    methodData()->set_eflag(methodDataOopDesc::allocated_escapes);
+    methodData()->set_eflag(methodDataOopDesc::unknown_modified);
     methodData()->set_eflag(methodDataOopDesc::estimated);
     return;
   }
@@ -1185,36 +1265,8 @@ void BCEscapeAnalyzer::compute_escape_info() {
     success = do_analysis();
   }
-  // dump result of bytecode analysis
-#ifndef PRODUCT
-  if (BCEATraceLevel >= 3) {
-    tty->print("[EA] estimated escape information for");
-    if (iid != vmIntrinsics::_none)
-      tty->print(" intrinsic");
-    method()->print_short_name();
-    tty->print_cr(has_dependencies() ? " (not stored)" : "");
-    tty->print(" non-escaping args: ");
-    _arg_local.print_on(tty);
-    tty->print(" stack-allocatable args: ");
-    _arg_stack.print_on(tty);
-    if (_return_local) {
-      tty->print(" returned args: ");
-      _arg_returned.print_on(tty);
-    } else if (is_return_allocated()) {
-      tty->print_cr(" allocated return values");
-    } else {
-      tty->print_cr(" non-local return values");
-    }
-    tty->cr();
-    tty->print(" flags: ");
-    if (_return_allocated)
-      tty->print(" return_allocated");
-    tty->cr();
-  }
-#endif
-  // don't store interprocedural escape information if it introduces dependencies
-  // or if method data is empty
+  // don't store interprocedural escape information if it introduces
+  // dependencies or if method data is empty
   //
   if (!has_dependencies() && !methodData()->is_empty()) {
     for (i = 0; i < _arg_size; i++) {
@@ -1228,10 +1280,20 @@ void BCEscapeAnalyzer::compute_escape_info() {
       if (_arg_returned.at(i)) {
        methodData()->set_arg_returned(i);
       }
+      methodData()->set_arg_modified(i, _arg_modified[i]);
     }
     if (_return_local) {
       methodData()->set_eflag(methodDataOopDesc::return_local);
     }
+    if (_return_allocated) {
+      methodData()->set_eflag(methodDataOopDesc::return_allocated);
+    }
+    if (_allocated_escapes) {
+      methodData()->set_eflag(methodDataOopDesc::allocated_escapes);
+    }
+    if (_unknown_modified) {
+      methodData()->set_eflag(methodDataOopDesc::unknown_modified);
+    }
     methodData()->set_eflag(methodDataOopDesc::estimated);
   }
 }
@@ -1244,29 +1306,50 @@ void BCEscapeAnalyzer::read_escape_info() {
     _arg_local.at_put(i, methodData()->is_arg_local(i));
     _arg_stack.at_put(i, methodData()->is_arg_stack(i));
     _arg_returned.at_put(i, methodData()->is_arg_returned(i));
+    _arg_modified[i] = methodData()->arg_modified(i);
   }
   _return_local = methodData()->eflag_set(methodDataOopDesc::return_local);
+  _return_allocated = methodData()->eflag_set(methodDataOopDesc::return_allocated);
+  _allocated_escapes = methodData()->eflag_set(methodDataOopDesc::allocated_escapes);
+  _unknown_modified = methodData()->eflag_set(methodDataOopDesc::unknown_modified);
-  // dump result of loaded escape information
-#ifndef PRODUCT
-  if (BCEATraceLevel >= 4) {
-    tty->print(" non-escaping args: ");
-    _arg_local.print_on(tty);
-    tty->print(" stack-allocatable args: ");
-    _arg_stack.print_on(tty);
-    if (_return_local) {
-      tty->print(" returned args: ");
-      _arg_returned.print_on(tty);
-    } else {
-      tty->print_cr(" non-local return values");
-    }
-    tty->print(" modified args: ");
-    tty->cr();
-  }
-#endif
 }
+
+#ifndef PRODUCT
+void BCEscapeAnalyzer::dump() {
+  tty->print("[EA] estimated escape information for");
+  method()->print_short_name();
+  tty->print_cr(has_dependencies() ? " (not stored)" : "");
+  tty->print(" non-escaping args: ");
+  _arg_local.print_on(tty);
+  tty->print(" stack-allocatable args: ");
+  _arg_stack.print_on(tty);
+  if (_return_local) {
+    tty->print(" returned args: ");
+    _arg_returned.print_on(tty);
+  } else if (is_return_allocated()) {
+    tty->print_cr(" return allocated value");
+  } else {
+    tty->print_cr(" return non-local value");
+  }
+  tty->print(" modified args: ");
+  for (int i = 0; i < _arg_size; i++) {
+    if (_arg_modified[i] == 0)
+      tty->print(" 0");
+    else
+      tty->print(" 0x%x", _arg_modified[i]);
+  }
+  tty->cr();
+  tty->print(" flags: ");
+  if (_return_allocated)
+    tty->print(" return_allocated");
+  if (_allocated_escapes)
+    tty->print(" allocated_escapes");
+  if (_unknown_modified)
+    tty->print(" unknown_modified");
+  tty->cr();
+}
+#endif
 BCEscapeAnalyzer::BCEscapeAnalyzer(ciMethod* method, BCEscapeAnalyzer* parent)
   : _conservative(method == NULL || !EstimateArgEscape)
@@ -1281,6 +1364,7 @@ BCEscapeAnalyzer::BCEscapeAnalyzer(ciMethod* method, BCEscapeAnalyzer* parent)
   , _return_local(false)
   , _return_allocated(false)
   , _allocated_escapes(false)
+  , _unknown_modified(false)
   , _dependencies()
   , _parent(parent)
   , _level(parent == NULL ? 0 : parent->level() + 1) {
@@ -1290,6 +1374,8 @@ BCEscapeAnalyzer::BCEscapeAnalyzer(ciMethod* method, BCEscapeAnalyzer* parent)
     _arg_returned.clear();
     _dirty.clear();
     Arena* arena = CURRENT_ENV->arena();
+    _arg_modified = (uint *) arena->Amalloc(_arg_size * sizeof(uint));
+    Copy::zero_to_bytes(_arg_modified, _arg_size * sizeof(uint));
     if (methodData() == NULL)
       return;
@@ -1307,6 +1393,12 @@ BCEscapeAnalyzer::BCEscapeAnalyzer(ciMethod* method, BCEscapeAnalyzer* parent)
       compute_escape_info();
       methodData()->update_escape_info();
     }
+#ifndef PRODUCT
+    if (BCEATraceLevel >= 3) {
+      // dump escape information
+      dump();
+    }
+#endif
   }
 }


@@ -46,10 +46,13 @@ class BCEscapeAnalyzer : public ResourceObj {
   BitMap _arg_stack;
   BitMap _arg_returned;
   BitMap _dirty;
+  enum{ ARG_OFFSET_MAX = 31};
+  uint *_arg_modified;
   bool _return_local;
-  bool _allocated_escapes;
   bool _return_allocated;
+  bool _allocated_escapes;
+  bool _unknown_modified;
   ciObjectList _dependencies;
@@ -80,6 +83,7 @@ class BCEscapeAnalyzer : public ResourceObj {
   void set_method_escape(ArgumentMap vars);
   void set_global_escape(ArgumentMap vars);
   void set_dirty(ArgumentMap vars);
+  void set_modified(ArgumentMap vars, int offs, int size);
   bool is_recursive_call(ciMethod* callee);
   void add_dependence(ciKlass *klass, ciMethod *meth);
@@ -140,6 +144,18 @@ class BCEscapeAnalyzer : public ResourceObj {
     return !_conservative && _return_allocated && !_allocated_escapes;
   }
+  // Tracking of argument modification
+  enum {OFFSET_ANY = -1};
+  bool is_arg_modified(int arg, int offset, int size_in_bytes);
+  void set_arg_modified(int arg, int offset, int size_in_bytes);
+  bool has_non_arg_side_affects() { return _unknown_modified; }
+
   // Copy dependencies from this analysis into "deps"
   void copy_dependencies(Dependencies *deps);
+
+#ifndef PRODUCT
+  // dump escape information
+  void dump();
+#endif
 };


@@ -102,7 +102,7 @@ public:
   BasicType layout_type() { return type2field[(_type == NULL) ? T_OBJECT : _type->basic_type()]; }
   // How big is this field in memory?
-  int size_in_bytes() { return type2aelembytes[layout_type()]; }
+  int size_in_bytes() { return type2aelembytes(layout_type()); }
   // What is the offset of this field?
   int offset() {


@@ -34,7 +34,9 @@
 // ciInstanceKlass::ciInstanceKlass
 //
 // Loaded instance klass.
-ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) : ciKlass(h_k) {
+ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) :
+  ciKlass(h_k), _non_static_fields(NULL)
+{
   assert(get_Klass()->oop_is_instance(), "wrong type");
   instanceKlass* ik = get_instanceKlass();
@@ -335,6 +337,37 @@ ciField* ciInstanceKlass::get_field_by_offset(int field_offset, bool is_static)
   return field;
 }
+// ------------------------------------------------------------------
+// ciInstanceKlass::non_static_fields.
+class NonStaticFieldFiller: public FieldClosure {
+  GrowableArray<ciField*>* _arr;
+  ciEnv* _curEnv;
+public:
+  NonStaticFieldFiller(ciEnv* curEnv, GrowableArray<ciField*>* arr) :
+    _curEnv(curEnv), _arr(arr)
+  {}
+  void do_field(fieldDescriptor* fd) {
+    ciField* field = new (_curEnv->arena()) ciField(fd);
+    _arr->append(field);
+  }
+};
+
+GrowableArray<ciField*>* ciInstanceKlass::non_static_fields() {
+  if (_non_static_fields == NULL) {
+    VM_ENTRY_MARK;
+    ciEnv* curEnv = ciEnv::current();
+    instanceKlass* ik = get_instanceKlass();
+    int max_n_fields = ik->fields()->length()/instanceKlass::next_offset;
+    _non_static_fields =
+      new (curEnv->arena()) GrowableArray<ciField*>(max_n_fields);
+    NonStaticFieldFiller filler(curEnv, _non_static_fields);
+    ik->do_nonstatic_fields(&filler);
+  }
+  return _non_static_fields;
+}
+
 static int sort_field_by_offset(ciField** a, ciField** b) {
   return (*a)->offset_in_bytes() - (*b)->offset_in_bytes();
   // (no worries about 32-bit overflow...)
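non_static_fields() above lazily builds its cached array by handing a FieldClosure to instanceKlass::do_nonstatic_fields(); the shape is a plain visitor, with the klass driving the iteration. A standalone reduction of that shape, with hypothetical names, for readers unfamiliar with HotSpot's closure idiom:

#include <cstdio>
#include <vector>

struct FieldDesc { int offset; };       // stand-in for fieldDescriptor

struct FieldClosure {                   // abstract visitor, as in HotSpot
  virtual void do_field(FieldDesc* fd) = 0;
};

struct Collector : FieldClosure {       // fills an array, like NonStaticFieldFiller
  std::vector<FieldDesc*> fields;
  void do_field(FieldDesc* fd) { fields.push_back(fd); }
};

// The iteration owner calls the closure once per field.
void for_each_field(std::vector<FieldDesc>& all, FieldClosure& cl) {
  for (auto& f : all) cl.do_field(&f);
}

int main() {
  std::vector<FieldDesc> all = {{8}, {16}};
  Collector c;
  for_each_field(all, c);
  printf("%zu fields collected\n", c.fields.size()); // 2 fields collected
  return 0;
}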


@@ -46,6 +46,7 @@ private:
   bool _has_subklass;
   ciFlags _flags;
   jint _nonstatic_field_size;
+  jint _nonstatic_oop_map_size;
   // Lazy fields get filled in only upon request.
   ciInstanceKlass* _super;
@@ -58,6 +59,8 @@ private:
   ciInstanceKlass* _implementors[implementors_limit];
   jint _nof_implementors;
+  GrowableArray<ciField*>* _non_static_fields;
+
 protected:
   ciInstanceKlass(KlassHandle h_k);
   ciInstanceKlass(ciSymbol* name, jobject loader, jobject protection_domain);
@@ -129,6 +132,9 @@ public:
   jint nonstatic_field_size() {
     assert(is_loaded(), "must be loaded");
     return _nonstatic_field_size; }
+  jint nonstatic_oop_map_size() {
+    assert(is_loaded(), "must be loaded");
+    return _nonstatic_oop_map_size; }
   ciInstanceKlass* super();
   jint nof_implementors() {
     assert(is_loaded(), "must be loaded");
@@ -138,6 +144,9 @@ public:
   ciInstanceKlass* get_canonical_holder(int offset);
   ciField* get_field_by_offset(int field_offset, bool is_static);
+
+  GrowableArray<ciField*>* non_static_fields();
+
   // total number of nonstatic fields (including inherited):
   int nof_nonstatic_fields() {
     if (_nonstatic_fields == NULL)


@@ -146,7 +146,7 @@ void ciMethod::load_code() {
   memcpy(_code, me->code_base(), code_size());
   // Revert any breakpoint bytecodes in ci's copy
-  if (_is_compilable && me->number_of_breakpoints() > 0) {
+  if (me->number_of_breakpoints() > 0) {
     BreakpointInfo* bp = instanceKlass::cast(me->method_holder())->breakpoints();
     for (; bp != NULL; bp = bp->next()) {
       if (bp->match(me)) {


@@ -67,6 +67,14 @@ ciBlock *ciMethodBlocks::split_block_at(int bci) {
       break;
     }
   }
+  // Move an exception handler information if needed.
+  if (former_block->is_handler()) {
+    int ex_start = former_block->ex_start_bci();
+    int ex_end = former_block->ex_limit_bci();
+    new_block->set_exception_range(ex_start, ex_end);
+    // Clear information in former_block.
+    former_block->clear_exception_handler();
+  }
   return former_block;
 }
@@ -102,7 +110,7 @@ void ciMethodBlocks::do_analysis() {
       // one and end the old one.
       assert(cur_block != NULL, "must always have a current block");
       ciBlock *new_block = block_containing(bci);
-      if (new_block == NULL) {
+      if (new_block == NULL || new_block == cur_block) {
         // We have not marked this bci as the start of a new block.
         // Keep interpreting the current_range.
         _bci_to_block[bci] = cur_block;
@@ -254,9 +262,33 @@ ciMethodBlocks::ciMethodBlocks(Arena *arena, ciMethod *meth): _method(meth),
   for(ciExceptionHandlerStream str(meth); !str.is_done(); str.next()) {
     ciExceptionHandler* handler = str.handler();
     ciBlock *eb = make_block_at(handler->handler_bci());
-    eb->set_handler();
+    //
+    // Several exception handlers can have the same handler_bci:
+    //
+    //  try {
+    //    if (a.foo(b) < 0) {
+    //      return a.error();
+    //    }
+    //    return CoderResult.UNDERFLOW;
+    //  } finally {
+    //    a.position(b);
+    //  }
+    //
+    // The try block above is divided into 2 exception blocks
+    // separated by 'areturn' bci.
+    //
     int ex_start = handler->start();
     int ex_end = handler->limit();
+    if (eb->is_handler()) {
+      // Extend old handler exception range to cover additional range.
+      int old_ex_start = eb->ex_start_bci();
+      int old_ex_end = eb->ex_limit_bci();
+      if (ex_start > old_ex_start)
+        ex_start = old_ex_start;
+      if (ex_end < old_ex_end)
+        ex_end = old_ex_end;
+      eb->clear_exception_handler(); // Reset exception information
+    }
     eb->set_exception_range(ex_start, ex_end);
     // ensure a block at the start of exception range and start of following code
     (void) make_block_at(ex_start);
@@ -312,9 +344,10 @@ ciBlock::ciBlock(ciMethod *method, int index, ciMethodBlocks *mb, int start_bci)
 void ciBlock::set_exception_range(int start_bci, int limit_bci) {
   assert(limit_bci >= start_bci, "valid range");
-  assert(is_handler(), "must be handler");
+  assert(!is_handler() && _ex_start_bci == -1 && _ex_limit_bci == -1, "must not be handler");
   _ex_start_bci = start_bci;
   _ex_limit_bci = limit_bci;
+  set_handler();
 }
 #ifndef PRODUCT
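The merging logic above unions the two [start, limit) ranges of table entries that share a handler_bci; the patch spells the min/max out with explicit compares. A tiny self-contained check of that rule:

#include <algorithm>
#include <cassert>

int main() {
  int old_start = 10, old_end = 20;  // range already recorded on the handler block
  int ex_start = 14, ex_end = 31;    // second table entry with the same handler_bci
  ex_start = std::min(ex_start, old_start);
  ex_end   = std::max(ex_end, old_end);
  assert(ex_start == 10 && ex_end == 31); // merged range covers both entries
  return 0;
}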


@@ -110,9 +110,10 @@ public:
   void set_does_jsr() { _flags |= DoesJsr; }
   void clear_does_jsr() { _flags &= ~DoesJsr; }
   void set_does_ret() { _flags |= DoesRet; }
-  void clear_does_ret() { _flags |= DoesRet; }
+  void clear_does_ret() { _flags &= ~DoesRet; }
   void set_is_ret_target() { _flags |= RetTarget; }
   void set_has_handler() { _flags |= HasHandler; }
+  void clear_exception_handler() { _flags &= ~Handler; _ex_start_bci = -1; _ex_limit_bci = -1; }
 #ifndef PRODUCT
   ciMethod *method() const { return _method; }
   void dump();


@@ -42,6 +42,8 @@ ciMethodData::ciMethodData(methodDataHandle h_md) : ciObject(h_md) {
   // Set an initial hint. Don't use set_hint_di() because
   // first_di() may be out of bounds if data_size is 0.
   _hint_di = first_di();
+  // Initialize the escape information (to "don't know.");
+  _eflags = _arg_local = _arg_stack = _arg_returned = 0;
 }
 // ------------------------------------------------------------------
@@ -59,6 +61,8 @@ ciMethodData::ciMethodData() : ciObject() {
   // Set an initial hint. Don't use set_hint_di() because
   // first_di() may be out of bounds if data_size is 0.
   _hint_di = first_di();
+  // Initialize the escape information (to "don't know.");
+  _eflags = _arg_local = _arg_stack = _arg_returned = 0;
 }
 void ciMethodData::load_data() {
@@ -142,6 +146,8 @@ ciProfileData* ciMethodData::data_at(int data_index) {
     return new ciBranchData(data_layout);
   case DataLayout::multi_branch_data_tag:
     return new ciMultiBranchData(data_layout);
+  case DataLayout::arg_info_data_tag:
+    return new ciArgInfoData(data_layout);
   };
 }
@@ -172,6 +178,9 @@ ciProfileData* ciMethodData::bci_to_data(int bci) {
       _saw_free_extra_data = true; // observed an empty slot (common case)
       return NULL;
     }
+    if (dp->tag() == DataLayout::arg_info_data_tag) {
+      break; // ArgInfoData is at the end of extra data section.
+    }
     if (dp->bci() == bci) {
       assert(dp->tag() == DataLayout::bit_data_tag, "sane");
       return new ciBitData(dp);
@@ -217,8 +226,14 @@ int ciMethodData::trap_recompiled_at(ciProfileData* data) {
 void ciMethodData::clear_escape_info() {
   VM_ENTRY_MARK;
   methodDataOop mdo = get_methodDataOop();
-  if (mdo != NULL)
+  if (mdo != NULL) {
     mdo->clear_escape_info();
+    ArgInfoData *aid = arg_info();
+    int arg_count = (aid == NULL) ? 0 : aid->number_of_args();
+    for (int i = 0; i < arg_count; i++) {
+      set_arg_modified(i, 0);
+    }
+  }
   _eflags = _arg_local = _arg_stack = _arg_returned = 0;
 }
@@ -231,6 +246,10 @@ void ciMethodData::update_escape_info() {
     mdo->set_arg_local(_arg_local);
     mdo->set_arg_stack(_arg_stack);
     mdo->set_arg_returned(_arg_returned);
+    int arg_count = mdo->method()->size_of_parameters();
+    for (int i = 0; i < arg_count; i++) {
+      mdo->set_arg_modified(i, arg_modified(i));
+    }
   }
 }
@@ -262,6 +281,14 @@ void ciMethodData::set_arg_returned(int i) {
   set_nth_bit(_arg_returned, i);
 }
+void ciMethodData::set_arg_modified(int arg, uint val) {
+  ArgInfoData *aid = arg_info();
+  if (aid == NULL)
+    return;
+  assert(arg >= 0 && arg < aid->number_of_args(), "valid argument number");
+  aid->set_arg_modified(arg, val);
+}
+
 bool ciMethodData::is_arg_local(int i) const {
   return is_set_nth_bit(_arg_local, i);
 }
@@ -274,6 +301,14 @@ bool ciMethodData::is_arg_returned(int i) const {
   return is_set_nth_bit(_arg_returned, i);
 }
+uint ciMethodData::arg_modified(int arg) const {
+  ArgInfoData *aid = arg_info();
+  if (aid == NULL)
+    return 0;
+  assert(arg >= 0 && arg < aid->number_of_args(), "valid argument number");
+  return aid->arg_modified(arg);
+}
+
 ByteSize ciMethodData::offset_of_slot(ciProfileData* data, ByteSize slot_offset_in_data) {
   // Get offset within methodDataOop of the data array
   ByteSize data_offset = methodDataOopDesc::data_offset();
@@ -287,6 +322,18 @@ ByteSize ciMethodData::offset_of_slot(ciProfileData* data, ByteSize slot_offset_
   return in_ByteSize(offset);
 }
+ciArgInfoData *ciMethodData::arg_info() const {
+  // Should be last, have to skip all traps.
+  DataLayout* dp  = data_layout_at(data_size());
+  DataLayout* end = data_layout_at(data_size() + extra_data_size());
+  for (; dp < end; dp = methodDataOopDesc::next_extra(dp)) {
+    if (dp->tag() == DataLayout::arg_info_data_tag)
+      return new ciArgInfoData(dp);
+  }
+  return NULL;
+}
+
 // Implementation of the print method.
 void ciMethodData::print_impl(outputStream* st) {
   ciObject::print_impl(st);
@@ -305,6 +352,22 @@ void ciMethodData::print_data_on(outputStream* st) {
     st->fill_to(6);
     data->print_data_on(st);
   }
+  st->print_cr("--- Extra data:");
+  DataLayout* dp  = data_layout_at(data_size());
+  DataLayout* end = data_layout_at(data_size() + extra_data_size());
+  for (; dp < end; dp = methodDataOopDesc::next_extra(dp)) {
+    if (dp->tag() == DataLayout::no_tag) continue;
+    if (dp->tag() == DataLayout::bit_data_tag) {
+      data = new BitData(dp);
+    } else {
+      assert(dp->tag() == DataLayout::arg_info_data_tag, "must be BitData or ArgInfo");
+      data = new ciArgInfoData(dp);
+      dp = end; // ArgInfoData is at the end of extra data section.
+    }
+    st->print("%d", dp_to_di(data->dp()));
+    st->fill_to(6);
+    data->print_data_on(st);
+  }
 }
 void ciReceiverTypeData::print_receiver_data_on(outputStream* st) {


@@ -30,6 +30,7 @@ class ciRetData;
 class ciBranchData;
 class ciArrayData;
 class ciMultiBranchData;
+class ciArgInfoData;
 typedef ProfileData ciProfileData;
@@ -121,6 +122,11 @@ public:
   ciMultiBranchData(DataLayout* layout) : MultiBranchData(layout) {};
 };
+class ciArgInfoData : public ArgInfoData {
+public:
+  ciArgInfoData(DataLayout* layout) : ArgInfoData(layout) {};
+};
+
 // ciMethodData
 //
 // This class represents a methodDataOop in the HotSpot virtual
@@ -163,9 +169,9 @@ private:
   ciMethodData();
   // Accessors
-  int data_size() { return _data_size; }
-  int extra_data_size() { return _extra_data_size; }
-  intptr_t * data() { return _data; }
+  int data_size() const { return _data_size; }
+  int extra_data_size() const { return _extra_data_size; }
+  intptr_t * data() const { return _data; }
   methodDataOop get_methodDataOop() const {
     if (handle() == NULL) return NULL;
@@ -178,7 +184,7 @@ private:
   void print_impl(outputStream* st);
-  DataLayout* data_layout_at(int data_index) {
+  DataLayout* data_layout_at(int data_index) const {
     assert(data_index % sizeof(intptr_t) == 0, "unaligned");
     return (DataLayout*) (((address)_data) + data_index);
   }
@@ -207,6 +213,8 @@ private:
   // What is the index of the first data entry?
   int first_di() { return 0; }
+  ciArgInfoData *arg_info() const;
+
 public:
   bool is_method_data() { return true; }
   bool is_empty() { return _state == empty_state; }
@@ -270,10 +278,12 @@ public:
   void set_arg_local(int i);
   void set_arg_stack(int i);
   void set_arg_returned(int i);
+  void set_arg_modified(int arg, uint val);
   bool is_arg_local(int i) const;
   bool is_arg_stack(int i) const;
   bool is_arg_returned(int i) const;
+  uint arg_modified(int arg) const;
   // Code generation helper
   ByteSize offset_of_slot(ciProfileData* data, ByteSize slot_offset_in_data);


@@ -0,0 +1,43 @@
+/*
+ * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_ciObjArray.cpp.incl"
+
+// ciObjArray
+//
+// This class represents an objArrayOop in the HotSpot virtual
+// machine.
+
+ciObject* ciObjArray::obj_at(int index) {
+  VM_ENTRY_MARK;
+  objArrayOop array = get_objArrayOop();
+  if (index < 0 || index >= array->length()) return NULL;
+  oop o = array->obj_at(index);
+  if (o == NULL) {
+    return ciNullObject::make();
+  } else {
+    return CURRENT_ENV->get_object(o);
+  }
+}


@@ -43,4 +43,6 @@ protected:
 public:
   // What kind of ciObject is this?
   bool is_obj_array() { return true; }
+
+  ciObject* obj_at(int index);
 };


@@ -155,8 +155,8 @@ bool Dictionary::do_unloading(BoolObjectClosure* is_alive) {
         for (int i = ik->previous_versions()->length() - 1; i >= 0; i--) {
           // check the previous versions array for GC'ed weak refs
           PreviousVersionNode * pv_node = ik->previous_versions()->at(i);
-          jweak cp_ref = pv_node->prev_constant_pool();
-          assert(cp_ref != NULL, "weak cp ref was unexpectedly cleared");
+          jobject cp_ref = pv_node->prev_constant_pool();
+          assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
           if (cp_ref == NULL) {
             delete pv_node;
             ik->previous_versions()->remove_at(i);


@@ -143,13 +143,43 @@ Handle java_lang_String::create_from_platform_dependent_str(const char* str, TRA
   jstring js = NULL;
   { JavaThread* thread = (JavaThread*)THREAD;
     assert(thread->is_Java_thread(), "must be java thread");
-    ThreadToNativeFromVM ttn(thread);
     HandleMark hm(thread);
+    ThreadToNativeFromVM ttn(thread);
     js = (_to_java_string_fn)(thread->jni_environment(), str);
   }
   return Handle(THREAD, JNIHandles::resolve(js));
 }
+
+// Converts a Java String to a native C string that can be used for
+// native OS calls.
+char* java_lang_String::as_platform_dependent_str(Handle java_string, TRAPS) {
+
+  typedef char* (*to_platform_string_fn_t)(JNIEnv*, jstring, bool*);
+  static to_platform_string_fn_t _to_platform_string_fn = NULL;
+
+  if (_to_platform_string_fn == NULL) {
+    void *lib_handle = os::native_java_library();
+    _to_platform_string_fn = CAST_TO_FN_PTR(to_platform_string_fn_t, hpi::dll_lookup(lib_handle, "GetStringPlatformChars"));
+    if (_to_platform_string_fn == NULL) {
+      fatal("GetStringPlatformChars missing");
+    }
+  }
+
+  char *native_platform_string;
+  { JavaThread* thread = (JavaThread*)THREAD;
+    assert(thread->is_Java_thread(), "must be java thread");
+    JNIEnv *env = thread->jni_environment();
+    jstring js = (jstring) JNIHandles::make_local(env, java_string());
+    bool is_copy;
+    HandleMark hm(thread);
+    ThreadToNativeFromVM ttn(thread);
+    native_platform_string = (_to_platform_string_fn)(env, js, &is_copy);
+    assert(is_copy == JNI_TRUE, "is_copy value changed");
+    JNIHandles::destroy_local(js);
+  }
+  return native_platform_string;
+}
+
 Handle java_lang_String::char_converter(Handle java_string, jchar from_char, jchar to_char, TRAPS) {
   oop obj = java_string();
   // Typical usage is to convert all '/' to '.' in string.
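as_platform_dependent_str() above borrows the JDK's private GetStringPlatformChars export. The publicly documented JNI cousin follows the same borrow-or-copy-then-release pattern; a minimal sketch using standard JNI, assuming a valid JNIEnv and jstring arrive from an ordinary JNI call:

#include <jni.h>
#include <cstdio>

// Print a Java string's UTF-8 bytes from native code. GetStringUTFChars
// may hand back a copy; ReleaseStringUTFChars ends the borrow either way.
void print_java_string(JNIEnv* env, jstring js) {
  const char* chars = env->GetStringUTFChars(js, NULL);
  if (chars == NULL) return;  // OutOfMemoryError is already pending
  printf("%s\n", chars);
  env->ReleaseStringUTFChars(js, chars);
}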


@@ -96,6 +96,7 @@ class java_lang_String : AllStatic {
   // String converters
   static char* as_utf8_string(oop java_string);
   static char* as_utf8_string(oop java_string, int start, int len);
+  static char* as_platform_dependent_str(Handle java_string, TRAPS);
   static jchar* as_unicode_string(oop java_string, int& length);
   static bool equals(oop java_string, jchar* chars, int len);
View File

@ -1242,7 +1242,9 @@ static instanceKlassHandle download_and_retry_class_load(
oop obj = (oop) result.get_jobject(); oop obj = (oop) result.get_jobject();
if (obj == NULL) { return nk; } if (obj == NULL) { return nk; }
char* new_class_name = java_lang_String::as_utf8_string(obj); Handle h_obj(THREAD, obj);
char* new_class_name = java_lang_String::as_platform_dependent_str(h_obj,
CHECK_(nk));
// lock the loader // lock the loader
// we use this lock because JVMTI does. // we use this lock because JVMTI does.


@@ -318,6 +318,11 @@ inline bool match_F_SN(jshort flags) {
   const int neg = JVM_ACC_SYNCHRONIZED;
   return (flags & (req | neg)) == req;
 }
+inline bool match_F_RNY(jshort flags) {
+  const int req = JVM_ACC_NATIVE | JVM_ACC_SYNCHRONIZED;
+  const int neg = JVM_ACC_STATIC;
+  return (flags & (req | neg)) == req;
+}
 // These are for forming case labels:
 #define ID3(x, y, z) (( jint)(z) + \
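match_F_RNY accepts a method that is native and synchronized but not static. The req/neg idiom masks out only the bits under test, so unrelated flags cannot disturb the match. A standalone check of the idiom (the JVM_ACC values used are the class-file access-flag constants, included here only for the demo):

#include <cassert>

int main() {
  const int JVM_ACC_STATIC       = 0x0008;
  const int JVM_ACC_SYNCHRONIZED = 0x0020;
  const int JVM_ACC_NATIVE       = 0x0100;

  const int req = JVM_ACC_NATIVE | JVM_ACC_SYNCHRONIZED;  // must be set
  const int neg = JVM_ACC_STATIC;                         // must be clear

  int f1 = JVM_ACC_NATIVE | JVM_ACC_SYNCHRONIZED;                  // matches
  int f2 = JVM_ACC_NATIVE | JVM_ACC_SYNCHRONIZED | JVM_ACC_STATIC; // static: rejected
  assert((f1 & (req | neg)) == req);
  assert((f2 & (req | neg)) != req);
  return 0;
}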
@@ -359,6 +364,7 @@ const char* vmIntrinsics::short_name_as_C_string(vmIntrinsics::ID id, char* buf,
     case F_RN: fname = "native "; break;
     case F_SN: fname = "native static "; break;
     case F_S:  fname = "static "; break;
+    case F_RNY:fname = "native synchronized "; break;
   }
   const char* kptr = strrchr(kname, '/');
   if (kptr != NULL) kname = kptr + 1;
@@ -485,7 +491,7 @@ void vmIntrinsics::verify_method(ID actual_id, methodOop m) {
     if (PrintMiscellaneous && (WizardMode || Verbose)) {
       tty->print_cr("*** misidentified method; %s(%d) should be %s(%d):",
                     declared_name, declared_id, actual_name, actual_id);
-      m->print_short_name(tty);
+      mh()->print_short_name(tty);
       tty->cr();
     }
   }


@@ -58,12 +58,17 @@
   template(java_lang_ThreadDeath, "java/lang/ThreadDeath") \
   template(java_lang_Boolean, "java/lang/Boolean") \
   template(java_lang_Character, "java/lang/Character") \
+  template(java_lang_Character_CharacterCache, "java/lang/Character$CharacterCache") \
   template(java_lang_Float, "java/lang/Float") \
   template(java_lang_Double, "java/lang/Double") \
   template(java_lang_Byte, "java/lang/Byte") \
+  template(java_lang_Byte_Cache, "java/lang/Byte$ByteCache") \
   template(java_lang_Short, "java/lang/Short") \
+  template(java_lang_Short_ShortCache, "java/lang/Short$ShortCache") \
   template(java_lang_Integer, "java/lang/Integer") \
+  template(java_lang_Integer_IntegerCache, "java/lang/Integer$IntegerCache") \
   template(java_lang_Long, "java/lang/Long") \
+  template(java_lang_Long_LongCache, "java/lang/Long$LongCache") \
   template(java_lang_Shutdown, "java/lang/Shutdown") \
   template(java_lang_ref_Reference, "java/lang/ref/Reference") \
   template(java_lang_ref_SoftReference, "java/lang/ref/SoftReference") \
@@ -91,10 +96,11 @@
   template(java_util_Vector, "java/util/Vector") \
   template(java_util_AbstractList, "java/util/AbstractList") \
   template(java_util_Hashtable, "java/util/Hashtable") \
+  template(java_util_HashMap, "java/util/HashMap") \
   template(java_lang_Compiler, "java/lang/Compiler") \
   template(sun_misc_Signal, "sun/misc/Signal") \
   template(java_lang_AssertionStatusDirectives, "java/lang/AssertionStatusDirectives") \
   template(sun_jkernel_DownloadManager, "sun/jkernel/DownloadManager") \
   template(getBootClassPathEntryForClass_name, "getBootClassPathEntryForClass") \
   \
   /* class file format tags */ \
@@ -274,7 +280,9 @@
   template(exclusive_owner_thread_name, "exclusiveOwnerThread") \
   template(park_blocker_name, "parkBlocker") \
   template(park_event_name, "nativeParkEventPointer") \
+  template(cache_field_name, "cache") \
   template(value_name, "value") \
+  template(frontCacheEnabled_name, "frontCacheEnabled") \
   \
   /* non-intrinsic name/signature pairs: */ \
   template(register_method_name, "register") \
@@ -576,6 +584,8 @@
   do_name( attemptUpdate_name, "attemptUpdate") \
   do_signature(attemptUpdate_signature, "(JJ)Z") \
   \
+  do_intrinsic(_fillInStackTrace, java_lang_Throwable, fillInStackTrace_name, void_throwable_signature, F_RNY) \
+  \
   /* support for sun.misc.Unsafe */ \
   do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \
   \
@@ -863,7 +873,8 @@ class vmIntrinsics: AllStatic {
     F_R,  // !static !synchronized (R="regular")
     F_S,  //  static !synchronized
     F_RN, // !static native !synchronized
-    F_SN  //  static native !synchronized
+    F_SN, //  static native !synchronized
+    F_RNY // !static native synchronized
   };
 public:


@@ -47,7 +47,8 @@ ScopeValue* DebugInfoReadStream::read_object_value() {
   }
 #endif
   ObjectValue* result = new ObjectValue(id);
-  _obj_pool->append(result);
+  // Cache the object since an object field could reference it.
+  _obj_pool->push(result);
   result->read_object(this);
   return result;
 }
@@ -56,9 +57,9 @@ ScopeValue* DebugInfoReadStream::get_cached_object() {
   int id = read_int();
   assert(_obj_pool != NULL, "object pool does not exist");
   for (int i = _obj_pool->length() - 1; i >= 0; i--) {
-    ObjectValue* sv = (ObjectValue*) _obj_pool->at(i);
-    if (sv->id() == id) {
-      return sv;
+    ObjectValue* ov = (ObjectValue*) _obj_pool->at(i);
+    if (ov->id() == id) {
+      return ov;
     }
   }
   ShouldNotReachHere();


@@ -882,6 +882,14 @@ klassOop ClassHierarchyWalker::find_witness_in(DepChange& changes,
   // Must not move the class hierarchy during this check:
   assert_locked_or_safepoint(Compile_lock);
+  int nof_impls = instanceKlass::cast(context_type)->nof_implementors();
+  if (nof_impls > 1) {
+    // Avoid this case: *I.m > { A.m, C }; B.m > C
+    // %%% Until this is fixed more systematically, bail out.
+    // See corresponding comment in find_witness_anywhere.
+    return context_type;
+  }
   assert(!is_participant(new_type), "only old classes are participants");
   if (participants_hide_witnesses) {
     // If the new type is a subtype of a participant, we are done.
@@ -1491,9 +1499,12 @@ bool DepChange::ContextStream::next() {
     // fall through:
     _change_type = Change_new_sub;
   case Change_new_sub:
-    _klass = instanceKlass::cast(_klass)->super();
-    if (_klass != NULL) {
-      return true;
+    // 6598190: brackets workaround Sun Studio C++ compiler bug 6629277
+    {
+      _klass = instanceKlass::cast(_klass)->super();
+      if (_klass != NULL) {
+        return true;
+      }
     }
     // else set up _ti_limit and fall through:
     _ti_limit = (_ti_base == NULL) ? 0 : _ti_base->length();
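The bracketed block above works around a Sun Studio parser bug (6629277) triggered by statements that follow a case label in a fall-through chain. A hypothetical reduction of the shape, only to show where the extra braces go; it does not reproduce the compiler bug itself:

#include <cstdio>

bool next_state(int& phase) {
  switch (phase) {
  case 0:
    phase = 1;
    // fall through:
  case 1:
    { // the extra brackets are the workaround shape
      phase = phase + 1;
      if (phase < 3) return true;
    }
  default:
    break;
  }
  return false;
}

int main() {
  int p = 0;
  while (next_state(p)) printf("phase=%d\n", p); // prints phase=2 once
  return 0;
}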


@@ -1971,7 +1971,7 @@ void nmethod::print_dependencies() {
     if (ctxk != NULL) {
       Klass* k = Klass::cast(ctxk);
       if (k->oop_is_instance() && ((instanceKlass*)k)->is_dependent_nmethod(this)) {
-        tty->print(" [nmethod<=klass]%s", k->external_name());
+        tty->print_cr(" [nmethod<=klass]%s", k->external_name());
       }
     }
     deps.log_dependency(); // put it into the xml log also


@@ -91,7 +91,9 @@ GrowableArray<ScopeValue*>* ScopeDesc::decode_object_values(int decode_offset) {
   DebugInfoReadStream* stream = new DebugInfoReadStream(_code, decode_offset, result);
   int length = stream->read_int();
   for (int index = 0; index < length; index++) {
-    result->push(ScopeValue::read_from(stream));
+    // Objects values are pushed to 'result' array during read so that
+    // object's fields could reference it (OBJECT_ID_CODE).
+    (void)ScopeValue::read_from(stream);
   }
   assert(result->length() == length, "inconsistent debug information");
   return result;


@@ -36,16 +36,16 @@ const int VMRegImpl::register_count = ConcreteRegisterImpl::number_of_registers;
 // Register names
 const char *VMRegImpl::regName[ConcreteRegisterImpl::number_of_registers];
-void VMRegImpl::print() {
 #ifndef PRODUCT
+void VMRegImpl::print_on(outputStream* st) const {
   if( is_reg() ) {
     assert( VMRegImpl::regName[value()], "" );
-    tty->print("%s",VMRegImpl::regName[value()]);
+    st->print("%s",VMRegImpl::regName[value()]);
   } else if (is_stack()) {
     int stk = value() - stack0->value();
-    tty->print("[%d]", stk*4);
+    st->print("[%d]", stk*4);
   } else {
-    tty->print("BAD!");
+    st->print("BAD!");
   }
-#endif // PRODUCT
 }
+#endif // PRODUCT


@@ -66,9 +66,9 @@ public:
     }
   }
   static VMReg Bad() { return (VMReg) (intptr_t) BAD; }
-  bool is_valid() { return ((intptr_t) this) != BAD; }
-  bool is_stack() { return (intptr_t) this >= (intptr_t) stack0; }
-  bool is_reg() { return is_valid() && !is_stack(); }
+  bool is_valid() const { return ((intptr_t) this) != BAD; }
+  bool is_stack() const { return (intptr_t) this >= (intptr_t) stack0; }
+  bool is_reg() const { return is_valid() && !is_stack(); }
   // A concrete register is a value that returns true for is_reg() and is
   // also a register you could use in the assembler. On machines with
@@ -96,7 +96,8 @@ public:
   intptr_t value() const {return (intptr_t) this; }
-  void print();
+  void print_on(outputStream* st) const PRODUCT_RETURN;
+  void print() const { print_on(tty); }
   // bias a stack slot.
   // Typically used to adjust a virtual frame slots by amounts that are offset by


@ -506,27 +506,27 @@ bool OopMap::has_derived_pointer() const {
} }
void print_register_type(OopMapValue::oop_types x, VMReg optional) { static void print_register_type(OopMapValue::oop_types x, VMReg optional, outputStream* st) {
switch( x ) { switch( x ) {
case OopMapValue::oop_value: case OopMapValue::oop_value:
tty->print("Oop"); st->print("Oop");
break; break;
case OopMapValue::value_value: case OopMapValue::value_value:
tty->print("Value" ); st->print("Value" );
break; break;
case OopMapValue::dead_value: case OopMapValue::dead_value:
tty->print("Dead" ); st->print("Dead" );
break; break;
case OopMapValue::callee_saved_value: case OopMapValue::callee_saved_value:
tty->print("Callers_" ); st->print("Callers_" );
optional->print(); optional->print_on(st);
break; break;
case OopMapValue::derived_oop_value: case OopMapValue::derived_oop_value:
tty->print("Derived_oop_" ); st->print("Derived_oop_" );
optional->print(); optional->print_on(st);
break; break;
case OopMapValue::stack_obj: case OopMapValue::stack_obj:
tty->print("Stack"); st->print("Stack");
break; break;
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
@ -534,11 +534,11 @@ void print_register_type(OopMapValue::oop_types x, VMReg optional) {
} }
void OopMapValue::print() const { void OopMapValue::print_on(outputStream* st) const {
reg()->print(); reg()->print_on(st);
tty->print("="); st->print("=");
print_register_type(type(),content_reg()); print_register_type(type(),content_reg(),st);
tty->print(" "); st->print(" ");
} }
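
The pattern applied by this change is worth stating once: every print routine takes an explicit outputStream* and formats into it, while the argument-free print() merely forwards to the default stream. Below is a minimal, self-contained C++ sketch of that idiom; outputStream here is a stand-in written for the example, not HotSpot's class, and VMRegSketch is a hypothetical name.

#include <cstdarg>
#include <cstdio>

// Stand-in for HotSpot's outputStream, written for this example only.
class outputStream {
public:
  void print(const char* fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    vprintf(fmt, ap);
    va_end(ap);
  }
};

outputStream default_stream;   // plays the role of tty
outputStream* tty = &default_stream;

class VMRegSketch {
  int _value;
public:
  explicit VMRegSketch(int v) : _value(v) {}
  // All formatting goes through the caller-supplied stream ...
  void print_on(outputStream* st) const { st->print("reg[%d]", _value); }
  // ... and the no-argument form just forwards to the default stream.
  void print() const { print_on(tty); }
};

int main() {
  VMRegSketch r(3);
  r.print();          // writes to tty
  r.print_on(tty);    // identical, but any stream could be passed
  tty->print("\n");
  return 0;
}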

View File

@ -129,7 +129,8 @@ public:
return reg()->reg2stack(); return reg()->reg2stack();
} }
void print( ) const PRODUCT_RETURN; void print_on(outputStream* st) const PRODUCT_RETURN;
void print() const { print_on(tty); }
}; };

View File

@ -1071,85 +1071,56 @@ void BinaryTreeDictionary::reportStatistics() const {
// for each list in the tree. Also print some summary // for each list in the tree. Also print some summary
// information. // information.
class printTreeCensusClosure : public AscendTreeCensusClosure { class printTreeCensusClosure : public AscendTreeCensusClosure {
int _print_line;
size_t _totalFree; size_t _totalFree;
AllocationStats _totals; FreeList _total;
size_t _count;
public: public:
printTreeCensusClosure() { printTreeCensusClosure() {
_print_line = 0;
_totalFree = 0; _totalFree = 0;
_count = 0;
_totals.initialize();
} }
AllocationStats* totals() { return &_totals; } FreeList* total() { return &_total; }
size_t count() { return _count; }
void increment_count_by(size_t v) { _count += v; }
size_t totalFree() { return _totalFree; } size_t totalFree() { return _totalFree; }
void increment_totalFree_by(size_t v) { _totalFree += v; }
void do_list(FreeList* fl) { void do_list(FreeList* fl) {
bool nl = false; // "maybe this is not needed" isNearLargestChunk(fl->head()); if (++_print_line >= 40) {
FreeList::print_labels_on(gclog_or_tty, "size");
gclog_or_tty->print("%c %4d\t\t" "%7d\t" "%7d\t" _print_line = 0;
"%7d\t" "%7d\t" "%7d\t" "%7d\t" }
"%7d\t" "%7d\t" "%7d\t" fl->print_on(gclog_or_tty);
"%7d\t" "\n", _totalFree += fl->count() * fl->size() ;
" n"[nl], fl->size(), fl->bfrSurp(), fl->surplus(), total()->set_count( total()->count() + fl->count() );
fl->desired(), fl->prevSweep(), fl->beforeSweep(), fl->count(), total()->set_bfrSurp( total()->bfrSurp() + fl->bfrSurp() );
fl->coalBirths(), fl->coalDeaths(), fl->splitBirths(), total()->set_surplus( total()->splitDeaths() + fl->surplus() );
fl->splitDeaths()); total()->set_desired( total()->desired() + fl->desired() );
total()->set_prevSweep( total()->prevSweep() + fl->prevSweep() );
increment_totalFree_by(fl->count() * fl->size()); total()->set_beforeSweep(total()->beforeSweep() + fl->beforeSweep());
increment_count_by(fl->count()); total()->set_coalBirths( total()->coalBirths() + fl->coalBirths() );
totals()->set_bfrSurp(totals()->bfrSurp() + fl->bfrSurp()); total()->set_coalDeaths( total()->coalDeaths() + fl->coalDeaths() );
totals()->set_surplus(totals()->splitDeaths() + fl->surplus()); total()->set_splitBirths(total()->splitBirths() + fl->splitBirths());
totals()->set_prevSweep(totals()->prevSweep() + fl->prevSweep()); total()->set_splitDeaths(total()->splitDeaths() + fl->splitDeaths());
totals()->set_beforeSweep(totals()->beforeSweep() + fl->beforeSweep());
totals()->set_coalBirths(totals()->coalBirths() + fl->coalBirths());
totals()->set_coalDeaths(totals()->coalDeaths() + fl->coalDeaths());
totals()->set_splitBirths(totals()->splitBirths() + fl->splitBirths());
totals()->set_splitDeaths(totals()->splitDeaths() + fl->splitDeaths());
} }
}; };
void BinaryTreeDictionary::printDictCensus(void) const { void BinaryTreeDictionary::printDictCensus(void) const {
gclog_or_tty->print("\nBinaryTree\n"); gclog_or_tty->print("\nBinaryTree\n");
gclog_or_tty->print( FreeList::print_labels_on(gclog_or_tty, "size");
"%4s\t\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t"
"%7s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t" "\n",
"size", "bfrsurp", "surplus", "desired", "prvSwep", "bfrSwep",
"count", "cBirths", "cDeaths", "sBirths", "sDeaths");
printTreeCensusClosure ptc; printTreeCensusClosure ptc;
ptc.do_tree(root()); ptc.do_tree(root());
FreeList* total = ptc.total();
FreeList::print_labels_on(gclog_or_tty, " ");
total->print_on(gclog_or_tty, "TOTAL\t");
gclog_or_tty->print( gclog_or_tty->print(
"\t\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t" "totalFree(words): " SIZE_FORMAT_W(16)
"%7s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t" "\n", " growth: %8.5f deficit: %8.5f\n",
"bfrsurp", "surplus", "prvSwep", "bfrSwep",
"count", "cBirths", "cDeaths", "sBirths", "sDeaths");
gclog_or_tty->print(
"%s\t\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t"
"%7d\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t" "\n",
"totl",
ptc.totals()->bfrSurp(),
ptc.totals()->surplus(),
ptc.totals()->prevSweep(),
ptc.totals()->beforeSweep(),
ptc.count(),
ptc.totals()->coalBirths(),
ptc.totals()->coalDeaths(),
ptc.totals()->splitBirths(),
ptc.totals()->splitDeaths());
gclog_or_tty->print("totalFree(words): %7d growth: %8.5f deficit: %8.5f\n",
ptc.totalFree(), ptc.totalFree(),
(double)(ptc.totals()->splitBirths()+ptc.totals()->coalBirths() (double)(total->splitBirths() + total->coalBirths()
-ptc.totals()->splitDeaths()-ptc.totals()->coalDeaths()) - total->splitDeaths() - total->coalDeaths())
/(ptc.totals()->prevSweep() != 0 ? /(total->prevSweep() != 0 ? (double)total->prevSweep() : 1.0),
(double)ptc.totals()->prevSweep() : 1.0), (double)(total->desired() - total->count())
(double)(ptc.totals()->desired() - ptc.count()) /(total->desired() != 0 ? (double)total->desired() : 1.0));
/(ptc.totals()->desired() != 0 ?
(double)ptc.totals()->desired() : 1.0));
} }
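
The growth and deficit figures printed above reduce to two ratios: net births (split plus coalesce, minus deaths) over the previous sweep's population, and the shortfall of count against desired over desired, each guarded against division by zero. A small standalone sketch of that arithmetic, with made-up sample numbers:

#include <cstdio>

// Net change in chunk population since the previous sweep, as a fraction
// of that previous population; the zero guard mirrors the code above.
static double growth(long split_births, long coal_births,
                     long split_deaths, long coal_deaths,
                     long prev_sweep) {
  double net = (double)(split_births + coal_births - split_deaths - coal_deaths);
  return net / (prev_sweep != 0 ? (double)prev_sweep : 1.0);
}

// Fraction by which the current count falls short of the desired count.
static double deficit(long desired, long count) {
  return (double)(desired - count) / (desired != 0 ? (double)desired : 1.0);
}

int main() {
  // Sample numbers: 120 births against 80 deaths on a population of 400
  // gives 10% growth; 450 chunks against 500 desired gives a 10% deficit.
  printf("growth: %8.5f deficit: %8.5f\n",
         growth(70, 50, 45, 35, 400), deficit(500, 450));
  return 0;
}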
// Verify the following tree invariants: // Verify the following tree invariants:

View File

@ -1835,7 +1835,7 @@ void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
guarantee(false, "NYI"); guarantee(false, "NYI");
} }
bool CompactibleFreeListSpace::linearAllocationWouldFail() { bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
return _smallLinearAllocBlock._word_size == 0; return _smallLinearAllocBlock._word_size == 0;
} }
@ -1906,6 +1906,13 @@ CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
} }
} }
// Support for concurrent collection policy decisions.
bool CompactibleFreeListSpace::should_concurrent_collect() const {
// In the future we might want to add in fragmentation stats --
// including erosion of the "mountain" into this decision as well.
return !adaptive_freelists() && linearAllocationWouldFail();
}
// Support for compaction // Support for compaction
void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) { void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
@ -2013,11 +2020,11 @@ void CompactibleFreeListSpace::clearFLCensus() {
} }
} }
void CompactibleFreeListSpace::endSweepFLCensus(int sweepCt) { void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
setFLSurplus(); setFLSurplus();
setFLHints(); setFLHints();
if (PrintGC && PrintFLSCensus > 0) { if (PrintGC && PrintFLSCensus > 0) {
printFLCensus(sweepCt); printFLCensus(sweep_count);
} }
clearFLCensus(); clearFLCensus();
assert_locked(); assert_locked();
@ -2293,59 +2300,37 @@ void CompactibleFreeListSpace::checkFreeListConsistency() const {
} }
#endif #endif
void CompactibleFreeListSpace::printFLCensus(int sweepCt) const { void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
assert_lock_strong(&_freelistLock); assert_lock_strong(&_freelistLock);
ssize_t bfrSurp = 0; FreeList total;
ssize_t surplus = 0; gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
ssize_t desired = 0; FreeList::print_labels_on(gclog_or_tty, "size");
ssize_t prevSweep = 0;
ssize_t beforeSweep = 0;
ssize_t count = 0;
ssize_t coalBirths = 0;
ssize_t coalDeaths = 0;
ssize_t splitBirths = 0;
ssize_t splitDeaths = 0;
gclog_or_tty->print("end sweep# %d\n", sweepCt);
gclog_or_tty->print("%4s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t"
"%7s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t"
"%7s\t" "\n",
"size", "bfrsurp", "surplus", "desired", "prvSwep",
"bfrSwep", "count", "cBirths", "cDeaths", "sBirths",
"sDeaths");
size_t totalFree = 0; size_t totalFree = 0;
for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
const FreeList *fl = &_indexedFreeList[i]; const FreeList *fl = &_indexedFreeList[i];
totalFree += fl->count() * fl->size(); totalFree += fl->count() * fl->size();
if (i % (40*IndexSetStride) == 0) {
gclog_or_tty->print("%4d\t" "%7d\t" "%7d\t" "%7d\t" FreeList::print_labels_on(gclog_or_tty, "size");
"%7d\t" "%7d\t" "%7d\t" "%7d\t" }
"%7d\t" "%7d\t" "%7d\t" "\n", fl->print_on(gclog_or_tty);
fl->size(), fl->bfrSurp(), fl->surplus(), fl->desired(), total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() );
fl->prevSweep(), fl->beforeSweep(), fl->count(), fl->coalBirths(), total.set_surplus( total.surplus() + fl->surplus() );
fl->coalDeaths(), fl->splitBirths(), fl->splitDeaths()); total.set_desired( total.desired() + fl->desired() );
bfrSurp += fl->bfrSurp(); total.set_prevSweep( total.prevSweep() + fl->prevSweep() );
surplus += fl->surplus(); total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
desired += fl->desired(); total.set_count( total.count() + fl->count() );
prevSweep += fl->prevSweep(); total.set_coalBirths( total.coalBirths() + fl->coalBirths() );
beforeSweep += fl->beforeSweep(); total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() );
count += fl->count(); total.set_splitBirths(total.splitBirths() + fl->splitBirths());
coalBirths += fl->coalBirths(); total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
coalDeaths += fl->coalDeaths();
splitBirths += fl->splitBirths();
splitDeaths += fl->splitDeaths();
} }
gclog_or_tty->print("%4s\t" total.print_on(gclog_or_tty, "TOTAL");
"%7d\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t" gclog_or_tty->print_cr("Total free in indexed lists "
"%7d\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t" "\n", SIZE_FORMAT " words", totalFree);
"totl",
bfrSurp, surplus, desired, prevSweep, beforeSweep,
count, coalBirths, coalDeaths, splitBirths, splitDeaths);
gclog_or_tty->print_cr("Total free in indexed lists %d words", totalFree);
gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n", gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
(double)(splitBirths+coalBirths-splitDeaths-coalDeaths)/ (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
(prevSweep != 0 ? (double)prevSweep : 1.0), (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
(double)(desired - count)/(desired != 0 ? (double)desired : 1.0)); (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
_dictionary->printDictCensus(); _dictionary->printDictCensus();
} }

View File

@ -418,7 +418,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// chunk exists, return NULL. // chunk exists, return NULL.
FreeChunk* find_chunk_at_end(); FreeChunk* find_chunk_at_end();
bool adaptive_freelists() { return _adaptive_freelists; } bool adaptive_freelists() const { return _adaptive_freelists; }
void set_collector(CMSCollector* collector) { _collector = collector; } void set_collector(CMSCollector* collector) { _collector = collector; }
@ -566,7 +566,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
FreeChunk* allocateScratch(size_t size); FreeChunk* allocateScratch(size_t size);
// returns true if either the small or large linear allocation buffer is empty. // returns true if either the small or large linear allocation buffer is empty.
bool linearAllocationWouldFail(); bool linearAllocationWouldFail() const;
// Adjust the chunk for the minimum size. This version is called in // Adjust the chunk for the minimum size. This version is called in
// most cases in CompactibleFreeListSpace methods. // most cases in CompactibleFreeListSpace methods.
@ -585,6 +585,9 @@ class CompactibleFreeListSpace: public CompactibleSpace {
void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size, void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
bool coalesced); bool coalesced);
// Support for decisions regarding concurrent collection policy
bool should_concurrent_collect() const;
// Support for compaction // Support for compaction
void prepare_for_compaction(CompactPoint* cp); void prepare_for_compaction(CompactPoint* cp);
void adjust_pointers(); void adjust_pointers();
@ -622,7 +625,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// coalescing of chunks during the sweep of garbage. // coalescing of chunks during the sweep of garbage.
// Print the statistics for the free lists. // Print the statistics for the free lists.
void printFLCensus(int sweepCt) const; void printFLCensus(size_t sweep_count) const;
// Statistics functions // Statistics functions
// Initialize census for lists before the sweep. // Initialize census for lists before the sweep.
@ -635,12 +638,11 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Clear the census for each of the free lists. // Clear the census for each of the free lists.
void clearFLCensus(); void clearFLCensus();
// Perform functions for the census after the end of the sweep. // Perform functions for the census after the end of the sweep.
void endSweepFLCensus(int sweepCt); void endSweepFLCensus(size_t sweep_count);
// Return true if the count of free chunks is greater // Return true if the count of free chunks is greater
// than the desired number of free chunks. // than the desired number of free chunks.
bool coalOverPopulated(size_t size); bool coalOverPopulated(size_t size);
// Record (for each size): // Record (for each size):
// //
// split-births = #chunks added due to splits in (prev-sweep-end, // split-births = #chunks added due to splits in (prev-sweep-end,

View File

@ -3121,12 +3121,7 @@ ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
if (GCExpandToAllocateDelayMillis > 0) { if (GCExpandToAllocateDelayMillis > 0) {
os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
} }
size_t adj_word_sz = CompactibleFreeListSpace::adjustObjectSize(word_size); return have_lock_and_allocate(word_size, tlab);
if (parallel) {
return cmsSpace()->par_allocate(adj_word_sz);
} else {
return cmsSpace()->allocate(adj_word_sz);
}
} }
// YSR: All of this generation expansion/shrinking stuff is an exact copy of // YSR: All of this generation expansion/shrinking stuff is an exact copy of
@ -5732,13 +5727,19 @@ void CMSCollector::sweep(bool asynch) {
// in the perm_gen_verify_bit_map. In order to do that we traverse // in the perm_gen_verify_bit_map. In order to do that we traverse
// all blocks in perm gen and mark all dead objects. // all blocks in perm gen and mark all dead objects.
if (verifying() && !cms_should_unload_classes()) { if (verifying() && !cms_should_unload_classes()) {
CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
bitMapLock());
assert(perm_gen_verify_bit_map()->sizeInBits() != 0, assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
"Should have already been allocated"); "Should have already been allocated");
MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(), MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
markBitMap(), perm_gen_verify_bit_map()); markBitMap(), perm_gen_verify_bit_map());
_permGen->cmsSpace()->blk_iterate(&mdo); if (asynch) {
CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
bitMapLock());
_permGen->cmsSpace()->blk_iterate(&mdo);
} else {
// In the case of synchronous sweep, we already have
// the requisite locks/tokens.
_permGen->cmsSpace()->blk_iterate(&mdo);
}
} }
if (asynch) { if (asynch) {

View File

@ -302,3 +302,29 @@ void FreeList::assert_proper_lock_protection_work() const {
#endif #endif
} }
#endif #endif
// Print the "label line" for free list stats.
void FreeList::print_labels_on(outputStream* st, const char* c) {
st->print("%16s\t", c);
st->print("%14s\t" "%14s\t" "%14s\t" "%14s\t" "%14s\t"
"%14s\t" "%14s\t" "%14s\t" "%14s\t" "%14s\t" "\n",
"bfrsurp", "surplus", "desired", "prvSwep", "bfrSwep",
"count", "cBirths", "cDeaths", "sBirths", "sDeaths");
}
// Print the AllocationStats for the given free list. If the second argument
// to the call is a non-null string, it is printed in the first column;
// otherwise, if the argument is null (the default), then the size of the
// (free list) block is printed in the first column.
void FreeList::print_on(outputStream* st, const char* c) const {
if (c != NULL) {
st->print("%16s", c);
} else {
st->print(SIZE_FORMAT_W(16), size());
}
st->print("\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
bfrSurp(), surplus(), desired(), prevSweep(), beforeSweep(),
count(), coalBirths(), coalDeaths(), splitBirths(), splitDeaths());
}
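
Together, print_labels_on() and print_on() yield a fixed-width census table: one header row, then a row per list whose first column is either the block size or a caller-supplied caption such as "TOTAL". The toy printer below mirrors that layout with only three of the statistics columns, purely for illustration:

#include <cstddef>
#include <cstdio>

// Toy row printer mirroring the label / size-or-caption layout above;
// only three of the ten statistics columns are shown, for brevity.
static void print_labels(const char* c) {
  printf("%16s\t%14s\t%14s\t%14s\n", c, "bfrsurp", "surplus", "count");
}

static void print_row(size_t size, const char* c,
                      long bfr_surp, long surplus, long count) {
  if (c != NULL) {
    printf("%16s", c);       // caption row, e.g. "TOTAL"
  } else {
    printf("%16zu", size);   // ordinary row keyed by block size
  }
  printf("\t%14ld\t%14ld\t%14ld\n", bfr_surp, surplus, count);
}

int main() {
  print_labels("size");
  print_row(8,  NULL,    2, 1, 37);
  print_row(16, NULL,    0, 3, 12);
  print_row(0,  "TOTAL", 2, 4, 49);
  return 0;
}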

View File

@ -38,6 +38,7 @@ class Mutex;
class FreeList VALUE_OBJ_CLASS_SPEC { class FreeList VALUE_OBJ_CLASS_SPEC {
friend class CompactibleFreeListSpace; friend class CompactibleFreeListSpace;
friend class printTreeCensusClosure;
FreeChunk* _head; // List of free chunks FreeChunk* _head; // List of free chunks
FreeChunk* _tail; // Tail of list of free chunks FreeChunk* _tail; // Tail of list of free chunks
size_t _size; // Size in Heap words of each chunk size_t _size; // Size in Heap words of each chunk
@ -63,10 +64,11 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
protected: protected:
void init_statistics(); void init_statistics();
void set_count(ssize_t v) { _count = v;} void set_count(ssize_t v) { _count = v;}
void increment_count() { _count++; } void increment_count() { _count++; }
void decrement_count() { void decrement_count() {
_count--; _count--;
assert(_count >= 0, "Count should not be negative"); } assert(_count >= 0, "Count should not be negative");
}
public: public:
// Constructor // Constructor
@ -159,6 +161,10 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
ssize_t desired() const { ssize_t desired() const {
return _allocation_stats.desired(); return _allocation_stats.desired();
} }
void set_desired(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_desired(v);
}
void compute_desired(float inter_sweep_current, void compute_desired(float inter_sweep_current,
float inter_sweep_estimate) { float inter_sweep_estimate) {
assert_proper_lock_protection(); assert_proper_lock_protection();
@ -298,4 +304,8 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
// Verify that the chunk is in the list. // Verify that the chunk is in the list.
// found. Return NULL if "fc" is not found. // found. Return NULL if "fc" is not found.
bool verifyChunkInFreeLists(FreeChunk* fc) const; bool verifyChunkInFreeLists(FreeChunk* fc) const;
// Printing support
static void print_labels_on(outputStream* st, const char* c);
void print_on(outputStream* st, const char* c = NULL) const;
}; };

View File

@ -19,15 +19,22 @@
// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
// CA 95054 USA or visit www.sun.com if you need additional information or // CA 95054 USA or visit www.sun.com if you need additional information or
// have any questions. // have any questions.
// //
// //
// NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps! // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
gcAdaptivePolicyCounters.hpp adaptiveSizePolicy.hpp allocationStats.cpp allocationStats.hpp
gcAdaptivePolicyCounters.hpp gcPolicyCounters.hpp allocationStats.cpp ostream.hpp
gcAdaptivePolicyCounters.cpp resourceArea.hpp allocationStats.hpp allocation.hpp
allocationStats.hpp gcUtil.hpp
allocationStats.hpp globalDefinitions.hpp
gcAdaptivePolicyCounters.hpp adaptiveSizePolicy.hpp
gcAdaptivePolicyCounters.hpp gcPolicyCounters.hpp
gcAdaptivePolicyCounters.cpp resourceArea.hpp
gcAdaptivePolicyCounters.cpp gcAdaptivePolicyCounters.hpp gcAdaptivePolicyCounters.cpp gcAdaptivePolicyCounters.hpp
gSpaceCounters.cpp generation.hpp gSpaceCounters.cpp generation.hpp
@ -44,7 +51,7 @@ immutableSpace.cpp universe.hpp
isGCActiveMark.hpp parallelScavengeHeap.hpp isGCActiveMark.hpp parallelScavengeHeap.hpp
markSweep.inline.hpp psParallelCompact.hpp markSweep.inline.hpp psParallelCompact.hpp
mutableNUMASpace.cpp mutableNUMASpace.hpp mutableNUMASpace.cpp mutableNUMASpace.hpp
mutableNUMASpace.cpp sharedHeap.hpp mutableNUMASpace.cpp sharedHeap.hpp

View File

@ -74,8 +74,8 @@ size_t ASParNewGeneration::available_to_live() const {
#ifdef SHRINKS_AT_END_OF_EDEN #ifdef SHRINKS_AT_END_OF_EDEN
size_t delta_in_survivor = 0; size_t delta_in_survivor = 0;
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t space_alignment = heap->intra_generation_alignment(); const size_t space_alignment = heap->intra_heap_alignment();
const size_t gen_alignment = heap->generation_alignment(); const size_t gen_alignment = heap->object_heap_alignment();
MutableSpace* space_shrinking = NULL; MutableSpace* space_shrinking = NULL;
if (from_space()->end() > to_space()->end()) { if (from_space()->end() > to_space()->end()) {

View File

@ -785,6 +785,9 @@ void ParNewGeneration::collect(bool full,
swap_spaces(); // Make life simpler for CMS || rescan; see 6483690. swap_spaces(); // Make life simpler for CMS || rescan; see 6483690.
from()->set_next_compaction_space(to()); from()->set_next_compaction_space(to());
gch->set_incremental_collection_will_fail(); gch->set_incremental_collection_will_fail();
// Reset the PromotionFailureALot counters.
NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
} }
// set new iteration safe limit for the survivor spaces // set new iteration safe limit for the survivor spaces
from()->set_concurrent_iteration_safe_limit(from()->top()); from()->set_concurrent_iteration_safe_limit(from()->top());

View File

@ -86,7 +86,7 @@ size_t ASPSYoungGen::available_for_contraction() {
if (eden_space()->is_empty()) { if (eden_space()->is_empty()) {
// Respect the minimum size for eden and for the young gen as a whole. // Respect the minimum size for eden and for the young gen as a whole.
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t eden_alignment = heap->intra_generation_alignment(); const size_t eden_alignment = heap->intra_heap_alignment();
const size_t gen_alignment = heap->young_gen_alignment(); const size_t gen_alignment = heap->young_gen_alignment();
assert(eden_space()->capacity_in_bytes() >= eden_alignment, assert(eden_space()->capacity_in_bytes() >= eden_alignment,
@ -124,7 +124,7 @@ size_t ASPSYoungGen::available_for_contraction() {
// to_space can be. // to_space can be.
size_t ASPSYoungGen::available_to_live() { size_t ASPSYoungGen::available_to_live() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t alignment = heap->intra_generation_alignment(); const size_t alignment = heap->intra_heap_alignment();
// Include any space that is committed but is not in eden. // Include any space that is committed but is not in eden.
size_t available = pointer_delta(eden_space()->bottom(), size_t available = pointer_delta(eden_space()->bottom(),
@ -275,7 +275,7 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
assert(eden_start < from_start, "Cannot push into from_space"); assert(eden_start < from_start, "Cannot push into from_space");
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t alignment = heap->intra_generation_alignment(); const size_t alignment = heap->intra_heap_alignment();
// Check whether from space is below to space // Check whether from space is below to space
if (from_start < to_start) { if (from_start < to_start) {

View File

@ -39,10 +39,10 @@ class GenerationSizer : public TwoGenerationCollectorPolicy {
// If the user hasn't explicitly set the number of worker // If the user hasn't explicitly set the number of worker
// threads, set the count. // threads, set the count.
if (ParallelGCThreads == 0) { assert(UseSerialGC ||
assert(UseParallelGC, "Setting ParallelGCThreads without UseParallelGC"); !FLAG_IS_DEFAULT(ParallelGCThreads) ||
ParallelGCThreads = os::active_processor_count(); (ParallelGCThreads > 0),
} "ParallelGCThreads should be set before flag initialization");
// The survivor ratios are calculated "raw", unlike the // The survivor ratios are calculated "raw", unlike the
// default gc, which adds 2 to the ratio value. We need to // default gc, which adds 2 to the ratio value. We need to

View File

@ -41,7 +41,7 @@ ParMarkBitMap::initialize(MemRegion covered_region)
const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 : const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
MAX2(page_sz, granularity); MAX2(page_sz, granularity);
ReservedSpace rs(bytes, rs_align, false); ReservedSpace rs(bytes, rs_align, rs_align > 0);
os::trace_page_sizes("par bitmap", raw_bytes, raw_bytes, page_sz, os::trace_page_sizes("par bitmap", raw_bytes, raw_bytes, page_sz,
rs.base(), rs.size()); rs.base(), rs.size());
_virtual_space = new PSVirtualSpace(rs, page_sz); _virtual_space = new PSVirtualSpace(rs, page_sz);

View File

@ -173,7 +173,7 @@ jint ParallelScavengeHeap::initialize() {
new PSAdaptiveSizePolicy(eden_capacity, new PSAdaptiveSizePolicy(eden_capacity,
initial_promo_size, initial_promo_size,
young_gen()->to_space()->capacity_in_bytes(), young_gen()->to_space()->capacity_in_bytes(),
intra_generation_alignment(), intra_heap_alignment(),
max_gc_pause_sec, max_gc_pause_sec,
max_gc_minor_pause_sec, max_gc_minor_pause_sec,
GCTimeRatio GCTimeRatio

View File

@ -58,9 +58,9 @@ class ParallelScavengeHeap : public CollectedHeap {
public: public:
ParallelScavengeHeap() : CollectedHeap() { ParallelScavengeHeap() : CollectedHeap() {
set_alignment(_perm_gen_alignment, intra_generation_alignment()); set_alignment(_perm_gen_alignment, intra_heap_alignment());
set_alignment(_young_gen_alignment, intra_generation_alignment()); set_alignment(_young_gen_alignment, intra_heap_alignment());
set_alignment(_old_gen_alignment, intra_generation_alignment()); set_alignment(_old_gen_alignment, intra_heap_alignment());
} }
// For use by VM operations // For use by VM operations
@ -92,14 +92,14 @@ class ParallelScavengeHeap : public CollectedHeap {
void post_initialize(); void post_initialize();
void update_counters(); void update_counters();
// The alignment used for the various generations. // The alignment used for the various generations.
size_t perm_gen_alignment() const { return _perm_gen_alignment; } size_t perm_gen_alignment() const { return _perm_gen_alignment; }
size_t young_gen_alignment() const { return _young_gen_alignment; } size_t young_gen_alignment() const { return _young_gen_alignment; }
size_t old_gen_alignment() const { return _old_gen_alignment; } size_t old_gen_alignment() const { return _old_gen_alignment; }
// The alignment used for eden and survivors within the young gen. // The alignment used for eden and survivors within the young gen
size_t intra_generation_alignment() const { return 64 * K; } // and for boundary between young gen and old gen.
size_t intra_heap_alignment() const { return 64 * K; }
size_t capacity() const; size_t capacity() const;
size_t used() const; size_t used() const;
@ -217,6 +217,6 @@ class ParallelScavengeHeap : public CollectedHeap {
inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val) inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
{ {
assert(is_power_of_2((intptr_t)val), "must be a power of 2"); assert(is_power_of_2((intptr_t)val), "must be a power of 2");
var = round_to(val, intra_generation_alignment()); var = round_to(val, intra_heap_alignment());
return var; return var;
} }

View File

@ -413,7 +413,7 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)
const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 : const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
MAX2(page_sz, granularity); MAX2(page_sz, granularity);
ReservedSpace rs(bytes, rs_align, false); ReservedSpace rs(bytes, rs_align, rs_align > 0);
os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(), os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
rs.size()); rs.size());
PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz); PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);

View File

@ -88,7 +88,7 @@ void PSYoungGen::initialize_work() {
// Compute maximum space sizes for performance counters // Compute maximum space sizes for performance counters
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
size_t alignment = heap->intra_generation_alignment(); size_t alignment = heap->intra_heap_alignment();
size_t size = _virtual_space->reserved_size(); size_t size = _virtual_space->reserved_size();
size_t max_survivor_size; size_t max_survivor_size;
@ -141,7 +141,7 @@ void PSYoungGen::compute_initial_space_boundaries() {
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
// Compute sizes // Compute sizes
size_t alignment = heap->intra_generation_alignment(); size_t alignment = heap->intra_heap_alignment();
size_t size = _virtual_space->committed_size(); size_t size = _virtual_space->committed_size();
size_t survivor_size = size / InitialSurvivorRatio; size_t survivor_size = size / InitialSurvivorRatio;
@ -192,7 +192,7 @@ void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
#ifndef PRODUCT #ifndef PRODUCT
void PSYoungGen::space_invariants() { void PSYoungGen::space_invariants() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t alignment = heap->intra_generation_alignment(); const size_t alignment = heap->intra_heap_alignment();
// Currently, our eden size cannot shrink to zero // Currently, our eden size cannot shrink to zero
guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small"); guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small");
@ -392,7 +392,7 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
char* to_end = (char*)to_space()->end(); char* to_end = (char*)to_space()->end();
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t alignment = heap->intra_generation_alignment(); const size_t alignment = heap->intra_heap_alignment();
const bool maintain_minimum = const bool maintain_minimum =
(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size(); (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
@ -708,7 +708,7 @@ size_t PSYoungGen::available_to_min_gen() {
size_t PSYoungGen::available_to_live() { size_t PSYoungGen::available_to_live() {
size_t delta_in_survivor = 0; size_t delta_in_survivor = 0;
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t space_alignment = heap->intra_generation_alignment(); const size_t space_alignment = heap->intra_heap_alignment();
const size_t gen_alignment = heap->young_gen_alignment(); const size_t gen_alignment = heap->young_gen_alignment();
MutableSpace* space_shrinking = NULL; MutableSpace* space_shrinking = NULL;

View File

@ -98,6 +98,8 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
} }
ssize_t desired() const { return _desired; } ssize_t desired() const { return _desired; }
void set_desired(ssize_t v) { _desired = v; }
ssize_t coalDesired() const { return _coalDesired; } ssize_t coalDesired() const { return _coalDesired; }
void set_coalDesired(ssize_t v) { _coalDesired = v; } void set_coalDesired(ssize_t v) { _coalDesired = v; }

View File

@ -19,7 +19,7 @@
// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
// CA 95054 USA or visit www.sun.com if you need additional information or // CA 95054 USA or visit www.sun.com if you need additional information or
// have any questions. // have any questions.
// //
// //
ad_<arch_model>.cpp adGlobals_<arch_model>.hpp ad_<arch_model>.cpp adGlobals_<arch_model>.hpp
@ -164,6 +164,7 @@ callGenerator.hpp deoptimization.hpp
callGenerator.hpp type.hpp callGenerator.hpp type.hpp
callnode.cpp callnode.hpp callnode.cpp callnode.hpp
callnode.cpp bcEscapeAnalyzer.hpp
callnode.cpp escape.hpp callnode.cpp escape.hpp
callnode.cpp locknode.hpp callnode.cpp locknode.hpp
callnode.cpp machnode.hpp callnode.cpp machnode.hpp
@ -176,7 +177,6 @@ callnode.cpp rootnode.hpp
callnode.cpp runtime.hpp callnode.cpp runtime.hpp
callnode.hpp connode.hpp callnode.hpp connode.hpp
callnode.hpp escape.hpp
callnode.hpp mulnode.hpp callnode.hpp mulnode.hpp
callnode.hpp multnode.hpp callnode.hpp multnode.hpp
callnode.hpp opcodes.hpp callnode.hpp opcodes.hpp
@ -347,7 +347,6 @@ connode.cpp addnode.hpp
connode.cpp allocation.inline.hpp connode.cpp allocation.inline.hpp
connode.cpp compile.hpp connode.cpp compile.hpp
connode.cpp connode.hpp connode.cpp connode.hpp
connode.cpp escape.hpp
connode.cpp machnode.hpp connode.cpp machnode.hpp
connode.cpp matcher.hpp connode.cpp matcher.hpp
connode.cpp memnode.hpp connode.cpp memnode.hpp
@ -410,6 +409,7 @@ domgraph.cpp vectset.hpp
escape.cpp allocation.hpp escape.cpp allocation.hpp
escape.cpp bcEscapeAnalyzer.hpp escape.cpp bcEscapeAnalyzer.hpp
escape.cpp c2compiler.hpp
escape.cpp callnode.hpp escape.cpp callnode.hpp
escape.cpp cfgnode.hpp escape.cpp cfgnode.hpp
escape.cpp compile.hpp escape.cpp compile.hpp
@ -843,7 +843,6 @@ phaseX.cpp block.hpp
phaseX.cpp callnode.hpp phaseX.cpp callnode.hpp
phaseX.cpp cfgnode.hpp phaseX.cpp cfgnode.hpp
phaseX.cpp connode.hpp phaseX.cpp connode.hpp
phaseX.cpp escape.hpp
phaseX.cpp loopnode.hpp phaseX.cpp loopnode.hpp
phaseX.cpp machnode.hpp phaseX.cpp machnode.hpp
phaseX.cpp opcodes.hpp phaseX.cpp opcodes.hpp
@ -990,6 +989,7 @@ stubRoutines.cpp runtime.hpp
subnode.cpp addnode.hpp subnode.cpp addnode.hpp
subnode.cpp allocation.inline.hpp subnode.cpp allocation.inline.hpp
subnode.cpp callnode.hpp
subnode.cpp cfgnode.hpp subnode.cpp cfgnode.hpp
subnode.cpp compileLog.hpp subnode.cpp compileLog.hpp
subnode.cpp connode.hpp subnode.cpp connode.hpp
@ -1086,7 +1086,7 @@ idealGraphPrinter.hpp growableArray.hpp
idealGraphPrinter.hpp ostream.hpp idealGraphPrinter.hpp ostream.hpp
idealGraphPrinter.cpp idealGraphPrinter.hpp idealGraphPrinter.cpp idealGraphPrinter.hpp
idealGraphPrinter.cpp chaitin.hpp idealGraphPrinter.cpp chaitin.hpp
idealGraphPrinter.cpp machnode.hpp idealGraphPrinter.cpp machnode.hpp
idealGraphPrinter.cpp parse.hpp idealGraphPrinter.cpp parse.hpp
idealGraphPrinter.cpp threadCritical.hpp idealGraphPrinter.cpp threadCritical.hpp
@ -1098,4 +1098,4 @@ parse2.cpp idealGraphPrinter.hpp
parse1.cpp idealGraphPrinter.hpp parse1.cpp idealGraphPrinter.hpp
matcher.cpp idealGraphPrinter.hpp matcher.cpp idealGraphPrinter.hpp
loopnode.cpp idealGraphPrinter.hpp loopnode.cpp idealGraphPrinter.hpp
chaitin.cpp idealGraphPrinter.hpp chaitin.cpp idealGraphPrinter.hpp

View File

@ -19,7 +19,7 @@
// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
// CA 95054 USA or visit www.sun.com if you need additional information or // CA 95054 USA or visit www.sun.com if you need additional information or
// have any questions. // have any questions.
// //
// //
// NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps! // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
@ -46,13 +46,13 @@
// as dependencies. Header files named H.inline.hpp generally contain // as dependencies. Header files named H.inline.hpp generally contain
// bodies for inline functions declared in H.hpp. // bodies for inline functions declared in H.hpp.
// //
// NOTE: Files that use the token "generate_platform_dependent_include" // NOTE: Files that use the token "generate_platform_dependent_include"
// are expected to contain macro references like <os>, <arch_model>, ... and // are expected to contain macro references like <os>, <arch_model>, ... and
// makedeps has a dependency on these platform files looking like: // makedeps has a dependency on these platform files looking like:
// foo_<macro>.trailing_string // foo_<macro>.trailing_string
// (where "trailing_string" can be any legal filename string but typically // (where "trailing_string" can be any legal filename string but typically
// is "hpp" or "inline.hpp"). // is "hpp" or "inline.hpp").
// //
// The dependency in makedeps (and enforced) is that an underscore // The dependency in makedeps (and enforced) is that an underscore
// will precede the macro invocation. Note that this restriction // will precede the macro invocation. Note that this restriction
// is only enforced on filenames that have the dependency token // is only enforced on filenames that have the dependency token
@ -148,12 +148,6 @@ allocation.hpp globals.hpp
allocation.inline.hpp os.hpp allocation.inline.hpp os.hpp
allocationStats.cpp allocationStats.hpp
allocationStats.hpp allocation.hpp
allocationStats.hpp gcUtil.hpp
allocationStats.hpp globalDefinitions.hpp
aprofiler.cpp aprofiler.hpp aprofiler.cpp aprofiler.hpp
aprofiler.cpp collectedHeap.inline.hpp aprofiler.cpp collectedHeap.inline.hpp
aprofiler.cpp oop.inline.hpp aprofiler.cpp oop.inline.hpp
@ -720,6 +714,11 @@ ciObjArray.hpp ciArray.hpp
ciObjArray.hpp ciClassList.hpp ciObjArray.hpp ciClassList.hpp
ciObjArray.hpp objArrayOop.hpp ciObjArray.hpp objArrayOop.hpp
ciObjArray.cpp ciObjArray.hpp
ciObjArray.cpp ciNullObject.hpp
ciObjArray.cpp ciUtilities.hpp
ciObjArray.cpp objArrayOop.hpp
ciObjArrayKlass.cpp ciInstanceKlass.hpp ciObjArrayKlass.cpp ciInstanceKlass.hpp
ciObjArrayKlass.cpp ciObjArrayKlass.hpp ciObjArrayKlass.cpp ciObjArrayKlass.hpp
ciObjArrayKlass.cpp ciObjArrayKlassKlass.hpp ciObjArrayKlass.cpp ciObjArrayKlassKlass.hpp
@ -1935,7 +1934,7 @@ icache_<arch>.hpp generate_platform_dependent_include
init.cpp bytecodes.hpp init.cpp bytecodes.hpp
init.cpp collectedHeap.hpp init.cpp collectedHeap.hpp
init.cpp handles.inline.hpp init.cpp handles.inline.hpp
init.cpp icBuffer.hpp init.cpp icBuffer.hpp
init.cpp icache.hpp init.cpp icache.hpp
init.cpp init.hpp init.cpp init.hpp
@ -3068,6 +3067,7 @@ oopMap.hpp vmreg.hpp
oopMapCache.cpp allocation.inline.hpp oopMapCache.cpp allocation.inline.hpp
oopMapCache.cpp handles.inline.hpp oopMapCache.cpp handles.inline.hpp
oopMapCache.cpp jvmtiRedefineClassesTrace.hpp
oopMapCache.cpp oop.inline.hpp oopMapCache.cpp oop.inline.hpp
oopMapCache.cpp oopMapCache.hpp oopMapCache.cpp oopMapCache.hpp
oopMapCache.cpp resourceArea.hpp oopMapCache.cpp resourceArea.hpp

View File

@ -532,6 +532,10 @@ void OopMapCache::flush_obsolete_entries() {
if (!_array[i].is_empty() && _array[i].method()->is_old()) { if (!_array[i].is_empty() && _array[i].method()->is_old()) {
// Cache entry is occupied by an old redefined method and we don't want // Cache entry is occupied by an old redefined method and we don't want
// to pin it down so flush the entry. // to pin it down so flush the entry.
RC_TRACE(0x08000000, ("flush: %s(%s): cached entry @%d",
_array[i].method()->name()->as_C_string(),
_array[i].method()->signature()->as_C_string(), i));
_array[i].flush(); _array[i].flush();
} }
} }
@ -577,6 +581,15 @@ void OopMapCache::lookup(methodHandle method,
// Entry is not in hashtable. // Entry is not in hashtable.
// Compute entry and return it // Compute entry and return it
if (method->should_not_be_cached()) {
// It is either not safe or not a good idea to cache this methodOop
// at this time. We give the caller of lookup() a copy of the
// interesting info via parameter entry_for, but we don't add it to
// the cache. See the gory details in methodOop.cpp.
compute_one_oop_map(method, bci, entry_for);
return;
}
// First search for an empty slot // First search for an empty slot
for(i = 0; i < _probe_depth; i++) { for(i = 0; i < _probe_depth; i++) {
entry = entry_at(probe + i); entry = entry_at(probe + i);
@ -584,12 +597,6 @@ void OopMapCache::lookup(methodHandle method,
entry->fill(method, bci); entry->fill(method, bci);
entry_for->resource_copy(entry); entry_for->resource_copy(entry);
assert(!entry_for->is_empty(), "A non-empty oop map should be returned"); assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
if (method->is_old()) {
// The caller of lookup() will receive a copy of the interesting
// info via entry_for, but we don't keep an old redefined method in
// the cache to avoid pinning down the method.
entry->flush();
}
return; return;
} }
} }
@ -623,13 +630,6 @@ void OopMapCache::lookup(methodHandle method,
} }
assert(!entry_for->is_empty(), "A non-empty oop map should be returned"); assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
if (method->is_old()) {
// The caller of lookup() will receive a copy of the interesting
// info via entry_for, but we don't keep an old redefined method in
// the cache to avoid pinning down the method.
entry->flush();
}
return; return;
} }

View File

@ -51,7 +51,7 @@ CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
_whole_heap(whole_heap), _whole_heap(whole_heap),
_guard_index(cards_required(whole_heap.word_size()) - 1), _guard_index(cards_required(whole_heap.word_size()) - 1),
_last_valid_index(_guard_index - 1), _last_valid_index(_guard_index - 1),
_page_size(os::page_size_for_region(_guard_index + 1, _guard_index + 1, 1)), _page_size(os::vm_page_size()),
_byte_map_size(compute_byte_map_size()) _byte_map_size(compute_byte_map_size())
{ {
_kind = BarrierSet::CardTableModRef; _kind = BarrierSet::CardTableModRef;
@ -196,8 +196,8 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
assert(_whole_heap.contains(new_region), assert(_whole_heap.contains(new_region),
"attempt to cover area not in reserved area"); "attempt to cover area not in reserved area");
debug_only(verify_guard();) debug_only(verify_guard();)
int ind = find_covering_region_by_base(new_region.start()); int const ind = find_covering_region_by_base(new_region.start());
MemRegion old_region = _covered[ind]; MemRegion const old_region = _covered[ind];
assert(old_region.start() == new_region.start(), "just checking"); assert(old_region.start() == new_region.start(), "just checking");
if (new_region.word_size() != old_region.word_size()) { if (new_region.word_size() != old_region.word_size()) {
// Commit new or uncommit old pages, if necessary. // Commit new or uncommit old pages, if necessary.
@ -205,21 +205,21 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
// Extend the end of this _committed region // Extend the end of this _committed region
// to cover the end of any lower _committed regions. // to cover the end of any lower _committed regions.
// This forms overlapping regions, but never interior regions. // This forms overlapping regions, but never interior regions.
HeapWord* max_prev_end = largest_prev_committed_end(ind); HeapWord* const max_prev_end = largest_prev_committed_end(ind);
if (max_prev_end > cur_committed.end()) { if (max_prev_end > cur_committed.end()) {
cur_committed.set_end(max_prev_end); cur_committed.set_end(max_prev_end);
} }
// Align the end up to a page size (starts are already aligned). // Align the end up to a page size (starts are already aligned).
jbyte* new_end = byte_after(new_region.last()); jbyte* const new_end = byte_after(new_region.last());
HeapWord* new_end_aligned = HeapWord* const new_end_aligned =
(HeapWord*)align_size_up((uintptr_t)new_end, _page_size); (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
assert(new_end_aligned >= (HeapWord*) new_end, assert(new_end_aligned >= (HeapWord*) new_end,
"align up, but less"); "align up, but less");
// The guard page is always committed and should not be committed over. // The guard page is always committed and should not be committed over.
HeapWord* new_end_for_commit = MIN2(new_end_aligned, _guard_region.start()); HeapWord* const new_end_for_commit = MIN2(new_end_aligned, _guard_region.start());
if (new_end_for_commit > cur_committed.end()) { if (new_end_for_commit > cur_committed.end()) {
// Must commit new pages. // Must commit new pages.
MemRegion new_committed = MemRegion const new_committed =
MemRegion(cur_committed.end(), new_end_for_commit); MemRegion(cur_committed.end(), new_end_for_commit);
assert(!new_committed.is_empty(), "Region should not be empty here"); assert(!new_committed.is_empty(), "Region should not be empty here");
@ -233,7 +233,7 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
// the cur_committed region may include the guard region. // the cur_committed region may include the guard region.
} else if (new_end_aligned < cur_committed.end()) { } else if (new_end_aligned < cur_committed.end()) {
// Must uncommit pages. // Must uncommit pages.
MemRegion uncommit_region = MemRegion const uncommit_region =
committed_unique_to_self(ind, MemRegion(new_end_aligned, committed_unique_to_self(ind, MemRegion(new_end_aligned,
cur_committed.end())); cur_committed.end()));
if (!uncommit_region.is_empty()) { if (!uncommit_region.is_empty()) {
@ -257,7 +257,7 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
} }
assert(index_for(new_region.last()) < (int) _guard_index, assert(index_for(new_region.last()) < (int) _guard_index,
"The guard card will be overwritten"); "The guard card will be overwritten");
jbyte* end = byte_after(new_region.last()); jbyte* const end = byte_after(new_region.last());
// do nothing if we resized downward. // do nothing if we resized downward.
if (entry < end) { if (entry < end) {
memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte))); memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));

View File

@ -556,10 +556,16 @@ void CardTableRS::verify() {
} }
void CardTableRS::verify_empty(MemRegion mr) { void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
if (!mr.is_empty()) { if (!mr.is_empty()) {
jbyte* cur_entry = byte_for(mr.start()); jbyte* cur_entry = byte_for(mr.start());
jbyte* limit = byte_after(mr.last()); jbyte* limit = byte_after(mr.last());
// The region mr may not start on a card boundary so
// the first card may reflect a write to the space
// just prior to mr.
if (!is_aligned(mr.start())) {
cur_entry++;
}
for (;cur_entry < limit; cur_entry++) { for (;cur_entry < limit; cur_entry++) {
guarantee(*cur_entry == CardTableModRefBS::clean_card, guarantee(*cur_entry == CardTableModRefBS::clean_card,
"Unexpected dirty card found"); "Unexpected dirty card found");

View File
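The reasoning behind the added increment: each card summarizes a fixed-size window of the heap, so when mr.start() falls mid-card the first entry also covers words just before the region and may be legitimately dirty. A sketch of the index arithmetic, assuming the usual 512-byte card (the exact size is an assumption of this example):

#include <cstdint>
#include <cstdio>

// Assumed card geometry for this example; HotSpot's default card is
// 512 bytes, and the shift below follows from that assumption.
static const uintptr_t card_size  = 512;
static const uintptr_t card_shift = 9;

static uintptr_t card_index_for(uintptr_t addr) { return addr >> card_shift; }
static bool is_card_aligned(uintptr_t addr) { return (addr & (card_size - 1)) == 0; }

int main() {
  uintptr_t region_start = 0x10140;  // deliberately mid-card
  uintptr_t first_card   = card_index_for(region_start);
  // Card first_card also covers 0x10000..0x1013f, i.e. words just before
  // the region, so an emptiness check must start at the next card.
  if (!is_card_aligned(region_start)) {
    first_card++;
  }
  printf("first card to verify: %lu\n", (unsigned long)first_card);
  return 0;
}
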

@ -126,7 +126,7 @@ public:
} }
void verify(); void verify();
void verify_empty(MemRegion mr); void verify_aligned_region_empty(MemRegion mr);
void clear(MemRegion mr) { _ct_bs.clear(mr); } void clear(MemRegion mr) { _ct_bs.clear(mr); }
void clear_into_younger(Generation* gen, bool clear_perm); void clear_into_younger(Generation* gen, bool clear_perm);

View File

@ -57,45 +57,51 @@ void CollectorPolicy::initialize_size_info() {
// User inputs from -mx and ms are aligned // User inputs from -mx and ms are aligned
_initial_heap_byte_size = align_size_up(Arguments::initial_heap_size(), _initial_heap_byte_size = align_size_up(Arguments::initial_heap_size(),
min_alignment()); min_alignment());
_min_heap_byte_size = align_size_up(Arguments::min_heap_size(), set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(),
min_alignment()); min_alignment()));
_max_heap_byte_size = align_size_up(MaxHeapSize, max_alignment()); set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));
// Check validity of heap parameters from launcher // Check validity of heap parameters from launcher
if (_initial_heap_byte_size == 0) { if (initial_heap_byte_size() == 0) {
_initial_heap_byte_size = NewSize + OldSize; set_initial_heap_byte_size(NewSize + OldSize);
} else { } else {
Universe::check_alignment(_initial_heap_byte_size, min_alignment(), Universe::check_alignment(initial_heap_byte_size(), min_alignment(),
"initial heap"); "initial heap");
} }
if (_min_heap_byte_size == 0) { if (min_heap_byte_size() == 0) {
_min_heap_byte_size = NewSize + OldSize; set_min_heap_byte_size(NewSize + OldSize);
} else { } else {
Universe::check_alignment(_min_heap_byte_size, min_alignment(), Universe::check_alignment(min_heap_byte_size(), min_alignment(),
"initial heap"); "initial heap");
} }
// Check heap parameter properties // Check heap parameter properties
if (_initial_heap_byte_size < M) { if (initial_heap_byte_size() < M) {
vm_exit_during_initialization("Too small initial heap"); vm_exit_during_initialization("Too small initial heap");
} }
// Check heap parameter properties // Check heap parameter properties
if (_min_heap_byte_size < M) { if (min_heap_byte_size() < M) {
vm_exit_during_initialization("Too small minimum heap"); vm_exit_during_initialization("Too small minimum heap");
} }
if (_initial_heap_byte_size <= NewSize) { if (initial_heap_byte_size() <= NewSize) {
// make sure there is at least some room in old space // make sure there is at least some room in old space
vm_exit_during_initialization("Too small initial heap for new size specified"); vm_exit_during_initialization("Too small initial heap for new size specified");
} }
if (_max_heap_byte_size < _min_heap_byte_size) { if (max_heap_byte_size() < min_heap_byte_size()) {
vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified"); vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
} }
if (_initial_heap_byte_size < _min_heap_byte_size) { if (initial_heap_byte_size() < min_heap_byte_size()) {
vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified"); vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
} }
if (_max_heap_byte_size < _initial_heap_byte_size) { if (max_heap_byte_size() < initial_heap_byte_size()) {
vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified"); vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
} }
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
}
} }
void CollectorPolicy::initialize_perm_generation(PermGen::Name pgnm) { void CollectorPolicy::initialize_perm_generation(PermGen::Name pgnm) {
@ -128,10 +134,26 @@ GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
// GenCollectorPolicy methods. // GenCollectorPolicy methods.
size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
size_t x = base_size / (NewRatio+1);
size_t new_gen_size = x > min_alignment() ?
align_size_down(x, min_alignment()) :
min_alignment();
return new_gen_size;
}
size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
size_t maximum_size) {
size_t alignment = min_alignment();
size_t max_minus = maximum_size - alignment;
return desired_size < max_minus ? desired_size : max_minus;
}
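
Concretely, scale_by_NewRatio_aligned() hands gen0 one of NewRatio+1 shares of the heap, rounded down to the generation alignment with a floor of one alignment unit, and bound_minus_alignment() keeps at least one alignment unit of the total for the other generation. A worked sketch with assumed values (64K alignment, NewRatio of 2):

#include <cstddef>
#include <cstdio>

static const size_t K = 1024;
static const size_t M = 1024 * K;

// Assumed alignment for the example; the real value is platform policy.
static size_t min_alignment() { return 64 * K; }

static size_t align_size_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);   // alignment must be a power of two
}

// One of NewRatio+1 shares of base_size for gen0, aligned down, with a
// floor of one alignment unit for very small heaps.
static size_t scale_by_new_ratio_aligned(size_t base_size, size_t new_ratio) {
  size_t x = base_size / (new_ratio + 1);
  return x > min_alignment() ? align_size_down(x, min_alignment())
                             : min_alignment();
}

// Cap a desired size so at least one alignment unit of the total is
// left over for the other generation.
static size_t bound_minus_alignment(size_t desired, size_t maximum) {
  size_t max_minus = maximum - min_alignment();
  return desired < max_minus ? desired : max_minus;
}

int main() {
  // A 64M heap with NewRatio=2: gen0 gets one of three shares (~21.3M),
  // rounded down to the 64K boundary, i.e. 21824K.
  size_t gen0 = scale_by_new_ratio_aligned(64 * M, 2);
  printf("gen0 share:   %zuK\n", gen0 / K);
  // Bounding against a 20M total leaves 20M minus 64K, i.e. 20416K.
  printf("bounded gen0: %zuK\n", bound_minus_alignment(gen0, 20 * M) / K);
  return 0;
}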
void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size, void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
size_t init_promo_size, size_t init_promo_size,
size_t init_survivor_size) { size_t init_survivor_size) {
double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0; const double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
_size_policy = new AdaptiveSizePolicy(init_eden_size, _size_policy = new AdaptiveSizePolicy(init_eden_size,
init_promo_size, init_promo_size,
init_survivor_size, init_survivor_size,
@ -210,74 +232,260 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment"); assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
} }
// Values set on the command line win over any ergonomically
// set command line parameters.
// Ergonomic choice of parameters are done before this
// method is called. Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generation.
void GenCollectorPolicy::initialize_size_info() { void GenCollectorPolicy::initialize_size_info() {
CollectorPolicy::initialize_size_info(); CollectorPolicy::initialize_size_info();
// Minimum sizes of the generations may be different than // min_alignment() is used for alignment within a generation.
// the initial sizes. // There is additional alignment done downstream for some
if (!FLAG_IS_DEFAULT(NewSize)) { // collectors that sometimes causes unwanted rounding up of
_min_gen0_size = NewSize; // generation sizes.
} else {
  // Determine maximum size of gen0
  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize)) {
    if (MaxNewSize < min_alignment()) {
      max_new_size = min_alignment();
    } else if (MaxNewSize >= max_heap_byte_size()) {
      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
                                     min_alignment());
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
              "greater than the entire heap (" SIZE_FORMAT "k).  A "
              "new generation size of " SIZE_FORMAT "k will be used.",
              MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, min_alignment());
    }

    // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
    // specially at this point to just use an ergonomically set
    // MaxNewSize to set max_new_size.  For cases with small
    // heaps such a policy often did not work because the MaxNewSize
    // was larger than the entire heap.  The interpretation given
    // to ergonomically set flags is that the flags are set
    // by different collectors for their own special needs but
    // are not allowed to badly shape the heap.  This allows the
    // different collectors to decide what's best for themselves
    // without having to factor in the overall heap shape.  It
    // can be the case in the future that the collectors would
    // only make "wise" ergonomics choices and this policy could
    // just accept those choices.  The choices currently made are
    // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum sizes.

  if (max_heap_byte_size() == min_heap_byte_size()) {
    // The maximum and minimum heap sizes are the same so
    // the generation's minimum and initial must be the
    // same as its maximum.
    set_min_gen0_size(max_new_size);
    set_initial_gen0_size(max_new_size);
    set_max_gen0_size(max_new_size);
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it.  If it is used, also use it
      // to set the initial size.  Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values.  If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
                            NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
             NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    set_initial_gen0_size(desired_new_size);
    set_max_gen0_size(max_new_size);

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    set_min_gen0_size(
      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
    set_initial_gen0_size(
      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
    set_max_gen0_size(
      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
    set_initial_gen0_size(
      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
      SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
      min_gen0_size(), initial_gen0_size(), max_gen0_size());
  }
}
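The NewRatio arithmetic above reduces to base_size / (NewRatio + 1), aligned down to min_alignment(). A quick standalone sketch of that arithmetic, not part of this changeset; the NewRatio, alignment, and heap size values below are assumptions for illustration:

#include <cstddef>
#include <cstdio>

// Stand-in for HotSpot's align_size_down(); alignment must be a power of two.
static size_t align_size_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}

int main() {
  const size_t K = 1024, M = 1024 * K;
  const size_t NewRatio      = 2;       // assumed: old gen is 2x young gen
  const size_t min_alignment = 64 * K;  // assumed alignment for illustration
  const size_t max_heap      = 96 * M;  // e.g. -Xmx96m

  // scale_by_NewRatio_aligned(): result = base_size / (NewRatio + 1),
  // rounded down to min_alignment.
  size_t max_new_size = align_size_down(max_heap / (NewRatio + 1),
                                        min_alignment);
  printf("maximum gen0 = %zuM\n", max_new_size / M);  // prints 32M
  return 0;
}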
// Call this method during the sizing of the gen1 to make
// adjustments to gen0 because of gen1 sizing policy. gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied. Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments. The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     size_t heap_size,
                                                     size_t min_gen1_size) {
  bool result = false;
  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if (((*gen0_size_ptr + OldSize) > heap_size) &&
        (heap_size - min_gen1_size) >= min_alignment()) {
      // Adjust gen0 down to accommodate OldSize
      *gen0_size_ptr = heap_size - min_gen1_size;
      *gen0_size_ptr =
        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
             min_alignment());
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = heap_size - *gen0_size_ptr;
      *gen1_size_ptr =
        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
             min_alignment());
    }
  }
  return result;
}
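As a worked illustration of the adjustment (the sizes below are assumptions, not values from this change): with a 64M heap, a 48M gen0 and a 32M gen1, gen0 + OldSize overflows the heap, so gen0 is cut back to heap_size - min_gen1_size and the method reports that an adjustment was made. A standalone sketch of the same logic:

#include <cstdio>
#include <cstddef>

static const size_t OldSize   = 32 * 1024 * 1024;  // assumed -XX:OldSize=32m
static const size_t kMinAlign = 64 * 1024;         // assumed min_alignment()

static size_t align_size_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}
static size_t max2(size_t a, size_t b) { return a > b ? a : b; }

// Same shape as TwoGenerationCollectorPolicy::adjust_gen0_sizes above.
static bool adjust_gen0_sizes(size_t* gen0, size_t* gen1,
                              size_t heap_size, size_t min_gen1_size) {
  bool result = false;
  if (*gen1 + *gen0 > heap_size) {
    if ((*gen0 + OldSize > heap_size) &&
        (heap_size - min_gen1_size) >= kMinAlign) {
      // Shrink gen0 so that min_gen1_size (here, OldSize) still fits.
      *gen0 = max2(align_size_down(heap_size - min_gen1_size, kMinAlign),
                   kMinAlign);
      result = true;
    } else {
      // Otherwise shrink gen1 to whatever is left after gen0.
      *gen1 = max2(align_size_down(heap_size - *gen0, kMinAlign), kMinAlign);
    }
  }
  return result;
}

int main() {
  const size_t M = 1024 * 1024;
  size_t gen0 = 48 * M, gen1 = 32 * M, heap = 64 * M;
  bool adjusted = adjust_gen0_sizes(&gen0, &gen1, heap, OldSize);
  printf("adjusted=%d gen0=%zuM gen1=%zuM\n", adjusted, gen0 / M, gen1 / M);
  // prints: adjusted=1 gen0=32M gen1=32M
  return 0;
}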
// Minimum sizes of the generations may be different than
// the initial sizes.  An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms. Issue a warning
// but allow the values to pass.
void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flags exist
  // for setting the gen1 maximum.
  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
  _max_gen1_size =
    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
         min_alignment());
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size).  In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    assert(min_heap_byte_size() > _min_gen0_size,
           "gen0 has an unexpected minimum size");
    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
    set_min_gen1_size(
      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
           min_alignment()));
    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
    set_initial_gen1_size(
      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
           min_alignment()));
  } else {
    // It's been explicitly set on the command line.  Use the
    // OldSize and then determine the consequences.
    set_min_gen1_size(OldSize);
    set_initial_gen1_size(OldSize);

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
        min_heap_byte_size()) {
      warning("Inconsistency between minimum heap size and minimum "
              "generation sizes: using minimum heap = " SIZE_FORMAT,
              min_heap_byte_size());
    }
    if (OldSize > _max_gen1_size) {
      warning("Inconsistency between maximum heap size and maximum "
              "generation sizes: using maximum heap = " SIZE_FORMAT
              " -XX:OldSize flag is being ignored",
              max_heap_byte_size());
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          min_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          initial_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
  }
  // Enforce the maximum gen1 size.
  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));

  // Check that min gen1 <= initial gen1 <= max gen1
  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 "
      SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
      min_gen1_size(), initial_gen1_size(), max_gen1_size());
  }
}
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,

View File

@ -82,8 +82,11 @@ class CollectorPolicy : public CHeapObj {
  size_t max_alignment() { return _max_alignment; }

  size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
  void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; }
  size_t max_heap_byte_size() { return _max_heap_byte_size; }
  void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; }
  size_t min_heap_byte_size() { return _min_heap_byte_size; }
  void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; }

  enum Name {
    CollectorPolicyKind,
@ -182,8 +185,24 @@ class GenCollectorPolicy : public CollectorPolicy {
  // compute max heap alignment
  size_t compute_max_alignment();

  // Scale the base_size by NewRatio according to
  //     result = base_size / (NewRatio + 1)
  // and align by min_alignment()
  size_t scale_by_NewRatio_aligned(size_t base_size);

  // Bound the value by the given maximum minus the
  // min_alignment.
  size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);

 public:
// Accessors
size_t min_gen0_size() { return _min_gen0_size; }
void set_min_gen0_size(size_t v) { _min_gen0_size = v; }
size_t initial_gen0_size() { return _initial_gen0_size; }
void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; }
size_t max_gen0_size() { return _max_gen0_size; }
void set_max_gen0_size(size_t v) { _max_gen0_size = v; }
  virtual int number_of_generations() = 0;

  virtual GenerationSpec **generations() {
@ -236,6 +255,14 @@ class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
  void initialize_generations() { ShouldNotReachHere(); }

 public:
// Accessors
size_t min_gen1_size() { return _min_gen1_size; }
void set_min_gen1_size(size_t v) { _min_gen1_size = v; }
size_t initial_gen1_size() { return _initial_gen1_size; }
void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; }
size_t max_gen1_size() { return _max_gen1_size; }
void set_max_gen1_size(size_t v) { _max_gen1_size = v; }
  // Inherited methods
  TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
@ -246,6 +273,10 @@ class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::TwoGenerationCollectorPolicyKind;
  }
  // Returns true if gen0 sizes were adjusted
bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
size_t heap_size, size_t min_gen1_size);
};

class MarkSweepPolicy : public TwoGenerationCollectorPolicy {

View File

@ -26,9 +26,27 @@
#include "incls/_compactingPermGenGen.cpp.incl" #include "incls/_compactingPermGenGen.cpp.incl"
// An ObjectClosure helper: Recursively adjust all pointers in an object
// and all objects referenced by it.  Clear marks on objects in order to
// prevent visiting any object twice.  This helper is used when the
// RedefineClasses() API has been called.
class AdjustSharedObjectClosure : public ObjectClosure {
public:
void do_object(oop obj) {
if (obj->is_shared_readwrite()) {
if (obj->mark()->is_marked()) {
obj->init_mark(); // Don't revisit this object.
obj->adjust_pointers(); // Adjust this object's references.
}
}
}
};
// An OopClosure helper: Recursively adjust all pointers in an object
// and all objects referenced by it.  Clear marks on objects in order
// to prevent visiting any object twice.
class RecursiveAdjustSharedObjectClosure : public OopClosure {
 public:
@ -274,15 +292,34 @@ CompactingPermGenGen::CompactingPermGenGen(ReservedSpace rs,
// objects in the space will page in more objects than we need.
// Instead, use the system dictionary as strong roots into the read
// write space.
//
// If a RedefineClasses() call has been made, then we have to iterate
// over the entire shared read-write space in order to find all the
// objects that need to be forwarded. For example, it is possible for
// an nmethod to be found and marked in GC phase-1 only for the nmethod
// to be freed by the time we reach GC phase-3. The underlying method
// is still marked, but we can't (easily) find it in GC phase-3 so we
// blow up in GC phase-4. With RedefineClasses() we want replaced code
// (EMCP or obsolete) to go away (i.e., be collectible) once it is no
// longer being executed by any thread so we keep minimal attachments
// to the replaced code. However, we can't guarantee when those EMCP
// or obsolete methods will be collected so they may still be out there
// even after we've severed our minimal attachments.
void CompactingPermGenGen::pre_adjust_pointers() {
  if (spec()->enable_shared_spaces()) {
    if (JvmtiExport::has_redefined_a_class()) {
      // RedefineClasses() requires a brute force approach
      AdjustSharedObjectClosure blk;
      rw_space()->object_iterate(&blk);
    } else {
      RecursiveAdjustSharedObjectClosure blk;
      Universe::oops_do(&blk);
      StringTable::oops_do(&blk);
      SystemDictionary::always_strong_classes_do(&blk);
      TraversePlaceholdersClosure tpc;
      SystemDictionary::placeholders_do(&tpc);
    }
  }
}
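For readers unfamiliar with HotSpot closures, the brute-force branch above walks every object in the read-write space and applies a visitor. A standalone sketch of the ObjectClosure pattern, with hypothetical Object and Space types standing in for the VM's:

#include <vector>

struct Object { bool marked; };

struct ObjectClosure {                    // visitor interface
  virtual ~ObjectClosure() {}
  virtual void do_object(Object* obj) = 0;
};

struct Space {
  std::vector<Object*> objects;
  // Equivalent of rw_space()->object_iterate(&blk): visit everything,
  // leaving it to the closure to filter (e.g. by mark bits).
  void object_iterate(ObjectClosure* cl) {
    for (size_t i = 0; i < objects.size(); i++) {
      cl->do_object(objects[i]);
    }
  }
};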

View File

@ -91,8 +91,15 @@ public:
  virtual void verify() = 0;
  // Verify that the remembered set has no entries for
  // the heap interval denoted by mr.  If there are any
  // alignment constraints on the remembered set, only the
  // part of the region that is aligned is checked.
  //
  //   alignment boundaries
  //   +--------+-------+--------+-------+
  //         [ region mr              )
  //            [ part checked   )
  virtual void verify_aligned_region_empty(MemRegion mr) = 0;
  // If appropriate, print some information about the remset on "tty".
  virtual void print() {}
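The diagram above describes clipping mr to alignment boundaries before checking: round the start up and the end down. A standalone sketch of that clipping, with a hypothetical Region type and the alignment left as a parameter (the actual constraint comes from the concrete remembered set):

#include <cstdint>

struct Region {           // hypothetical stand-in for MemRegion
  uintptr_t start;        // inclusive
  uintptr_t end;          // exclusive
  bool is_empty() const { return start >= end; }
};

// Keep only the part of r that lies on alignment boundaries.
static Region aligned_part(Region r, uintptr_t alignment) {
  Region part;
  part.start = (r.start + alignment - 1) & ~(alignment - 1);  // round up
  part.end   = r.end & ~(alignment - 1);                      // round down
  if (part.start > part.end) part.start = part.end;  // nothing aligned inside
  return part;
}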

View File

@ -102,8 +102,9 @@ bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  const size_t page_size = os::can_execute_large_page_memory() ?
      os::page_size_for_region(committed_size, reserved_size, 8) :
      os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(reserved_size, r_align);
@ -111,7 +112,7 @@ bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);
  ReservedSpace rs(r_size, rs_align, rs_align > 0);
  os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
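The change above avoids asking for a large page unless the platform can execute code out of large-page memory. A rough standalone sketch of that decision; the page sizes are assumptions, and page_size_for_region here is a simplified stand-in for the real function (which weighs committed size, reserved size, and a minimum page count):

#include <cstddef>

static const size_t kSmallPage = 4 * 1024;          // assumed default page
static const size_t kLargePage = 2 * 1024 * 1024;   // assumed large page

// Simplified stand-in: use a large page only if the region still spans
// at least min_pages pages at that size.
static size_t page_size_for_region(size_t committed, size_t reserved,
                                   size_t min_pages) {
  size_t smallest = committed < reserved ? committed : reserved;
  return (smallest / kLargePage >= min_pages) ? kLargePage : kSmallPage;
}

static size_t code_heap_page_size(bool can_exec_large_pages,
                                  size_t committed, size_t reserved) {
  return can_exec_large_pages
      ? page_size_for_region(committed, reserved, 8)
      : kSmallPage;  // plays the role of os::vm_page_size()
}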

View File

@ -65,7 +65,7 @@ void KlassInfoEntry::print_on(outputStream* st) const {
name = "<no name>"; name = "<no name>";
} }
// simplify the formatting (ILP32 vs LP64) - always cast the numbers to 64-bit // simplify the formatting (ILP32 vs LP64) - always cast the numbers to 64-bit
st->print_cr("%13" FORMAT64_MODIFIER "d %13" FORMAT64_MODIFIER "u %s", st->print_cr(INT64_FORMAT_W(13) " " UINT64_FORMAT_W(13) " %s",
(jlong) _instance_count, (jlong) _instance_count,
(julong) _instance_words * HeapWordSize, (julong) _instance_words * HeapWordSize,
name); name);
@ -80,7 +80,10 @@ KlassInfoEntry* KlassInfoBucket::lookup(const klassOop k) {
elt = elt->next(); elt = elt->next();
} }
elt = new KlassInfoEntry(k, list()); elt = new KlassInfoEntry(k, list());
set_list(elt); // We may be out of space to allocate the new entry.
if (elt != NULL) {
set_list(elt);
}
return elt; return elt;
} }
@ -103,21 +106,25 @@ void KlassInfoBucket::empty() {
}

KlassInfoTable::KlassInfoTable(int size, HeapWord* ref) {
  _size = 0;
  _ref = ref;
  _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, size);
  if (_buckets != NULL) {
    _size = size;
    for (int index = 0; index < _size; index++) {
      _buckets[index].initialize();
    }
  }
}
KlassInfoTable::~KlassInfoTable() {
  if (_buckets != NULL) {
    for (int index = 0; index < _size; index++) {
      _buckets[index].empty();
    }
    FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets);
    _size = 0;
  }
}
uint KlassInfoTable::hash(klassOop p) {
@ -127,19 +134,32 @@ uint KlassInfoTable::hash(klassOop p) {
KlassInfoEntry* KlassInfoTable::lookup(const klassOop k) {
  uint idx = hash(k) % _size;
  assert(_buckets != NULL, "Allocation failure should have been caught");
  KlassInfoEntry* e = _buckets[idx].lookup(k);
  // Lookup may fail if this is a new klass for which we
  // could not allocate space for a new entry.
  assert(e == NULL || k == e->klass(), "must be equal");
  return e;
}
// Return false if the entry could not be recorded on account
// of running out of space required to create a new entry.
bool KlassInfoTable::record_instance(const oop obj) {
  klassOop k = obj->klass();
  KlassInfoEntry* elt = lookup(k);
  // elt may be NULL if it's a new klass for which we
  // could not allocate space for a new entry in the hashtable.
  if (elt != NULL) {
    elt->set_count(elt->count() + 1);
    elt->set_words(elt->words() + obj->size());
    return true;
  } else {
    return false;
  }
}
void KlassInfoTable::iterate(KlassInfoClosure* cic) {
  assert(_size == 0 || _buckets != NULL,
         "Allocation failure should have been caught");
  for (int index = 0; index < _size; index++) {
    _buckets[index].iterate(cic);
  }
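The pattern running through this hunk is a table that tolerates a failed C-heap allocation in its constructor: record zero capacity on failure and have every later operation degrade gracefully instead of crashing. A minimal standalone sketch of the same idea, with illustrative types in place of the VM's:

#include <cstddef>
#include <cstdlib>

struct Bucket { int count; void initialize() { count = 0; } };

struct Table {
  Bucket* _buckets;
  int     _size;

  Table(int size) : _buckets(NULL), _size(0) {
    _buckets = static_cast<Bucket*>(malloc(size * sizeof(Bucket)));
    if (_buckets != NULL) {
      _size = size;                       // claim only the capacity we got
      for (int i = 0; i < _size; i++) {
        _buckets[i].initialize();
      }
    }
  }
  bool allocation_failed() const { return _buckets == NULL; }
  bool record(int idx) {                  // mirrors record_instance()
    if (_buckets == NULL) return false;   // report the miss to the caller
    _buckets[idx % _size].count++;
    return true;
  }
  ~Table() { free(_buckets); }            // free(NULL) is a no-op
};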
@ -176,7 +196,7 @@ void KlassInfoHisto::print_elements(outputStream* st) const {
total += elements()->at(i)->count(); total += elements()->at(i)->count();
totalw += elements()->at(i)->words(); totalw += elements()->at(i)->words();
} }
st->print_cr("Total %13" FORMAT64_MODIFIER "d %13" FORMAT64_MODIFIER "u", st->print_cr("Total " INT64_FORMAT_W(13) " " UINT64_FORMAT_W(13),
total, totalw * HeapWordSize); total, totalw * HeapWordSize);
} }
@ -199,12 +219,18 @@ class HistoClosure : public KlassInfoClosure {
class RecordInstanceClosure : public ObjectClosure {
 private:
  KlassInfoTable* _cit;
  size_t _missed_count;
 public:
  RecordInstanceClosure(KlassInfoTable* cit) :
    _cit(cit), _missed_count(0) {}

  void do_object(oop obj) {
    if (!_cit->record_instance(obj)) {
      _missed_count++;
    }
  }

  size_t missed_count() { return _missed_count; }
};
void HeapInspection::heap_inspection(outputStream* st) {
@ -230,21 +256,32 @@ void HeapInspection::heap_inspection(outputStream* st) {
    ShouldNotReachHere(); // Unexpected heap kind for this op
  }
  // Collect klass instance info
  KlassInfoTable cit(KlassInfoTable::cit_size, ref);
  if (!cit.allocation_failed()) {
    // Iterate over objects in the heap
    RecordInstanceClosure ric(&cit);
    Universe::heap()->object_iterate(&ric);

    // Report if certain classes are not counted because of
    // running out of C-heap for the histogram.
    size_t missed_count = ric.missed_count();
    if (missed_count != 0) {
      st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
                   " total instances in data below",
                   missed_count);
    }
    // Sort and print klass instance info
    KlassInfoHisto histo("\n"
     " num     #instances         #bytes  class name\n"
     "----------------------------------------------",
      KlassInfoHisto::histo_initial_size);
    HistoClosure hc(&histo);
    cit.iterate(&hc);
    histo.sort();
    histo.print_on(st);
  } else {
    st->print_cr("WARNING: Ran out of C-heap; histogram not generated");
  }
  st->flush();

  if (Universe::heap()->kind() == CollectedHeap::GenCollectedHeap) {

Some files were not shown because too many files have changed in this diff